from asyncio.events import AbstractEventLoop
import inspect
from typing import Any, List, Tuple
from xml.etree.ElementTree import Element
from aiohttp import web
import xml.etree.ElementTree as ET
import socket
XMLRPCValue = Any #TODO FIXME
def parse_args(params: List[Element]):
args: List[Any] = []
for p in params:
if p.tag == "int" or p.tag == "i4":
assert p.text
args.append(int(p.text))
elif p.tag == "string":
args.append(p.text)
elif p.tag == "array":
data = p.find("data")
            assert data is not None
args.append(parse_args([e[0] for e in data.findall("value")]))
# TODO implement the other xmlrpc value types
return args
def to_xml(value: Any) -> Element:
v = ET.Element("value")
if isinstance(value, int):
i = ET.SubElement(v, "int")
i.text = str(value)
elif isinstance(value, str):
i = ET.SubElement(v, "string")
i.text = value
elif isinstance(value, list) or isinstance(value, tuple):
arr = ET.SubElement(v, "array")
data = ET.SubElement(arr, "data")
for e in value:
data.append(to_xml(e))
# TODO implement the other xmlrpc value types
return v
class XMLRPCServer:
loop: AbstractEventLoop
addr: Tuple[str, int]
def __init__(self, loop: AbstractEventLoop) -> None:
self.loop = loop
def create_server(self):
self.loop.run_until_complete(self.start_server())
async def start_server(self):
self.server = web.Server(self.handler)
host_name = socket.gethostname()
self.loop_server = await self.loop.create_server(self.server, "0.0.0.0", 0)
_, port = self.loop_server.sockets[0].getsockname()
self.addr = (host_name, port)
print("Started the XMLRPC endpoint at address:", self.addr)
async def handler(self, request):
root = ET.fromstring(await request.text())
method = root.find("methodName").text
params = [e.find("value")[0] for e in root.find("params").findall("param")]
args = parse_args(params)
fun = getattr(self, method)
if inspect.iscoroutinefunction(fun):
ret = await fun(*args)
else:
ret = fun(*args)
response = ET.Element("methodResponse")
responseParams = ET.SubElement(response, "params")
try:
for p in ret:
param = ET.SubElement(responseParams, "param")
param.append(to_xml(p))
except TypeError:
param = ET.SubElement(responseParams, "param")
param.append(to_xml(ret))
return web.Response(body=ET.tostring(response))
@property
def uri(self):
addr, port = self.addr
return f"http://{addr}:{port}"
# TODO add default function which can return a proper rpc error instead of raising an exception
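# A minimal usage sketch (assumed, not from the original module): a subclass exposes
# plain methods that handler() dispatches to via <methodName>, and the to_xml /
# parse_args helpers round-trip the currently supported value types.
class DemoServer(XMLRPCServer):
    def add(self, a: int, b: int) -> int:
        return a + b

if __name__ == "__main__":
    # Round-trip check of the XML-RPC value helpers: encode, then decode again.
    encoded = [to_xml(v)[0] for v in (42, "hello", [1, 2, 3])]
    print(parse_args(encoded))  # -> [42, 'hello', [1, 2, 3]]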
|
python
|
from abc import ABC
from abc import abstractmethod
from dataclasses import dataclass
from dataclasses import field
from io import IOBase
from numpy import integer
from syntax import SyntaxBlock
from syntax import SyntaxStatement
from syntax import SyntaxTerm
from typing import Dict
from typing import List
from typing import Optional
from typing import Type
from typing import Union
@dataclass
class MachineValue:
pass
@dataclass
class MachineBlob(MachineValue):
value: bytearray
@dataclass
class MachineNumber(MachineValue):
value: integer
value_type: Type = field(init=False)
def __post_init__(self):
self.value_type = type(self.value)
assert issubclass(
self.value_type, integer
), self.value_type
@dataclass
class MachineStream(MachineValue):
value: IOBase
@dataclass
class MachineExpressionStack:
values: List[MachineValue]
def push(self, value: MachineValue) -> None:
self.values.append(value)
def push_many(self, values: List[MachineValue]) -> None:
for value in values:
self.push(value)
def pop(self) -> MachineValue:
return self.values.pop()
    def pop_many(self, count: int) -> List[MachineValue]:
assert len(self) >= count, (self, count)
values = []
for _ in range(count):
values.append(self.values.pop())
return values
def __len__(self) -> int:
return len(self.values)
@dataclass
class MachineCall(ABC):
@abstractmethod
def __call__(
self, frame_stack: "MachineFrameStack"
) -> None:
pass
@dataclass
class MachineBinding:
name: str
value_or_call: Union[MachineValue, MachineCall]
@property
def value(self):
assert isinstance(self.value_or_call, MachineValue)
return self.value_or_call
@property
def call(self):
assert isinstance(self.value_or_call, MachineCall)
return self.value_or_call
@dataclass
class MachineEnvironment:
bindings: Dict[str, Union[MachineValue, MachineCall]]
base: Optional["MachineEnvironment"]
def extend(
self,
bindings: Optional[
Dict[str, Union[MachineValue, MachineCall]]
] = None,
) -> "MachineEnvironment":
return MachineEnvironment(bindings or {}, base=self)
def __contains__(self, key: str) -> bool:
if key in self.bindings:
return True
elif self.base:
return key in self.base
else:
return False
def __getitem__(
self, key: str
) -> Union[MachineValue, MachineCall]:
value = self.bindings.get(key)
if value is None:
if self.base:
return self.base[key]
else:
raise KeyError(key)
else:
return value
def __setitem__(
self,
key: str,
value: Union[MachineValue, MachineCall],
) -> None:
self.bindings[key] = value
@staticmethod
def from_bindings(
bindings: List[MachineBinding]
) -> "MachineEnvironment":
return MachineEnvironment(
bindings={
binding.name: binding.value_or_call
for binding in bindings
},
base=None,
)
@dataclass
class MachineInstructionPointer:
block: SyntaxBlock
statement_index: int
term_index: int
@dataclass
class MachineFrame:
instruction_pointer: MachineInstructionPointer
expression_stack: MachineExpressionStack
environment: MachineEnvironment
@property
def block(self) -> SyntaxBlock:
return self.instruction_pointer.block
@property
def statement_index(self) -> int:
return self.instruction_pointer.statement_index
@statement_index.setter
    def statement_index(self, value) -> None:
self.instruction_pointer.statement_index = value
@property
def statement(self) -> SyntaxStatement:
return self.block.statements[self.statement_index]
@property
def term_index(self) -> int:
return self.instruction_pointer.term_index
@term_index.setter
    def term_index(self, value) -> None:
self.instruction_pointer.term_index = value
@property
def term(self) -> SyntaxTerm:
return self.statement.terms[self.term_index]
@dataclass
class MachineFrameStack:
frames: List[MachineFrame]
def push(self, frame: MachineFrame) -> None:
self.frames.append(frame)
    def pop(self) -> MachineFrame:
return self.frames.pop()
def __bool__(self) -> bool:
return bool(self.frames)
def __len__(self) -> int:
return len(self.frames)
@property
def current(self) -> MachineFrame:
assert self.frames
return self.frames[-1]
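# A small usage sketch (assumed, not from the original module): the expression
# stack and environment working together with numpy-backed MachineNumber values.
if __name__ == "__main__":
    from numpy import int64

    stack = MachineExpressionStack(values=[])
    stack.push_many([MachineNumber(int64(1)), MachineNumber(int64(2))])
    print(stack.pop_many(2))  # most recently pushed value comes out first

    env = MachineEnvironment.from_bindings(
        [MachineBinding("answer", MachineNumber(int64(42)))]
    )
    child = env.extend()
    print("answer" in child, child["answer"].value)  # True 42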
|
python
|
from flask import render_template_string
from datetime import datetime
from actions.action import BaseAction
from models import ISTHISLEGIT_SVC
from models.email import EmailResponse
from models.event import EventReportResponded
from models.template import Template
from services.email import email_provider
def get_templates(**kwargs):
""" Gets the list of templates that are accessible to our current user. """
templates = Template.domain_query(kwargs.get('domain')).fetch()
return [template.name for template in templates]
class SendTemplateAction(BaseAction):
"""
Sends a template specified by the provided template_name to the
user who sent the initial report.
"""
action_id = 'send_template'
name = "Send Template"
description = "Sends a template in response to the report"
options = {
"template_name": {
"name": "Template Name",
"choiceFunc": get_templates
}
}
def execute(self, report, options):
template_name = options.get('template_name')
if not template_name:
return
base_query = Template.domain_query(report.reported_domain)
template = Template.get_by_name(base_query, template_name)
if not template:
return
subject = render_template_string(template.subject, report=report)
body = render_template_string(template.text, report=report)
response = EmailResponse(
responder=ISTHISLEGIT_SVC,
sender=template.sender,
content=body,
subject=subject)
try:
response_key = response.put()
report.responses.append(response_key)
if not report.date_responded:
report.date_responded = datetime.now()
event_key = EventReportResponded(
response=response, report=report).put()
report.events.append(event_key)
report.put()
email_provider.send(
to=report.reported_by,
sender=response.sender,
subject=subject,
body=body)
        except Exception:
            # Swallow send/storage failures so the action itself does not crash.
            return
|
python
|
from account.conf import settings
from account.models import Account
def account(request):
ctx = {
"account": Account.for_request(request),
"ACCOUNT_OPEN_SIGNUP": settings.ACCOUNT_OPEN_SIGNUP,
}
return ctx
|
python
|
def create_info_2dfaces(cellid:'int[:,:]', nodeid:'int[:,:]', namen:'int[:]', vertex:'double[:,:]',
centerc:'double[:,:]', nbfaces:'int', normalf:'double[:,:]', mesuref:'double[:]',
centerf:'double[:,:]', namef:'int[:]'):
from numpy import double, zeros, sqrt
norm = zeros(3, dtype=double)
snorm = zeros(3, dtype=double)
    # Boundary faces (1,2,3,4); interior faces are 0 -- TO CHECK
for i in range(nbfaces):
if (cellid[i][1] == -1 and cellid[i][1] != -10):
if namen[nodeid[i][0]] == namen[nodeid[i][1]]:
namef[i] = namen[nodeid[i][0]]
elif ((namen[nodeid[i][0]] == 3 and namen[nodeid[i][1]] != 0) or
(namen[nodeid[i][0]] != 0 and namen[nodeid[i][1]] == 3)):
namef[i] = 3
elif ((namen[nodeid[i][0]] == 4 and namen[nodeid[i][1]] != 0) or
(namen[nodeid[i][0]] != 0 and namen[nodeid[i][1]] == 4)):
namef[i] = 4
else:
namef[i] = 100
norm[0] = vertex[nodeid[i][0]][1] - vertex[nodeid[i][1]][1]
norm[1] = vertex[nodeid[i][1]][0] - vertex[nodeid[i][0]][0]
centerf[i][:] = 0.5 * (vertex[nodeid[i][0]][0:3] + vertex[nodeid[i][1]][0:3])
snorm[:] = centerc[cellid[i][0]][:] - centerf[i][:]
if (snorm[0] * norm[0] + snorm[1] * norm[1]) > 0:
normalf[i][:] = -1*norm[:]
else:
normalf[i][:] = norm[:]
mesuref[i] = sqrt(normalf[i][0]**2 + normalf[i][1]**2)
return 0
def create_info_3dfaces(cellid:'int[:,:]', nodeid:'int[:,:]', namen:'int[:]', vertex:'double[:,:]',
centerc:'double[:,:]', nbfaces:'int', normalf:'double[:,:]', mesuref:'double[:]',
centerf:'double[:,:]', namef:'int[:]'):
from numpy import double, zeros, sqrt
norm = zeros(3, dtype=double)
snorm = zeros(3, dtype=double)
u = zeros(3, dtype=double)
v = zeros(3, dtype=double)
for i in range(nbfaces):
if (cellid[i][1] == -1 ):
if namen[nodeid[i][0]] == namen[nodeid[i][1]] and namen[nodeid[i][0]] == namen[nodeid[i][2]] :
namef[i] = namen[nodeid[i][0]]
elif ((namen[nodeid[i][0]] == 3 and namen[nodeid[i][1]] != 0 and namen[nodeid[i][2]] != 0) or
(namen[nodeid[i][0]] != 0 and namen[nodeid[i][1]] == 3 and namen[nodeid[i][2]] != 0) or
                  (namen[nodeid[i][0]] != 0 and namen[nodeid[i][1]] != 0 and namen[nodeid[i][2]] == 3)):
namef[i] = 3
elif ((namen[nodeid[i][0]] == 4 and namen[nodeid[i][1]] != 0 and namen[nodeid[i][2]] != 0) or
(namen[nodeid[i][0]] != 0 and namen[nodeid[i][1]] == 4 and namen[nodeid[i][2]] != 0) or
                  (namen[nodeid[i][0]] != 0 and namen[nodeid[i][1]] != 0 and namen[nodeid[i][2]] == 4)):
namef[i] = 4
else:
namef[i] = 100
u[:] = vertex[nodeid[i][1]][0:3]-vertex[nodeid[i][0]][0:3]
v[:] = vertex[nodeid[i][2]][0:3]-vertex[nodeid[i][0]][0:3]
norm[0] = 0.5*(u[1]*v[2] - u[2]*v[1])
norm[1] = 0.5*(u[2]*v[0] - u[0]*v[2])
norm[2] = 0.5*(u[0]*v[1] - u[1]*v[0])
centerf[i][:] = 1./3 * (vertex[nodeid[i][0]][:3] + vertex[nodeid[i][1]][:3] + vertex[nodeid[i][2]][:3])
snorm[:] = centerc[cellid[i][0]][:] - centerf[i][:]
if (snorm[0] * norm[0] + snorm[1] * norm[1] + snorm[2] * norm[2]) > 0:
normalf[i][:] = -1*norm[:]
else:
normalf[i][:] = norm[:]
mesuref[i] = sqrt(normalf[i][0]**2 + normalf[i][1]**2 + normalf[i][2]**2)
return 0
def Compute_2dcentervolumeOfCell(nodeid:'int[:,:]', vertex:'double[:,:]', nbelements:'int',
center:'double[:,:]', volume:'double[:]'):
    # compute the barycentre and the volume
for i in range(nbelements):
s_1 = nodeid[i][0]
s_2 = nodeid[i][1]
s_3 = nodeid[i][2]
x_1 = vertex[s_1][0]; y_1 = vertex[s_1][1]; z_1 = vertex[s_1][2]
x_2 = vertex[s_2][0]; y_2 = vertex[s_2][1]; z_2 = vertex[s_2][2]
x_3 = vertex[s_3][0]; y_3 = vertex[s_3][1]; z_3 = vertex[s_3][2]
center[i][0] = 1./3 * (x_1 + x_2 + x_3); center[i][1] = 1./3*(y_1 + y_2 + y_3); center[i][2] = 1./3*(z_1 + z_2 + z_3)
volume[i] = (1./2) * abs((x_1-x_2)*(y_1-y_3)-(x_1-x_3)*(y_1-y_2))
var1 = (x_2-x_1)*(y_3-y_1)-(y_2-y_1)*(x_3-x_1)
if var1 < 0:
nodeid[i][0] = s_1; nodeid[i][1] = s_3; nodeid[i][2] = s_2
return 0
def Compute_3dcentervolumeOfCell(nodeid:'int[:,:]', vertex:'double[:,:]', nbelements:'int',
center:'double[:,:]', volume:'double[:]'):
from numpy import zeros, fabs
wedge = zeros(3)
u = zeros(3)
v = zeros(3)
w = zeros(3)
    # compute the barycentre and the volume
for i in range(nbelements):
s_1 = nodeid[i][0]
s_2 = nodeid[i][1]
s_3 = nodeid[i][2]
s_4 = nodeid[i][3]
x_1 = vertex[s_1][0]; y_1 = vertex[s_1][1]; z_1 = vertex[s_1][2]
x_2 = vertex[s_2][0]; y_2 = vertex[s_2][1]; z_2 = vertex[s_2][2]
x_3 = vertex[s_3][0]; y_3 = vertex[s_3][1]; z_3 = vertex[s_3][2]
x_4 = vertex[s_4][0]; y_4 = vertex[s_4][1]; z_4 = vertex[s_4][2]
center[i][0] = 1./4*(x_1 + x_2 + x_3 + x_4)
center[i][1] = 1./4*(y_1 + y_2 + y_3 + y_4)
center[i][2] = 1./4*(z_1 + z_2 + z_3 + z_4)
u[:] = vertex[s_2][:]-vertex[s_1][:]
v[:] = vertex[s_3][:]-vertex[s_1][:]
w[:] = vertex[s_4][:]-vertex[s_1][:]
wedge[0] = v[1]*w[2] - v[2]*w[1]
wedge[1] = v[2]*w[0] - v[0]*w[2]
wedge[2] = v[0]*w[1] - v[1]*w[0]
volume[i] = 1./6*fabs(u[0]*wedge[0] + u[1]*wedge[1] + u[2]*wedge[2])
return 0
def create_cellsOfFace(faceid:'int[:,:]', nbelements:'int', nbfaces:'int', cellid:'int[:,:]', dim:'int'):
for i in range(nbelements):
for j in range(dim+1):
            if cellid[faceid[i][j]][0] == -1 :
                cellid[faceid[i][j]][0] = i
            if cellid[faceid[i][j]][0] != i:
                cellid[faceid[i][j]][1] = i
return 0
def create_2dfaces(nodeidc:'int[:,:]', nbelements:'int', faces:'int[:,:]',
cellf:'int[:,:]'):
#Create 2d faces
k = 0
for i in range(nbelements):
faces[k][0] = nodeidc[i][0]; faces[k][1] = nodeidc[i][1]
faces[k+1][0] = nodeidc[i][1]; faces[k+1][1] = nodeidc[i][2]
faces[k+2][0] = nodeidc[i][2]; faces[k+2][1] = nodeidc[i][0]
cellf[i][0] = k; cellf[i][1] = k+1; cellf[i][2] = k+2
k = k+3
return 0
def create_cell_faceid(nbelements:'int', oldTonewIndex:'int[:]', cellf:'int[:,:]',
faceid:'int[:,:]', dim:'int'):
for i in range(nbelements):
for j in range(dim+1):
faceid[i][j] = oldTonewIndex[cellf[i][j]]
return 0
def create_3dfaces(nodeidc:'int[:,:]', nbelements:'int',faces:'int[:,:]',
cellf:'int[:,:]'):
#Create 3d faces
k = 0
for i in range(nbelements):
faces[k][0] = nodeidc[i][0]; faces[k][1] = nodeidc[i][1]; faces[k][2] = nodeidc[i][2]
faces[k+1][0] = nodeidc[i][2]; faces[k+1][1] = nodeidc[i][3]; faces[k+1][2] = nodeidc[i][0]
faces[k+2][0] = nodeidc[i][0]; faces[k+2][1] = nodeidc[i][1]; faces[k+2][2] = nodeidc[i][3]
faces[k+3][0] = nodeidc[i][3]; faces[k+3][1] = nodeidc[i][1]; faces[k+3][2] = nodeidc[i][2]
cellf[i][0] = k; cellf[i][1] = k+1; cellf[i][2] = k+2; cellf[i][3] = k+3
k = k+4
return 0
def create_NormalFacesOfCell(centerc:'double[:,:]', centerf:'double[:,:]', faceid:'int[:,:]', normal:'double[:,:]',
nbelements:'int', nf:'double[:,:,:]', dim:'int'):
from numpy import zeros, double
ss = zeros(3, dtype=double)
#compute the outgoing normal faces for each cell
for i in range(nbelements):
G = centerc[i]
for j in range(dim+1):
f = faceid[i][j]
c = centerf[f]
if ((G[0]-c[0])*normal[f][0] + (G[1]-c[1])*normal[f][1] + (G[2]-c[2])*normal[f][2]) < 0.:
ss[:] = normal[f][:]
else:
ss[:] = -1.0*normal[f][:]
nf[i][j][:] = ss[:]
return 0
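# A minimal sketch (assumed, not from the original module): the functions above use
# Pyccel-style string annotations but also run as plain Python, e.g. the barycentre
# and area of a single reference triangle:
if __name__ == "__main__":
    import numpy as np

    nodeid = np.array([[0, 1, 2]], dtype=np.int64)
    vertex = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])
    center = np.zeros((1, 3))
    volume = np.zeros(1)
    Compute_2dcentervolumeOfCell(nodeid, vertex, 1, center, volume)
    print(center[0], volume[0])  # -> roughly [0.333 0.333 0.] and 0.5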
|
python
|
import datetime
import hashlib
import json
from typing import Dict
import uuid
class Utility(object):
@staticmethod
def make_json_serializable(doc: Dict):
"""
Make the document JSON serializable. This is a poor man's implementation that handles dates and nothing else.
This method modifies the given document in place.
Args:
doc: A Python Dictionary, typically a CDR object.
Returns: None
"""
for k, v in doc.items():
if isinstance(v, datetime.date):
doc[k] = v.strftime("%Y-%m-%d")
elif isinstance(v, datetime.datetime):
doc[k] = v.isoformat()
@staticmethod
def create_doc_id_from_json(doc) -> str:
"""
Docs with identical contents get the same ID.
Args:
doc:
Returns: a string with the hash of the given document.
"""
return hashlib.sha256(json.dumps(doc, sort_keys=True).encode('utf-8')).hexdigest()
@staticmethod
def create_uuid():
return str(uuid.uuid4())
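# Usage sketch (assumed, not from the original module): normalise a document that
# contains a date, then derive a deterministic content hash and a fresh UUID.
if __name__ == "__main__":
    doc = {"title": "example", "crawled": datetime.date(2020, 1, 1)}
    Utility.make_json_serializable(doc)
    print(doc["crawled"])                        # '2020-01-01'
    print(Utility.create_doc_id_from_json(doc))  # sha256 of the sorted-key JSON
    print(Utility.create_uuid())                 # random uuid4 string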
|
python
|
image_directory = "./images/"
|
python
|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import Error, ErrorException
from ._models_py3 import Key
from ._models_py3 import KeyListResult
from ._models_py3 import KeyValue
from ._models_py3 import KeyValueListResult
from ._models_py3 import Label
from ._models_py3 import LabelListResult
except (SyntaxError, ImportError):
from ._models import Error, ErrorException
from ._models import Key
from ._models import KeyListResult
from ._models import KeyValue
from ._models import KeyValueListResult
from ._models import Label
from ._models import LabelListResult
__all__ = [
'Error', 'ErrorException',
'Key',
'KeyListResult',
'KeyValue',
'KeyValueListResult',
'Label',
'LabelListResult',
]
|
python
|
from abc import ABC, abstractmethod
import pandas as pd
class ReducerAbstract(ABC):
@abstractmethod
def transform(self, df: pd.DataFrame) -> pd.DataFrame:
...
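# A minimal concrete reducer (assumed, not from the original module): dropping
# non-numeric columns is one simple ReducerAbstract implementation.
class NumericOnlyReducer(ReducerAbstract):
    def transform(self, df: pd.DataFrame) -> pd.DataFrame:
        # Keep numeric columns only; returns a new DataFrame, the input is untouched.
        return df.select_dtypes(include="number")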
|
python
|
import difflib
import json
import re
from itertools import zip_longest
try:
import html
except ImportError:
html = None
def _mark_text(text):
return '<span style="color: red;">{}</span>'.format(text)
def _mark_span(text):
return [_mark_text(token) for token in text]
def _markup_diff(a,
b,
mark=_mark_span,
default_mark=lambda x: x,
isjunk=None):
"""Returns a and b with any differences processed by mark
Junk is ignored by the differ
"""
seqmatcher = difflib.SequenceMatcher(isjunk=isjunk, a=a, b=b, autojunk=False)
out_a, out_b = [], []
for tag, a0, a1, b0, b1 in seqmatcher.get_opcodes():
markup = default_mark if tag == 'equal' else mark
out_a += markup(a[a0:a1])
out_b += markup(b[b0:b1])
assert len(out_a) == len(a)
assert len(out_b) == len(b)
return out_a, out_b
def _align_seqs(a, b, fill=''):
out_a, out_b = [], []
seqmatcher = difflib.SequenceMatcher(a=a, b=b, autojunk=False)
for tag, a0, a1, b0, b1 in seqmatcher.get_opcodes():
delta = (a1 - a0) - (b1 - b0)
out_a += a[a0:a1] + [fill] * max(-delta, 0)
out_b += b[b0:b1] + [fill] * max(delta, 0)
assert len(out_a) == len(out_b)
return out_a, out_b
def _html_sidebyside(a, b):
# Set the panel display
out = '<div style="display: grid;grid-template-columns: 1fr 1fr;grid-gap: 0;">'
# There's some CSS in Jupyter notebooks that makes the first pair unalign.
# This is a workaround
out += '<p></p><p></p>'
for left, right in zip_longest(a, b, fillvalue=''):
out += '<pre style="margin-top:0;padding:0">{}</pre>'.format(left)
out += '<pre style="margin-top:0";padding:0>{}</pre>'.format(right)
out += '</div>'
return out
def _html_diffs(a, b):
if not html:
raise ImportError('html package not found; Python 3.x required')
a = html.escape(a)
b = html.escape(b)
out_a, out_b = [], []
for sent_a, sent_b in zip(*_align_seqs(a.splitlines(), b.splitlines())):
mark_a, mark_b = _markup_diff(sent_a.split(' '), sent_b.split(' '))
out_a.append(' '.join(mark_a))
out_b.append(' '.join(mark_b))
return _html_sidebyside(out_a, out_b)
def _show_diffs(a, b):
from IPython.display import HTML, display
display(HTML(_html_diffs(a, b)))
def envdiff(a, b):
""" Compare 2 JSON environments using visual diff
a and b should be either pandas Series or strings of JSON objects
"""
try:
import pandas
except ImportError:
pandas = None
if pandas:
if isinstance(a, pandas.Series):
a = a.to_json()
if isinstance(b, pandas.Series):
b = b.to_json()
return _show_diffs(json.dumps(json.loads(a), indent=2),
json.dumps(json.loads(b), indent=2))
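# A small sketch (assumed, not from the original module): the helpers also work
# outside a notebook; _align_seqs pads the shorter side and _markup_diff wraps
# differing tokens in a red <span>.
if __name__ == "__main__":
    left, right = _align_seqs(["a", "b", "c"], ["a", "c", "d"])
    print(left, right)  # ['a', 'b', 'c', ''] ['a', '', 'c', 'd']
    marked_a, marked_b = _markup_diff("x y z".split(), "x q z".split())
    print(marked_a, marked_b)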
|
python
|
project = "Programmation en Python"
copyright = "2020, Dimitri Merejkowsky"
author = "Dimitri Merejkowsky - Contenu placé sous licence CC BY 4.0"
version = "0.3"
language = "fr"
copyright = "CC BY 4.0"
templates_path = ["_templates"]
exclude_patterns = []
keep_warnings = True
extensions = [
"notfound.extension",
]
notfound_context = {
"title": "Page non trouvée",
"body": "<h1>Page non trouvée</h1>",
}
notfound_urls_prefix = "/python/"
html_show_sourcelink = False
html_show_copyright = False
html_theme = "sphinx_book_theme"
# Don't use default "<project> <version> documentation"
html_title = project
html_static_path = ["_static"]
html_use_index = False
|
python
|
"""
Implements a decorator that counts the number of times a function was called,
and collects statistics on how long it took to execute every single function call.
"""
from time import time
from sys import stderr, stdout
import numpy as np
class FunctionLogger(object):
"""
stores two dictionaries:
- call_frequencies: mapping of function names to counts of how often they've been called
- call_times: mapping of function names to lists of how long it took to execute a fn call
"""
call_frequencies = {}
call_times = {}
def __init__(self, function, naming):
"""
initialize an instance of FunctionLogger. Notably, the user should not ever have
to do this: this exists solely to create a context manager for function_profiler.
"""
self.start_time = None
if naming == 'qualname':
self.function_name = function.__qualname__
elif naming == 'name':
self.function_name = function.__name__
else:
raise ValueError(
"Invalid naming argument supplied to function_profiler: %s"
.format(naming)
)
def __enter__(self):
FunctionLogger.call_frequencies[self.function_name] = (
FunctionLogger.call_frequencies.get(self.function_name, 0) + 1
)
self.start_time = time()
def __exit__(self, type_, value, traceback):
seconds_taken = time() - self.start_time
call_times_so_far = FunctionLogger.call_times.get(self.function_name, [])
FunctionLogger.call_times[self.function_name] = call_times_so_far + [seconds_taken]
def clear_data():
"""
Clears the data stored in the class variables. No 'self' argument
because this is not run on an instance, but on the class itself.
"""
FunctionLogger.call_frequencies = {}
FunctionLogger.call_times = {}
def log_data(output_option='stderr'):
"""
logs the class variables to stdout, stderr, or to a file. No 'self' arg
because this is not run on an instance, but on the class itself.
"""
# for when we're logging to a file, rather than stderr or stdout
log_file_strings = []
for function_key in sorted(FunctionLogger.call_frequencies.keys()):
call_freq = FunctionLogger.call_frequencies.get(function_key, 0)
call_times = FunctionLogger.call_times.get(function_key, [])
out_string = make_output_string(function_key, call_times, call_freq)
if output_option == 'stderr':
stderr.write(out_string)
elif output_option == 'stdout':
stdout.write(out_string)
elif output_option == 'suppress':
# this is mostly intended for testing
pass
else:
log_file_strings.append(out_string)
if log_file_strings:
with open(output_option, 'w') as out_file:
for out_string in log_file_strings:
out_file.write(out_string)
def make_output_string(fn_name, call_times, call_freq):
"""
Construct a string that represents the log for this one particular function.
- fn_name: string, name of the function
- call_times: list of floats (lengths of function calls)
- call_freq: integer, number of times the function was called
"""
if call_times == []:
# call_times == [] iff __enter__ was called with this fn, but __exit__ was not
stats_string = (
"No time stats were recorded for this function, "
"despite it having been called. This is an error.\n"
)
else:
stats_string = (
"Min: {:08f}, Max: {:08f}, Mean: {:08f}, Median: {:08f}, Stddev: {:08f}\n"
.format(np.min(call_times), np.max(call_times), np.mean(call_times),
np.median(call_times), np.std(call_times))
)
if call_freq != len(call_times):
# for at least one call of this function, __enter__ was called but __exit__ was not.
stats_string += (
("WARNING: number of call times ({}) is not equal to call frequency count ({}). "
"This suggests the function was called, but did not return as normal. Check "
"for errors or program termination.\n").format(len(call_times), call_freq)
)
call_text = "call" if (call_freq == 1) else "calls"
return "{}: {} {}. Time stats (s): {}".format(fn_name, call_freq, call_text, stats_string)
def function_profiler(naming='qualname'):
"""
decorator that uses FunctionLogger as a context manager to
log information about this call of the function.
"""
def layer(function):
def wrapper(*args, **kwargs):
with FunctionLogger(function, naming):
return function(*args, **kwargs)
return wrapper
return layer
def with_logger(output='stderr'):
"""
decorator that calls FunctionLogger.log_data when the decorated function
terminates, whether due to an exception or not.
"""
def layer(function):
def wrapper(*args, **kwargs):
try:
function(*args, **kwargs)
finally:
FunctionLogger.log_data(output)
return wrapper
return layer
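# Usage sketch (assumed, not from the original module): profile a few calls and
# print the collected statistics to stdout when the entry point returns.
if __name__ == "__main__":
    @function_profiler()
    def slow_add(a, b):
        return a + b

    @with_logger(output='stdout')
    def demo():
        for _ in range(3):
            slow_add(1, 2)

    demo()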
|
python
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for question-answering on SQuAD (DistilBERT, Bert, XLM, XLNet)."""
import glob
import logging
import os
import torch
from args import get_args, preprocessing_data
from utils import load_and_cache_examples
from train_eval import train, evaluate, set_seed
from prettytable import PrettyTable
from model import *
from transformers import (
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
WEIGHTS_NAME,
AutoConfig,
AutoModelForQuestionAnswering,
AutoTokenizer,
get_linear_schedule_with_warmup,
squad_convert_examples_to_features,
)
LANGS = ['en', 'es', 'de', 'ar', 'hi', 'vi', 'zh']
XQ_LANGS = ['en', 'ar', 'de', 'el', 'es', 'hi', 'ru', 'tr', 'vi', 'zh']
# LANGS = ['en', 'es', 'de', 'hi', 'vi', 'zh']
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def main():
args = get_args()
if args.doc_stride >= args.max_seq_length - args.max_query_length:
logger.warning(
"WARNING - You've set a doc stride which may be superior to the document length in some "
"examples. This could result in errors when building features from the examples. Please reduce the doc "
"stride or increase the maximum length to ensure the features are correctly built."
)
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir
):
print(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir
)
)
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set seed
set_seed(args)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
# Make sure only the first process in distributed training will download model & vocab
torch.distributed.barrier()
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce model loading logs
logging.getLogger("transformers.configuration_utils").setLevel(logging.WARN) # Reduce model loading logs
args.model_type = args.model_type.lower()
config = AutoConfig.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
cache_dir=args.cache_dir if args.cache_dir else None,
)
config.addtional_feature_size = args.addtional_feature_size
config.gan_dropout_prob = args.gan_dropout_prob
tokenizer = AutoTokenizer.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
)
if args.model_type == 'bert':
QAModel = mBertForQuestionAnswering_dep_beta_v3
model = QAModel.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None,
)
if args.local_rank == 0:
# Make sure only the first process in distributed training will download model & vocab
torch.distributed.barrier()
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set.
# Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will
# remove the need for this code, but it is still valid.
if args.fp16:
try:
import apex
apex.amp.register_half_function(torch, "einsum")
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
# Data preprocessing with the dev and test data firstly (prevent fp16 issue when facing Stanza)
if args.do_preprocess:
for set_name, lang in preprocessing_data:
logger.info("Now process train/dev/test/xquad data: {}/{}".format(set_name, lang))
dataset, examples, features = load_and_cache_examples(args,
tokenizer,
evaluate=set_name,
context_lang=lang,
query_lang=lang,
output_examples=True)
# Training
if args.do_train:
# train_dataset = load_and_cache_examples(args, tokenizer, evaluate='train', output_examples=False)
global_step, tr_loss, time_stamp = train(args, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
if not args.do_train:
time_stamp = '12-02-11-14'
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(
os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
checkpoints = [i for i in checkpoints if time_stamp in i]
logger.info("Evaluate the following checkpoints for dev: %s", checkpoints)
best_f1 = 0
best_em = 0
best_ckpt = checkpoints[0]
for checkpoint in checkpoints:
# Reload the model
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
logger.info("Load the checkpoint: {}".format(checkpoint))
model = QAModel.from_pretrained(checkpoint) # , force_download=True)
model.to(args.device)
# Evaluate
result = evaluate(args, model, tokenizer, prefix=global_step, set='dev')
if result['f1'] > best_f1:
best_f1 = result['f1']
best_em = result['exact_match']
best_ckpt = checkpoint
result = dict((k + ("_{}".format(global_step) if global_step else ""), v) for k, v in result.items())
results.update(result)
logger.info("Dev Results: {}".format(results))
logger.info("Best checkpoint and its dev em/f1 result: {}, {}/{}".format(best_ckpt, best_em, best_f1))
if args.do_test and args.local_rank in [-1, 0]:
model = QAModel.from_pretrained(best_ckpt) # , force_download=True)
model.to(args.device)
logger.info("Evaluate on MLQA dataset!")
mean_em = 0
mean_f1 = 0
table = PrettyTable()
table.add_column(' ', ['EM', 'F1'])
for lang in LANGS:
result = evaluate(args, model, tokenizer, set='test', context_lang=lang, query_lang=lang, prefix=global_step)
table.add_column(lang, [round(result['exact_match'], 2), round(result['f1'], 2)])
# logger.info("Test Results for {}-{}: {}".format(lang,lang,result))
mean_em += result['exact_match']
mean_f1 += result['f1']
mean_em = mean_em/len(LANGS)
mean_f1 = mean_f1/len(LANGS)
table.add_column('Avg', [round(mean_em, 2), round(mean_f1, 2)])
print(table)
logger.info("Evaluate on XQUAD dataset!")
mean_em = 0
mean_f1 = 0
table = PrettyTable()
table.add_column(' ', ['EM', 'F1'])
for lang in XQ_LANGS:
result = evaluate(args, model, tokenizer, set='xquad', context_lang=lang, query_lang=lang, prefix=global_step)
table.add_column(lang, [round(result['exact_match'], 2), round(result['f1'], 2)])
# logger.info("Test Results for {}-{}: {}".format(lang, lang, result))
mean_em += result['exact_match']
mean_f1 += result['f1']
mean_em = mean_em / len(XQ_LANGS)
mean_f1 = mean_f1 / len(XQ_LANGS)
table.add_column('Avg', [round(mean_em, 2), round(mean_f1, 2)])
print(table)
return results
if __name__ == "__main__":
main()
|
python
|
from .walker import RandomWalker
class Node2Path:
def __init__(self, graph, walk_length, num_walks, p=1.0, q=1.0, workers=1):
self.graph = graph
self.walk_length = walk_length
self.num_walks = num_walks
self.p = p
self.q = q
self.workers = workers
def get_path(self):
self.walker = RandomWalker(self.graph, p=self.p, q=self.q)
print("Preprocess transition probs...")
self.walker.preprocess_transition_probs()
self.sentences = self.walker.simulate_walks(
num_walks=self.num_walks,
walk_length=self.walk_length,
workers=self.workers,
verbose=1,
)
return self.sentences
|
python
|
#!/usr/bin/env python
import json
import argparse
import docker
# A Simple module that returns stats for given ids.
def get_nested_elements(info, elements):
# Function to traverse dictionaries and print when value is
# not a dict (instead it's a str)
# pdb.set_trace()
if isinstance(elements, str):
keys = elements.split('.')
else:
keys = elements
for key in keys:
value = info[key]
if isinstance(value, dict):
keys.pop(0)
if keys:
get_nested_elements(value, keys)
elif value is not None:
print(value)
else:
return('Not Encountered Value')
def get_container_attr(container_id, attr, addr):
# Find a container info and return desired attr.
cli = docker.from_env()
container = cli.containers.get(container_id)
stats = container.stats(stream=False, decode=True)
get_nested_elements(stats, attr)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--cid', type=str, required=True)
parser.add_argument('--attr', type=str, required=True)
parser.add_argument('--addr', type=str, required=False)
args = parser.parse_args()
    addr = args.addr if args.addr else 'http://127.0.0.1:2376'
    get_container_attr(container_id=args.cid, attr=args.attr, addr=addr)
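# Example invocation (assumed, not from the original script; needs a running Docker
# daemon and an existing container). The dotted --attr path is walked by
# get_nested_elements, e.g.:
#   python <this_script>.py --cid <container-id> --attr memory_stats.usage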
|
python
|
from pyspark.sql import SparkSession
from pyspark.ml import Pipeline
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.regression import LinearRegression
from pyspark.ml.evaluation import RegressionEvaluator
import matplotlib.pyplot as plt
import numpy as np
def read_data(file_path):
cars_df = spark.read.load(file_path, format="csv", sep=",", inferSchema="true", header="true")
result_df = cars_df.toDF("make", "model", "year", "engine_fuel_type", "engine_hp", "engine_cylinders",
"transmission_type", "driven_wheels", "number_of_doors", "market_category", "vehicle_size",
"vehicle_style", "highway_mpg", "city_mpg", "popularity", "price")
result_df.select("make", "model", "year", "engine_hp", "number_of_doors", "highway_mpg", "city_mpg", "popularity",
"price").show(5)
return result_df
def split_data(data_df):
train_df, test_df = data_df.randomSplit([.8, .2], seed=42)
print(f"""Zbior danych wejsciowych podzielono na:\n
Rekordy trenujace:\t{train_df.count()}\n
Rekordy testujace:\t{test_df.count()}""")
return train_df, test_df
def train_model(train_dataset):
numeric_cols = ["year", "engine_hp", "number_of_doors", "highway_mpg", "city_mpg", "popularity"]
vec_assembler = VectorAssembler(inputCols=numeric_cols, outputCol="features", handleInvalid="skip")
vec_train_df = vec_assembler.transform(train_dataset)
vec_train_df.select("features", "price").show(10)
lr = LinearRegression(featuresCol="features", labelCol="price")
lr_model = lr.fit(vec_train_df)
year = round(lr_model.coefficients[0], 2)
engine_hp = round(lr_model.coefficients[1], 2)
number_of_doors = round(lr_model.coefficients[2], 2)
highway_mpg = round(lr_model.coefficients[3], 2)
city_mpg = round(lr_model.coefficients[4], 2)
popularity = round(lr_model.coefficients[5], 2)
b = round(lr_model.intercept, 2)
print(
f"""Wzor nauczonego modelu:
cena = {year}*rok + {engine_hp}*konie_mechaniczne + {number_of_doors}*drzwi + {highway_mpg}*mpg_autostrada
+ {city_mpg}*mpg_miasto + {popularity}*popularnosc + {b}""")
estimator = Pipeline(stages=[vec_assembler, lr])
trained_model = estimator.fit(train_dataset)
return trained_model
def make_predictions(trained_model, test_df):
prediction_df = trained_model.transform(test_df)
prediction_df.select("features", "price", "prediction").show(10)
return prediction_df
def evaluate_model(predictions_df):
    # The argument is the DataFrame returned by make_predictions, not the fitted model.
    regression_evaluator = RegressionEvaluator(
        predictionCol="prediction",
        labelCol="price",
        metricName="rmse")
    rmse = regression_evaluator.evaluate(predictions_df)
    print(f"RMSE = {rmse:.1f}")
    r2 = regression_evaluator.setMetricName("r2").evaluate(predictions_df)
    print(f"R2 = {r2}")
def plot_histogram(real_data, prediction):
numbers_of_records = 2000
input_data = [np.log(row['price']) for row in real_data.take(numbers_of_records)]
predicted = [np.log(row['price']) for row in prediction.take(numbers_of_records)]
plt.figure()
plt.hist([predicted, input_data], bins=30, log=False)
plt.legend(('prognozowane ceny', 'rzeczywiste ceny'))
plt.xlabel('cena')
plt.ylabel('ilość')
plt.savefig('result_histogram.png')
if __name__ == '__main__':
spark = SparkSession.builder \
.appName("Ceny pojazdow") \
.getOrCreate()
spark.sparkContext.setLogLevel("ERROR")
data_path = """./data/car-data.csv"""
cars_df = read_data(data_path)
(train_df, test_df) = split_data(cars_df)
estimate_model = train_model(train_df)
predictions_df = make_predictions(estimate_model, test_df)
evaluate_model(predictions_df)
plot_histogram(cars_df, predictions_df)
spark.stop()
|
python
|
from collections import OrderedDict
from typing import Optional
from queryfs.db.schema import Schema
class File(Schema):
table_name: str = "files"
fields: OrderedDict[str, str] = OrderedDict(
{
"id": "integer primary key autoincrement",
"name": "text",
"hash": "text",
"ctime": "real",
"atime": "real",
"mtime": "real",
"size": "integer",
"directory_id": "integer null",
}
)
id: int = 0
name: str = ""
hash: str = ""
ctime: float = 0.0
atime: float = 0.0
mtime: float = 0.0
size: int = 0
directory_id: Optional[int] = 0
|
python
|
#!/usr/bin/env python2.7
import os
import sys
sys.path.append(os.path.realpath(__file__ + '/../../../../lib'))
import udf
from udf import useData, expectedFailure
class GetpassTest(udf.TestCase):
def setUp(self):
self.query('CREATE SCHEMA getpass', ignore_errors=True)
self.query('OPEN SCHEMA getpass', ignore_errors=True)
def tearDown(self):
self.query('DROP SCHEMA getpass CASCADE', ignore_errors=True)
def test_getuser(self):
self.query(udf.fixindent('''
CREATE OR REPLACE python3 SCALAR SCRIPT
get_user_from_passwd()
RETURNS VARCHAR(10000) AS
def run(ctx):
import getpass
return getpass.getuser()
/
'''))
rows = self.query("select get_user_from_passwd()")
expected = u"exadefusr"
        self.assertEqual(expected, rows[0][0])
if __name__ == '__main__':
udf.main()
# vim: ts=4:sts=4:sw=4:et:fdm=indent
|
python
|
from scipy.io import netcdf
import numpy as np
import numpy.matlib
tave = 900
basedir = '/marconi_work/FUA34_MULTEI/stonge0_FUA34/rad_test/2nd_deriv/T1/'
basedir = '/marconi_work/FUA34_MULTEI/stonge0_FUA34/rad_test/2nd_deriv/rho_scan2/r0.001/'
basedir = '/marconi_work/FUA34_MULTEI/stonge0_FUA34/rad_test/fg_drive/rhoscan_hr/0.002/'
basedir = '/marconi_work/FUA34_MULTEI/stonge0_FUA34/rad_test/fg_drive/0.001d/'
basedir = '/marconi_work/FUA35_OXGK/stonge0/rad_scan/0.001d_aht/'
#basedir = '/marconi_work/FUA35_OXGK/stonge0/rad_scan/0.001t_2/'
right_file = basedir + 'left.out.nc'
center_file = basedir + 'center.out.nc'
left_file = basedir + 'right.out.nc'
right_nc = netcdf.netcdf_file(right_file,'r')
center_nc = netcdf.netcdf_file(center_file,'r')
left_nc = netcdf.netcdf_file(left_file,'r')
def read_stella_float(infile, var):
import numpy as np
try:
#print('a')
#arr = np.copy(infile.variables[var][:])
arr = infile.variables[var][:]
#print('b')
flag = True
except KeyError:
print('INFO: '+var+' not found in netcdf file')
arr =np.arange(1,dtype=float)
        flag = False
return arr, flag
def phi_vs_t_to_x(infile,var,ny,nx):
# t ntube z kx ky ri
avt, present = read_stella_float(infile,var)
#print('c')
avt_kxky = ny*nx*(avt[:,0,:,:,:,0] + 1j*avt[:,0,:,:,:,1])
#print('d')
arr = np.fft.ifft(avt_kxky,axis=2)
#print('e')
return arr
def mom_vs_t_to_x(infile,var,ny,nx):
#in: t nspec ntube z kx ky ri
#out: t z kx ky
avt, present = read_stella_float(infile,var)
avt_kxky = ny*nx*(avt[:,0,0,:,:,:,0] + 1j*avt[:,0,0,:,:,:,1])
arr = np.fft.ifft(avt_kxky,axis=2)
return arr
print('0')
naky = center_nc.dimensions['ky']
nakxl = left_nc.dimensions['kx']
nakxc = center_nc.dimensions['kx']
nakxr = right_nc.dimensions['kx']
ky = np.copy(center_nc.variables['ky'][:])
kxc = np.copy(center_nc.variables['kx'][:])
t = np.copy(center_nc.variables['t'][:])
nt = t.size
Lxc = 2.*np.pi/kxc[1]
dxc = Lxc / nakxc
zed = np.copy(center_nc.variables['zed'][:])
nzed = zed.size
omp = ((nzed+1)/2) - 1
delzed = zed[1]-zed[0]
radgrid = np.copy(center_nc.variables['rad_grid'][:])
fac = 2*np.ones(naky)
fac[0] = 1
jacobl = np.copy( left_nc.variables['jacob'][:])
jacobc = np.copy(center_nc.variables['jacob'][:])
jacobr = np.copy( right_nc.variables['jacob'][:])
# nalpha nzed
print('1')
dl_over_bl = np.squeeze(delzed*jacobl)
dl_over_bc = np.squeeze(delzed*jacobc)
dl_over_br = np.squeeze(delzed*jacobr)
dl_over_bl[nzed-1] = 0.0
dl_over_bc[nzed-1] = 0.0
dl_over_br[nzed-1] = 0.0
dl_over_bl = dl_over_bl/sum(dl_over_bl)
dl_over_bc = dl_over_bc/sum(dl_over_bc)
dl_over_br = dl_over_br/sum(dl_over_br)
dobl = np.transpose(np.matlib.tile(dl_over_bl,(naky,nakxl,1)))
dobc = np.transpose(np.matlib.tile(dl_over_bc,(naky,nakxc,1)))
dobr = np.transpose(np.matlib.tile(dl_over_br,(naky,nakxr,1)))
print('2')
# t spec x
pfluxl = np.copy( left_nc.variables['pflux_x'][:])
pfluxc = np.copy(center_nc.variables['pflux_x'][:])
pfluxr = np.copy( right_nc.variables['pflux_x'][:])
vfluxl = np.copy( left_nc.variables['vflux_x'][:])
vfluxc = np.copy(center_nc.variables['vflux_x'][:])
vfluxr = np.copy( right_nc.variables['vflux_x'][:])
qfluxl = np.copy( left_nc.variables['qflux_x'][:])
qfluxc = np.copy(center_nc.variables['qflux_x'][:])
qfluxr = np.copy( right_nc.variables['qflux_x'][:])
print('3')
densl_xky = mom_vs_t_to_x(left_nc,'density',naky,nakxc)
uparl_xky = mom_vs_t_to_x(left_nc,'upar',naky,nakxc)
templ_xky = mom_vs_t_to_x(left_nc,'temperature',naky,nakxc)
densc_xky = mom_vs_t_to_x(center_nc,'density',naky,nakxc)
uparc_xky = mom_vs_t_to_x(center_nc,'upar',naky,nakxc)
tempc_xky = mom_vs_t_to_x(center_nc,'temperature',naky,nakxc)
densr_xky = mom_vs_t_to_x(right_nc,'density',naky,nakxc)
uparr_xky = mom_vs_t_to_x(right_nc,'upar',naky,nakxc)
tempr_xky = mom_vs_t_to_x(right_nc,'temperature',naky,nakxc)
dens_zf = np.real(np.sum(dobc[:,:,0]*densc_xky[:,:,:,0],1))
upar_zf = np.real(np.sum(dobc[:,:,0]*uparc_xky[:,:,:,0],1))
temp_zf = np.real(np.sum(dobc[:,:,0]*tempc_xky[:,:,:,0],1))
densl_zf = np.real(np.sum(dobl[:,:,0]*densl_xky[:,:,:,0],1))
uparl_zf = np.real(np.sum(dobl[:,:,0]*uparl_xky[:,:,:,0],1))
templ_zf = np.real(np.sum(dobl[:,:,0]*templ_xky[:,:,:,0],1))
densr_zf = np.real(np.sum(dobr[:,:,0]*densr_xky[:,:,:,0],1))
uparr_zf = np.real(np.sum(dobr[:,:,0]*uparr_xky[:,:,:,0],1))
tempr_zf = np.real(np.sum(dobr[:,:,0]*tempr_xky[:,:,:,0],1))
dens_zero = np.mean(dens_zf,1)
upar_zero = np.mean(upar_zf,1)
temp_zero = np.mean(temp_zf,1)
print('4')
cout = open(basedir + 'left.fluxes_t','w')
cout.write('[1] t ')
cout.write('[2] x ')
cout.write('[3] flux_d')
cout.write('[4] flux_u')
cout.write('[5] flux_t')
cout.write('[6] dens ')
cout.write('[7] upar ')
cout.write('[8] temp ')
cout.write('\n')
print('5')
for i in range (0, nt):
for j in range (0, nakxl):
cout.write('%e ' % t[i])
cout.write('%e ' % (dxc*j))
cout.write('%e ' % pfluxl[i,0,j])
cout.write('%e ' % vfluxl[i,0,j])
cout.write('%e ' % qfluxl[i,0,j])
cout.write('%e ' % densl_zf[i,j])
cout.write('%e ' % uparl_zf[i,j])
cout.write('%e ' % templ_zf[i,j])
cout.write('\n')
cout.write('\n')
cout.close()
cout = open(basedir + 'center.fluxes_t','w')
cout.write('[1] t ')
cout.write('[2] x ')
cout.write('[3] x simp')
cout.write('[4] r ')
cout.write('[5] flux_d')
cout.write('[6] flux_u')
cout.write('[7] flux_t')
cout.write('[8] dens ')
cout.write('[9] upar ')
cout.write('[10] temp')
cout.write('\n')
print('6')
for i in range (0, nt):
for j in range (0, nakxc):
cout.write('%e ' % t[i])
cout.write('%e ' % radgrid[j,0])
cout.write('%e ' % (dxc*j))
cout.write('%e ' % radgrid[j,2])
cout.write('%e ' % pfluxc[i,0,j])
cout.write('%e ' % vfluxc[i,0,j])
cout.write('%e ' % qfluxc[i,0,j])
cout.write('%e ' % dens_zf[i,j])
cout.write('%e ' % upar_zf[i,j])
cout.write('%e ' % temp_zf[i,j])
cout.write('\n')
cout.write('\n')
cout.close()
cout = open(basedir + 'right.fluxes_t','w')
cout.write('[1] t ')
cout.write('[2] x ')
cout.write('[3] flux_d')
cout.write('[4] flux_u')
cout.write('[5] flux_t')
cout.write('[6] dens ')
cout.write('[7] upar ')
cout.write('[8] temp ')
cout.write('\n')
print('7')
for i in range (0, nt):
for j in range (0, nakxr):
cout.write('%e ' % t[i])
cout.write('%e ' % (dxc*j))
cout.write('%e ' % pfluxr[i,0,j])
cout.write('%e ' % vfluxr[i,0,j])
cout.write('%e ' % qfluxr[i,0,j])
cout.write('%e ' % densr_zf[i,j])
cout.write('%e ' % uparr_zf[i,j])
cout.write('%e ' % tempr_zf[i,j])
cout.write('\n')
cout.write('\n')
cout.close()
tind=nt-1
for i in range (0, nt):
if(t[i]> tave):
tind = i
break
print(str(tind) + ' ' + str(nt))
print('8')
plave = np.mean(pfluxl[tind:nt,0,:],0)
vlave = np.mean(vfluxl[tind:nt,0,:],0)
qlave = np.mean(qfluxl[tind:nt,0,:],0)
pcave = np.mean(pfluxc[tind:nt,0,:],0)
vcave = np.mean(vfluxc[tind:nt,0,:],0)
qcave = np.mean(qfluxc[tind:nt,0,:],0)
dlave = np.mean(densl_zf[tind:nt,:],0)
ulave = np.mean(uparl_zf[tind:nt,:],0)
tlave = np.mean(templ_zf[tind:nt,:],0)
dcave = np.mean(dens_zf[tind:nt,:],0)
ucave = np.mean(upar_zf[tind:nt,:],0)
tcave = np.mean(temp_zf[tind:nt,:],0)
drave = np.mean(densr_zf[tind:nt,:],0)
urave = np.mean(uparr_zf[tind:nt,:],0)
trave = np.mean(tempr_zf[tind:nt,:],0)
prave = np.mean(pfluxr[tind:nt,0,:],0)
vrave = np.mean(vfluxr[tind:nt,0,:],0)
qrave = np.mean(qfluxr[tind:nt,0,:],0)
print('9')
cout = open(basedir + 'center.prof_ave','w')
cout.write('#Average from t=' + str(t[tind])+ ' to t=' + str(t[nt-1]) + '\n')
cout.write('#')
cout.write('[1] x ')
cout.write('[2] x simp ')
cout.write('[3] r ')
cout.write('[4] pflux ')
cout.write('[5] vflux ')
cout.write('[6] qflux ')
cout.write('[7] dens ')
cout.write('[8] upar ')
cout.write('[9] temp ')
cout.write('\n')
for i in range (0, nakxc):
cout.write('%e ' % radgrid[i,0])
cout.write('%e ' % (dxc*i))
cout.write('%e ' % radgrid[i,2])
cout.write('%e ' % pcave[i])
cout.write('%e ' % vcave[i])
cout.write('%e ' % qcave[i])
cout.write('%e ' % dcave[i])
cout.write('%e ' % ucave[i])
cout.write('%e ' % tcave[i])
cout.write('\n')
cout.close()
cout = open(basedir + 'left.prof_ave','w')
cout.write('#Average from t=' + str(t[tind])+ ' to t=' + str(t[nt-1]) + '\n')
cout.write('#')
cout.write('[1] x ')
cout.write('[2] pflux ')
cout.write('[3] vflux ')
cout.write('[4] qflux ')
cout.write('[5] dens ')
cout.write('[6] upar ')
cout.write('[7] temp ')
cout.write('\n')
for i in range (0, nakxl):
cout.write('%e ' % (dxc*i-0.5*Lxc))
cout.write('%e ' % plave[i])
cout.write('%e ' % vlave[i])
cout.write('%e ' % qlave[i])
cout.write('%e ' % dlave[i])
cout.write('%e ' % ulave[i])
cout.write('%e ' % tlave[i])
cout.write('\n')
cout.close()
cout = open(basedir + 'right.prof_ave','w')
cout.write('#Average from t=' + str(t[tind])+ ' to t=' + str(t[nt-1]) + '\n')
cout.write('#')
cout.write('[1] x ')
cout.write('[2] pflux ')
cout.write('[3] vflux ')
cout.write('[4] qflux ')
cout.write('[5] dens ')
cout.write('[6] upar ')
cout.write('[7] temp ')
cout.write('\n')
for i in range (0, nakxr):
cout.write('%e ' % (dxc*i-0.5*Lxc))
cout.write('%e ' % prave[i])
cout.write('%e ' % vrave[i])
cout.write('%e ' % qrave[i])
cout.write('%e ' % drave[i])
cout.write('%e ' % urave[i])
cout.write('%e ' % trave[i])
cout.write('\n')
cout.close()
cout = open(basedir + 'center.zero_mode','w')
cout.write('[1] t ')
cout.write('[2] dens ')
cout.write('[3] upar ')
cout.write('[4] temp ')
cout.write('\n')
print('4')
for i in range (0, nt):
cout.write('%e ' % t[i])
cout.write('%e ' % dens_zero[i])
cout.write('%e ' % upar_zero[i])
cout.write('%e ' % temp_zero[i])
cout.write('\n')
cout.close()
exit()
|
python
|
"""Data structures supporting the who wrote this news crawler.
----
Copyright 2019 Data Driven Empathy LLC
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES
OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
class Article:
"""Data structure describing an article."""
def __init__(self, source, source_feed, title, description, publish_date,
crawl_date, link, author):
"""Create a new article record.
Args:
source: The name of the agency that published this article.
source_feed: The name for the RSS feed, helping disambiguate if there are many.
title: The title for the article.
description: The description for the article.
publish_date: The datetime.datetime for when this article was published.
crawl_date: The datetime.datetime for when this article was crawled.
link: URL where the full article can be found.
author: The author of the article.
"""
self.__source = source
self.__source_feed = source_feed
self.__title = title
self.__description = description
self.__publish_date = publish_date
self.__crawl_date = crawl_date
self.__link = link
self.__author = author
def get_source(self):
"""Get the name of the agency that published this article.
Returns:
The name of the agency that published this article like NPR.
"""
return self.__source
def get_source_feed(self):
"""Get the bame for the RSS feed in which this application was found.
Returns:
The string name for the RSS feed, helping disambiguate if there are many.
"""
return self.__source_feed
def get_title(self):
"""Get the text of the article title.
Returns:
The title for the article as a string.
"""
return self.__title
def get_description(self):
"""Get the description contents for this article.
Returns:
The description for the article as a string.
"""
return self.__description
def get_publish_date(self):
"""Get the datetime for when this article was published.
Returns:
The datetime.datetime for when this article was published.
"""
return self.__publish_date
def get_crawl_date(self):
"""Get the datetime for when this article was crawled.
Returns:
The datetime.datetime for when this article was crawled.
"""
return self.__crawl_date
def get_link(self):
"""Get the URL at which the full article can be found.
Returns:
URL where the full article can be found.
"""
return self.__link
def get_author(self):
"""Get the name of the author if provided.
Returns:
The author of the article as a string. None if no author given.
"""
return self.__author
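# Usage sketch (assumed, not from the original module): building an Article record
# from crawled RSS metadata and reading a couple of fields back.
if __name__ == "__main__":
    import datetime

    article = Article(
        source="NPR",
        source_feed="npr-politics",  # hypothetical feed name
        title="Example headline",
        description="Example description.",
        publish_date=datetime.datetime(2019, 1, 1, 12, 0),
        crawl_date=datetime.datetime(2019, 1, 2, 8, 30),
        link="https://example.com/article",
        author="Jane Doe",
    )
    print(article.get_source(), article.get_publish_date().isoformat())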
|
python
|
""" Module for controlling motors. """
__all__ = [
"stepper",
]
|
python
|
from rest_framework import serializers
from .models import CrashCourse, CourseChapter, ChapterSection
class CrashCourseSerializer(serializers.ModelSerializer):
no_of_chapter = serializers.SerializerMethodField()
class Meta:
model = CrashCourse
fields = ('id', 'title', 'slug', 'no_of_chapter')
def get_no_of_chapter(self, obj):
return obj.coursechapter_set.count()
class CourseChapterSerializer(serializers.ModelSerializer):
class Meta:
model = CourseChapter
fields = ('id', 'title', 'slug', 'course')
class ChapterSectionListSerializer(serializers.ModelSerializer):
class Meta:
model = ChapterSection
fields = ('id', 'title', 'slug', )
class ChapterSectionDetailSerializer(serializers.ModelSerializer):
class Meta:
model = ChapterSection
fields = ('id', 'title', 'slug', 'description')
|
python
|
#!/usr/bin/env python3
# Test whether a PUBLISH to a topic with QoS 1 results in the correct packet flow.
from mosq_test_helper import *
rc = 1
keepalive = 60
connect_packet = mosq_test.gen_connect("test-helper", keepalive=keepalive)
connack_packet = mosq_test.gen_connack(rc=0)
mid = 128
publish_packet = mosq_test.gen_publish("qos1/timeout/test", qos=1, mid=mid, payload="timeout-message")
puback_packet = mosq_test.gen_puback(mid)
sock = mosq_test.do_client_connect(connect_packet, connack_packet, connack_error="helper connack")
mosq_test.do_send_receive(sock, publish_packet, puback_packet, "helper puback")
rc = 0
sock.close()
exit(rc)
|
python
|
import time
class Logger:
def __init__(self) -> None:
pass
def log(self, message: str) -> None:
print(f'{time.ctime()}: {message}')
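# Usage sketch (assumed, not from the original module).
if __name__ == "__main__":
    logger = Logger()
    logger.log("service started")  # e.g. "Mon Jan  1 00:00:00 2024: service started"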
|
python
|
# coding: utf-8
from enum import Enum
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
from bitmovin_api_sdk.models.av1_adaptive_quant_mode import Av1AdaptiveQuantMode
from bitmovin_api_sdk.models.av1_key_placement_mode import Av1KeyPlacementMode
from bitmovin_api_sdk.models.color_config import ColorConfig
from bitmovin_api_sdk.models.display_aspect_ratio import DisplayAspectRatio
from bitmovin_api_sdk.models.encoding_mode import EncodingMode
from bitmovin_api_sdk.models.pixel_format import PixelFormat
from bitmovin_api_sdk.models.video_configuration import VideoConfiguration
import pprint
import six
class Av1VideoConfiguration(VideoConfiguration):
@poscheck_model
def __init__(self,
id_=None,
name=None,
description=None,
created_at=None,
modified_at=None,
custom_data=None,
width=None,
height=None,
bitrate=None,
rate=None,
pixel_format=None,
color_config=None,
sample_aspect_ratio_numerator=None,
sample_aspect_ratio_denominator=None,
display_aspect_ratio=None,
encoding_mode=None,
key_placement_mode=None,
adaptive_quant_mode=None,
lag_in_frames=None,
min_q=None,
max_q=None,
undershoot_pct=None,
overshoot_pct=None,
client_buffer_size=None,
client_initial_buffer_size=None,
client_optimal_buffer_size=None,
tile_columns=None,
tile_rows=None,
is_automatic_alt_ref_frames_enabled=None,
arnr_max_frames=None,
arnr_strength=None,
max_intra_rate=None,
is_lossless=None,
is_frame_parallel=None,
sharpness=None,
is_frame_boost_enabled=None,
noise_sensitivity=None,
min_gf_interval=None,
max_gf_interval=None,
num_tile_groups=None,
mtu_size=None):
# type: (string_types, string_types, string_types, datetime, datetime, dict, int, int, int, float, PixelFormat, ColorConfig, int, int, DisplayAspectRatio, EncodingMode, Av1KeyPlacementMode, Av1AdaptiveQuantMode, int, int, int, int, int, int, int, int, int, int, bool, int, int, int, bool, bool, int, bool, bool, int, int, int, int) -> None
super(Av1VideoConfiguration, self).__init__(id_=id_, name=name, description=description, created_at=created_at, modified_at=modified_at, custom_data=custom_data, width=width, height=height, bitrate=bitrate, rate=rate, pixel_format=pixel_format, color_config=color_config, sample_aspect_ratio_numerator=sample_aspect_ratio_numerator, sample_aspect_ratio_denominator=sample_aspect_ratio_denominator, display_aspect_ratio=display_aspect_ratio, encoding_mode=encoding_mode)
self._key_placement_mode = None
self._adaptive_quant_mode = None
self._lag_in_frames = None
self._min_q = None
self._max_q = None
self._undershoot_pct = None
self._overshoot_pct = None
self._client_buffer_size = None
self._client_initial_buffer_size = None
self._client_optimal_buffer_size = None
self._tile_columns = None
self._tile_rows = None
self._is_automatic_alt_ref_frames_enabled = None
self._arnr_max_frames = None
self._arnr_strength = None
self._max_intra_rate = None
self._is_lossless = None
self._is_frame_parallel = None
self._sharpness = None
self._is_frame_boost_enabled = None
self._noise_sensitivity = None
self._min_gf_interval = None
self._max_gf_interval = None
self._num_tile_groups = None
self._mtu_size = None
self.discriminator = None
if key_placement_mode is not None:
self.key_placement_mode = key_placement_mode
if adaptive_quant_mode is not None:
self.adaptive_quant_mode = adaptive_quant_mode
if lag_in_frames is not None:
self.lag_in_frames = lag_in_frames
if min_q is not None:
self.min_q = min_q
if max_q is not None:
self.max_q = max_q
if undershoot_pct is not None:
self.undershoot_pct = undershoot_pct
if overshoot_pct is not None:
self.overshoot_pct = overshoot_pct
if client_buffer_size is not None:
self.client_buffer_size = client_buffer_size
if client_initial_buffer_size is not None:
self.client_initial_buffer_size = client_initial_buffer_size
if client_optimal_buffer_size is not None:
self.client_optimal_buffer_size = client_optimal_buffer_size
if tile_columns is not None:
self.tile_columns = tile_columns
if tile_rows is not None:
self.tile_rows = tile_rows
if is_automatic_alt_ref_frames_enabled is not None:
self.is_automatic_alt_ref_frames_enabled = is_automatic_alt_ref_frames_enabled
if arnr_max_frames is not None:
self.arnr_max_frames = arnr_max_frames
if arnr_strength is not None:
self.arnr_strength = arnr_strength
if max_intra_rate is not None:
self.max_intra_rate = max_intra_rate
if is_lossless is not None:
self.is_lossless = is_lossless
if is_frame_parallel is not None:
self.is_frame_parallel = is_frame_parallel
if sharpness is not None:
self.sharpness = sharpness
if is_frame_boost_enabled is not None:
self.is_frame_boost_enabled = is_frame_boost_enabled
if noise_sensitivity is not None:
self.noise_sensitivity = noise_sensitivity
if min_gf_interval is not None:
self.min_gf_interval = min_gf_interval
if max_gf_interval is not None:
self.max_gf_interval = max_gf_interval
if num_tile_groups is not None:
self.num_tile_groups = num_tile_groups
if mtu_size is not None:
self.mtu_size = mtu_size
@property
def openapi_types(self):
types = {}
if hasattr(super(Av1VideoConfiguration, self), 'openapi_types'):
types = getattr(super(Av1VideoConfiguration, self), 'openapi_types')
types.update({
'key_placement_mode': 'Av1KeyPlacementMode',
'adaptive_quant_mode': 'Av1AdaptiveQuantMode',
'lag_in_frames': 'int',
'min_q': 'int',
'max_q': 'int',
'undershoot_pct': 'int',
'overshoot_pct': 'int',
'client_buffer_size': 'int',
'client_initial_buffer_size': 'int',
'client_optimal_buffer_size': 'int',
'tile_columns': 'int',
'tile_rows': 'int',
'is_automatic_alt_ref_frames_enabled': 'bool',
'arnr_max_frames': 'int',
'arnr_strength': 'int',
'max_intra_rate': 'int',
'is_lossless': 'bool',
'is_frame_parallel': 'bool',
'sharpness': 'int',
'is_frame_boost_enabled': 'bool',
'noise_sensitivity': 'bool',
'min_gf_interval': 'int',
'max_gf_interval': 'int',
'num_tile_groups': 'int',
'mtu_size': 'int'
})
return types
@property
def attribute_map(self):
attributes = {}
if hasattr(super(Av1VideoConfiguration, self), 'attribute_map'):
attributes = getattr(super(Av1VideoConfiguration, self), 'attribute_map')
attributes.update({
'key_placement_mode': 'keyPlacementMode',
'adaptive_quant_mode': 'adaptiveQuantMode',
'lag_in_frames': 'lagInFrames',
'min_q': 'minQ',
'max_q': 'maxQ',
'undershoot_pct': 'undershootPct',
'overshoot_pct': 'overshootPct',
'client_buffer_size': 'clientBufferSize',
'client_initial_buffer_size': 'clientInitialBufferSize',
'client_optimal_buffer_size': 'clientOptimalBufferSize',
'tile_columns': 'tileColumns',
'tile_rows': 'tileRows',
'is_automatic_alt_ref_frames_enabled': 'isAutomaticAltRefFramesEnabled',
'arnr_max_frames': 'arnrMaxFrames',
'arnr_strength': 'arnrStrength',
'max_intra_rate': 'maxIntraRate',
'is_lossless': 'isLossless',
'is_frame_parallel': 'isFrameParallel',
'sharpness': 'sharpness',
'is_frame_boost_enabled': 'isFrameBoostEnabled',
'noise_sensitivity': 'noiseSensitivity',
'min_gf_interval': 'minGfInterval',
'max_gf_interval': 'maxGfInterval',
'num_tile_groups': 'numTileGroups',
'mtu_size': 'mtuSize'
})
return attributes
@property
def key_placement_mode(self):
# type: () -> Av1KeyPlacementMode
"""Gets the key_placement_mode of this Av1VideoConfiguration.
:return: The key_placement_mode of this Av1VideoConfiguration.
:rtype: Av1KeyPlacementMode
"""
return self._key_placement_mode
@key_placement_mode.setter
def key_placement_mode(self, key_placement_mode):
# type: (Av1KeyPlacementMode) -> None
"""Sets the key_placement_mode of this Av1VideoConfiguration.
:param key_placement_mode: The key_placement_mode of this Av1VideoConfiguration.
:type: Av1KeyPlacementMode
"""
if key_placement_mode is not None:
if not isinstance(key_placement_mode, Av1KeyPlacementMode):
raise TypeError("Invalid type for `key_placement_mode`, type has to be `Av1KeyPlacementMode`")
self._key_placement_mode = key_placement_mode
@property
def adaptive_quant_mode(self):
# type: () -> Av1AdaptiveQuantMode
"""Gets the adaptive_quant_mode of this Av1VideoConfiguration.
:return: The adaptive_quant_mode of this Av1VideoConfiguration.
:rtype: Av1AdaptiveQuantMode
"""
return self._adaptive_quant_mode
@adaptive_quant_mode.setter
def adaptive_quant_mode(self, adaptive_quant_mode):
# type: (Av1AdaptiveQuantMode) -> None
"""Sets the adaptive_quant_mode of this Av1VideoConfiguration.
:param adaptive_quant_mode: The adaptive_quant_mode of this Av1VideoConfiguration.
:type: Av1AdaptiveQuantMode
"""
if adaptive_quant_mode is not None:
if not isinstance(adaptive_quant_mode, Av1AdaptiveQuantMode):
raise TypeError("Invalid type for `adaptive_quant_mode`, type has to be `Av1AdaptiveQuantMode`")
self._adaptive_quant_mode = adaptive_quant_mode
@property
def lag_in_frames(self):
# type: () -> int
"""Gets the lag_in_frames of this Av1VideoConfiguration.
Number of frames to look ahead for alternate reference frame selection
:return: The lag_in_frames of this Av1VideoConfiguration.
:rtype: int
"""
return self._lag_in_frames
@lag_in_frames.setter
def lag_in_frames(self, lag_in_frames):
# type: (int) -> None
"""Sets the lag_in_frames of this Av1VideoConfiguration.
Number of frames to look ahead for alternate reference frame selection
:param lag_in_frames: The lag_in_frames of this Av1VideoConfiguration.
:type: int
"""
if lag_in_frames is not None:
if lag_in_frames is not None and lag_in_frames > 25:
raise ValueError("Invalid value for `lag_in_frames`, must be a value less than or equal to `25`")
if lag_in_frames is not None and lag_in_frames < 0:
raise ValueError("Invalid value for `lag_in_frames`, must be a value greater than or equal to `0`")
if not isinstance(lag_in_frames, int):
raise TypeError("Invalid type for `lag_in_frames`, type has to be `int`")
self._lag_in_frames = lag_in_frames
@property
def min_q(self):
# type: () -> int
"""Gets the min_q of this Av1VideoConfiguration.
Minimum (best quality) quantizer
:return: The min_q of this Av1VideoConfiguration.
:rtype: int
"""
return self._min_q
@min_q.setter
def min_q(self, min_q):
# type: (int) -> None
"""Sets the min_q of this Av1VideoConfiguration.
Minimum (best quality) quantizer
:param min_q: The min_q of this Av1VideoConfiguration.
:type: int
"""
if min_q is not None:
if not isinstance(min_q, int):
raise TypeError("Invalid type for `min_q`, type has to be `int`")
self._min_q = min_q
@property
def max_q(self):
# type: () -> int
"""Gets the max_q of this Av1VideoConfiguration.
Maximum (worst quality) quantizer
:return: The max_q of this Av1VideoConfiguration.
:rtype: int
"""
return self._max_q
@max_q.setter
def max_q(self, max_q):
# type: (int) -> None
"""Sets the max_q of this Av1VideoConfiguration.
Maximum (worst quality) quantizer
:param max_q: The max_q of this Av1VideoConfiguration.
:type: int
"""
if max_q is not None:
if not isinstance(max_q, int):
raise TypeError("Invalid type for `max_q`, type has to be `int`")
self._max_q = max_q
@property
def undershoot_pct(self):
# type: () -> int
"""Gets the undershoot_pct of this Av1VideoConfiguration.
Rate control adaptation undershoot control
:return: The undershoot_pct of this Av1VideoConfiguration.
:rtype: int
"""
return self._undershoot_pct
@undershoot_pct.setter
def undershoot_pct(self, undershoot_pct):
# type: (int) -> None
"""Sets the undershoot_pct of this Av1VideoConfiguration.
Rate control adaptation undershoot control
:param undershoot_pct: The undershoot_pct of this Av1VideoConfiguration.
:type: int
"""
if undershoot_pct is not None:
if undershoot_pct is not None and undershoot_pct > 1000:
raise ValueError("Invalid value for `undershoot_pct`, must be a value less than or equal to `1000`")
if undershoot_pct is not None and undershoot_pct < 0:
raise ValueError("Invalid value for `undershoot_pct`, must be a value greater than or equal to `0`")
if not isinstance(undershoot_pct, int):
raise TypeError("Invalid type for `undershoot_pct`, type has to be `int`")
self._undershoot_pct = undershoot_pct
@property
def overshoot_pct(self):
# type: () -> int
"""Gets the overshoot_pct of this Av1VideoConfiguration.
Rate control adaptation overshoot control
:return: The overshoot_pct of this Av1VideoConfiguration.
:rtype: int
"""
return self._overshoot_pct
@overshoot_pct.setter
def overshoot_pct(self, overshoot_pct):
# type: (int) -> None
"""Sets the overshoot_pct of this Av1VideoConfiguration.
Rate control adaptation overshoot control
:param overshoot_pct: The overshoot_pct of this Av1VideoConfiguration.
:type: int
"""
if overshoot_pct is not None:
if overshoot_pct is not None and overshoot_pct > 1000:
raise ValueError("Invalid value for `overshoot_pct`, must be a value less than or equal to `1000`")
if overshoot_pct is not None and overshoot_pct < 0:
raise ValueError("Invalid value for `overshoot_pct`, must be a value greater than or equal to `0`")
if not isinstance(overshoot_pct, int):
raise TypeError("Invalid type for `overshoot_pct`, type has to be `int`")
self._overshoot_pct = overshoot_pct
@property
def client_buffer_size(self):
# type: () -> int
"""Gets the client_buffer_size of this Av1VideoConfiguration.
Decoder buffer size in milliseconds
:return: The client_buffer_size of this Av1VideoConfiguration.
:rtype: int
"""
return self._client_buffer_size
@client_buffer_size.setter
def client_buffer_size(self, client_buffer_size):
# type: (int) -> None
"""Sets the client_buffer_size of this Av1VideoConfiguration.
Decoder buffer size in milliseconds
:param client_buffer_size: The client_buffer_size of this Av1VideoConfiguration.
:type: int
"""
if client_buffer_size is not None:
if not isinstance(client_buffer_size, int):
raise TypeError("Invalid type for `client_buffer_size`, type has to be `int`")
self._client_buffer_size = client_buffer_size
@property
def client_initial_buffer_size(self):
# type: () -> int
"""Gets the client_initial_buffer_size of this Av1VideoConfiguration.
Decoder buffer initial size in milliseconds
:return: The client_initial_buffer_size of this Av1VideoConfiguration.
:rtype: int
"""
return self._client_initial_buffer_size
@client_initial_buffer_size.setter
def client_initial_buffer_size(self, client_initial_buffer_size):
# type: (int) -> None
"""Sets the client_initial_buffer_size of this Av1VideoConfiguration.
Decoder buffer initial size in milliseconds
:param client_initial_buffer_size: The client_initial_buffer_size of this Av1VideoConfiguration.
:type: int
"""
if client_initial_buffer_size is not None:
if not isinstance(client_initial_buffer_size, int):
raise TypeError("Invalid type for `client_initial_buffer_size`, type has to be `int`")
self._client_initial_buffer_size = client_initial_buffer_size
@property
def client_optimal_buffer_size(self):
# type: () -> int
"""Gets the client_optimal_buffer_size of this Av1VideoConfiguration.
Decoder buffer optimal size in milliseconds
:return: The client_optimal_buffer_size of this Av1VideoConfiguration.
:rtype: int
"""
return self._client_optimal_buffer_size
@client_optimal_buffer_size.setter
def client_optimal_buffer_size(self, client_optimal_buffer_size):
# type: (int) -> None
"""Sets the client_optimal_buffer_size of this Av1VideoConfiguration.
Decoder buffer optimal size in milliseconds
:param client_optimal_buffer_size: The client_optimal_buffer_size of this Av1VideoConfiguration.
:type: int
"""
if client_optimal_buffer_size is not None:
if not isinstance(client_optimal_buffer_size, int):
raise TypeError("Invalid type for `client_optimal_buffer_size`, type has to be `int`")
self._client_optimal_buffer_size = client_optimal_buffer_size
@property
def tile_columns(self):
# type: () -> int
"""Gets the tile_columns of this Av1VideoConfiguration.
Number of tile columns to use, log2
:return: The tile_columns of this Av1VideoConfiguration.
:rtype: int
"""
return self._tile_columns
@tile_columns.setter
def tile_columns(self, tile_columns):
# type: (int) -> None
"""Sets the tile_columns of this Av1VideoConfiguration.
Number of tile columns to use, log2
:param tile_columns: The tile_columns of this Av1VideoConfiguration.
:type: int
"""
if tile_columns is not None:
if tile_columns is not None and tile_columns > 6:
raise ValueError("Invalid value for `tile_columns`, must be a value less than or equal to `6`")
if tile_columns is not None and tile_columns < 0:
raise ValueError("Invalid value for `tile_columns`, must be a value greater than or equal to `0`")
if not isinstance(tile_columns, int):
raise TypeError("Invalid type for `tile_columns`, type has to be `int`")
self._tile_columns = tile_columns
@property
def tile_rows(self):
# type: () -> int
"""Gets the tile_rows of this Av1VideoConfiguration.
Number of tile rows to use, log2
:return: The tile_rows of this Av1VideoConfiguration.
:rtype: int
"""
return self._tile_rows
@tile_rows.setter
def tile_rows(self, tile_rows):
# type: (int) -> None
"""Sets the tile_rows of this Av1VideoConfiguration.
Number of tile rows to use, log2
:param tile_rows: The tile_rows of this Av1VideoConfiguration.
:type: int
"""
if tile_rows is not None:
if tile_rows is not None and tile_rows > 2:
raise ValueError("Invalid value for `tile_rows`, must be a value less than or equal to `2`")
if tile_rows is not None and tile_rows < 0:
raise ValueError("Invalid value for `tile_rows`, must be a value greater than or equal to `0`")
if not isinstance(tile_rows, int):
raise TypeError("Invalid type for `tile_rows`, type has to be `int`")
self._tile_rows = tile_rows
@property
def is_automatic_alt_ref_frames_enabled(self):
# type: () -> bool
"""Gets the is_automatic_alt_ref_frames_enabled of this Av1VideoConfiguration.
Enable automatic set and use of alt-ref frames
:return: The is_automatic_alt_ref_frames_enabled of this Av1VideoConfiguration.
:rtype: bool
"""
return self._is_automatic_alt_ref_frames_enabled
@is_automatic_alt_ref_frames_enabled.setter
def is_automatic_alt_ref_frames_enabled(self, is_automatic_alt_ref_frames_enabled):
# type: (bool) -> None
"""Sets the is_automatic_alt_ref_frames_enabled of this Av1VideoConfiguration.
Enable automatic set and use of alt-ref frames
:param is_automatic_alt_ref_frames_enabled: The is_automatic_alt_ref_frames_enabled of this Av1VideoConfiguration.
:type: bool
"""
if is_automatic_alt_ref_frames_enabled is not None:
if not isinstance(is_automatic_alt_ref_frames_enabled, bool):
raise TypeError("Invalid type for `is_automatic_alt_ref_frames_enabled`, type has to be `bool`")
self._is_automatic_alt_ref_frames_enabled = is_automatic_alt_ref_frames_enabled
@property
def arnr_max_frames(self):
# type: () -> int
"""Gets the arnr_max_frames of this Av1VideoConfiguration.
The max number of frames to create arf
:return: The arnr_max_frames of this Av1VideoConfiguration.
:rtype: int
"""
return self._arnr_max_frames
@arnr_max_frames.setter
def arnr_max_frames(self, arnr_max_frames):
# type: (int) -> None
"""Sets the arnr_max_frames of this Av1VideoConfiguration.
The max number of frames to create arf
:param arnr_max_frames: The arnr_max_frames of this Av1VideoConfiguration.
:type: int
"""
if arnr_max_frames is not None:
if not isinstance(arnr_max_frames, int):
raise TypeError("Invalid type for `arnr_max_frames`, type has to be `int`")
self._arnr_max_frames = arnr_max_frames
@property
def arnr_strength(self):
# type: () -> int
"""Gets the arnr_strength of this Av1VideoConfiguration.
The filter strength for the arf
:return: The arnr_strength of this Av1VideoConfiguration.
:rtype: int
"""
return self._arnr_strength
@arnr_strength.setter
def arnr_strength(self, arnr_strength):
# type: (int) -> None
"""Sets the arnr_strength of this Av1VideoConfiguration.
The filter strength for the arf
:param arnr_strength: The arnr_strength of this Av1VideoConfiguration.
:type: int
"""
if arnr_strength is not None:
if not isinstance(arnr_strength, int):
raise TypeError("Invalid type for `arnr_strength`, type has to be `int`")
self._arnr_strength = arnr_strength
@property
def max_intra_rate(self):
# type: () -> int
"""Gets the max_intra_rate of this Av1VideoConfiguration.
Maximum data rate for intra frames, expressed as a percentage of the average per-frame bitrate. Default value 0 meaning unlimited
:return: The max_intra_rate of this Av1VideoConfiguration.
:rtype: int
"""
return self._max_intra_rate
@max_intra_rate.setter
def max_intra_rate(self, max_intra_rate):
# type: (int) -> None
"""Sets the max_intra_rate of this Av1VideoConfiguration.
Maximum data rate for intra frames, expressed as a percentage of the average per-frame bitrate. Default value 0 meaning unlimited
:param max_intra_rate: The max_intra_rate of this Av1VideoConfiguration.
:type: int
"""
if max_intra_rate is not None:
if not isinstance(max_intra_rate, int):
raise TypeError("Invalid type for `max_intra_rate`, type has to be `int`")
self._max_intra_rate = max_intra_rate
@property
def is_lossless(self):
# type: () -> bool
"""Gets the is_lossless of this Av1VideoConfiguration.
Lossless encoding mode
:return: The is_lossless of this Av1VideoConfiguration.
:rtype: bool
"""
return self._is_lossless
@is_lossless.setter
def is_lossless(self, is_lossless):
# type: (bool) -> None
"""Sets the is_lossless of this Av1VideoConfiguration.
Lossless encoding mode
:param is_lossless: The is_lossless of this Av1VideoConfiguration.
:type: bool
"""
if is_lossless is not None:
if not isinstance(is_lossless, bool):
raise TypeError("Invalid type for `is_lossless`, type has to be `bool`")
self._is_lossless = is_lossless
@property
def is_frame_parallel(self):
# type: () -> bool
"""Gets the is_frame_parallel of this Av1VideoConfiguration.
Enable frame parallel decoding feature
:return: The is_frame_parallel of this Av1VideoConfiguration.
:rtype: bool
"""
return self._is_frame_parallel
@is_frame_parallel.setter
def is_frame_parallel(self, is_frame_parallel):
# type: (bool) -> None
"""Sets the is_frame_parallel of this Av1VideoConfiguration.
Enable frame parallel decoding feature
:param is_frame_parallel: The is_frame_parallel of this Av1VideoConfiguration.
:type: bool
"""
if is_frame_parallel is not None:
if not isinstance(is_frame_parallel, bool):
raise TypeError("Invalid type for `is_frame_parallel`, type has to be `bool`")
self._is_frame_parallel = is_frame_parallel
@property
def sharpness(self):
# type: () -> int
"""Gets the sharpness of this Av1VideoConfiguration.
Sets the sharpness
:return: The sharpness of this Av1VideoConfiguration.
:rtype: int
"""
return self._sharpness
@sharpness.setter
def sharpness(self, sharpness):
# type: (int) -> None
"""Sets the sharpness of this Av1VideoConfiguration.
Sets the sharpness
:param sharpness: The sharpness of this Av1VideoConfiguration.
:type: int
"""
if sharpness is not None:
if not isinstance(sharpness, int):
raise TypeError("Invalid type for `sharpness`, type has to be `int`")
self._sharpness = sharpness
@property
def is_frame_boost_enabled(self):
# type: () -> bool
"""Gets the is_frame_boost_enabled of this Av1VideoConfiguration.
Enable quality boost by lowering frame level Q periodically
:return: The is_frame_boost_enabled of this Av1VideoConfiguration.
:rtype: bool
"""
return self._is_frame_boost_enabled
@is_frame_boost_enabled.setter
def is_frame_boost_enabled(self, is_frame_boost_enabled):
# type: (bool) -> None
"""Sets the is_frame_boost_enabled of this Av1VideoConfiguration.
Enable quality boost by lowering frame level Q periodically
:param is_frame_boost_enabled: The is_frame_boost_enabled of this Av1VideoConfiguration.
:type: bool
"""
if is_frame_boost_enabled is not None:
if not isinstance(is_frame_boost_enabled, bool):
raise TypeError("Invalid type for `is_frame_boost_enabled`, type has to be `bool`")
self._is_frame_boost_enabled = is_frame_boost_enabled
@property
def noise_sensitivity(self):
# type: () -> bool
"""Gets the noise_sensitivity of this Av1VideoConfiguration.
Enable noise sensitivity on Y channel
:return: The noise_sensitivity of this Av1VideoConfiguration.
:rtype: bool
"""
return self._noise_sensitivity
@noise_sensitivity.setter
def noise_sensitivity(self, noise_sensitivity):
# type: (bool) -> None
"""Sets the noise_sensitivity of this Av1VideoConfiguration.
Enable noise sensitivity on Y channel
:param noise_sensitivity: The noise_sensitivity of this Av1VideoConfiguration.
:type: bool
"""
if noise_sensitivity is not None:
if not isinstance(noise_sensitivity, bool):
raise TypeError("Invalid type for `noise_sensitivity`, type has to be `bool`")
self._noise_sensitivity = noise_sensitivity
@property
def min_gf_interval(self):
# type: () -> int
"""Gets the min_gf_interval of this Av1VideoConfiguration.
Minimum interval between GF/ARF frames
:return: The min_gf_interval of this Av1VideoConfiguration.
:rtype: int
"""
return self._min_gf_interval
@min_gf_interval.setter
def min_gf_interval(self, min_gf_interval):
# type: (int) -> None
"""Sets the min_gf_interval of this Av1VideoConfiguration.
Minimum interval between GF/ARF frames
:param min_gf_interval: The min_gf_interval of this Av1VideoConfiguration.
:type: int
"""
if min_gf_interval is not None:
if not isinstance(min_gf_interval, int):
raise TypeError("Invalid type for `min_gf_interval`, type has to be `int`")
self._min_gf_interval = min_gf_interval
@property
def max_gf_interval(self):
# type: () -> int
"""Gets the max_gf_interval of this Av1VideoConfiguration.
Maximum interval between GF/ARF frames
:return: The max_gf_interval of this Av1VideoConfiguration.
:rtype: int
"""
return self._max_gf_interval
@max_gf_interval.setter
def max_gf_interval(self, max_gf_interval):
# type: (int) -> None
"""Sets the max_gf_interval of this Av1VideoConfiguration.
Maximum interval between GF/ARF frames
:param max_gf_interval: The max_gf_interval of this Av1VideoConfiguration.
:type: int
"""
if max_gf_interval is not None:
if not isinstance(max_gf_interval, int):
raise TypeError("Invalid type for `max_gf_interval`, type has to be `int`")
self._max_gf_interval = max_gf_interval
@property
def num_tile_groups(self):
# type: () -> int
"""Gets the num_tile_groups of this Av1VideoConfiguration.
Maximum number of tile groups
:return: The num_tile_groups of this Av1VideoConfiguration.
:rtype: int
"""
return self._num_tile_groups
@num_tile_groups.setter
def num_tile_groups(self, num_tile_groups):
# type: (int) -> None
"""Sets the num_tile_groups of this Av1VideoConfiguration.
Maximum number of tile groups
:param num_tile_groups: The num_tile_groups of this Av1VideoConfiguration.
:type: int
"""
if num_tile_groups is not None:
if not isinstance(num_tile_groups, int):
raise TypeError("Invalid type for `num_tile_groups`, type has to be `int`")
self._num_tile_groups = num_tile_groups
@property
def mtu_size(self):
# type: () -> int
"""Gets the mtu_size of this Av1VideoConfiguration.
Maximum number of bytes in a tile group
:return: The mtu_size of this Av1VideoConfiguration.
:rtype: int
"""
return self._mtu_size
@mtu_size.setter
def mtu_size(self, mtu_size):
# type: (int) -> None
"""Sets the mtu_size of this Av1VideoConfiguration.
Maximum number of bytes in a tile group
:param mtu_size: The mtu_size of this Av1VideoConfiguration.
:type: int
"""
if mtu_size is not None:
if not isinstance(mtu_size, int):
raise TypeError("Invalid type for `mtu_size`, type has to be `int`")
self._mtu_size = mtu_size
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
if hasattr(super(Av1VideoConfiguration, self), "to_dict"):
result = super(Av1VideoConfiguration, self).to_dict()
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if value is None:
continue
if isinstance(value, list):
if len(value) == 0:
continue
result[self.attribute_map.get(attr)] = [y.value if isinstance(y, Enum) else y for y in [x.to_dict() if hasattr(x, "to_dict") else x for x in value]]
elif hasattr(value, "to_dict"):
result[self.attribute_map.get(attr)] = value.to_dict()
elif isinstance(value, Enum):
result[self.attribute_map.get(attr)] = value.value
elif isinstance(value, dict):
result[self.attribute_map.get(attr)] = {k: (v.to_dict() if hasattr(v, "to_dict") else v) for (k, v) in value.items()}
else:
result[self.attribute_map.get(attr)] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Av1VideoConfiguration):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
python
|
#!/usr/bin/env python
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# This script takes an existing tfrecord dataset and generates a new one
# with the images resized.
# E.g.,
# python tensorflow_image_resizer.py \
# -i /path/to/imagenet-full-tfrecord/ -o /path/to/imagenet-new-tfrecord/ --subset_name train
# python tensorflow_image_resizer.py \
# -i /path/to/imagenet-full-tfrecord/ -o /path/to/imagenet-new-tfrecord/ --subset_name validation
from __future__ import print_function
from builtins import range
from multiprocessing import cpu_count
import os
import tensorflow as tf
import time
global FLAGS
class Dataset(object):
def __init__(self, name, data_dir=None):
self.name = name
if data_dir is None:
data_dir = FLAGS.data_dir
self.data_dir = data_dir
def data_files(self, subset):
tf_record_pattern = os.path.join(self.data_dir, '%s-*' % subset)
data_files = tf.gfile.Glob(tf_record_pattern)
if not data_files:
raise RuntimeError('No files found for %s dataset at %s' %
(subset, self.data_dir))
return data_files
def reader(self):
return tf.TFRecordReader()
def num_classes(self):
raise NotImplementedError
def num_examples_per_epoch(self, subset):
raise NotImplementedError
def __str__(self):
return self.name
class ImagenetData(Dataset):
def __init__(self, data_dir=None):
super(ImagenetData, self).__init__('ImageNet', data_dir)
def num_classes(self):
return 1000
def num_examples_per_epoch(self, subset):
if subset == 'train': return 1281167
elif subset == 'validation': return 50000
else: raise ValueError('Invalid data subset "%s"' % subset)
class FlowersData(Dataset):
def __init__(self, data_dir=None):
super(FlowersData, self).__init__('Flowers', data_dir)
def num_classes(self):
return 5
def num_examples_per_epoch(self, subset):
if subset == 'train': return 3170
elif subset == 'validation': return 500
else: raise ValueError('Invalid data subset "%s"' % subset)
def resize_example(example):
# Dense features in Example proto.
feature_map = {
'image/encoded': tf.FixedLenFeature([], dtype=tf.string, default_value=''),
'image/height': tf.FixedLenFeature([1], dtype=tf.int64, default_value=-1),
'image/width': tf.FixedLenFeature([1], dtype=tf.int64, default_value=-1),
'image/channels': tf.FixedLenFeature([1], dtype=tf.int64, default_value=-1),
'image/colorspace': tf.FixedLenFeature([], dtype=tf.string, default_value=''),
'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64, default_value=-1),
'image/class/text': tf.FixedLenFeature([], dtype=tf.string, default_value=''),
'image/class/synset': tf.FixedLenFeature([], dtype=tf.string, default_value=''),
'image/format': tf.FixedLenFeature([], dtype=tf.string, default_value=''),
'image/filename': tf.FixedLenFeature([], dtype=tf.string, default_value=''),
}
sparse_float32 = tf.VarLenFeature(dtype=tf.float32)
# Sparse features in Example proto.
feature_map.update(
#{k: sparse_float32 for k in ['image/object/bbox/xmin',
{k: tf.VarLenFeature(dtype=tf.float32) for k in ['image/object/bbox/xmin',
'image/object/bbox/ymin',
'image/object/bbox/xmax',
'image/object/bbox/ymax']})
example = tf.parse_single_example(example, feature_map)
encoded_image = example['image/encoded']
decoded = tf.image.decode_jpeg(encoded_image, channels = 3)
#decoded = tf.Print(decoded, [tf.shape(decoded)])
if FLAGS.stretch:
# Stretch to a fixed square
new_height, new_width = FLAGS.size, FLAGS.size
else:
# Preserve aspect ratio and only resize if shorter side > FLAGS.size
shape = tf.shape(decoded)
h, w = tf.to_float(shape[0]), tf.to_float(shape[1])
min_side = tf.minimum(h, w)
scale = float(FLAGS.size) / min_side
scale = tf.minimum(scale, 1.0) # Shrink only
# HACK TESTING upscaling small images to 320
#dnscale = tf.minimum(float(FLAGS.size) / min_side, 1.0)
#upscale = tf.maximum(320. / min_side, 1.0)
#scale = dnscale * upscale
new_height = tf.cast(scale * h, tf.int32)
new_width = tf.cast(scale * w, tf.int32)
#decoded = tf.Print(decoded, [new_height, new_width])
resized_float = tf.image.resize_images(
images = decoded,
size = [new_height, new_width],
method = tf.image.ResizeMethod.BILINEAR,
align_corners = False)
#resized_float = tf.Print(resized_float, [tf.reduce_min(resized_float),
# tf.reduce_max(resized_float)])
resized_uint8 = tf.cast(resized_float, tf.uint8)
encoded_resized = tf.image.encode_jpeg(
resized_uint8,
format='rgb',
quality=FLAGS.quality,
progressive=False,
optimize_size=True,
chroma_downsampling=True,
density_unit='in')
"""
# HACK TESTING
#print 'xmin, xmax', example['image/object/bbox/xmin'], example['image/object/bbox/xmin']
#example['image/object/bbox/xmin'] = tf.Print(example['image/object/bbox/xmin'].values,
# [example['image/object/bbox/xmin'].values])
# HACK TESTING
print '*******', example['image/object/bbox/xmin'].values
bbox = tf.stack([example['image/object/bbox/%s'%x].values
for x in ['ymin', 'xmin', 'ymax', 'xmax']])
bbox = tf.transpose(tf.expand_dims(bbox, 0), [0,2,1])
encoded_resized = tf.Print(encoded_resized,
[bbox, example['image/object/bbox/xmin'].values])
"""
return [encoded_resized,
example['image/height'],
example['image/width'],
example['image/channels'],
example['image/colorspace'],
example['image/class/label'],
example['image/class/text'],
example['image/class/synset'],
example['image/format'],
example['image/filename'],
example['image/object/bbox/xmin'],
example['image/object/bbox/ymin'],
example['image/object/bbox/xmax'],
example['image/object/bbox/ymax']]
def int64_feature(value):
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def bytes_feature(value):
if not isinstance(value, list):
value = [value]
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def float_feature(value):
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
if __name__ == "__main__":
import argparse
import glob
import sys
global FLAGS
cmdline = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
cmdline.add_argument('-i', '--input_dir', required=True)
cmdline.add_argument('-o', '--output_dir', required=True)
cmdline.add_argument('-f', '--force', action="store_true")
cmdline.add_argument('-s', '--subset_name', default='train')
cmdline.add_argument('-R', '--stretch', action="store_true")
cmdline.add_argument('-d', '--dataset_name', default=None)
cmdline.add_argument('-r', '--size', default=480, type=int)
cmdline.add_argument('-Q', '--quality', default=85, type=int)
cmdline.add_argument('--start_offset', default=0, type=int)
cmdline.add_argument('--num_preprocess_threads', default=0, type=int,
help="""Number of preprocessing threads.""")
cmdline.add_argument('--num_intra_threads', default=0, type=int,
help="""Number of threads to use for intra-op
parallelism. If set to 0, the system will pick
an appropriate number.""")
cmdline.add_argument('--num_inter_threads', default=0, type=int,
help="""Number of threads to use for inter-op
parallelism. If set to 0, the system will pick
an appropriate number.""")
FLAGS, unknown_args = cmdline.parse_known_args()
if not FLAGS.num_preprocess_threads:
FLAGS.num_preprocess_threads = cpu_count()
if FLAGS.dataset_name is None:
if "imagenet" in FLAGS.input_dir: FLAGS.dataset_name = "imagenet"
elif "flowers" in FLAGS.input_dir: FLAGS.dataset_name = "flowers"
else: raise ValueError("Could not identify name of dataset. Please specify with --data_name option.")
if FLAGS.dataset_name == "imagenet": dataset = ImagenetData(FLAGS.input_dir)
elif FLAGS.dataset_name == "flowers": dataset = FlowersData(FLAGS.input_dir)
else: raise ValueError("Unknown dataset. Must be one of imagenet or flowers.")
infiles = dataset.data_files(FLAGS.subset_name)
num_shards = len(infiles)
infiles = infiles[FLAGS.start_offset:]
num_examples = dataset.num_examples_per_epoch(FLAGS.subset_name)
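# Ceiling division: number of examples written per output shard, rounded up so
# that all examples are covered across num_shards shards.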
examples_per_shard = (num_examples-1) // num_shards + 1
print(" num_preprocess_threads : {}\n examples_per_shard is {}\n "
"num_intra_threads is {}\n num_inter_threads is {}".format(FLAGS.num_preprocess_threads, examples_per_shard,
FLAGS.num_inter_threads, FLAGS.num_intra_threads))
config = tf.ConfigProto(
inter_op_parallelism_threads = FLAGS.num_inter_threads,
intra_op_parallelism_threads = FLAGS.num_intra_threads)
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
filename_queue = tf.train.string_input_producer(
string_tensor = infiles,
shuffle = False,
capacity = examples_per_shard * FLAGS.num_preprocess_threads,
shared_name = 'filename_queue',
name = 'filename_queue',
num_epochs = 1)
reader = tf.TFRecordReader()
_, read_op = reader.read(filename_queue)
examples_queue = tf.FIFOQueue(
capacity = 2 * examples_per_shard * FLAGS.num_preprocess_threads,
dtypes=[tf.string])
example_enqueue_op = examples_queue.enqueue(read_op)
tf.train.queue_runner.add_queue_runner(
tf.train.QueueRunner(examples_queue, [example_enqueue_op]))
example_dequeue_op = examples_queue.dequeue()
resized_batch = resize_example(example_dequeue_op)
"""
resized_example_ops = []
#output_queue = tf.FIFOQueue(
# capacity=2*examples_per_shard * FLAGS.num_preprocess_threads,
# dtypes=[tf.string])
#output_enqueue_ops = []
for t in xrange(FLAGS.num_preprocess_threads):
output = resize_example(example_dequeue_op)
resized_example_ops.append(output)
#output_enqueue_ops.append(output_queue.enqueue(output))
#output_qr = tf.train.QueueRunner(output_queue, [output_enqueue_op])
#output_dequeue_op = output_queue.dequeue()
resized_batch = tf.train.batch_join(
resized_example_ops,
batch_size = examples_per_shard,
capacity = 3 * examples_per_shard)
print resized_batch
"""
print("Initializing")
#init = tf.initialize_local_variables()
init = tf.local_variables_initializer()
sess.run(init)
coordinator = tf.train.Coordinator()
queue_threads = tf.train.start_queue_runners(sess=sess, coord=coordinator)
print("Running")
batch_num = FLAGS.start_offset
while not coordinator.should_stop():
batch_num += 1
print(batch_num)
output_filename = '%s-%05d-of-%05d' % (FLAGS.subset_name, batch_num, num_shards)
output_file = os.path.join(FLAGS.output_dir, output_filename)
if not os.path.exists(FLAGS.output_dir):
os.mkdir(FLAGS.output_dir)
if os.path.exists(output_file) and not FLAGS.force:
raise IOError("Output file already exists (pass -f to overwrite): " + output_file)
with tf.python_io.TFRecordWriter(output_file) as writer:
for i in range(examples_per_shard):
#print sess.run([t.op for t in resized_batch])
encoded_images, heights, widths, channels, colorspaces, \
labels, texts, synsets, img_format, img_filename, \
xmin, ymin, xmax, ymax = \
sess.run(resized_batch)
#output_filename = '%s-%05d-of-%05d' % (FLAGS.subset_name, batch_num, num_shards)
#output_file = os.path.join(FLAGS.output_dir, output_filename)
#with tf.python_io.TFRecordWriter(output_file) as writer:
#for rec in xrange(len(encoded_images)):
example = tf.train.Example(features=tf.train.Features(feature={
'image/encoded': bytes_feature(encoded_images),
'image/height': int64_feature(heights[0]),
'image/width': int64_feature(widths[0]),
'image/channels': int64_feature(channels[0]),
'image/colorspace': bytes_feature(colorspaces),
'image/class/label': int64_feature(labels[0]),
'image/class/text': bytes_feature(texts),
'image/class/synset': bytes_feature(synsets),
'image/format': bytes_feature(img_format),
'image/filename': bytes_feature(img_filename),
'image/object/bbox/xmin': float_feature(xmin.values.tolist()),
'image/object/bbox/ymin': float_feature(ymin.values.tolist()),
'image/object/bbox/xmax': float_feature(xmax.values.tolist()),
'image/object/bbox/ymax': float_feature(ymax.values.tolist()) }))
writer.write(example.SerializeToString())
coordinator.request_stop()
coordinator.join(queue_threads, stop_grace_period_secs=5.)
sess.close()
|
python
|
import hassapi as hass
#
# App to turn lights on and off at sunrise and sunset
#
# Args:
#
# on_scene: scene to activate at sunset
# off_scene: scene to activate at sunrise
class OutsideLights(hass.Hass):
def initialize(self):
# Run at Sunrise
self.run_at_sunrise(self.sunrise_cb)
# Run at Sunset
self.run_at_sunset(self.sunset_cb)
def sunrise_cb(self, kwargs):
self.log("OutsideLights: Sunrise Triggered")
self.cancel_timers()
self.turn_on(self.args["off_scene"])
def sunset_cb(self, kwargs):
self.log("OutsideLights: Sunset Triggered")
self.cancel_timers()
self.turn_on(self.args["on_scene"])
def cancel_timers(self):
if "timers" in self.args:
apps = self.args["timers"].split(",")
for app in apps:
App = self.get_app(app)
App.cancel()
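# A minimal example apps.yaml entry for this app (illustrative sketch only; the
# app name and scene entity IDs below are assumptions, not part of this module):
#
# outside_lights:
#   module: outside_lights
#   class: OutsideLights
#   on_scene: scene.outside_lights_on
#   off_scene: scene.outside_lights_off
#   timers: some_timer_app   # optional: comma-separated apps whose cancel() is called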
|
python
|
#Hello World python script
print("Hello World")
for i in range(0, 100, 5):
print(i**2)
# y = 'Hello'
# y * 5
# y * 5.0
w = 5 / 3
x = 5.0 / 3
y = 5.0 // 3.0
z = 5 % 3
print(w, x, y, z)
# Variable points to the "object" (unlike C it does not hold the value)
# y = 'Hello'
# z = 'hello'
# print(id(y), id(z))
# a = 'Hello'
# b = 'Hello'
# print(id(a), id(b))
# Boolean (True, non 0 value, False, 0 value)
if ( 5 ):
print("Five")
print("True")
if ( 0 ):
print("Zero")
print("False")
if (not 0):
print("Not Zero")
# combining boolean expressions with: and, or, not
# comparison operators: ==, <=, >=, !=, <, >
print( 4 > 5)
print ( 4 <= 5 )
exitCode = 'exit'
ans = ''
while (str(ans) != exitCode):
ans = input("Please enter your name, or type 'exit' to leave the loop: ")
if (str(ans) != exitCode):
print("Hello", ans)
name = ("Ali", "Abu Ali", "Joe", "Abu Joe")
print(name)
|
python
|
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import sys
from destination_google_sheets import DestinationGoogleSheets
if __name__ == "__main__":
DestinationGoogleSheets().run(sys.argv[1:])
|
python
|
import uuid
import os
from django.db import models
from django.urls import reverse
from django.utils import timezone
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from .utils import auto_delete_filefields_on_delete
class File(models.Model):
name = models.CharField(max_length=100)
date = models.CharField(max_length=100)
comments = models.CharField(max_length=100)
pdf = models.FileField(upload_to='documents/')
def delete(self, *args, **kwargs):
self.pdf.delete()
super(File, self).delete(*args, **kwargs)
def __str__(self):
return self.name
class BaseModel(models.Model):
"""
Base class for all models;
defines common metadata
"""
class Meta:
abstract = True
ordering = ('-created', ) # better choice for UI
get_latest_by = "-created"
# Primary key
id = models.UUIDField('id', default=uuid.uuid4, primary_key=True, unique=True,
null=False, blank=False, editable=False)
# metadata
created = models.DateTimeField(_('created'), null=True, blank=True, )
created_by = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('created by'), null=True, blank=True, related_name='+', on_delete=models.SET_NULL)
updated = models.DateTimeField(_('updated'), null=True, blank=True, )
updated_by = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('updated by'), null=True, blank=True, related_name='+', on_delete=models.SET_NULL)
def __str__(self):
return str(self.id)
def get_admin_url(self):
return reverse("admin:%s_%s_change" %
(self._meta.app_label, self._meta.model_name), args=(self.id,))
def get_absolute_url(self):
return self.get_admin_url()
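# NOTE: format_datetime (used by created_display/updated_display below) is not
# imported in this module; it is assumed to be a project-level date-formatting
# helper defined elsewhere.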
def created_display(self):
return format_datetime(self.created)
created_display.short_description = _('Created')
created_display.admin_order_field = 'created'
def updated_display(self):
return format_datetime(self.updated)
updated_display.short_description = _('Updated')
updated_display.admin_order_field = 'updated'
def save(self, *args, **kwargs):
today = timezone.now()
if self.created is None:
self.created = today
self.updated = today
return super(BaseModel, self).save(*args, **kwargs)
|
python
|
from aiomotorengine import StringField, IntField, BooleanField, FloatField, DateTimeField, ReferenceField, ListField
from xt_base.document.source_docs import InfoAsset
from xt_base.document.base_docs import Project
from dtlib.aio.base_mongo import MyDocument
from dtlib.tornado.account_docs import Organization
from dtlib.tornado.base_docs import OrgDataBaseDocument
class ProjectDataDocument(OrgDataBaseDocument, MyDocument):
"""
项目数据
"""
# 数据归属
project = ReferenceField(reference_document_type=Project) # 测试数据所属的项目
project_name = StringField() # 项目名称,冗余
async def set_project_tag(self, project):
"""
打上项目标记
:type project:Project
:return:
"""
organization = project.organization
self.project = project
self.project_name = project.project_name
self.organization = organization
self.org_name = organization.name
class ProjectFeedBack(ProjectDataDocument):
"""
项目相关的问题反馈
"""
msg = StringField()
# images = ListField() # 图片列表
# todo 语音和视频
label = StringField() # 关于问题的标记
status = IntField() # 处理的状态码
class ServiceStatus(MyDocument):
"""
服务器开放的服务状态
"""
__collection__ = "service_status"
info_asset = ReferenceField(reference_document_type=InfoAsset) # 所属资产,信息资产
port = IntField() # 端口号
protocol = StringField() # 协议名称
status = StringField() # 状态
version = StringField # 版本
class ApiTestData(ProjectDataDocument):
"""
测试数据统计表---因为性能问题,取消使用 2016-09-30
"""
__collection__ = "api_test_data"
was_successful = BooleanField() # 是否是成功的
total = IntField()
failures = IntField()
errors = IntField()
skipped = IntField()
run_time = StringField(max_length=1024)
class ApiTestDataNote(MyDocument):
"""
api测试详细数据,只记录失败和错误---因为性能问题,取消使用 2016-09-30
"""
__collection__ = "api_test_data_note"
apitestdata_id = StringField(max_length=1024)
test_case = StringField(max_length=1024) # 出错测试用例
explain = StringField(max_length=1024) # 目的
status = StringField(max_length=1024) # 测试状态,失败或者错误
note = StringField() # 详细记录
organization = ReferenceField(reference_document_type=Organization) # 数据所属的组织
class UnitTestDataDetail(MyDocument):
"""
api测试详细数据,只记录失败和错误--插入时没有使用
"""
test_case = StringField(max_length=1024) # 出错测试用例
explain = StringField(max_length=1024) # 目的
status = StringField(max_length=1024) # 测试状态,失败或者错误
note = StringField() # 详细记录
class UnitTestData(MyDocument):
"""
测试数据统计表--插入时没有使用,没有使用ORM
"""
__collection__ = "unit_test_data"
pro_version = StringField(max_length=1024) # 项目版本号:1.2.3.4
was_successful = BooleanField() # 是否是成功的
total = IntField()
failures = IntField()
errors = IntField()
skipped = IntField()
run_time = FloatField()
details = ListField(ReferenceField(UnitTestDataDetail))
# Data ownership
project = ReferenceField(reference_document_type=Project) # the project this test data belongs to
project_name = StringField() # project name (denormalized)
organization = ReferenceField(reference_document_type=Organization) # organization the data belongs to
class PerformReport(MyDocument):
"""
性能测试报告,已经作废,在bench中有专门的处理的了:2016-09-10
waste
"""
__collection__ = "perform_report"
server_soft_ware = StringField(max_length=2048)
server_host_name = StringField(max_length=2048)
server_port = StringField(max_length=64)
doc_path = StringField(max_length=2048)
doc_length = IntField() # document length, bytes
con_level = IntField() # concurrency level
test_time_taken = FloatField() # time taken, seconds
complete_req = IntField() # number of completed requests
failed_req = IntField() # number of failed requests
non_2xx_res = IntField() # number of non-2xx responses
total_trans = IntField() # total data transferred, bytes
html_trans = IntField() # total HTML data transferred, bytes
req_per_sec = FloatField() # requests per second
time_per_req = FloatField() # mean HTTP request response time, ms
time_per_req_across = FloatField() # time across all concurrent requests (mean transaction response time), ms
trans_rate = FloatField() # transfer rate, Kbytes/sec
organization = ReferenceField(reference_document_type=Organization) # organization the data belongs to
class SafetyTestReport(ProjectDataDocument):
"""
安全测试报告
"""
__collection__ = "safety_test_report"
# project_name = StringField() # 项目名,里面加上版本号,这个并不是具体的内部项目,和其它几种测试数据不同
hack_tool = StringField() # 用于hack的软件名称
total_cnts = IntField() # 总计次数
success_cnts = IntField() # 成功次数
success_rate = FloatField() # 成功率,冗余
time_cycle = FloatField() # 花费时间:s
crack_rate = FloatField() # 破解效率,冗余
mark = StringField() # 描述备注
# organization = ReferenceField(reference_document_type=Organization) # 组织
class ApiReqDelay(MyDocument):
"""测试接口的延时
"""
__collection__ = "api_req_delay"
domain = StringField(max_length=2048) # ms,基准域
path = StringField(max_length=2048) # ms,接口路径及参数
delay = FloatField() # ms,请求时间
http_status = IntField() # http状态值
# 数据归属
project = ReferenceField(reference_document_type=Project)
project_name = StringField() # 项目名称,冗余
organization = ReferenceField(reference_document_type=Organization)
class ExDataRecord(MyDocument):
"""
实验数据的记录表
"""
__collection__ = "ex_data_record"
# --------------数据状态值记录-----------
record_status = StringField(max_length=64) # 数据的状态:reported,filted,experiment
# --------------数据发现和登记期-----------
custom_name = StringField(max_length=256) # 用户名称
captcha_id = StringField(max_length=64)
event_start_time = DateTimeField() # weblog产生开始时间
event_end_time = DateTimeField() # weblog产生结束时间
event_reporter = StringField(max_length=256) # 事件汇报人
event_report_time = DateTimeField() # 事件汇报时间
track_class = StringField(max_length=64)
js_version = StringField(max_length=16)
js_tag_page = StringField(max_length=2048) # 在bitbucket或者tower中的当前版本的修改标记
css_version = StringField(max_length=16)
css_tag_page = StringField(max_length=2048) # 在bitbucket或者tower中的当前版本的修改标记
# --------------数据过滤的采集备案期-----------
data_collection_name = StringField(max_length=256)
producers = StringField(max_length=256)
ex_user = StringField(max_length=256) # 数据收集人
action_time = DateTimeField() # 数据备案时间
# event_time = DateTimeField()
file_name = StringField(max_length=64)
file_path = StringField(max_length=2048) # 实验数据文件所在路径,以FTP的方式来共享
file_size = IntField()
record_cnt = IntField() # 记录的数目
# --------------数据实验期-----------
researcher = StringField(max_length=256) # 数据实验人
researche_time = DateTimeField() # 数据实验时间
research_result = StringField(max_length=10240) # 验证处理结果
# experiment_id= Refer
class CurtainExData(MyDocument):
"""
Curtain项目的测试数据
"""
# answer = ListField()
# track_data = ListField()
action_user = StringField(max_length=256)
class LimitTestData(MyDocument):
"""
测试数据统计表
"""
__collection__ = "limit_test_data"
# id = ObjectId()
was_successful = BooleanField() # 是否是成功的
total = IntField()
failures = IntField()
errors = IntField()
skipped = IntField()
run_time = StringField(max_length=1024)
organization = ReferenceField(reference_document_type=Organization) # organization the data belongs to
class LimitTestDataNote(MyDocument):
"""
api测试详细数据,只记录失败和错误(作废-2016-11-23)
"""
__collection__ = "limit_test_data_note"
limittestdata_id = StringField(max_length=1024)
test_case = StringField(max_length=1024) # 出错测试用例
explain = StringField(max_length=1024) # 目的
status = StringField(max_length=1024) # 测试状态,失败或者错误
note = StringField() # 详细记录
organization = ReferenceField(reference_document_type=Organization) # 数据所属的组织
class UnitPenetrationTest(MyDocument):
"""
渗透测试详细信息
"""
test_case = StringField(max_length=1024) # 测试目的
result = StringField(max_length=1024) # 结果
class PenetrationTestData(ProjectDataDocument):
"""
渗透测试详情
"""
__collection__ = "penetration_test_data"
start_time = StringField()
use_time = FloatField()
note = StringField()
# project = ReferenceField(reference_document_type=Project) # 测试数据所属的项目
# project_name = StringField() # 项目名称,冗余
# organization = ReferenceField(reference_document_type=Organization) # 数据所属的组织
class PenetrationTestDataNote(MyDocument):
"""
渗透测试详情
"""
__collection__ = "penetration_test_data_note"
penetration_id = StringField(max_length=1024)
ip = StringField(max_length=1024)
details = ListField(ReferenceField(UnitPenetrationTest))
# SSHRootEmptyPassword = StringField(max_length=1024)
# RedisEmptyPassword = StringField(max_length=1024)
# MongoEmptyPassword = StringField(max_length=1024)
organization = ReferenceField(reference_document_type=Organization) # organization the data belongs to
class ProxyipTestData(MyDocument):
"""
爆破测试
"""
__collection__ = "proxyip_test_data"
remoteip = StringField(max_length=1024)
originalip = StringField(max_length=1024)
proxyip = StringField(max_length=1024)
organization = ReferenceField(reference_document_type=Organization) # organization the data belongs to
class FeedbackMsg(MyDocument):
"""
feedback
"""
__collection__ = 'feedback_msg'
file_path = StringField() # file path
msg = StringField() # message
|
python
|
<caret>a = 1 # surprise!
b = 2
|
python
|
# VNA_characteristics_classes_creators
from enum import enum
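# NOTE: the standard-library enum module exposes `Enum`, not a lowercase `enum`
# callable, so this import is assumed to refer to a local helper providing a
# simple enum factory, e.g. (hypothetical sketch):
#     def enum(**named):
#         return type('enum', (), named)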
print "U R in VnaEnums" # Flag 4 debug
SweepType = enum(LINEAR=1, LOG=2, SEGM=3, POW=4)
SParameters= enum(S11=1, S12=2, S21=3, S22=4)
CalType = enum(OPEN=1, SHORT=2, THRU=3, FULL_2PORT=4, FULL_1PORT=5, TRL_2PORT=6)
DataFormat = enum(LOG=1, LIN=2, LIN_PHASE=3, PHASE=4, GDELAY=5,
SMITH_LIN_PHASE=6, SMITH_LOG_PHASE=7, SMITH_RE_IM=8, SMITH_R_JX=9, SMITH_G_JB=10)
Direction = enum(LEFT=1, RIGHT=2)
|
python
|
from mongoengine import (
DateTimeField,
IntField,
EmbeddedDocument,
StringField,
EmbeddedDocumentListField,
BooleanField,
)
from mongoengine import Document
class UserEvent(EmbeddedDocument):
event_id = StringField()
type = StringField()
timestamp = DateTimeField()
state = StringField()
class Survey(Document):
question3_attempts = IntField()
question4_attempts = IntField()
question1_duration = IntField()
question2_duration = IntField()
question3_duration = IntField()
question4_duration = IntField()
question5_duration = IntField()
question3_solved = BooleanField()
question4_solved = BooleanField()
intro_duration = IntField()
tour_duration = IntField()
user_events = EmbeddedDocumentListField(UserEvent)
user_ip = StringField()
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.TimePeriodRule import TimePeriodRule
class VoucherTemplateInfo(object):
def __init__(self):
self._amount = None
self._ceiling_amount = None
self._disable_detail_periods = None
self._discount = None
self._discount_calc_type = None
self._floor_amount = None
self._goods_id = None
self._template_id = None
self._usable_detail_period = None
self._usable_end_time = None
self._usable_start_time = None
self._usable_time_type = None
self._voucher_description = None
self._voucher_name = None
self._voucher_quantity = None
self._voucher_type = None
@property
def amount(self):
return self._amount
@amount.setter
def amount(self, value):
self._amount = value
@property
def ceiling_amount(self):
return self._ceiling_amount
@ceiling_amount.setter
def ceiling_amount(self, value):
self._ceiling_amount = value
@property
def disable_detail_periods(self):
return self._disable_detail_periods
@disable_detail_periods.setter
def disable_detail_periods(self, value):
if isinstance(value, list):
self._disable_detail_periods = list()
for i in value:
if isinstance(i, TimePeriodRule):
self._disable_detail_periods.append(i)
else:
self._disable_detail_periods.append(TimePeriodRule.from_alipay_dict(i))
@property
def discount(self):
return self._discount
@discount.setter
def discount(self, value):
self._discount = value
@property
def discount_calc_type(self):
return self._discount_calc_type
@discount_calc_type.setter
def discount_calc_type(self, value):
self._discount_calc_type = value
@property
def floor_amount(self):
return self._floor_amount
@floor_amount.setter
def floor_amount(self, value):
self._floor_amount = value
@property
def goods_id(self):
return self._goods_id
@goods_id.setter
def goods_id(self, value):
if isinstance(value, list):
self._goods_id = list()
for i in value:
self._goods_id.append(i)
@property
def template_id(self):
return self._template_id
@template_id.setter
def template_id(self, value):
self._template_id = value
@property
def usable_detail_period(self):
return self._usable_detail_period
@usable_detail_period.setter
def usable_detail_period(self, value):
if isinstance(value, list):
self._usable_detail_period = list()
for i in value:
if isinstance(i, TimePeriodRule):
self._usable_detail_period.append(i)
else:
self._usable_detail_period.append(TimePeriodRule.from_alipay_dict(i))
@property
def usable_end_time(self):
return self._usable_end_time
@usable_end_time.setter
def usable_end_time(self, value):
self._usable_end_time = value
@property
def usable_start_time(self):
return self._usable_start_time
@usable_start_time.setter
def usable_start_time(self, value):
self._usable_start_time = value
@property
def usable_time_type(self):
return self._usable_time_type
@usable_time_type.setter
def usable_time_type(self, value):
self._usable_time_type = value
@property
def voucher_description(self):
return self._voucher_description
@voucher_description.setter
def voucher_description(self, value):
self._voucher_description = value
@property
def voucher_name(self):
return self._voucher_name
@voucher_name.setter
def voucher_name(self, value):
self._voucher_name = value
@property
def voucher_quantity(self):
return self._voucher_quantity
@voucher_quantity.setter
def voucher_quantity(self, value):
self._voucher_quantity = value
@property
def voucher_type(self):
return self._voucher_type
@voucher_type.setter
def voucher_type(self, value):
self._voucher_type = value
def to_alipay_dict(self):
params = dict()
if self.amount:
if hasattr(self.amount, 'to_alipay_dict'):
params['amount'] = self.amount.to_alipay_dict()
else:
params['amount'] = self.amount
if self.ceiling_amount:
if hasattr(self.ceiling_amount, 'to_alipay_dict'):
params['ceiling_amount'] = self.ceiling_amount.to_alipay_dict()
else:
params['ceiling_amount'] = self.ceiling_amount
if self.disable_detail_periods:
if isinstance(self.disable_detail_periods, list):
for i in range(0, len(self.disable_detail_periods)):
element = self.disable_detail_periods[i]
if hasattr(element, 'to_alipay_dict'):
self.disable_detail_periods[i] = element.to_alipay_dict()
if hasattr(self.disable_detail_periods, 'to_alipay_dict'):
params['disable_detail_periods'] = self.disable_detail_periods.to_alipay_dict()
else:
params['disable_detail_periods'] = self.disable_detail_periods
if self.discount:
if hasattr(self.discount, 'to_alipay_dict'):
params['discount'] = self.discount.to_alipay_dict()
else:
params['discount'] = self.discount
if self.discount_calc_type:
if hasattr(self.discount_calc_type, 'to_alipay_dict'):
params['discount_calc_type'] = self.discount_calc_type.to_alipay_dict()
else:
params['discount_calc_type'] = self.discount_calc_type
if self.floor_amount:
if hasattr(self.floor_amount, 'to_alipay_dict'):
params['floor_amount'] = self.floor_amount.to_alipay_dict()
else:
params['floor_amount'] = self.floor_amount
if self.goods_id:
if isinstance(self.goods_id, list):
for i in range(0, len(self.goods_id)):
element = self.goods_id[i]
if hasattr(element, 'to_alipay_dict'):
self.goods_id[i] = element.to_alipay_dict()
if hasattr(self.goods_id, 'to_alipay_dict'):
params['goods_id'] = self.goods_id.to_alipay_dict()
else:
params['goods_id'] = self.goods_id
if self.template_id:
if hasattr(self.template_id, 'to_alipay_dict'):
params['template_id'] = self.template_id.to_alipay_dict()
else:
params['template_id'] = self.template_id
if self.usable_detail_period:
if isinstance(self.usable_detail_period, list):
for i in range(0, len(self.usable_detail_period)):
element = self.usable_detail_period[i]
if hasattr(element, 'to_alipay_dict'):
self.usable_detail_period[i] = element.to_alipay_dict()
if hasattr(self.usable_detail_period, 'to_alipay_dict'):
params['usable_detail_period'] = self.usable_detail_period.to_alipay_dict()
else:
params['usable_detail_period'] = self.usable_detail_period
if self.usable_end_time:
if hasattr(self.usable_end_time, 'to_alipay_dict'):
params['usable_end_time'] = self.usable_end_time.to_alipay_dict()
else:
params['usable_end_time'] = self.usable_end_time
if self.usable_start_time:
if hasattr(self.usable_start_time, 'to_alipay_dict'):
params['usable_start_time'] = self.usable_start_time.to_alipay_dict()
else:
params['usable_start_time'] = self.usable_start_time
if self.usable_time_type:
if hasattr(self.usable_time_type, 'to_alipay_dict'):
params['usable_time_type'] = self.usable_time_type.to_alipay_dict()
else:
params['usable_time_type'] = self.usable_time_type
if self.voucher_description:
if hasattr(self.voucher_description, 'to_alipay_dict'):
params['voucher_description'] = self.voucher_description.to_alipay_dict()
else:
params['voucher_description'] = self.voucher_description
if self.voucher_name:
if hasattr(self.voucher_name, 'to_alipay_dict'):
params['voucher_name'] = self.voucher_name.to_alipay_dict()
else:
params['voucher_name'] = self.voucher_name
if self.voucher_quantity:
if hasattr(self.voucher_quantity, 'to_alipay_dict'):
params['voucher_quantity'] = self.voucher_quantity.to_alipay_dict()
else:
params['voucher_quantity'] = self.voucher_quantity
if self.voucher_type:
if hasattr(self.voucher_type, 'to_alipay_dict'):
params['voucher_type'] = self.voucher_type.to_alipay_dict()
else:
params['voucher_type'] = self.voucher_type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = VoucherTemplateInfo()
if 'amount' in d:
o.amount = d['amount']
if 'ceiling_amount' in d:
o.ceiling_amount = d['ceiling_amount']
if 'disable_detail_periods' in d:
o.disable_detail_periods = d['disable_detail_periods']
if 'discount' in d:
o.discount = d['discount']
if 'discount_calc_type' in d:
o.discount_calc_type = d['discount_calc_type']
if 'floor_amount' in d:
o.floor_amount = d['floor_amount']
if 'goods_id' in d:
o.goods_id = d['goods_id']
if 'template_id' in d:
o.template_id = d['template_id']
if 'usable_detail_period' in d:
o.usable_detail_period = d['usable_detail_period']
if 'usable_end_time' in d:
o.usable_end_time = d['usable_end_time']
if 'usable_start_time' in d:
o.usable_start_time = d['usable_start_time']
if 'usable_time_type' in d:
o.usable_time_type = d['usable_time_type']
if 'voucher_description' in d:
o.voucher_description = d['voucher_description']
if 'voucher_name' in d:
o.voucher_name = d['voucher_name']
if 'voucher_quantity' in d:
o.voucher_quantity = d['voucher_quantity']
if 'voucher_type' in d:
o.voucher_type = d['voucher_type']
return o
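
# A minimal usage sketch, not part of the original SDK file. It assumes, as with the other
# Alipay model classes, that VoucherTemplateInfo() initializes every backing field to None,
# and shows the dict round trip used when building OpenAPI request payloads.
def _example_voucher_template_round_trip():
    info = VoucherTemplateInfo()
    info.voucher_name = 'demo voucher'
    info.voucher_quantity = 10
    info.goods_id = ['1001', '1002']
    params = info.to_alipay_dict()  # plain dict representation used in the request payload
    restored = VoucherTemplateInfo.from_alipay_dict(params)
    assert restored.voucher_name == info.voucher_name
    assert restored.goods_id == ['1001', '1002']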
|
python
|
""" Script for generating AE examples.
"""
import argparse
import importlib
import numpy as np
import os
from PIL import Image
import shutil
import sys
import torch
from tqdm import tqdm
from apfv21.attacks.tidr import TIDR
from apfv21.attacks.afv import AFV
from apfv21.utils import img_utils, imagenet_utils
import pdb
ATTACKS_CFG = {
"afv": {
"decay_factor": 1.0,
"prob": 1.0,
"epsilon": 16./255,
"steps": 80,
"step_size": 1./255,
"image_resize": 330,
"dr_weight": 1.0,
"ti_smoothing": True,
"ti_kernel_radius": (20, 3),
"random_start": False,
"train_attention_model": False,
"use_pretrained_foreground": True,
"attention_resize": 256,
"attention_weight_pth": "",
},
"dim": {
"decay_factor": 1.0,
"prob": 0.5,
"epsilon": 16./255,
"steps": 40,
"step_size": 2./255,
"image_resize": 330,
"random_start": False,
},
"tidr": {
"decay_factor": 1.0,
"prob": 0.5,
"epsilon": 16./255,
"steps": 40,
"step_size": 2./255,
"image_resize": 330,
"dr_weight": 0.0,
"random_start": False,
},
"pgd": {
"epsilon": 16/255.,
"k": 40,
"a": 2/255.,
},
"mifgsm": {
"decay_factor": 1.0,
"epsilon": 16./255,
"steps": 40,
"step_size": 2./255
}
}
DR_LAYERS = {
"vgg16": [12],
"resnet152": [5],
"mobilenet_v2": [8]
}
def serialize_config(cfg_dict: dict) -> str:
key_list = list(cfg_dict.keys())
key_list.sort()
ret_str = ""
for key in key_list:
val = cfg_dict[key]
if isinstance(val, float):
val = "{0:.04f}".format(val)
curt_str = "{}_{}".format(key, val)
ret_str = ret_str + curt_str + "_"
ret_str = ret_str.replace('.', 'p')
ret_str = ret_str.replace('/', '_')
ret_str = ret_str.replace('(', '_')
ret_str = ret_str.replace(')', '_')
ret_str = ret_str.replace(',', '_')
ret_str = ret_str.replace(' ', '_')
if len(ret_str) > 255:
ret_str = ret_str[-128:]
return ret_str[:-1]
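# Illustrative note (hypothetical values, not part of the original script): serialize_config
# sorts the keys, formats floats to four decimals and replaces path-unfriendly characters,
# so the result can be used directly as an output directory suffix, e.g.
#
#   serialize_config({"epsilon": 16./255, "steps": 40})  ->  "epsilon_0p0627_steps_40"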
def generate_adv_example(args):
attack_config = ATTACKS_CFG[args.attack_method]
suffix_str = "{}_{}_{}".format(
args.source_model, args.attack_method, serialize_config(attack_config))
output_folder = os.path.join(args.output_dir, suffix_str)
if os.path.exists(output_folder):
shutil.rmtree(output_folder)
os.mkdir(output_folder)
imgnet_label = imagenet_utils.load_imagenet_label_dict()
source_lib = importlib.import_module(
"apfv21.models." + args.source_model)
source_model_class = getattr(source_lib, args.source_model.upper())
source_model = source_model_class(is_normalize=True)
attack_lib = importlib.import_module(
        "apfv21.attacks." + args.attack_method)
attacker_class = getattr(attack_lib, args.attack_method.upper())
if args.attack_method == "afv":
afv_config = ATTACKS_CFG["afv"]
from apfv21.attacks.models.attention_model import ForegroundAttentionFCN as ForegroundAttention
# Only one of |use_pretrained_foreground| and |train_attention_model|
# can be turned on at one time.
if afv_config['use_pretrained_foreground']:
assert not afv_config["train_attention_model"]
if afv_config['train_attention_model']:
assert not afv_config["use_pretrained_foreground"]
attention_model = ForegroundAttention(
1, 3,
resize=(afv_config["attention_resize"],
afv_config["attention_resize"]),
use_pretrained_foreground=afv_config['use_pretrained_foreground']
).cuda() # num_classes, feature_channels
if afv_config["train_attention_model"]:
attention_optim = torch.optim.SGD(
attention_model.parameters(), lr=1e-7, momentum=0.9)
lr_scheduler = torch.optim.lr_scheduler.StepLR(
attention_optim, step_size=10, gamma=0.2)
else:
attention_optim = None
if not afv_config['use_pretrained_foreground']:
assert afv_config["attention_weight_pth"] != ""
if afv_config["attention_weight_pth"] != "":
attention_model.load_state_dict(
torch.load(afv_config["attention_weight_pth"]))
attacker = attacker_class(source_model, attention_model,
attention_optim, attack_config=afv_config)
else:
attacker = attacker_class(source_model, attack_config=attack_config)
img_names = os.listdir(args.input_dir)
success_count = 0
total_count = 0
for img_name in tqdm(img_names):
img_name_noext = os.path.splitext(img_name)[0]
img_path = os.path.join(args.input_dir, img_name)
img_ori_var = img_utils.load_img(img_path).cuda()
pred_ori = torch.argmax(source_model(img_ori_var)[1], dim=1)
if isinstance(attacker, TIDR):
img_adv_var = attacker(
img_ori_var, pred_ori, internal=DR_LAYERS[args.source_model])
elif isinstance(attacker, AFV):
img_adv_var, attention_map_first, attention_map_last = attacker(
img_ori_var, pred_ori, internal=DR_LAYERS[args.source_model])
else:
img_adv_var = attacker(img_ori_var, pred_ori)
pred_adv = torch.argmax(source_model(img_adv_var.cuda())[1], dim=1)
output_img = img_utils.save_img(
img_adv_var, os.path.join(output_folder, img_name_noext + ".png"))
        # # Visualization for debugging.
# print("Ori: ", img_name, " , ", pred_ori, ":",
# imgnet_label[pred_ori.cpu().numpy()[0]])
# print("Adv: ", img_name, " , ", pred_adv, ":",
# imgnet_label[pred_adv.cpu().numpy()[0]])
# attention_img_first = Image.fromarray(
# (attention_map_first.detach().cpu().numpy()[0, 0] * 255).astype(np.uint8))
# attention_img_first.save("./temp_attention_first.png")
# attention_img_last = Image.fromarray(
# (attention_map_last.detach().cpu().numpy()[0, 0] * 255).astype(np.uint8))
# attention_img_last.save("./temp_attention_last.png")
# img_utils.save_img(img_adv_var, "./temp_adv.png")
if imgnet_label[pred_ori.cpu().numpy()[0]] != \
imgnet_label[pred_adv.cpu().numpy()[0]]:
success_count += 1
total_count += 1
if total_count % 100 == 0 and args.attack_method == "afv" \
and afv_config["train_attention_model"]:
print("Saving attention model...")
torch.save(
attention_model.state_dict(),
"./weights/attention_model_weights_{}.pth".format(total_count))
lr_scheduler.step()
if args.attack_method == "afv" and afv_config["train_attention_model"]:
print("Saving attention model...")
torch.save(
attention_model.state_dict(),
"./weights/attention_model_weights_{}.pth".format(total_count))
success_rate = float(success_count) / float(total_count)
print("Success rate: ", success_rate)
print("{} over {}".format(success_count, total_count))
return
def parse_args(args):
parser = argparse.ArgumentParser(description="PyTorch AE generation.")
    parser.add_argument('--source_model',
                        default="vgg16", type=str)
    parser.add_argument('--attack_method',
                        default="afv", type=str)
parser.add_argument('--input_dir', default="sample_images/", type=str)
parser.add_argument('--output_dir', default="outputs/", type=str)
return parser.parse_args(args)
def main(args=None):
# parse arguments
if args is None:
args = sys.argv[1:]
args = parse_args(args)
args_dic = vars(args)
generate_adv_example(args)
if __name__ == "__main__":
main()
|
python
|
import csv
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import math
pd.options.display.max_columns = None
pd.options.display.max_rows = None
# Load the pre-processed dataset
df = pd.read_hdf('pre-processed.h5')
# Shuffling the dataset
df = df.sample(frac=1).reset_index(drop=True)
# Splitting the first 85% of the dataset for train set
train = df[:round(len(df)*0.85)]
# Min-max normalization of the train set
train_norm = (train - train.min())/(train.max() - train.min())
train_min = train.min()
train_max = train.max()
# Save the train set
train_norm.to_hdf("train_norm.h5", key='w')
# Save train set min and max values for back normalization of the inference cases
train_min.to_hdf("train_min.h5", key='w')
train_max.to_hdf("train_max.h5", key='w')
# Splitting and normalizing the validation set
val = df[round(len(df)*0.85):round(len(df)*0.95)]
val.to_hdf("val.h5", key='w')
val_norm = (val - train.min()) / (train.max() - train.min())
# Save the validation set
val_norm.to_hdf("val_norm.h5", key='w')
# Splitting and normalizing the test set
test = df[round(len(df)*0.95):]
test.to_hdf("test.h5", key='w')
test_norm = (test - train.min()) / (train.max() - train.min())
# Save the test set
test_norm.to_hdf("test_norm.h5", key='w')
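
# A minimal sketch (not part of the original script) of the back-normalization step that the
# saved train_min/train_max files are intended for: predictions made in normalized space are
# mapped back to the original units by inverting the min-max transform.
def denormalize(pred_norm, column):
    """Invert the min-max normalization for one column of model outputs."""
    return pred_norm * (train_max[column] - train_min[column]) + train_min[column]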
|
python
|
import matplotlib.pyplot as plt
import cv2 as cv
from color_component import get_channel, remove_channel
img = cv.imread('color_img.png')
plt.subplot(3, 1, 1)
imgRGB = img[:, :, ::-1]
plt.imshow(imgRGB)
ch = 1
imgSingleChannel = get_channel(img, ch)
imgRGB = cv.cvtColor(imgSingleChannel, cv.COLOR_BGR2RGB)
plt.subplot(3, 1, 2)
plt.imshow(imgRGB)
imgChannelRemoved = remove_channel(img, ch)
imgRGB = imgChannelRemoved[:, :, ::-1]
plt.subplot(3, 1, 3)
plt.imshow(imgRGB)
plt.show()
|
python
|
# Generated by Django 3.0 on 2020-01-06 16:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('content', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Library',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('platform', models.CharField(max_length=255, null=True)),
],
),
migrations.CreateModel(
name='Playlist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, null=True, verbose_name='Name')),
('is_public', models.BooleanField(blank=True, default=False)),
('date_created', models.DateField(auto_now_add=True)),
],
options={
'verbose_name': 'playlist',
'verbose_name_plural': 'playlists',
},
),
migrations.CreateModel(
name='PlaylistSong',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_saved', models.DateTimeField(auto_now_add=True)),
('playlist', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='playlist_to_song', to='music.Playlist')),
('song', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='song_to_playlist', to='content.Song')),
],
options={
'verbose_name': 'playlist song',
'verbose_name_plural': 'playlist songs',
},
),
migrations.AddField(
model_name='playlist',
name='songs',
field=models.ManyToManyField(related_name='playlist_songs', through='music.PlaylistSong', to='content.Song'),
),
migrations.AddField(
model_name='playlist',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
),
migrations.CreateModel(
name='LibrarySong',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_saved', models.DateTimeField(auto_now_add=True)),
('library', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='library_to_song', to='music.Library')),
('song', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='song_to_library', to='content.Song')),
],
options={
'verbose_name': 'library song',
'verbose_name_plural': 'library songs',
},
),
migrations.AddField(
model_name='library',
name='songs',
field=models.ManyToManyField(related_name='library_songs', through='music.LibrarySong', to='content.Song'),
),
migrations.AddField(
model_name='library',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
python
|
from django.shortcuts import render
from django.http import HttpResponse
from .models import Pizza
def index(request):
messages = ['Welcome Pizza Lovers', 'Our currently available Pizzas are:']
for pizza in Pizza.objects.all():
messages.append(str(pizza))
for topping in pizza.topping_set.all():
messages.append('* Topping: %s' % str(topping.topping_name))
return HttpResponse('<br>'.join(messages))
def pizza_info(request, pizza_id):
try:
pizza = Pizza.objects.get(pk=pizza_id)
    except Pizza.DoesNotExist:
return HttpResponse('boo hoo')
return HttpResponse('The pizza with id %s is %s' % (pizza_id, pizza))
|
python
|
import time
import traceback
import logging
import os
import matplotlib.pyplot as plt
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
from pycqed.analysis import analysis_toolbox as a_tools
class AnalysisDaemon:
"""
    AnalysisDaemon is a class that allows analysis to be processed in a
    separate python kernel so that measurements can run in parallel
    with the analysis.
The Daemon can either be started:
- in a separate ipython notebook using: `AnalysisDaemon(start=True)`.
    Note that a_tools.datadir should be set before calling the daemon
    or passed with the watchdir argument.
- via the commandline by calling `analysis_daemon.py` with possible
additional arguments (see `analysis_daemon.py --help`)
- with the start_analysis_daemon.bat script located in pycqedscripts/scripts
(Windows only)
"""
def __init__(self, t_start=None, start=True, watchdir=None):
"""
Initialize AnalysisDaemon
Args:
t_start (str): timestamp from which to start observing the data
directory. If None, defaults to now.
start (bool): whether or not to start the daemon
watchdir (str): directory which the Daemon should look at.
                Defaults to analysis_toolbox.datadir.
"""
self.t_start = t_start
self.last_ts = None
self.poll_interval = 10 # seconds
self.errs = []
self.job_errs = []
if watchdir is not None:
a_tools.datadir = watchdir
if start:
self.start()
def start(self):
"""
Starts the AnalysisDaemon
Returns:
"""
self.last_ts = a_tools.latest_data(older_than=self.t_start,
return_path=False,
return_timestamp=True, n_matches=1)[0]
self.run()
def run(self):
try:
            while True:
                if not self.check_job():
                    for i in range(self.poll_interval):
                        time.sleep(1)
        except KeyboardInterrupt:
pass
except Exception as e:
log.error(e)
self.errs.append(traceback.format_exc())
self.run()
def check_job(self):
"""
Checks whether new jobs have been found and processes them
Returns: True if a job was found and processed (done or failed),
False otherwise.
"""
try:
timestamps, folders = a_tools.latest_data(newer_than=self.last_ts,
raise_exc=False,
return_timestamp=True,
n_matches=1000)
except ValueError as e:
            return False  # could not find any timestamps matching criteria
log.info(f"Searching jobs in: {timestamps[0]} ... {timestamps[-1]}.")
found_jobs = False
for folder, ts in zip(folders, timestamps):
jobs_in_folder = []
for file in os.listdir(folder):
if file.endswith(".job"):
jobs_in_folder.append(os.path.join(folder, file))
if len(jobs_in_folder) > 0:
log.info(f"Found {len(jobs_in_folder)} job(s) in {ts}")
found_jobs = True
for filename in jobs_in_folder:
if os.path.isfile(filename):
time.sleep(1) # wait to make sure that the file is fully written
job = self.read_job(filename)
errl = len(self.job_errs)
os.rename(filename, filename + '.running')
self.run_job(job)
time.sleep(1) # wait to make sure that files are written
t = self.get_datetimestamp()
if os.path.isfile(filename):
os.rename(filename,
f"{filename}_{t}.loop_detected")
log.warning(f'A loop was detected! Job {filename} '
f'tries to delegate plotting.')
if errl == len(self.job_errs):
os.rename(filename + '.running',
f"{filename}_{t}.done")
else:
new_filename = f"{filename}_{t}.failed"
os.rename(filename + '.running', new_filename)
new_errors = self.job_errs[errl:]
self.write_to_job(new_filename, new_errors)
self.last_ts = ts
if not found_jobs:
log.info(f"No new job found.")
return False
else:
return True
@staticmethod
def get_datetimestamp():
return time.strftime('%Y%m%d_%H%M%S', time.localtime())
@staticmethod
def read_job(filename):
job_file = open(filename, 'r')
job = "".join(job_file.readlines())
job_file.close()
return job
@staticmethod
def write_to_job(filename, new_lines):
job_file = open(filename, 'a+')
job_file.write("\n\n")
job_file.write("".join(new_lines))
job_file.close()
def run_job(self, job):
try:
exec(job)
plt.close('all')
except Exception as e:
log.error(f"Error in job: {job}:\n{e}")
self.job_errs.append(traceback.format_exc())
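# Illustrative note (not part of the original module): run_job() simply exec()s the text of
# a *.job file, so a job is ordinary Python analysis code dropped into the data folder by the
# measurement side. check_job() renames the file to *.running while it executes, and to
# *_<timestamp>.done or *_<timestamp>.failed afterwards.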
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--data-dir", help="Watch directory")
parser.add_argument("--t-start", help="Starting watch time formatted as a "
"timestamp YYYYmmdd_hhMMss or string"
" 'now' (default).")
args = parser.parse_args()
if args.data_dir is not None:
a_tools.datadir = args.data_dir
if args.t_start == "now":
args.t_start = None
ad = AnalysisDaemon(t_start=args.t_start)
|
python
|
# Copyright (c) 2015 Scott Christensen
#
# This file is part of htpython modified from condorpy
#
# condorpy/htpython is free software: you can redistribute it and/or modify it under
# the terms of the BSD 2-Clause License. A copy of the BSD 2-Clause License
# should have been distributed with this file.
from collections import OrderedDict
from copy import deepcopy
import pickle, os
class Templates(object):
"""
"""
def __init__(self):
pass
def __getattribute__(self, item):
"""
:param item:
:return:
"""
attr = object.__getattribute__(self, item)
if item in object.__getattribute__(self, '__dict__'):
attr = deepcopy(attr)
return attr
def save(self, file_name=None):
if not file_name:
file_name = os.path.join(os.path.dirname(__file__), 'condorpy-saved-templates')
with open(file_name, 'wb') as file:
pickle.dump(self.__dict__, file, protocol=0)
def load(self, file_name=None):
if not file_name:
file_name = os.path.join(os.path.dirname(__file__), 'condorpy-saved-templates')
if not os.path.isfile(file_name):
return
#TODO: raise an error? log warning?
with open(file_name, 'rb') as file:
pdict = pickle.load(file)
self.__dict__.update(pdict)
def reset(self):
self.__dict__ = dict()
@property
def base(self):
base = OrderedDict()
base['job_name'] = ''
base['universe'] = ''
base['executable'] = ''
base['arguments'] = ''
base['initialdir'] = '$(job_name)'
base['logdir'] = 'logs'
base['log'] = '$(logdir)/$(job_name).$(cluster).log'
base['output'] = '$(logdir)/$(job_name).$(cluster).$(process).out'
base['error'] = '$(logdir)/$(job_name).$(cluster).$(process).err'
return base
@property
def vanilla_base(self):
vanilla_base = self.base
vanilla_base['universe'] = 'vanilla'
return vanilla_base
@property
def vanilla_transfer_files(self):
vanilla_transfer_files = self.vanilla_base
vanilla_transfer_files['transfer_executable'] = 'TRUE'
vanilla_transfer_files['should_transfer_files'] = 'YES'
vanilla_transfer_files['when_to_transfer_output'] = 'ON_EXIT_OR_EVICT'
vanilla_transfer_files['transfer_input_files'] = ''
vanilla_transfer_files['transfer_output_files'] = ''
return vanilla_transfer_files
@property
def vanilla_nfs(self):
vanilla_nfs = self.vanilla_base
return vanilla_nfs
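
# A minimal usage sketch (hypothetical job values, not part of the original module): each
# property builds a fresh OrderedDict on access, so a template can be customized per job
# without mutating shared state.
#
#   templates = Templates()
#   job = templates.vanilla_transfer_files
#   job['job_name'] = 'my_job'
#   job['executable'] = 'run.sh'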
|
python
|
#!/usr/bin/env python
# landScraper.py -v 1.7
# currently designed for python 3.10.2
# Author- David Sullivan
#
# Credit to Michael Shilov from scraping.pro/simple-email-crawler-python for the base for this code
#
# As a reminder, web scraping for the purpose of SPAM or hacking is illegal. This tool has been provided for
# legitimate testers to validate the information provided on a website that they have explicit legal right to
# scrape.
#
# Dependencies:
# -pip install requests
# -pip install bs4
#
# Revision 1.0 - 02/12/2018- Initial creation of script
# Revision 1.1 - 04/06/2018- Added in some more error handling
# Revision 1.2 - 04/18/2018- Added in ability to blacklist words in links (to avoid looping), and the ability to
# process sub-domains
# Revision 1.3 - 08/07/2018- Added in functionality to write to output as it runs, in case the script breaks
# Revision 1.4 - 01/16/2019- Added a timeout for stale requests, suppression for error messages related to SSL
# Revision 1.5 - 02/13/2019- Renamed tool to landScraper, implemented argparse, broke out functions, implemented
# automated cleanup if the program is terminated using keystrokes
# Revision 1.6 - 10/20/2021- Added a User-Agent header to get around 403 errors
# Revision 1.7 - 03/15/2022- Added 'touch' command to create output file if it doesn't exist to make it forward
# compatible with newer versions of python, updated bad link words
import re
import requests.exceptions
import argparse
from collections import deque
from urllib.parse import urlsplit
from bs4 import BeautifulSoup
# disable insecure request warning
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def manual_url():
# set starting url
starting_url = str(
input(r'Put in the full URL of where you want to start crawling (ex: http://test.local/local): '))
return starting_url
def manual_email():
# set email domain
email_domain = str(input(r'Put in the email domain you are looking for (ex: @test.local): '))
return email_domain
def manual_output():
# set output file
outfile = str(input(r'Path and filename for results output (ex: /home/test/crawled_emails.txt): '))
return outfile
def scrape(starting_url, domain_name, email_domain, outfile):
# a queue of urls to be crawled
global response
new_urls = deque([starting_url])
# a set of urls that we have already crawled
processed_urls = set()
# create an empty list for the emails variable
emails = list()
# don't include links with the following (to avoid loops)
bad_link_words = ['##', '.pdf', '.mp3', '.mp4', '.mpg', '.wav', '.jpg', '.png', '.gif', '#', '../']
# process to handle checking the bad_link_words list
def avoid_loops(links, words):
return any(word in links for word in words)
# process urls one by one until we exhaust the queue
while len(new_urls):
# noinspection PyBroadException
try:
# move next url from the queue to the set of processed urls
url = new_urls.popleft()
processed_urls.add(url)
# extract base url to resolve relative links
parts = urlsplit(url)
base_url = "{0.scheme}://{0.netloc}".format(parts)
path = url[:url.rfind('/') + 1] if '/' in parts.path else url
# get url's content
print("Processing %s" % url)
try:
response = requests.get(url, headers={'User-Agent': 'curl/7.72.0'}, verify=False, timeout=10)
except (requests.exceptions.MissingSchema, requests.exceptions.ConnectionError):
# ignore pages with errors
continue
except KeyboardInterrupt:
cleanup_list(outfile, emails, email_domain)
# extract all email addresses
new_emails = set(re.findall(r"[a-z0-9.\-+_]+@[a-z0-9.\-+_]+\.[a-z]+", response.text, re.I))
# write them to output file
f = open(outfile, 'a+')
for email in new_emails:
f.write(email + '\n')
f.close()
# noinspection PyBroadException
try:
# create a beautiful soup for the html document
soup = BeautifulSoup(response.text, "html.parser")
# find and process all the anchors in the document
for anchor in soup.find_all("a"):
# extract link url from the anchor
link = anchor.attrs["href"] if "href" in anchor.attrs else ''
# resolve relative links
if link.startswith('/'):
link = base_url + link
elif not link.startswith('http'):
link = path + link
# add the new url to the queue if it was not enqueued nor processed yet
if not (link in new_urls or link in processed_urls):
# only add the new url if not in the bad_link_words list
if not avoid_loops(link, bad_link_words):
# only add urls that are part of the domain_name
if domain_name in link:
new_urls.append(link)
except KeyboardInterrupt:
cleanup_list(outfile, emails, email_domain)
except Exception:
# if the URL is too long this can error out
continue
except KeyboardInterrupt:
cleanup_list(outfile, emails, email_domain)
except Exception:
# if some error occurs
continue
return emails
def cleanup_list(outfile, emails, email_domain):
# cleaning up output
print("Cleaning up output file")
# open the output file and import all the crawled emails
f = open(outfile, 'r')
for email in f:
email = email.replace('\n', '')
emails.append(email)
f.close()
# remove duplicates from emails
emails = set([email.lower() for email in emails])
# create a list for final_emails
final_emails = list()
# remove emails not in the set domain
for email in emails:
if email_domain in email:
final_emails.append(email)
# write output
f = open(outfile, 'w')
for email in final_emails:
f.write(email + '\n')
f.close()
# quit the script
print("Cleanup finished. Results can be found in %s" % outfile)
quit()
def main():
# parse input for variables
parser = argparse.ArgumentParser(description='LandScraper- Domain Email Scraper')
parser.add_argument('-u', '--url', help='Starting URL')
    parser.add_argument('-d', '--domain', help='Domain name (if different from starting URL)')
parser.add_argument('-e', '--email', help='Email Domain')
parser.add_argument('-o', '--output', help='Output file name')
args = parser.parse_args()
# select url
if args.url is not None:
url = args.url
else:
url = manual_url()
# select domain
if args.domain is not None:
domain = args.domain
else:
domain = url
# select email domain
if args.email is not None:
email = args.email
else:
email = manual_email()
# select output
if args.output is not None:
outfile = args.output
else:
outfile = manual_output()
# create outfile if it does not exist
from pathlib import Path
myfile = Path(outfile)
myfile.touch(exist_ok=True)
# run the program
emails = scrape(url, domain, email, outfile)
# run cleanup on the output file
cleanup_list(outfile, emails, email)
main()
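# Illustrative CLI usage (hypothetical host names), mirroring the argparse flags above; any
# flag that is omitted is prompted for interactively by the manual_* helpers:
#
#   python landScraper.py -u http://test.local/ -e @test.local -o crawled_emails.txt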
|
python
|
"""
Dailymotion OAuth2 support.
This adds support for Dailymotion OAuth service. An application must
be registered first on dailymotion and the settings DAILYMOTION_OAUTH2_KEY
and DAILYMOTION_OAUTH2_SECRET must be defined with the corresponding
values.
User screen name is used to generate username.
By default account id is stored in extra_data field, check OAuthBackend
class for details on how to extend it.
"""
from urllib2 import HTTPError
try:
import json as simplejson
except ImportError:
try:
import simplejson
except ImportError:
from django.utils import simplejson
from social_auth.utils import dsa_urlopen
from social_auth.backends import BaseOAuth2
from social_auth.backends import SocialAuthBackend
from social_auth.exceptions import AuthCanceled
# Dailymotion configuration
DAILYMOTION_SERVER = 'api.dailymotion.com'
DAILYMOTION_REQUEST_TOKEN_URL = 'https://%s/oauth/token' % DAILYMOTION_SERVER
DAILYMOTION_ACCESS_TOKEN_URL = 'https://%s/oauth/token' % DAILYMOTION_SERVER
# Note: oauth/authorize forces the user to authorize every time.
# oauth/authenticate uses their previous selection, barring revocation.
DAILYMOTION_AUTHORIZATION_URL = 'https://%s/oauth/authorize' % \
DAILYMOTION_SERVER
DAILYMOTION_CHECK_AUTH = 'https://%s/me/?access_token=' % DAILYMOTION_SERVER
class DailymotionBackend(SocialAuthBackend):
"""Dailymotion OAuth authentication backend"""
name = 'dailymotion'
EXTRA_DATA = [('id', 'id')]
def get_user_id(self, details, response):
"""Use dailymotion username as unique id"""
return details['username']
def get_user_details(self, response):
return {'username': response['screenname']}
class DailymotionAuth(BaseOAuth2):
"""Dailymotion OAuth2 authentication mechanism"""
AUTHORIZATION_URL = DAILYMOTION_AUTHORIZATION_URL
REQUEST_TOKEN_URL = DAILYMOTION_REQUEST_TOKEN_URL
ACCESS_TOKEN_URL = DAILYMOTION_ACCESS_TOKEN_URL
AUTH_BACKEND = DailymotionBackend
SETTINGS_KEY_NAME = 'DAILYMOTION_OAUTH2_KEY'
SETTINGS_SECRET_NAME = 'DAILYMOTION_OAUTH2_SECRET'
def user_data(self, access_token, *args, **kwargs):
"""Return user data provided"""
try:
data = dsa_urlopen(DAILYMOTION_CHECK_AUTH + access_token).read()
return simplejson.loads(data)
except (ValueError, HTTPError):
return None
def auth_complete(self, *args, **kwargs):
"""Completes login process, must return user instance"""
if 'denied' in self.data:
raise AuthCanceled(self)
else:
return super(DailymotionAuth, self).auth_complete(*args, **kwargs)
def oauth_request(self, token, url, extra_params=None):
extra_params = extra_params or {}
return extra_params
# Backend definition
BACKENDS = {
'dailymotion': DailymotionAuth,
}
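# Example Django settings (hypothetical values, not part of the original module) that this
# backend reads, matching SETTINGS_KEY_NAME / SETTINGS_SECRET_NAME above:
#
#   DAILYMOTION_OAUTH2_KEY = '<your application key>'
#   DAILYMOTION_OAUTH2_SECRET = '<your application secret>'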
|
python
|
import sys
import urllib2
import zlib
import time
import re
import xml.dom.pulldom
import operator
import codecs
from optparse import OptionParser
nDataBytes, nRawBytes, nRecoveries, maxRecoveries = 0, 0, 0, 3
def getFile(serverString, command, verbose=1, sleepTime=0):
global nRecoveries, nDataBytes, nRawBytes
if sleepTime:
time.sleep(sleepTime)
remoteAddr = serverString + '?verb=%s' % command
if verbose:
print "\r", "Fetching set list ...'%s'" % remoteAddr[-90:]
headers = {'User-Agent': 'OAIHarvester/2.0', 'Accept': 'text/html', 'Accept-Encoding': 'compress, deflate'}
try:
remoteData = urllib2.urlopen(remoteAddr).read()
except urllib2.HTTPError, exValue:
if exValue.code == 503:
retryWait = int(exValue.hdrs.get("Retry-After", "-1"))
if retryWait < 0:
return None
print 'Waiting %d seconds' % retryWait
return getFile(serverString, command, 0, retryWait)
print exValue
if nRecoveries < maxRecoveries:
nRecoveries += 1
return getFile(serverString, command, 1, 60)
return
nRawBytes += len(remoteData)
try:
remoteData = zlib.decompressobj().decompress(remoteData)
except:
pass
nDataBytes += len(remoteData)
mo = re.search('<error *code=\"([^"]*)">(.*)</error>', remoteData)
if mo:
print "OAIERROR code=%s '%s'" % (mo.group(1), mo.group(2))
else:
return remoteData
if __name__ == "__main__":
serverString = 'http://fsu.digital.flvc.org/oai2'
outFileName = 'assets/setSpec.xml'
print "Writing records to %s from %s" % (outFileName, serverString)
ofile = codecs.lookup('utf-8')[-1](file(outFileName, 'wb'))
    ofile.write('<repository xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/" \
xmlns:dc="http://purl.org/dc/elements/1.1/" \
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">\n')
data = getFile(serverString, 'ListSets')
recordCount = 0
while data:
events = xml.dom.pulldom.parseString(data)
for (event, node) in events:
if event == "START_ELEMENT" and node.tagName == 'set':
events.expandNode(node)
node.writexml(ofile)
recordCount += 1
mo = re.search('resumptionToken[^>]*>(.*)</resumptionToken>', data)
if not mo:
break
data = getFile(serverString, "ListSets&resumptionToken=%s" % mo.group(1))
ofile.write('\n</repository>\n'), ofile.close()
print "\nRead %d bytes (%.2f compression)" % (nDataBytes, float(nDataBytes) / nRawBytes)
print "Wrote out %d records" % recordCount
|
python
|
from WonderPy.core.wwConstants import WWRobotConstants
from .wwSensorBase import WWSensorBase
_rcv = WWRobotConstants.RobotComponentValues
_expected_json_fields = (
_rcv.WW_SENSOR_VALUE_DISTANCE,
)
class WWSensorWheel(WWSensorBase):
def __init__(self, robot):
super(WWSensorWheel, self).__init__(robot)
self._distance_raw = None
self._distance_reference = None
@property
def distance(self):
return self._distance_raw - self._distance_reference
def _important_field_names(self):
return 'distance',
def parse(self, single_component_dictionary):
if not self.check_fields_exist(single_component_dictionary, _expected_json_fields):
return
# todo: handle wrap at about +/-9000cm.
self._distance_raw = single_component_dictionary[_rcv.WW_SENSOR_VALUE_DISTANCE]
if self._distance_reference is None:
self.tare()
self._valid = True
def tare(self):
"""
Reset the reference distance
"""
self._distance_reference = self._distance_raw
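# A minimal usage sketch (hypothetical accessor name, not part of this module): the distance
# property reports travel relative to the last tare() call, and the first parsed packet tares
# automatically because _distance_reference starts as None.
#
#   wheel = robot.sensors.wheel   # assumed accessor on the WonderPy robot object
#   wheel.tare()                  # current raw odometry becomes the new zero point
#   print(wheel.distance)         # travel since tare(), in the sensor's raw units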
|
python
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from os import chdir
from subprocess import run
# Bokeh imports
from bokeh._testing.util.project import TOP_PATH, ls_files
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
def test_flake8_bokeh() -> None:
flake8("bokeh")
def test_flake8_examples() -> None:
flake8("examples")
def test_flake8_release() -> None:
flake8("release")
def test_flake8_sphinx() -> None:
flake8("sphinx")
def test_flake8_tests() -> None:
flake8("tests")
def test_flake8_typings() -> None:
flake8("typings")
#-----------------------------------------------------------------------------
# Support
#-----------------------------------------------------------------------------
def flake8(dir: str) -> None:
''' Assures that the Python codebase passes configured Flake8 checks.
'''
chdir(TOP_PATH)
proc = run(["flake8", *ls_files(f"{dir}/**.py", f"{dir}/**.pyi")], capture_output=True)
assert proc.returncode == 0, f"Flake8 issues:\n{proc.stdout.decode('utf-8')}"
|
python
|
from ..check import Check
from ..exceptions import CheckError
def is_number(check_obj):
try:
assert isinstance(check_obj._val, check_obj.NUMERIC_TYPES)
return check_obj
except AssertionError:
raise CheckError('{} is not a number'.format(check_obj._val))
def is_not_number(check_obj):
try:
assert not isinstance(check_obj._val, check_obj.NUMERIC_TYPES)
return check_obj
except AssertionError:
raise CheckError('{} is a number'.format(check_obj._val))
def is_integer(check_obj):
try:
assert isinstance(check_obj._val, int)
return check_obj
except AssertionError:
raise CheckError('{} is not integer'.format(check_obj._val))
def is_not_integer(check_obj):
try:
assert not isinstance(check_obj._val, int)
return check_obj
except AssertionError:
raise CheckError('{} is integer'.format(check_obj._val))
def is_float(check_obj):
try:
assert isinstance(check_obj._val, float)
return check_obj
except AssertionError:
raise CheckError('{} is not float'.format(check_obj._val))
def is_not_float(check_obj):
try:
assert not isinstance(check_obj._val, float)
return check_obj
except AssertionError:
raise CheckError('{} is float'.format(check_obj._val))
def is_real(check_obj):
check_obj.is_number()
try:
assert not isinstance(check_obj._val, complex)
return check_obj
except AssertionError:
raise CheckError('{} is not real'.format(check_obj._val))
def is_not_real(check_obj):
check_obj.is_number()
try:
assert isinstance(check_obj._val, complex)
return check_obj
except AssertionError:
raise CheckError('{} is real'.format(check_obj._val))
def is_complex(check_obj):
try:
assert isinstance(check_obj._val, complex)
return check_obj
except AssertionError:
raise CheckError('{} is not complex'.format(check_obj._val))
def is_not_complex(check_obj):
try:
assert not isinstance(check_obj._val, complex)
return check_obj
except AssertionError:
raise CheckError('{} is complex'.format(check_obj._val))
def is_positive(check_obj):
check_obj.is_real()
try:
assert float(check_obj._val) > 0.
return check_obj
except AssertionError:
raise CheckError('{} is zero or negative'.format(check_obj._val))
def is_not_positive(check_obj):
check_obj.is_real()
try:
assert float(check_obj._val) <= 0
return check_obj
except AssertionError:
raise CheckError('{} is positive'.format(check_obj._val))
def is_negative(check_obj):
check_obj.is_real()
try:
assert float(check_obj._val) < 0.
return check_obj
except AssertionError:
raise CheckError('{} is zero or positive'.format(check_obj._val))
def is_not_negative(check_obj):
check_obj.is_real()
try:
assert float(check_obj._val) >= 0
return check_obj
except AssertionError:
raise CheckError('{} is negative'.format(check_obj._val))
def is_zero(check_obj):
check_obj.is_real()
try:
assert float(check_obj._val) == 0.
return check_obj
except AssertionError:
raise CheckError('{} is non-zero'.format(check_obj._val))
def is_not_zero(check_obj):
check_obj.is_real()
try:
assert float(check_obj._val) != 0.
return check_obj
except AssertionError:
        raise CheckError('{} is zero'.format(check_obj._val))
def is_at_least(check_obj, lower):
check_obj.is_real()
Check(lower).is_real()
try:
assert float(check_obj._val) >= float(lower)
return check_obj
except AssertionError:
raise CheckError('{} is smaller than {}'.format(check_obj._val, lower))
def is_at_most(check_obj, upper):
check_obj.is_real()
Check(upper).is_real()
try:
assert float(check_obj._val) <= float(upper)
return check_obj
except AssertionError:
raise CheckError('{} is bigger than {}'.format(check_obj._val, upper))
def is_between(check_obj, lower, upper):
check_obj.is_real()
Check(lower).is_real()
Check(upper).is_real()
check_obj.is_at_least(lower).is_at_most(upper)
return check_obj
def is_not_between(check_obj, lower, upper):
check_obj.is_real()
Check(lower).is_real()
Check(upper).is_real()
try:
assert float(check_obj._val) <= lower or float(check_obj._val) >= upper
return check_obj
except AssertionError:
raise CheckError('{} is between {} and {}'.format(check_obj._val, lower, upper))
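
# A minimal usage sketch, assuming (as the imports above suggest) that Check exposes these
# functions as chainable methods: each check returns the Check object on success and raises
# CheckError on failure, so assertions compose fluently.
def _example_numeric_checks():
    Check(3.5).is_number().is_float().is_positive().is_between(0, 10)
    try:
        Check(-1).is_at_least(0)
    except CheckError as exc:
        print(exc)  # "-1 is smaller than 0"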
|
python
|
import os
import json
import time
import torch
from torch import optim
import models
from utils import reduce_lr, stop_early, since
_optimizer_kinds = {'Adam': optim.Adam,
'SGD': optim.SGD}
class SimpleLoader:
def initialize_args(self, **kwargs):
for key, val in kwargs.items():
self.params_dict[key] = val
for key, val in self.params_dict.items():
setattr(self, key, val)
class SimpleTrainingLoop(SimpleLoader):
def __init__(self, model, device, **kwargs):
self.params_dict = {
'optimizer': 'Adam',
'learning_rate': 1e-3,
'num_epochs': 40,
'verbose': True,
'use_early_stopping': True,
'early_stopping_loss': 0,
'cooldown': 0,
'num_epochs_early_stopping': 10,
'delta_early_stopping': 1e-4,
'learning_rate_lower_bound': 1e-6,
'learning_rate_scale': 0.5,
'num_epochs_reduce_lr': 4,
'num_epochs_cooldown': 8,
'use_model_checkpoint': True,
'model_checkpoint_period': 1,
'start_epoch': 0,
'gradient_clip': None,
'warmup_initial_scale': 0.2,
'warmup_num_epochs': 2,
'loss_weights': [1, 1, 0.2],
}
super(SimpleTrainingLoop, self).initialize_args(**kwargs)
self.device = device
if isinstance(model, str):
# In this case it's just a path to a previously stored model
modelsubdirs = sorted([int(_) for _ in os.listdir(model) if _.isnumeric()], reverse=True)
if modelsubdirs:
for modelsubdir in modelsubdirs:
modelsubdir_str = str(modelsubdir)
modeldir = os.path.join(model, modelsubdir_str)
if os.path.isfile(os.path.join(modeldir, 'loss_history.json')):
self.load_from_dir(modeldir)
print("Resuming from epoch {}".format(modelsubdir))
break
else:
raise FileNotFoundError(os.path.join(model, '{{nnetdir}}', 'loss_history.json'))
else:
raise FileNotFoundError(os.path.join(model, '{{nnetdir}}'))
else:
self.model = model
_opt = _optimizer_kinds[self.params_dict['optimizer']]
self.optimizer = _opt(self.model.parameters(),
lr=self.params_dict['learning_rate'])
self.loss_history = {}
def train_one_epoch(self, criterion, data_loaders, phases=('train', 'test')):
epoch_beg = time.time()
self.model.to(self.device)
dataset_sizes = {x: len(data_loaders[x].dataset)
for x in phases}
batch_sizes = {x: data_loaders[x].batch_size for x in phases}
try:
m = (1 - self.params_dict['warmup_initial_scale'])/(self.params_dict['warmup_num_epochs'] - 1)
c = self.params_dict['warmup_initial_scale']
except ZeroDivisionError:
m = 1
c = 0
lr_scale = m * self.elapsed_epochs() + c
if self.elapsed_epochs() < self.params_dict['warmup_num_epochs']:
self.optimizer.param_groups[0]['lr'] *= lr_scale
self.loss_history[str(self.elapsed_epochs())] = {}
print('Epoch {}/{} - lr={}'.format(self.elapsed_epochs(), self.params_dict['num_epochs'],
self.optimizer.param_groups[0]['lr'])
)
for phase in phases:
print('\t{} '.format(phase.title()), end='')
phase_beg = time.time()
if phase == 'train':
self.model.train()
else:
self.model.eval()
running_loss = 0.
running_dist_sum = 0.
running_count = 0.
running_count_per_class = torch.zeros(self.model.num_centroids)
for batch_no, batch_data in enumerate(data_loaders[phase]):
self.optimizer.zero_grad()
data_batch, label_batch, speaker_ids = batch_data
data_batch = data_batch.to(self.device)
label_batch = label_batch.to(self.device)
speaker_ids = speaker_ids.to(self.device)
with torch.set_grad_enabled(phase == 'train'):
encoded = self.model.encoder(data_batch).contiguous()
quantized, alignment = self.model.quantize(encoded, return_alignment=True)
count_per_class = torch.tensor([(alignment == i).sum()
for i in range(self.model.num_centroids)])
running_count_per_class += count_per_class
output = self.model.decoder(quantized)
predicted_centroids = self.model.centroids[alignment]
encoded = encoded.view(-1, encoded.size(-1))
new_length = output.size(1)
old_length = label_batch.size(1)
# Try to correct slight mismatches from down-sampling and up-sampling.
# Be careful in case of down-sampling without up-sampling
if old_length > new_length:
label_batch = label_batch[:, :new_length]
elif new_length > old_length:
output = output[:, :old_length]
distance_loss = criterion['dis_loss'](encoded, predicted_centroids.detach())
commitment_loss = criterion['com_loss'](predicted_centroids, encoded.detach())
reconstruction_loss = criterion['rec_loss'](output, label_batch)
if self.model.use_ma:
total_loss = reconstruction_loss + distance_loss
else:
total_loss = reconstruction_loss + distance_loss + commitment_loss
# Not part of the graph, just metrics to be monitored
mask = (label_batch != data_loaders[phase].dataset.pad_value).float()
numel = 1 # torch.sum(mask).item()
running_loss += reconstruction_loss.item()
running_dist_sum += distance_loss.item()
running_count += numel
class0_sum = running_dist_sum / running_count
px = running_count_per_class / running_count_per_class.sum()
px.clamp_min_(1e-20)
entropy = -(px * torch.log2(px)).sum().item()
if phase == 'train':
total_loss.backward()
if self.params_dict['gradient_clip']:
torch.nn.utils.clip_grad_norm_(self.model.parameters(),
self.params_dict['gradient_clip'])
self.optimizer.step()
phase_elapse = since(phase_beg)
eta = int(phase_elapse
* (dataset_sizes[phase] // batch_sizes[phase]
- batch_no - 1)
/ (batch_no + 1))
if self.params_dict['verbose']:
print('\r\t{} batch: {}/{} batches - ETA: {}s - loss: {:.4f} - dist: {:.4f} - '
'entropy: {:.4f}'.format(phase.title(),
batch_no + 1,
dataset_sizes[phase] // batch_sizes[phase] + 1,
eta, running_loss/running_count,
class0_sum, entropy
), end='')
epoch_loss = running_loss/running_count
print(" - loss: {:.4f} - dist: {:.4f} - entropy: {:.4f}".format(epoch_loss, class0_sum,
entropy))
self.loss_history[str(self.elapsed_epochs() - 1)][phase] = (epoch_loss,
class0_sum, entropy,
running_count)
print('\tTime: {}s'.format(int(since(epoch_beg))))
if self.elapsed_epochs() <= self.params_dict['warmup_num_epochs']:
self.optimizer.param_groups[0]['lr'] /= lr_scale
def train(self, outdir, criterion, data_loaders,
phases=('train', 'test'),
job_num_epochs=None,
**kwargs):
if not job_num_epochs:
job_num_epochs = self.params_dict['num_epochs']
for i in range(self.params_dict['num_epochs']):
self.train_one_epoch(criterion, data_loaders, phases, **kwargs)
if self.params_dict['use_model_checkpoint']\
and (self.elapsed_epochs() % self.params_dict['model_checkpoint_period'] == 0):
self.save_to_dir(os.path.join(outdir,
str(self.elapsed_epochs())))
history_sum = [self.loss_history[str(_)][phases[-1]][self.params_dict['early_stopping_loss']]
for _ in range(self.elapsed_epochs())]
if history_sum[-1] == min(history_sum):
self.save_to_dir(os.path.join(outdir, 'best'))
rl = reduce_lr(history=history_sum,
lr=self.optimizer.param_groups[0]['lr'],
cooldown=self.params_dict['cooldown'],
patience=self.params_dict['num_epochs_reduce_lr'],
mode='min',
difference=self.params_dict['delta_early_stopping'],
lr_scale=self.params_dict['learning_rate_scale'],
lr_min=self.params_dict['learning_rate_lower_bound'],
cool_down_patience=self.params_dict['num_epochs_cooldown'])
self.optimizer.param_groups[0]['lr'], self.params_dict['cooldown'] = rl
if self.params_dict['use_early_stopping']:
if stop_early(history_sum,
patience=self.params_dict['num_epochs_early_stopping'],
mode='min',
difference=self.params_dict['delta_early_stopping']):
print('Stopping Early.')
break
if self.elapsed_epochs() >= self.params_dict['num_epochs']:
break
if i >= job_num_epochs:
return
with open(os.path.join(outdir, '.done.train'), 'w') as _w:
pass
def elapsed_epochs(self):
return len(self.loss_history)
def load_from_dir(self, trainer_dir, model_kind=models.VQVAE):
if os.path.isfile(os.path.join(trainer_dir, 'nnet_kind.txt')):
model_classname = open(os.path.join(trainer_dir, 'nnet_kind.txt')).read().strip()
model_kind = models.get_model(model_classname)
self.model = model_kind.load_from_dir(trainer_dir)
self.model.to(self.device)
with open(os.path.join(trainer_dir, 'optimizer.txt')) as _opt:
opt_name = _opt.read()
_opt = _optimizer_kinds[opt_name]
self.optimizer = _opt(self.model.parameters(), lr=1e-3)
self.optimizer.load_state_dict(torch.load(
os.path.join(trainer_dir, 'optimizer.state'))
)
jsonfile = os.path.join(trainer_dir, 'trainer.json')
with open(jsonfile) as _json:
self.params_dict = json.load(_json)
jsonfile = os.path.join(trainer_dir, 'loss_history.json')
with open(jsonfile) as _json:
self.loss_history = json.load(_json)
def save_to_dir(self, trainer_dir):
if not os.path.isdir(trainer_dir):
os.makedirs(trainer_dir)
self.model.save(trainer_dir)
opt_name = str(self.optimizer).split()[0]
with open(os.path.join(trainer_dir, 'optimizer.txt'), 'w') as _opt:
_opt.write(opt_name)
torch.save(self.optimizer.state_dict(),
os.path.join(trainer_dir, 'optimizer.state')
)
with open(os.path.join(trainer_dir, 'trainer.json'), 'w') as _json:
json.dump(self.params_dict, _json)
with open(os.path.join(trainer_dir, 'loss_history.json'), 'w') as _json:
json.dump(self.loss_history, _json)
_trainers = {'Simple': SimpleTrainingLoop}
def get_trainer(trainer_name):
return _trainers[trainer_name]
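# A minimal wiring sketch (hypothetical names for the model, losses and loaders; not part of
# the original module) showing how the trainer is normally driven:
#
#   trainer_cls = get_trainer('Simple')
#   trainer = trainer_cls(model, device, num_epochs=20, learning_rate=1e-3)
#   criterion = {'rec_loss': rec_loss, 'dis_loss': dis_loss, 'com_loss': com_loss}
#   trainer.train(outdir, criterion, {'train': train_loader, 'test': test_loader})
#
# Passing a directory path as `model` instead of a model object resumes from the newest epoch
# subdirectory that contains a loss_history.json, as handled in __init__ above.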
|
python
|
# -*- coding: utf-8 -*-
#
# This file is part of File Dedupe
# Copyright (C) 2015 Lars Holm Nielsen.
#
# File Dedupe is free software; you can redistribute it and/or
# modify it under the terms of the Revised BSD License; see LICENSE
# file for more details.
"""Small utility for detecting duplicate files."""
import os
import re
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
"""Integration of PyTest with setuptools."""
user_options = [('pytest-args=', 'a', 'Arguments to pass to py.test')]
def initialize_options(self):
"""Initialize options."""
TestCommand.initialize_options(self)
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
config = ConfigParser()
config.read("pytest.ini")
self.pytest_args = config.get("pytest", "addopts").split(" ")
def finalize_options(self):
"""Finalize options."""
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
"""Run tests."""
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
# Get the version string. Cannot be done with import!
with open(os.path.join('filededupe', 'version.py'), 'rt') as f:
version = re.search(
        r'__version__\s*=\s*"(?P<version>.*)"\n',
f.read()
).group('version')
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('CHANGES.rst') as history_file:
history = history_file.read().replace('.. :changes:', '')
requirements = [
'fs>=0.5.0',
'click>=5.0',
]
extras_requirements = {}
test_requirements = [
'pytest-cache>=1.0',
'pytest-cov>=2.0.0',
'pytest-isort>=0.1.0',
'pytest-pep8>=1.0.6',
'pytest>=2.8.0',
'coverage>=4.0',
]
setup(
name='filededupe',
version=version,
description=__doc__,
long_description=readme + '\n\n' + history,
author="Lars Holm Nielsen",
author_email='[email protected]',
url='https://github.com/lnielsen/filededupe',
packages=[
'filededupe',
],
include_package_data=True,
install_requires=requirements,
extras_require=extras_requirements,
license="BSD",
zip_safe=False,
keywords='file deduplication',
entry_points={
'console_scripts': [
"filededupe=filededupe.cli:cli"
],
},
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
tests_require=test_requirements,
cmdclass={'test': PyTest},
)
|
python
|
#!/usr/bin/env python3
from bs4 import BeautifulSoup as bf
import requests
import json
import random
import webbrowser
import os
import urllib
import time
if not os.path.exists('images'):
os.mkdir('images')
ls = []
def huluxia(id=250):
_key = '6BD0D690D176C706DA83A5D9222E52AEF3708C7537DC1E7AEA069811D93CF42CCDEC8CFE102FF3B77D8737F7BF103B3F19FBAB7F2E14C120'
hlx = requests.get(
url = 'http://floor.huluxia.com/user/info/ANDROID/2.1',
headers = {
'User-Agent':
'Mozilla/5.0 (Macintosh;'
' Intel Mac OS X 10_14_2) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
},
params = {
'_key': _key,
'user_id': id
}
)
hlx.encoding = 'utf-8'
hlx_json = json.loads(hlx.content)
ls.append(hlx_json['avatar'])
#urllib.request.urlretrieve(hlx_json['avatar'],'./images/%s%s.jpg' % (hlx_json['nick'],hlx_json['userID']))
time.sleep(2)
    print('Success! %s' % id)
for i in range(1,100):
huluxia(i)
|
python
|
import rawTAspectrum
import tkinter as tk
from tkinter import ttk
from tkinter import Entry, filedialog, messagebox
from threading import Thread
from matplotlib.backends.backend_tkagg import (
FigureCanvasTkAgg, NavigationToolbar2Tk)
# Implement the default Matplotlib key bindings.
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
from tkinter.filedialog import asksaveasfile
import os
from tkinter.ttk import Button
class Project(tk.Frame):
def __init__(self,parent):
#initialize GUI variables
tk.Frame.__init__(self,parent)
#class variables
self.tabCount =1
self.tabPanel = ttk.Notebook(self)
self.tabPanel.pack(fill="both", expand=True)
myMenu = tk.Menu(self)
parent.config(menu=myMenu)
#add menu Items
menu1 = tk.Menu(myMenu)
myMenu.add_cascade(label="Start", menu=menu1)
menu1.add_command(label="Select a file", command=self.open_file)
menu1.add_command(label="Reset", command=self.reset)
# menu1.add_command(label="Save a file", command=self.save_image)
# menu2 = tk.Menu(myMenu)
# myMenu.add_cascade(label="Menu 2", menu=menu2)
#menu2.add_command(label="Menu Item", command=self.emptyCommand)
#menu2.add_command(label="Menu Item", command=self.emptyCommand)
def create_tab(self):
# created new tabs
self.tab = tk.Frame(self.tabPanel)
self.tab.pack(fill="both")
self.tabPanel.add(self.tab,text="Tab" + str(self.tabCount))
self.tabCount = self.tabCount+1
def create_graph1(self):
# created new tabs
self.tab = tk.Frame(self.tabPanel)
self.tab.columnconfigure(0,weight=0)
self.tab.columnconfigure(1,weight=1)
self.tab.columnconfigure(2,weight=1)
self.tab.rowconfigure(0,weight=1)
self.tab.pack(fill="both")
self.tabPanel.add(self.tab,text="Tab" + str(self.tabCount))
self.tabCount = self.tabCount+1
self.controlPane = tk.Frame(self.tab)
self.controlPane.grid(column=0,row=0)
self.label1 = tk.Label(self.controlPane, text="Wlax1")
self.label1.pack()
self.entry1 = Entry(self.controlPane)
self.entry1.pack()
self.label2 = tk.Label(self.controlPane, text="Wlax2")
self.label2.pack()
self.entry2 = Entry(self.controlPane)
self.entry2.pack()
self.label3 = tk.Label(self.controlPane, text="Taxcp")
self.label3.pack()
self.entry3 = Entry(self.controlPane)
self.entry3.pack()
btn_apply = Button(self.controlPane, text="truncate", command=self.apply_button_action)
btn_apply.pack()
self.graph1 = tk.Frame(self.tab)
self.graph2 = tk.Frame(self.tab)
self.graph1.grid(column=1,row=0,sticky="nsew")
self.graph2.grid(column=2,row=0,sticky="nsew")
def apply_button_action(self):
controlThread = Thread(target=self.truncate_graph_thread_exec, daemon = True)
controlThread.start()
def truncate_graph_thread_exec(self):
self.fig3 = rawTAspectrum.load_truncated_chart(int(self.entry1.get()), int(self.entry2.get()), int(self.entry3.get()))
self.fig4 = rawTAspectrum.fourier_transform()
self.fig5 = rawTAspectrum.oscillation_freq_axis()
self.after(0,self.create_truncation_tab)
def create_truncation_tab(self):
self.update()
self.truncTab = tk.Frame(self.tabPanel)
self.truncTab.columnconfigure(0,weight=1)
self.truncTab.columnconfigure(1,weight=1)
self.truncTab.rowconfigure(0,weight=1)
self.truncTab.rowconfigure(1,weight=1)
self.tabPanel.add(self.truncTab,text="Tab" + str(self.tabCount))
self.tabCount = self.tabCount+1
self.graph3 = tk.Frame(self.truncTab)
self.graph3.grid(column=0,row=0,sticky="nsew")
self.graph4 = tk.Frame(self.truncTab)
self.graph4.grid(column=1,row=0,sticky="nsew")
self.graph5 = tk.Frame(self.truncTab)
self.graph5.grid(column=0,row=1,sticky="nsew")
self.freqPane = tk.Frame(self.truncTab)
self.freqPane.grid(column=1,row=1,sticky="nsew")
self.frequencySlider = tk.Scale(self.freqPane, from_=1,to=20, orient=tk.HORIZONTAL, label="frequency")
self.frequencySlider.pack()
btn_apply = Button(self.freqPane, text="truncate", command=self.start_freq_graph)
btn_apply.pack()
canvas = FigureCanvasTkAgg(self.fig3, master=self.graph3)
toolbar = NavigationToolbar2Tk(canvas, self.graph3)
toolbar.update()
canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
canvas = FigureCanvasTkAgg(self.fig4, master=self.graph4)
toolbar = NavigationToolbar2Tk(canvas, self.graph4)
toolbar.update()
canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
canvas.draw()
canvas = FigureCanvasTkAgg(self.fig5, master=self.graph5)
toolbar = NavigationToolbar2Tk(canvas, self.graph5)
toolbar.update()
canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
canvas.draw()
self.update()
def start_freq_graph(self):
controlThread = Thread(target=self.generate_freq_graph, daemon = True)
controlThread.start()
def generate_freq_graph(self):
self.fig6 = rawTAspectrum.frequency_graph(self.frequencySlider.get())
print(self.frequencySlider.get())
self.after(0,self.create_freq_tab)
def create_freq_tab(self):
self.update()
self.freqTab = tk.Frame(self.tabPanel)
self.tabPanel.add(self.freqTab,text="Tab" + str(self.tabCount))
self.tabCount = self.tabCount+1
self.graph6 = tk.Frame(self.freqTab)
self.graph6.pack()
canvas = FigureCanvasTkAgg(self.fig6, master=self.graph6)
toolbar = NavigationToolbar2Tk(canvas, self.graph6)
toolbar.update()
canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
canvas.draw()
self.update()
    # opens the data file and runs the analysis
def open_file(self):
self.fileName = filedialog.askopenfilename(initialdir="/C:", title="Select a File", filetypes=(("DAT files", "*.dat"),("All files", "*.*")))
#rawTAspectrum.load_chart(self.fileName)
controlThread = Thread(target=self.dat_file_thread_exec, daemon=True)
self.progressBar = ttk.Progressbar(root,orient=tk.HORIZONTAL,length=200,mode="indeterminate",takefocus=True,maximum=100)
controlThread.start()
self.progressBar.start()
self.progressBar.pack()
def get_tab_count(self):
return self.tabCount
def dat_file_thread_exec(self):
        # background worker: loads the data file and builds the initial figures (the GUI progress bar spins meanwhile)
self.update()
rawTAspectrum.load_data_file(self.fileName)
self.fig1 = rawTAspectrum.load_raw_data()
self.fig2 = rawTAspectrum.load_data_without_axes()
self.after(0,self.draw_plot)
def draw_plot(self):
self.progressBar.stop()
self.progressBar.destroy()
self.create_graph1()
canvas = FigureCanvasTkAgg(self.fig1, master=self.graph1)
canvas2 = FigureCanvasTkAgg(self.fig2,master=self.graph2)
canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
canvas2.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
toolbar = NavigationToolbar2Tk(canvas, self.graph1)
toolbar2 = NavigationToolbar2Tk(canvas2, self.graph2)
toolbar.update()
toolbar2.update()
canvas.draw()
canvas2.draw()
self.update()
def reset(self):
self.tabCount = 1
        self.tabPanel.destroy() # clears out the tab panel
self.tabPanel = ttk.Notebook(self) # resetting the tab panel
self.tabPanel.pack(fill="both", expand=True)
# def save_image(self):
# files = [("All files", "*.*" ),
# ("Python files", "*.py"),
# ("Text document", "*.txt"),
# ("Image files", "*.png")]
# file = asksaveasfile(filetypes = files, defaultextension = '.png')
if __name__ == "__main__":
root = tk.Tk()
root.title('Laser Noise Analysis App')
root.geometry("1000x700")
Project(root).pack(fill="both", expand=True)
root.mainloop()
|
python
|
from django.db import models
import reversion
class Dois(models.Model):
texto = models.TextField(blank=False)
versao = models.PositiveIntegerField(default=1)
def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
# if self.id:
# anterior = Dois.objects.get(id=self.id)
# self.versao = anterior.versao + 1
super().save(force_insert, force_update, using, update_fields)
class Meta:
ordering = ['-id']
verbose_name_plural = 'Varios "DOIS"'
reversion.register(Dois)
|
python
|
from django.urls import path
from . import views
urlpatterns = [
path("", views.index, name="index"),
path("component", views.component, name="component"),
path("page1", views.page1, name="page1"),
path("page2", views.page2, name="page2"),
]
|
python
|
from pywikiapi import wikipedia
from helpers import clean_api, chunker
import os, pymysql, json, re
# Connect to Wikidata
class WikidataAPI:
site = None
def __init__(self):
self.site = wikipedia('www', 'wikidata')
def get_item_data(self, wd_items, raw=False, attributes = ['sitelinks', 'claims'], claim_props=[]):
retMap = {}
for batch in chunker(wd_items, 49):
res = self.site('wbgetentities', ids=batch, props='|'.join(attributes))
for entity in res.get('entities'):
data = res.get('entities').get(entity)
tmp_data = {}
for attr in attributes:
if attr == 'sitelinks':
sitelinks = {f:data.get(attr).get(f).get('title') for f in data.get(attr)}
data.update({'sitelinks': sitelinks})
if attr == 'claims':
claims = clean_api(data.get(attr))
data.update({'claims': claims})
#print(data.get('type'))
#parsed_data = clean_api(data) if raw and 'claims' else data
retMap.update({entity: data})
return retMap
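# A minimal usage sketch (my addition): it assumes network access and the local
# `helpers` module; the Wikidata item IDs are only examples.
if __name__ == "__main__":
    api = WikidataAPI()
    items = api.get_item_data(['Q42', 'Q1'], attributes=['sitelinks'])
    for qid, data in items.items():
        sitelinks = data.get('sitelinks', {})
        print(qid, list(sitelinks.items())[:3])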
|
python
|
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:expandtab
# Copyright 2016, 2017 juga (juga at riseup dot net), MIT license.
version = "0.8.5"
|
python
|
# SPDX-License-Identifier: MIT
"""Views in the context of rendering and compilation of layouts."""
# Python imports
from datetime import date
from logging import getLogger
# Django imports
from django.conf import settings
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import redirect
from django.urls import reverse_lazy
from django.utils.module_loading import import_string
from django.views.generic.base import ContextMixin, View
# app imports
from calingen.exceptions import CalingenException
from calingen.forms.generation import LayoutSelectionForm
from calingen.views.generic import RequestEnabledFormView
from calingen.views.mixins import AllCalendarEntriesMixin, RestrictToUserMixin
# get a module level logger
logger = getLogger(__name__)
class CompilerView(
LoginRequiredMixin, RestrictToUserMixin, AllCalendarEntriesMixin, ContextMixin, View
):
"""Render the selected layout and pass the result to a compiler."""
class NoLayoutSelectedException(CalingenException):
"""Raised if there is no selected layout in the user's ``Session``."""
def get(self, *args, **kwargs):
"""Render the selected layout and call the compiler on the result.
The actual response to the GET request is provided by the implementation
of :meth:`calingen.interfaces.plugin_api.CompilerProvider.get_response`.
Notes
-----
If there is no selected layout in the user's ``Session``, a redirect to
:class:`calingen.views.generation.LayoutSelectionView` is performed.
The method retrieves the
:class:`compiler instance <calingen.interfaces.plugin_api.CompilerProvider>`
from the project's settings module
(:attr:`~calingen.settings.CALINGEN_COMPILER`). It will resort to the
configured ``"default"`` compiler, if no specific compiler for the
selected ``layout_type`` (as defined by the implementation of
:class:`~calingen.interfaces.plugin_api.LayoutProvider`) is set or if
the specified compiler can not be imported. In that case a log message
(of level warn) is emitted.
"""
try:
layout = self._get_layout()
except self.NoLayoutSelectedException:
return redirect("calingen:layout-selection")
render_context = self._prepare_context(*args, **kwargs)
rendered_source = layout.render(render_context)
try:
compiler = import_string(settings.CALINGEN_COMPILER[layout.layout_type])
except KeyError:
compiler = import_string(settings.CALINGEN_COMPILER["default"])
except ImportError:
logger.warn(
"Could not import {}, using default compiler".format(
settings.CALINGEN_COMPILER[layout.layout_type]
)
)
compiler = import_string(settings.CALINGEN_COMPILER["default"])
return compiler.get_response(rendered_source, layout_type=layout.layout_type)
def _get_layout(self):
"""Return the :class:`~calingen.interfaces.plugin_api.LayoutProvider` implementation.
Notes
-----
If there is no selected layout in the user's ``Session``, a custom
exception will cause a redirect to the user's profile overview.
"""
selected_layout = self.request.session.pop("selected_layout", None)
if selected_layout is None:
# This is most likely an edge case: The view is accessed with a
# GET request without a selected layout stored in the user's session.
# This could be caused by directly calling this view's url.
# Just redirect to the layout selection.
raise self.NoLayoutSelectedException()
return import_string(selected_layout)
def _prepare_context(self, *args, **kwargs):
"""Prepare the context passed to the layout's rendering method.
Notes
-----
The ``context`` that is passed to the layout's ``render()`` method
contains the following ``keys``:
- ``target_year``: The year to create the layout for.
- ``layout_configuration``: If the layout provides a custom
implementation of :class:`calingen.forms.generation.LayoutConfigurationForm`,
the fetched values will be provided here.
- ``entries``: All calendar entries of the user's profile, resolved to
the ``target_year``, provided as a
:class:`calingen.interfaces.data_exchange.CalendarEntryList` object.
"""
target_year = self.request.session.pop("target_year", date.today().year)
layout_configuration = self.request.session.pop("layout_configuration", None)
return self.get_context_data(
target_year=target_year, layout_configuration=layout_configuration, **kwargs
)
class LayoutConfigurationView(LoginRequiredMixin, RequestEnabledFormView):
"""Show the (optional) configuration form for the selected layout.
Warnings
--------
This view is not restricted to users with a
:class:`Calingen Profile <calingen.models.profile.Profile>` and can be
accessed by any user of the project.
However, on actual generation and compilation of the output, a
``Profile`` is required.
Notes
-----
This is just the view to show and process the layout's implementation of
:class:`calingen.forms.generation.LayoutConfigurationForm`.
"""
template_name = "calingen/layout_configuration.html"
success_url = reverse_lazy("calingen:compilation")
class NoConfigurationFormException(CalingenException):
"""Raised if the selected layout does not have a ``configuration_form``."""
class NoLayoutSelectedException(CalingenException):
"""Raised if there is no selected layout in the user's ``Session``."""
def form_valid(self, form):
"""Trigger saving of the configuration values in the user's ``Session``."""
form.save_configuration()
return super().form_valid(form)
def get(self, request, *args, **kwargs):
"""Handle a GET request to the view.
While processing the request, it is determined if the selected
implementation of :class:`calingen.interfaces.plugin_api.LayoutProvider`
        uses a custom ``configuration_form``.
If no custom configuration is implemented by the layout, the request
is redirected to the generator.
Notes
-----
Determining the ``configuration_form`` is done implicitly while
traversing the view's hierarchy during processing the request. Several
methods are involved, but at some point
:meth:`~calingen.views.generation.LayoutConfigurationView.get_form_class` is
        called, which will raise an exception that is handled here.
If there is no selected layout in the user's ``Session``, a redirect to
:class:`calingen.views.generation.LayoutSelectionView` is performed.
"""
try:
return super().get(request, *args, **kwargs)
except self.NoConfigurationFormException:
# As no layout specific configuration form is required, directly
# redirect to the compilation.
return redirect("calingen:compilation")
except self.NoLayoutSelectedException:
# This is most likely an edge case: The view is accessed with a
# GET request without a selected layout stored in the user's session.
# This could be caused by directly calling this view's url.
# Just redirect to the layout selection.
return redirect("calingen:layout-selection")
def get_form_class(self):
"""Retrieve the layout's configuration form.
Notes
-----
Implementations of :class:`calingen.interfaces.plugin_api.LayoutProvider`
may provide a class attribute ``configuration_form`` with a subclass of
:class:`calingen.forms.generation.LayoutConfigurationForm`.
If ``configuration_form`` is omitted, a custom exception is raised, that
will be handled in
:meth:`~calingen.views.generation.LayoutConfigurationView.get`.
If there is no selected layout in the user's ``Session``, a different
custom exception will cause a redirect to the user's profile overview.
"""
selected_layout = self.request.session.get("selected_layout", None)
if selected_layout is None:
raise self.NoLayoutSelectedException()
layout = import_string(selected_layout)
if layout.configuration_form is None:
raise self.NoConfigurationFormException()
return layout.configuration_form
class LayoutSelectionView(LoginRequiredMixin, RequestEnabledFormView):
"""Provide a list of availabe layouts.
Warnings
--------
This view is not restricted to users with a
:class:`Calingen Profile <calingen.models.profile.Profile>` and can be
accessed by any user of the project.
However, on actual generation and compilation of the output, a
``Profile`` is required.
Notes
-----
This is just the view to show and process the
:class:`calingen.forms.generation.LayoutSelectionForm`.
    Relevant logic that affects the actual creation, rendering and compilation
of layouts is provided in the corresponding
:class:`~django.forms.Form` instance.
"""
template_name = "calingen/layout_selection.html"
form_class = LayoutSelectionForm
success_url = reverse_lazy("calingen:layout-configuration")
def form_valid(self, form):
"""Trigger saving of the selected value in the user's ``Session``."""
form.save_selection()
return super().form_valid(form)
|
python
|
from tabulate import tabulate
from ..helpers.resource_matcher import ResourceMatcher
try:
from IPython.core.display import display, HTML
get_ipython
def display_html(data):
display(HTML(data))
except (NameError, ImportError):
def display_html(data):
print(data)
def _header_print(header, kwargs):
if kwargs.get('tablefmt') == 'html':
display_html(f'<h3>{header}</h3>')
else:
print(f'{header}:')
def _table_print(data, kwargs):
if kwargs.get('tablefmt') == 'html':
display_html(data)
else:
print(data)
def truncate_cell(value, max_size):
value = str(value)
if max_size is not None and len(value) > max_size:
return value[:max_size] + ' ...'
else:
return value
def printer(num_rows=10, last_rows=None, fields=None, resources=None,
header_print=_header_print, table_print=_table_print, max_cell_size=100, **kwargs):
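    # Added note: the returned step is pass-through for the pipeline (every row
    # is yielded unchanged). For display it keeps a sample: windows of rows
    # around indices 1, num_rows, num_rows**2, ... plus the trailing last_rows
    # (or num_rows) rows, inserting ['...'] marker rows wherever data was
    # skipped, and finally prints the sample with tabulate.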
def func(rows):
spec = rows.res
if not ResourceMatcher(resources, spec.descriptor).match(spec.name):
yield from rows
return
header_print(spec.name, kwargs)
schema_fields = spec.schema.fields
if fields:
schema_fields = [f for f in schema_fields if f.name in fields]
field_names = [f.name for f in schema_fields]
headers = ['#'] + [
'{}\n({})'.format(f.name, f.type) for f in schema_fields
]
toprint = []
last = []
x = 1
for i, row in enumerate(rows):
index = i + 1
prow = [index] + [truncate_cell(row[f], max_cell_size) for f in field_names]
yield row
if index - x == (num_rows + 1):
x *= num_rows
if 0 <= index - x <= num_rows:
last.clear()
if toprint and toprint[-1][0] != index - 1:
toprint.append(['...'])
toprint.append(prow)
else:
last.append(prow)
if len(last) > (last_rows or num_rows):
last = last[1:]
if toprint and last and toprint[-1][0] != last[0][0] - 1:
toprint.append(['...'])
toprint += last
table_print(tabulate(toprint, headers=headers, **kwargs), kwargs)
return func
|
python
|
from hashmap.hashmap import HashMap, LinearHashMap
__all__ = ['HashMap', 'LinearHashMap']
|
python
|
# -*- coding: utf-8 -*-
"""
Same as calc_velocity.py, but calls mpi with changa to allow many nodes
NOTE: mpirun must already be loaded. Also, you should do export MX_RCACHE=0
before loading python
Created on Wed Apr 9 15:39:28 2014
@author: ibackus
"""
import numpy as np
import pynbody
SimArray = pynbody.array.SimArray
import isaac
import subprocess
import os
import glob
import time
def v_xy(f, param, changbin=None, nr=50, min_per_bin=100):
"""
Attempts to calculate the circular velocities for particles in a thin
(not flat) keplerian disk. Requires ChaNGa
**ARGUMENTS**
f : tipsy snapshot
For a gaseous disk
param : dict
a dictionary containing params for changa. (see isaac.configparser)
changbin : str (OPTIONAL)
If set, should be the full path to the ChaNGa executable. If None,
an attempt to find ChaNGa is made
nr : int (optional)
number of radial bins to use when averaging over accelerations
min_per_bin : int (optional)
The minimum number of particles to be in each bin. If there are too
few particles in a bin, it is merged with an adjacent bin. Thus,
actual number of radial bins may be less than nr.
**RETURNS**
vel : SimArray
An N by 3 SimArray of gas particle velocities.
"""
if changbin is None:
# Try to find the ChaNGa binary full path
changbin = os.popen('which ChaNGa_uw_mpi').read().strip()
# Load up mpi
# Load stuff from the snapshot
x = f.g['x']
y = f.g['y']
z = f.g['z']
r = f.g['rxy']
vel0 = f.g['vel'].copy()
# Remove units from all quantities
r = isaac.strip_units(r)
x = isaac.strip_units(x)
y = isaac.strip_units(y)
z = isaac.strip_units(z)
# Temporary filenames for running ChaNGa
f_prefix = str(np.random.randint(0, 2**32))
f_name = f_prefix + '.std'
p_name = f_prefix + '.param'
# Update parameters
p_temp = param.copy()
p_temp['achInFile'] = f_name
p_temp['achOutName'] = f_prefix
if 'dDumpFrameTime' in p_temp: p_temp.pop('dDumpFrameTime')
if 'dDumpFrameStep' in p_temp: p_temp.pop('dDumpFrameStep')
# --------------------------------------------
# Estimate velocity from gravity only
# --------------------------------------------
# Note, accelerations due to gravity are calculated twice to be extra careful
# This is so that any velocity dependent effects are properly accounted for
# (although, ideally, there should be none)
# The second calculation uses the updated velocities from the first
for iGrav in range(2):
# Save files
f.write(filename=f_name, fmt = pynbody.tipsy.TipsySnap)
isaac.configsave(p_temp, p_name, ftype='param')
# Run ChaNGa, only calculating gravity
command = 'mpirun --mca mtl mx --mca pml cm ' + changbin + ' -gas -n 0 ' + p_name
#command = 'charmrun ++local ' + changbin + ' -gas -n 0 ' + p_name
p = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
while p.poll() is None:
time.sleep(0.1)
# Load accelerations
acc_name = f_prefix + '.000000.acc2'
a = isaac.load_acc(acc_name)
# Clean-up
for fname in glob.glob(f_prefix + '*'): os.remove(fname)
# If a is not a vector, calculate radial acceleration. Otherwise, assume
# a is the radial acceleration
a_r = a[:,0]*x/r + a[:,1]*y/r
# Make sure the units are correct then remove them
a_r = isaac.match_units(a_r, a)[0]
a_r = isaac.strip_units(a_r)
# Calculate cos(theta) where theta is angle above x-y plane
cos = r/np.sqrt(r**2 + z**2)
ar2 = a_r*r**2
# Bin the data
r_edges = np.linspace(r.min(), (1+np.spacing(2))*r.max(), nr + 1)
ind, r_edges = isaac.digitize_threshold(r, min_per_bin, r_edges)
ind -= 1
nr = len(r_edges) - 1
r_bins, ar2_mean, err = isaac.binned_mean(r, ar2, binedges=r_edges, \
weighted_bins=True)
# Fit lines to ar2 vs cos for each radial bin
m = np.zeros(nr)
b = np.zeros(nr)
for i in range(nr):
mask = (ind == i)
p = np.polyfit(cos[mask], ar2[mask], 1)
m[i] = p[0]
b[i] = p[1]
# Interpolate the line fits
m_spline = isaac.extrap1d(r_bins, m)
b_spline = isaac.extrap1d(r_bins, b)
# Calculate circular velocity
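    # Added note: for a circular orbit the radial balance is |a_r| = v**2 / r,
    # so v = sqrt(r * |a_r|); since ar2_calc stores a_r * r**2, this is
    # equivalent to v = sqrt(|ar2_calc| / r), which is what the next line does.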
ar2_calc = m_spline(r)*cos + b_spline(r)
v_calc = np.sqrt(abs(ar2_calc)/r)
vel = f.g['vel'].copy()
v_calc = isaac.match_units(v_calc,vel)[0]
vel[:,0] = -v_calc*y/r
vel[:,1] = v_calc*x/r
# Assign to f
f.g['vel'] = vel
# --------------------------------------------
# Estimate pressure/gas dynamics accelerations
# --------------------------------------------
a_grav = a
ar2_calc_grav = ar2_calc
# Save files
f.write(filename=f_name, fmt = pynbody.tipsy.TipsySnap)
isaac.configsave(p_temp, p_name, ftype='param')
# Run ChaNGa, including SPH
command = 'mpirun --mca mtl mx --mca pml cm ' + changbin + ' +gas -n 0 ' + p_name
p = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
while p.poll() is None:
time.sleep(0.1)
# Load accelerations
acc_name = f_prefix + '.000000.acc2'
a_total = isaac.load_acc(acc_name)
# Clean-up
for fname in glob.glob(f_prefix + '*'): os.remove(fname)
# Estimate the accelerations due to pressure gradients/gas dynamics
a_gas = a_total - a_grav
ar_gas = a_gas[:,0]*x/r + a_gas[:,1]*y/r
ar_gas = isaac.strip_units(ar_gas)
ar2_gas = ar_gas*r**2
logr_bins, ratio, err = isaac.binned_mean(np.log(r), ar2_gas/ar2_calc_grav, nbins=nr,\
weighted_bins=True)
r_bins = np.exp(logr_bins)
ratio_spline = isaac.extrap1d(r_bins, ratio)
ar2_calc = ar2_calc_grav*(1 + ratio_spline(r))
a_calc = ar2_calc/r**2
v = np.sqrt(r*abs(a_calc))
v = isaac.match_units(v, vel0.units)[0]
vel = vel0.copy()
vel[:,0] = -v*y/r
vel[:,1] = v*x/r
    # restore the original velocities on the snapshot
f.g['vel'] = vel0
return vel
|
python
|
class Country:
def __init__(self, name, capital, population, continent):
self.__name = name
self.__capital = capital
self.__population = population
self.__continent = continent
my_country = Country('France', 'Paris', 67081000, 'Europe')
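# Note (added comment): the double-underscore attributes are name-mangled by
# Python to _Country__<attribute>, which is why the prints below can still
# reach them from outside the class.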
print(my_country._Country__name)
print(my_country._Country__capital)
print(my_country._Country__population)
print(my_country._Country__continent)
|
python
|
from . import utils
|
python
|
import torch
from torch import nn
class WeightComputer(nn.Module):
def __init__(self, mode="constant", constant_weight=1.0, consistency_fn=None, consistency_neigh=1, logits=False, device="cpu", min_weight=0.0):
"""
:param mode: in {'constant', 'balance_gt', 'pred_entropy', 'pred_consistency', 'pred_merged}
:param constant_weight:
:param consistency_fn:
:param consistency_neigh: in pixels
:param logits: work with logits
"""
super().__init__()
self._mode = mode
self._constant_weight = constant_weight
self._consistency_fn = consistency_fn
self._consistency_neigh = consistency_neigh
self._is_logits = logits
self._min_weight = torch.tensor(min_weight, device=device)
self._device = device
if consistency_neigh != 1 and consistency_neigh != 2:
raise ValueError("invalid consistency neighbourhood {}".format(consistency_neigh))
if ("consistency" in self._mode or "multi" in self._mode) and consistency_fn is None:
raise ValueError("missing consistency function for weight computation")
def forward(self, y, y_gt, apply_weights=None):
weights = torch.maximum(self._weight(y, y_gt), y_gt)
if self._mode not in {"balance_gt", "constant"}:
weights = (1 - self._min_weight) * weights + self._min_weight
if apply_weights is not None:
if apply_weights.ndim == 1 and apply_weights.size()[0] != weights.size()[0]:
raise ValueError("apply weights vector does not have the correct dimensions {}".format(apply_weights.size()))
apply_weights = apply_weights.unsqueeze(1).unsqueeze(1).unsqueeze(1).int()
weights = torch.maximum(weights, apply_weights)
return weights
def _y(self, y):
if self._is_logits:
return torch.sigmoid(y)
else:
return y
def _weight(self, y, y_gt):
if self._mode == "constant":
return torch.full(y.size(), self._constant_weight, device=self._device)
elif self._mode == "balance_gt":
ratio = torch.mean(y_gt, dim=[2, 3], keepdim=True)
ratio[ratio >= 1] = 0 # handle case of no background
w = (1 - y_gt) * ratio / (1 - ratio)
w[w > 1.0] = 1.0 # don't overweight background even if they are minority
return w
elif self._mode == "pred_entropy":
return self._entropy(y)
elif self._mode == "pred_consistency":
return self._consistency(y)
elif self._mode == "pred_merged":
return self._consistency(y) * self._entropy(y)
else:
raise ValueError("Invalid mode '{}'".format(self._mode))
def _entropy(self, y):
if not self._is_logits:
return 1 + y * torch.log2(y) + (1 - y) * torch.log2(1 - y)
else:
probas = torch.sigmoid(y)
logexpy = torch.log(torch.exp(y) + 1)
return 1 + (probas * (y - logexpy) - (1 - probas) * logexpy) / torch.log(torch.tensor(2))
@property
def consist_fn(self):
if self._consistency_fn == "quadratic":
return lambda y1, y2: torch.square(y1 - y2)
elif self._consistency_fn == "absolute":
return lambda y1, y2: torch.abs(y1 - y2)
def _consistency(self, y):
offset_range = list(range(-self._consistency_neigh, self._consistency_neigh+1))
divider = torch.zeros(y.size(), dtype=torch.int8, device=self._device)
accumulate = torch.zeros(y.size(), dtype=y.dtype, device=self._device)
_, _, height, width = y.size()
consist_fn = self.consist_fn
probas = self._y(y)
for offset_x in offset_range:
for offset_y in offset_range:
if offset_x == 0 and offset_y == 0:
continue
ref_y_low, ref_y_high = max(0, offset_y), min(height, height + offset_y)
ref_x_low, ref_x_high = max(0, offset_x), min(width, width + offset_x)
tar_y_low, tar_y_high = max(0, -offset_y), min(height, height - offset_y)
tar_x_low, tar_x_high = max(0, -offset_x), min(width, width - offset_x)
accumulate[:, :, ref_y_low:ref_y_high, ref_x_low:ref_x_high] += consist_fn(
probas[:, :, ref_y_low:ref_y_high, ref_x_low:ref_x_high],
probas[:, :, tar_y_low:tar_y_high, tar_x_low:tar_x_high])
divider[:, :, ref_y_low:ref_y_high, ref_x_low:ref_x_high] += 1
return 1 - (accumulate / divider)
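# A minimal usage sketch (my addition, not part of the original module): it
# assumes probability maps `y` and binary ground-truth masks `y_gt` of shape
# (N, 1, H, W); values are clamped away from 0 and 1 so the entropy term stays
# finite.
if __name__ == "__main__":
    torch.manual_seed(0)
    y = torch.rand(2, 1, 8, 8).clamp(1e-6, 1 - 1e-6)    # predicted probabilities
    y_gt = (torch.rand(2, 1, 8, 8) > 0.5).float()        # binary ground truth
    computer = WeightComputer(mode="pred_entropy", min_weight=0.1)
    weights = computer(y, y_gt)
    print(weights.shape, float(weights.min()), float(weights.max()))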
|
python
|
# import the module; psqlwrapper is a class defined inside the sqlwrapper module
from sqlwrapper import psqlwrapper
#create a db object
db = psqlwrapper()
#let's connect to our postgres server
# remember to start your postgres server first, either via the pgadmin interface or the command line
db.connect('dbname', 'username', 'password', host='127.0.0.1', port=5432)
# if everything goes correctly, you have successfully connected to your database
# we have just started the shop, so we only keep one type of pet (dogs); let's create the dogs table
# dogs will have following characteristics id, breed, color, weight(in kg)
# it takes tablename, columns, data types and primary key as the input
db.create_table('dogs', ['id','breed','color','weight'],['integer','text','text','real'],'id')
#now your table has been created
# you can check all the tables in the database by using db.show_tables()
# now we have created the dogs table
# you can check the description of the columns in the table by using db.describe_table('dogs')
db.show_tables()
db.describe_table('dogs')
db.insert('dogs', ['id','breed','color','weight',],[1,'Labrador','yellow',29.4])
#the above query can also be written as
db.insert('dogs',[],[2,'German Shepherd','black',30.6])
# let's say I got a new dog but its weight is unknown
db.insert('dogs',['id','breed','color'],[3, 'German Shepherd', 'Brown'])
# this will make an entry of (3, 'German Shepherd', 'Brown', None) in the table
# now let's fetch the values that were inserted
print(db.fetch_all('dogs'))   # this will return all values in the dogs table as a list of dictionaries
print(db.fetch_first('dogs'))  # this will return the first entry of the table
print(db.fetch_last('dogs'))   # this will return the last entry of the table
# now let's fetch the dogs whose breed is German Shepherd
print(db.fetch_by('dogs', breed="German Shepherd"))
# fetch all the dogs whose breed is German Shepherd or whose coat color is yellow
print(db.fetch_by('dogs', breed="German Shepherd", color="yellow"))
# fetch all the dogs whose breed is German Shepherd and color is black
print(db.fetch_by('dogs', breed="German Shepherd", color="black"))
# remember the dog whose weight was unknown; now that we know it, let's update the entry
db.update_by('dogs',['weight'],[34.5],id = 3)
#let's say he was bought by some buyer, thus now we will have to delete his entry
db.delete_by('dogs', id=3)
#you can also count entries in the table by using
db.count_entries('dogs')
# now you can use the drop_table method
db.drop_table('dogs')
# there is also a method to delete all data from a specific table
db.delete_all_from('dogs')
# you're done: all the basic functions are at your fingertips, with no need to write raw queries,
# which makes developing a basic app fast and easy!
|
python
|
import logging
from pathlib import Path
from typing import Optional
from genomics_data_index.storage.MaskedGenomicRegions import MaskedGenomicRegions
from genomics_data_index.storage.io.mutation.NucleotideSampleData import NucleotideSampleData
from genomics_data_index.storage.io.mutation.SequenceFile import SequenceFile
from genomics_data_index.storage.util import TRACE_LEVEL
logger = logging.getLogger(__name__)
class NucleotideSampleDataSequenceMask(NucleotideSampleData):
def __init__(self, sample_name: str, vcf_file: Path, vcf_file_index: Optional[Path],
sample_mask_sequence: Optional[Path], subtract_vcf_from_mask: bool = False):
super().__init__(sample_name=sample_name,
vcf_file=vcf_file,
vcf_file_index=vcf_file_index,
mask_bed_file=None,
preprocessed=False)
self._sample_mask_sequence = sample_mask_sequence
self._subtract_vcf_from_mask = subtract_vcf_from_mask
def _preprocess_mask(self, output_dir: Path) -> Path:
if self._sample_mask_sequence is None:
mask_file = output_dir / f'{self.sample_name_persistence}.bed.gz'
mask = MaskedGenomicRegions.empty_mask()
mask.write(mask_file)
return mask_file
else:
new_file = output_dir / f'{self.sample_name_persistence}.bed.gz'
if new_file.exists():
raise Exception(f'File {new_file} already exists')
name, records = SequenceFile(self._sample_mask_sequence).parse_sequence_file()
logger.log(TRACE_LEVEL, f'Getting genomic masks from {self._sample_mask_sequence}')
masked_regions = MaskedGenomicRegions.from_sequences(sequences=records)
if self._subtract_vcf_from_mask:
logger.log(TRACE_LEVEL, f'Subtracting variants in vcf_file=[{self._vcf_file}] '
f'from mask produced by sequence=[{self._sample_mask_sequence}]')
vcf_regions = MaskedGenomicRegions.from_vcf_file(self._vcf_file)
masked_regions = masked_regions.subtract(vcf_regions)
masked_regions.write(new_file)
return new_file
@classmethod
def create(cls, sample_name: str, vcf_file: Path,
sample_mask_sequence: Optional[Path] = None,
subtract_vcf_from_mask: bool = False) -> NucleotideSampleData:
return NucleotideSampleDataSequenceMask(sample_name=sample_name,
vcf_file=vcf_file,
vcf_file_index=None,
sample_mask_sequence=sample_mask_sequence,
subtract_vcf_from_mask=subtract_vcf_from_mask)
|
python
|
'''Approach:
1. Create a placeholder head node "temp" whose value is None (a dummy/sentinel node).
2. Point the next pointer of the current tail at whichever of the two given lists has the smaller head value; for example, if l1's head is smaller than l2's, link to l1.
3. Advance the chosen list to its next node.
4. Repeat steps 2-3 until one of the two lists runs out of nodes.
5. Attach the remainder of the non-empty list to the tail.
6. Return the node after the dummy head.'''
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
head = None
temp = ListNode()
while l1 and l2 :
if l1.val > l2.val :
center = l2
l2 = l2.next
else :
center = l1
l1 = l1.next
if not head :
head = center
temp = head
else:
temp.next = center
temp = temp.next
if head :
if l1 :
temp.next = l1
if l2 :
temp.next = l2
else:
if l1 :
head = l1
else:
head = l2
return head
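# A self-contained sketch (my addition) of the dummy/sentinel-node variant that
# the approach above describes; the sentinel removes the special case for
# picking the initial head. _ListNode only mirrors the commented LeetCode
# definition so the sketch can run on its own.
class _ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

def _merge_with_dummy(l1, l2):
    dummy = tail = _ListNode()
    while l1 and l2:
        if l1.val <= l2.val:
            tail.next, l1 = l1, l1.next
        else:
            tail.next, l2 = l2, l2.next
        tail = tail.next
    tail.next = l1 or l2  # attach whatever remains of the other list
    return dummy.next

if __name__ == "__main__":
    def _build(values):
        head = None
        for v in reversed(values):
            head = _ListNode(v, head)
        return head
    merged = _merge_with_dummy(_build([1, 2, 4]), _build([1, 3, 4]))
    out = []
    while merged:
        out.append(merged.val)
        merged = merged.next
    print(out)  # expected: [1, 1, 2, 3, 4, 4]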
|
python
|
import os
import sys
import unittest
from shutil import copyfile
from unittest.mock import MagicMock, patch
import fs
import pytest
from moban.core.definitions import TemplateTarget
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
class TestCustomOptions(unittest.TestCase):
def setUp(self):
self.config_file = "config.yaml"
with open(self.config_file, "w") as f:
f.write("hello: world")
self.patcher1 = patch(
"moban.core.utils.verify_the_existence_of_directories"
)
self.patcher1.start()
@patch("moban.externals.file_system.abspath")
@patch("moban.core.moban_factory.MobanEngine.render_to_file")
def test_custom_options(self, fake_template_doer, fake_abspath):
test_args = [
"moban",
"-c",
self.config_file,
"-cd",
".",
"-td",
".",
"-t",
"a.jj2",
]
with patch.object(sys, "argv", test_args):
from moban.main import main
main()
fake_template_doer.assert_called_with("a.jj2", "config.yaml", "-")
@patch("moban.core.moban_factory.MobanEngine.render_to_file")
def test_minimal_options(self, fake_template_doer):
test_args = ["moban", "-c", self.config_file, "-t", "a.jj2"]
with patch.object(sys, "argv", test_args):
from moban.main import main
main()
fake_template_doer.assert_called_with("a.jj2", "config.yaml", "-")
def test_missing_template(self):
test_args = ["moban", "-c", self.config_file]
fake_stdin = MagicMock(isatty=MagicMock(return_value=True))
with patch.object(sys, "stdin", fake_stdin):
with patch.object(sys, "argv", test_args):
from moban.main import main
with pytest.raises(SystemExit):
main()
def tearDown(self):
self.patcher1.stop()
os.unlink(self.config_file)
class TestOptions(unittest.TestCase):
def setUp(self):
self.config_file = "data.yml"
with open(self.config_file, "w") as f:
f.write("hello: world")
self.patcher1 = patch(
"moban.core.utils.verify_the_existence_of_directories"
)
self.patcher1.start()
@patch("moban.core.moban_factory.MobanEngine.render_to_file")
def test_default_options(self, fake_template_doer):
test_args = ["moban", "-t", "a.jj2"]
with patch.object(sys, "argv", test_args):
from moban.main import main
main()
fake_template_doer.assert_called_with("a.jj2", "data.yml", "-")
@patch("moban.core.moban_factory.MobanEngine.render_string_to_file")
def test_string_template(self, fake_template_doer):
string_template = "{{HELLO}}"
test_args = ["moban", string_template]
with patch.object(sys, "argv", test_args):
from moban.main import main
main()
fake_template_doer.assert_called_with(
string_template, "data.yml", "-"
)
def test_no_argments(self):
test_args = ["moban"]
fake_stdin = MagicMock(isatty=MagicMock(return_value=True))
with patch.object(sys, "stdin", fake_stdin):
with patch.object(sys, "argv", test_args):
from moban.main import main
with pytest.raises(SystemExit):
main()
def tearDown(self):
self.patcher1.stop()
os.unlink(self.config_file)
class TestNoOptions(unittest.TestCase):
def setUp(self):
self.config_file = ".moban.yml"
copyfile(
fs.path.join("tests", "fixtures", self.config_file),
self.config_file,
)
self.data_file = "data.yaml"
with open(self.data_file, "w") as f:
f.write("hello: world")
self.patcher1 = patch(
"moban.core.utils.verify_the_existence_of_directories"
)
self.patcher1.start()
@patch("moban.core.moban_factory.MobanEngine.render_to_files")
def test_single_command(self, fake_template_doer):
test_args = ["moban"]
with patch.object(sys, "argv", test_args):
from moban.main import main
main()
call_args = list(fake_template_doer.call_args[0][0])
assert call_args == [
TemplateTarget("README.rst.jj2", "data.yaml", "README.rst"),
TemplateTarget("setup.py.jj2", "data.yaml", "setup.py"),
]
@patch("moban.core.moban_factory.MobanEngine.render_to_files")
def test_single_command_with_missing_output(self, fake_template_doer):
test_args = ["moban", "-t", "README.rst.jj2"]
with patch.object(sys, "argv", test_args):
from moban.main import main
with pytest.raises(Exception):
main()
@patch("moban.core.moban_factory.MobanEngine.render_to_files")
def test_single_command_with_a_few_options(self, fake_template_doer):
test_args = ["moban", "-t", "README.rst.jj2", "-o", "xyz.output"]
with patch.object(sys, "argv", test_args):
from moban.main import main
main()
call_args = list(fake_template_doer.call_args[0][0])
assert call_args == [
TemplateTarget("README.rst.jj2", "data.yaml", "xyz.output")
]
@patch("moban.core.moban_factory.MobanEngine.render_to_files")
def test_single_command_with_options(self, fake_template_doer):
test_args = [
"moban",
"-t",
"README.rst.jj2",
"-c",
"new.yml",
"-o",
"xyz.output",
]
with patch.object(sys, "argv", test_args):
from moban.main import main
main()
call_args = list(fake_template_doer.call_args[0][0])
assert call_args == [
TemplateTarget("README.rst.jj2", "new.yml", "xyz.output")
]
def test_single_command_without_output_option(self):
test_args = ["moban", "-t", "abc.jj2"]
with patch.object(sys, "argv", test_args):
from moban.main import main
with pytest.raises(Exception):
main()
def tearDown(self):
os.unlink(self.config_file)
os.unlink(self.data_file)
self.patcher1.stop()
class TestNoOptions2(unittest.TestCase):
def setUp(self):
self.config_file = ".moban.yml"
copyfile(
fs.path.join("tests", "fixtures", self.config_file),
self.config_file,
)
self.data_file = "data.yaml"
with open(self.data_file, "w") as f:
f.write("hello: world")
self.patcher1 = patch(
"moban.core.utils.verify_the_existence_of_directories"
)
self.patcher1.start()
@patch("moban.core.moban_factory.MobanEngine.render_to_files")
def test_single_command(self, fake_template_doer):
test_args = ["moban"]
with patch.object(sys, "argv", test_args):
from moban.main import main
main()
call_args = list(fake_template_doer.call_args[0][0])
assert call_args == [
TemplateTarget("README.rst.jj2", "data.yaml", "README.rst"),
TemplateTarget("setup.py.jj2", "data.yaml", "setup.py"),
]
def tearDown(self):
self.patcher1.stop()
os.unlink(self.config_file)
os.unlink(self.data_file)
class TestCustomMobanFile(unittest.TestCase):
def setUp(self):
self.config_file = "custom-moban.txt"
copyfile(
fs.path.join("tests", "fixtures", ".moban.yml"), self.config_file
)
self.data_file = "data.yaml"
with open(self.data_file, "w") as f:
f.write("hello: world")
self.patcher1 = patch(
"moban.core.utils.verify_the_existence_of_directories"
)
self.patcher1.start()
@patch("moban.core.moban_factory.MobanEngine.render_to_files")
def test_single_command(self, fake_template_doer):
test_args = ["moban", "-m", self.config_file]
with patch.object(sys, "argv", test_args):
from moban.main import main
main()
call_args = list(fake_template_doer.call_args[0][0])
assert call_args == [
TemplateTarget("README.rst.jj2", "data.yaml", "README.rst"),
TemplateTarget("setup.py.jj2", "data.yaml", "setup.py"),
]
def tearDown(self):
self.patcher1.stop()
os.unlink(self.config_file)
os.unlink(self.data_file)
class TestTemplateOption(unittest.TestCase):
def setUp(self):
self.config_file = "custom-moban.txt"
copyfile(
fs.path.join("tests", "fixtures", ".moban.yml"), self.config_file
)
self.patcher1 = patch(
"moban.core.utils.verify_the_existence_of_directories"
)
self.patcher1.start()
@patch("moban.core.moban_factory.MobanEngine.render_to_file")
def test_template_option_override_moban_file(self, fake_template_doer):
test_args = ["moban", "-t", "setup.py.jj2"]
with patch.object(sys, "argv", test_args):
from moban.main import main
main()
fake_template_doer.assert_called_with(
"setup.py.jj2", "data.yml", "-"
)
@patch("moban.core.moban_factory.MobanEngine.render_to_file")
def test_template_option_not_in_moban_file(self, fake_template_doer):
test_args = ["moban", "-t", "foo.jj2"]
with patch.object(sys, "argv", test_args):
from moban.main import main
main()
fake_template_doer.assert_called_with("foo.jj2", "data.yml", "-")
def tearDown(self):
self.patcher1.stop()
os.unlink(self.config_file)
@patch("moban.core.utils.verify_the_existence_of_directories")
def test_duplicated_targets_in_moban_file(fake_verify):
config_file = "duplicated.moban.yml"
copyfile(fs.path.join("tests", "fixtures", config_file), ".moban.yml")
test_args = ["moban"]
with patch.object(sys, "argv", test_args):
from moban.main import main
pytest.raises(SystemExit, main)
os.unlink(".moban.yml")
class TestInvalidMobanFile(unittest.TestCase):
def setUp(self):
self.config_file = ".moban.yml"
@patch("moban.core.moban_factory.MobanEngine.render_to_files")
def test_no_configuration(self, fake_template_doer):
with open(self.config_file, "w") as f:
f.write("")
test_args = ["moban"]
with patch.object(sys, "argv", test_args):
from moban.main import main
with pytest.raises(SystemExit):
main()
@patch("moban.core.moban_factory.MobanEngine.render_to_files")
def test_no_configuration_2(self, fake_template_doer):
with open(self.config_file, "w") as f:
f.write("not: related")
test_args = ["moban"]
with patch.object(sys, "argv", test_args):
from moban.main import main
with pytest.raises(SystemExit):
main()
@patch("moban.core.moban_factory.MobanEngine.render_to_files")
def test_no_targets(self, fake_template_doer):
with open(self.config_file, "w") as f:
f.write("configuration: test")
test_args = ["moban"]
with patch.object(sys, "argv", test_args):
from moban.main import main
with pytest.raises(SystemExit):
main()
def tearDown(self):
os.unlink(self.config_file)
class TestComplexOptions(unittest.TestCase):
def setUp(self):
self.config_file = ".moban.yml"
copyfile(
fs.path.join("tests", "fixtures", ".moban-2.yml"), self.config_file
)
self.data_file = "data.yaml"
with open(self.data_file, "w") as f:
f.write("hello: world")
@patch(
"moban.core.utils.verify_the_existence_of_directories",
return_value=".",
)
def test_single_command(self, _):
test_args = ["moban"]
with patch.object(sys, "argv", test_args):
from moban.main import main
with patch(
"moban.core.moban_factory.MobanEngine.render_to_files"
) as fake:
main()
call_args = list(fake.call_args[0][0])
assert call_args == [
TemplateTarget(
"README.rst.jj2", "custom-data.yaml", "README.rst"
),
TemplateTarget("setup.py.jj2", "data.yml", "setup.py"),
]
def tearDown(self):
os.unlink(self.config_file)
os.unlink(self.data_file)
class TestTemplateTypeOption(unittest.TestCase):
def setUp(self):
self.config_file = "data.yml"
with open(self.config_file, "w") as f:
f.write("hello: world")
@patch("moban.core.moban_factory.MobanEngine.render_to_file")
def test_mako_option(self, fake_template_doer):
test_args = ["moban", "-t", "a.mako"]
with patch.object(sys, "argv", test_args):
from moban.main import main
main()
fake_template_doer.assert_called_with("a.mako", "data.yml", "-")
def tearDown(self):
os.unlink(self.config_file)
def test_version_option():
test_args = ["moban", "-V"]
with patch.object(sys, "argv", test_args):
from moban.main import main
with pytest.raises(SystemExit):
main()
@patch("logging.basicConfig")
def test_warning_verbose(fake_config):
fake_config.side_effect = [IOError("stop test")]
test_args = ["moban", "-vvv"]
with patch.object(sys, "argv", test_args):
from moban.main import main
try:
main()
except IOError:
fake_config.assert_called_with(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=10,
)
@patch("logging.basicConfig")
def test_debug_five_verbose_option(fake_config, *_):
fake_config.side_effect = [IOError("stop test")]
test_args = ["moban", "-vvvvv"]
with patch.object(sys, "argv", test_args):
from moban.main import main
try:
main()
except IOError:
fake_config.assert_called_with(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=10,
)
@patch("moban.core.utils.verify_the_existence_of_directories", return_value=[])
def test_git_repo_example(_):
test_args = [
"moban",
"-t",
"git://github.com/moremoban/pypi-mobans.git!/templates/_version.py.jj2",
"-c",
"git://github.com/moremoban/pypi-mobans.git!/config/data.yml",
"-o",
"test_git_repo_example.py",
]
with patch.object(sys, "argv", test_args):
from moban.main import main
main()
with open("test_git_repo_example.py") as f:
content = f.read()
assert content == '__version__ = "0.1.1rc3"\n__author__ = "C.W."\n'
os.unlink("test_git_repo_example.py")
@patch("moban.core.utils.verify_the_existence_of_directories", return_value=[])
def test_pypi_pkg_example(_):
test_args = [
"moban",
"-t",
"pypi://pypi-mobans-pkg/resources/templates/_version.py.jj2",
"-c",
"pypi://pypi-mobans-pkg/resources/config/data.yml",
"-o",
"test_pypi_pkg_example.py",
]
with patch.object(sys, "argv", test_args):
from moban.main import main
main()
with open("test_pypi_pkg_example.py") as f:
content = f.read()
assert content == '__version__ = "0.1.1rc3"\n__author__ = "C.W."\n'
os.unlink("test_pypi_pkg_example.py")
def test_add_extension():
if sys.version_info[0] == 2:
return pytest.skip("jinja2-python-version does not support python 2")
test_commands = [
[
"moban",
"-t",
"{{ python_version }}",
"-e",
"jinja2=jinja2_python_version.PythonVersionExtension",
"-o",
"moban.output",
],
[
"moban",
"-t",
"{{ python_version }}",
"-e",
"jj2=jinja2_python_version.PythonVersionExtension",
"-o",
"moban.output",
],
]
for test_args in test_commands:
with patch.object(sys, "argv", test_args):
from moban.main import main
main()
with open("moban.output") as f:
content = f.read()
assert content == "{}.{}".format(
sys.version_info[0], sys.version_info[1]
)
os.unlink("moban.output")
def test_stdin_input():
if sys.platform == "win32":
return pytest.skip("windows test fails with this pipe test 2")
test_args = ["moban", "-d", "hello=world", "-o", "moban.output"]
with patch.object(sys, "stdin", StringIO("{{hello}}")):
with patch.object(sys, "argv", test_args):
from moban.main import main
main()
with open("moban.output") as f:
content = f.read()
assert content == "world"
os.unlink("moban.output")
def test_stdout():
test_args = ["moban", "-d", "hello=world", "-t", "{{hello}}"]
with patch.object(sys, "argv", test_args):
with patch("sys.stdout", new_callable=StringIO) as fake_stdout:
from moban.main import main
main()
assert fake_stdout.getvalue() == "world\n"
def test_render_file_stdout():
config_file = "config.yaml"
with open(config_file, "w") as f:
f.write("hello: world")
template_file = "t.jj2"
with open(template_file, "w") as f:
f.write("{{hello}}")
test_args = ["moban", "-t", "t.jj2", "-c", "config.yaml"]
with patch.object(sys, "argv", test_args):
with patch("sys.stdout", new_callable=StringIO) as fake_stdout:
from moban.main import main
main()
assert fake_stdout.getvalue() == "world\n"
def test_custom_jinja2_filters_tests():
config_file = "config.yaml"
with open(config_file, "w") as f:
f.write("hello: world")
template_file = "t.jj2"
with open(template_file, "w") as f:
f.write("{{hello}}")
test_args = [
"moban",
"-e",
"jinja2=filter:moban.externals.file_system.url_join",
"jinja2=test:moban.externals.file_system.exists",
"jinja2=global:description=moban.constants.PROGRAM_DESCRIPTION",
"-t",
"{{'a'|url_join('b')}} {{'b' is exists}}{{ description }}",
]
with patch.object(sys, "argv", test_args):
with patch("sys.stdout", new_callable=StringIO) as fake_stdout:
from moban.main import main
expected_output = (
"a/b False"
+ "Static text generator using "
+ "any template, any data and any location.\n"
)
main()
assert fake_stdout.getvalue() == expected_output
|
python
|
# -*- coding: utf-8 -*-
# Multilingual support is postponed indefinitely. Only Help has a language option.
# (Making an `app` method that handles all strings, with a `lang` property read from the config,
# doesn't work either, because `app` doesn't exist yet when the widgets are initialized.)
"""
Besides differences in written language letters and words,
various languages use different marks as separators for decimals.
Also sometimes variations in the separators exist within the same country.
This module will focus mostly on the most prevalent cases.
An option to choose decimal separator mark could be created
but the selection should NOT be forced on the user
since they might be very inexperienced and not even know which is the correct mark.
"""
class LanguageNotImplemented(Exception):
pass
SUPPORTED_LANGUAGES = {}
class Language(object):
def __init__(self, name, name_native):
self.name_native = name_native # Language name spelled with characters of the language itself
self.name = name
SUPPORTED_LANGUAGES.update({self.name: self})
# In reality, separators differ for different english-speaking countries
# (e.g. eng_US, eng_ireland, eng_UK ..).
# Since this program is not expected to become very popular,
# the convention below should be ok.
# Alternatively, the user could be prompted to select their separator;
# the prompt should be in a very easy to understand form:
# e.g. show "three point one" and ask the user to pick the image whose separator matches it.
english = Language(name='english', name_native='english')
greek = Language(name='greek', name_native=u'ελληνικά')
class Message(object):
DEFAULT_LANGUAGE = 'english'
selected_language = DEFAULT_LANGUAGE
def __init__(self, only_english=False, **kwargs):
# Ensures required languages are implemented
if only_english:
if self.DEFAULT_LANGUAGE not in kwargs:
raise LanguageNotImplemented(self.DEFAULT_LANGUAGE)
langs_not_implemented = set(kwargs) - set(SUPPORTED_LANGUAGES)
if langs_not_implemented:
raise LanguageNotImplemented(langs_not_implemented)
self.langs_msgs_dct = kwargs
PLAY = Message(
english='Play',
greek=u'Παιχνίδι',
)
ABOUT = Message(
english='About',
greek=u'Σχετικά',
)
HELP = Message(
english='Help',
greek=u'Βοήθεια',
)
REWARDS = Message(
english='Rewards',
greek=u'Βραβεία',
)
VERSION = Message(
english='version',
greek='έκδοση',
)
EASY = Message(
english='easy',
greek='εύκολα',
)
MEDIUM = Message(
english='medium',
greek='μέτρια',
)
HARD = Message(
english='hard',
greek='δύσκολα',
)
OPERATION_CATEGORIES = Message(
english='Operation type',
greek='Είδος πράξεων',
)
DIFFICULTY = Message(
english='Difficulty',
greek='Δυσκολία',
)
CLEAR = Message(
english='Clear',
greek='Σβήσιμο',
)
CHECK_ANSWER = Message(
english='Check\nanswer',
greek='Έλεγχος\nαπάντησης',
)
SEPARATOR_SYMBOL = Message(
english='.',
greek=',',
)
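# A minimal lookup sketch (my addition): the module does not ship a getter, so
# this assumes callers read `langs_msgs_dct` directly and fall back to the
# default language when a translation is missing.
if __name__ == "__main__":
    lang = Message.selected_language
    for msg in (PLAY, DIFFICULTY, SEPARATOR_SYMBOL):
        text = msg.langs_msgs_dct.get(lang, msg.langs_msgs_dct[Message.DEFAULT_LANGUAGE])
        print(text)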
|
python
|
from django.core.management.base import BaseCommand
from pages.models import Invite
import csv
class Command(BaseCommand):
def handle(self, *args, **options):
print('Loading CSV')
csv_path = './academy_invites_2014.csv'
with open(csv_path, 'rt') as csv_file:
csv_reader = csv.DictReader(csv_file)
for row in csv_reader:
obj = Invite.objects.create(
name=row['Name'],
branch=row['Branch']
)
print(obj)
|
python
|
import logging
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
from galaxy_api.api import models
from galaxy_api.auth import models as auth_models
from galaxy_api.api import permissions
from .base import BaseTestCase
from .x_rh_identity import user_x_rh_identity
log = logging.getLogger(__name__)
class TestUiNamespaceViewSet(BaseTestCase):
def setUp(self):
super().setUp()
def test_get(self):
url = reverse('api:ui:namespaces-list')
response = self.client.get(url, format='json')
log.debug('response: %s', response)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_get_new_user(self):
username = 'newuser'
username_token_b64 = user_x_rh_identity(username,
account_number="666")
client = APIClient()
client.credentials(**{"HTTP_X_RH_IDENTITY": username_token_b64})
url = reverse('api:ui:namespaces-list')
response = client.get(url, format='json')
log.debug('response: %s', response)
self.assertEqual(response.status_code, status.HTTP_200_OK)
# user isn't a namespace owner, should get empty list of namespaces
assert not response.data['data']
def test_get_namespace_owner_user(self):
username = 'some_namespace_member'
some_namespace_member = auth_models.User.objects.create(username=username)
namespace_group = self._create_group('rh-identity', 'some_namespace',
users=some_namespace_member)
namespace = self._create_namespace('some_namespace', namespace_group)
log.debug('namespace: %s', namespace)
some_namespace_member_token_b64 = user_x_rh_identity(username)
client = APIClient()
client.credentials(**{"HTTP_X_RH_IDENTITY": some_namespace_member_token_b64})
url = reverse('api:ui:namespaces-list')
response = client.get(url, format='json')
log.debug('response: %s', response)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data'][0]['name'], 'some_namespace')
# TODO: test get detail, put/update detail, put/update detail for partner-engineers, etc
class TestUiMyNamespaceViewSet(BaseTestCase):
def setUp(self):
super().setUp()
def test_list_user_not_in_namespace_group(self):
url = reverse('api:ui:namespaces-list')
username = 'not_namespace_member'
not_namespace_member_token_b64 = user_x_rh_identity(username)
client = APIClient()
client.credentials(**{"HTTP_X_RH_IDENTITY": not_namespace_member_token_b64})
response = client.get(url, format='json')
log.debug('response: %s', response)
log.debug('response.data:\n%s', response.data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
# No matching namespaces found
self.assertEqual(response.data['data'], [])
def test_list_user_in_namespace_group(self):
url = reverse('api:ui:namespaces-list')
username = 'some_namespace_member'
some_namespace_member = auth_models.User.objects.create(username=username)
namespace_group = self._create_group('rh-identity', 'some_namespace',
users=some_namespace_member)
namespace = self._create_namespace('some_namespace', namespace_group)
log.debug('namespace: %s', namespace)
some_namespace_member_token_b64 = user_x_rh_identity(username)
client = APIClient()
client.credentials(**{"HTTP_X_RH_IDENTITY": some_namespace_member_token_b64})
response = client.get(url, format='json')
log.debug('response: %s', response)
log.debug('response.data:\n%s', response.data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data'][0]['name'], 'some_namespace')
def test_list_user_in_namespace_system_admin(self):
url = reverse('api:ui:namespaces-list')
username = 'some_namespace_member'
some_namespace_member = auth_models.User.objects.create(username=username)
namespace_group_name = permissions.IsPartnerEngineer.GROUP_NAME
namespace_group = auth_models.Group.objects.create(name=namespace_group_name)
namespace_group.user_set.add(*[some_namespace_member])
namespace = self._create_namespace('some_namespace', namespace_group)
# create another namespace without any groups
another_namespace = models.Namespace.objects.create(name='another_namespace')
log.debug('namespace: %s', namespace)
log.debug('another_namespace: %s', another_namespace)
some_namespace_member_token_b64 = user_x_rh_identity(username)
client = APIClient()
client.credentials(**{"HTTP_X_RH_IDENTITY": some_namespace_member_token_b64})
response = client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data'][0]['name'], 'some_namespace')
namespace_names = [ns_data['name'] for ns_data in response.data['data']]
        # Verify the system user can see all namespaces, even those without a group
self.assertIn('some_namespace', namespace_names)
self.assertIn('another_namespace', namespace_names)
|
python
|
from __future__ import absolute_import, division, print_function
import numpy as np
import iminuit as minuit
import time
import functools
import logging
from .processing import build_trigger_windows
from scipy import optimize as op
from collections import OrderedDict
from copy import deepcopy
from scipy.special import gammainc
from .models import TimeLine
pvalue = lambda dof, chisq: 1. - gammainc(.5 * dof, .5 * chisq)
def setDefault(func = None, passed_kwargs = {}):
"""
Read in default keywords of the simulation and pass to function
"""
if func is None:
return functools.partial(setDefault, passed_kwargs = passed_kwargs)
@functools.wraps(func)
def init(*args, **kwargs):
for k in passed_kwargs.keys():
kwargs.setdefault(k,passed_kwargs[k])
return func(*args, **kwargs)
return init
minuit_def = {
'verbosity': 0,
'int_steps': 1e-4,
'strategy': 2,
'tol': 1e-5,
'up': 1., # it's a chi 2 fit
'max_tol_increase': 3000.,
'tol_increase': 1000.,
'ncall': 10000,
'steps': 40,
'scan_bound': (0.,10.),
'pedantic': True,
'precision': None,
'scipy': False,
'pinit': {'A' : 1.,
'c': 1.,
't0': 1.,
'tr': 1.,
'td': 1.},
'fix': {'A' : False,
'c': False,
't0': False,
'tr': False,
'td': False},
'islog': {'A' : False,
'c': False,
't0': False,
'tr': False,
'td': False},
'limits': {'A' : [0.,100.],
'c': [0.,5.],
't0': [0.,100.],
'tr': [0.,100.],
'td': [0.,100.]}
}
# --- minuit defaults ------------------------------------- #
class FitTimeLine(object):
def __init__(self, t, v, dv, fSample):
"""
Initialize the fitting class
:param t: array-like
time values in micro s
:param v: array-like
voltage values in mV
:param dv: array-like
uncertainties of voltage values in mV
:param fSample: float
Sampling frequency in Hz
"""
self._t = t
self._v = v
self._dv = dv
self._f = None
self._fSample = fSample
return
@property
def t(self):
return self._t
@property
def v(self):
return self._v
@property
def dv(self):
return self._dv
@property
def f(self):
return self._f
@property
def fSample(self):
return self._fSample
@property
def m(self):
return self._m
@t.setter
def t(self, t):
self._t = t
return
@v.setter
def v(self, v):
self._v = v
return
@fSample.setter
def fSample(self, fSample):
self._fSample = fSample
return
@dv.setter
def dv(self, dv):
self._dv = dv
return
def calcObjFunc(self,*args):
return self.__calcObjFunc(*args)
def __calcObjFunc(self,*args):
"""
objective function passed to iMinuit
"""
params = {}
for i,p in enumerate(self.parnames):
if self.par_islog[p]:
params[p] = np.power(10.,args[i])
else:
params[p] = args[i]
return self.returnObjFunc(params)
def __wrapObjFunc(self,args):
"""
objective function passed to scipy.optimize
"""
params = {}
for i,p in enumerate(self.parnames):
if not self.fitarg['fix_{0:s}'.format(p)]:
if self.par_islog[p]:
params[p] = np.power(10.,args[i])
else:
params[p] = args[i]
else:
if self.par_islog[p]:
params[p] = np.power(10.,self.fitarg[p])
else:
params[p] = self.fitarg[p]
return self.returnObjFunc(params)
def returnObjFunc(self,params):
"""Calculate the objective function"""
f = self._f(self._t, **params)
chi2 = ((self._v - f)**2. / self._dv**2.).sum()
return chi2
@setDefault(passed_kwargs = minuit_def)
def fill_fitarg(self, **kwargs):
"""Helper function to fill the dictionary for minuit fitting"""
# set the fit arguments
fitarg = {}
fitarg.update(kwargs['pinit'])
for k in kwargs['limits'].keys():
fitarg['limit_{0:s}'.format(k)] = kwargs['limits'][k]
fitarg['fix_{0:s}'.format(k)] = kwargs['fix'][k]
fitarg['error_{0:s}'.format(k)] = kwargs['pinit'][k] * kwargs['int_steps']
fitarg = OrderedDict(sorted(fitarg.items()))
# get the names of the parameters
self.parnames = kwargs['pinit'].keys()
self.par_islog = kwargs['islog']
return fitarg
@setDefault(passed_kwargs = minuit_def)
def run_migrad(self,fitarg,**kwargs):
"""
Helper function to initialize migrad and run the fit.
Initial parameters are optionally estimated with scipy optimize.
"""
self.fitarg = fitarg
values, bounds = [],[]
for k in self.parnames:
values.append(fitarg[k])
bounds.append(fitarg['limit_{0:s}'.format(k)])
logging.debug(self.parnames)
logging.debug(values)
logging.debug(self.__wrapObjFunc(values))
if kwargs['scipy']:
self.res = op.minimize(self.__wrapObjFunc,
values,
bounds = bounds,
method='TNC',
#method='Powell',
options={'maxiter': kwargs['ncall']} #'xtol': 1e-20, 'eps' : 1e-20, 'disp': True}
#tol=None, callback=None,
#options={'disp': False, 'minfev': 0, 'scale': None,
#'rescale': -1, 'offset': None, 'gtol': -1,
#'eps': 1e-08, 'eta': -1, 'maxiter': kwargs['ncall'],
#'maxCGit': -1, 'mesg_num': None, 'ftol': -1, 'xtol': -1, 'stepmx': 0,
#'accuracy': 0}
)
logging.debug(self.res)
for i,k in enumerate(self.parnames):
fitarg[k] = self.res.x[i]
logging.debug(fitarg)
cmd_string = "lambda {0}: self.__calcObjFunc({0})".format(
(", ".join(self.parnames), ", ".join(self.parnames)))
string_args = ", ".join(self.parnames)
global f # needs to be global for eval to find it
f = lambda *args: self.__calcObjFunc(*args)
cmd_string = "lambda %s: f(%s)" % (string_args, string_args)
logging.debug(cmd_string)
# work around so that the parameters get names for minuit
self._minimize_f = eval(cmd_string, globals(), locals())
self._m = minuit.Minuit(self._minimize_f,
print_level =kwargs['verbosity'],
errordef = kwargs['up'],
pedantic = kwargs['pedantic'],
**fitarg)
self._m.tol = kwargs['tol']
self._m.strategy = kwargs['strategy']
logging.debug("tol {0:.2e}, strategy: {1:n}".format(
self._m.tol,self._m.strategy))
self._m.migrad(ncall = kwargs['ncall']) #, precision = kwargs['precision'])
return
def __print_failed_fit(self):
"""print output if migrad failed"""
if not self._m.migrad_ok():
fmin = self._m.get_fmin()
logging.warning(
'*** migrad minimum not ok! Printing output of get_fmin'
)
logging.warning('{0:s}:\t{1}'.format('*** has_accurate_covar',
fmin.has_accurate_covar))
logging.warning('{0:s}:\t{1}'.format('*** has_covariance',
fmin.has_covariance))
logging.warning('{0:s}:\t{1}'.format('*** has_made_posdef_covar',
fmin.has_made_posdef_covar))
logging.warning('{0:s}:\t{1}'.format('*** has_posdef_covar',
fmin.has_posdef_covar))
logging.warning('{0:s}:\t{1}'.format('*** has_reached_call_limit',
fmin.has_reached_call_limit))
logging.warning('{0:s}:\t{1}'.format('*** has_valid_parameters',
fmin.has_valid_parameters))
logging.warning('{0:s}:\t{1}'.format('*** hesse_failed',
fmin.hesse_failed))
logging.warning('{0:s}:\t{1}'.format('*** is_above_max_edm',
fmin.is_above_max_edm))
logging.warning('{0:s}:\t{1}'.format('*** is_valid',
fmin.is_valid))
return
def __repeat_migrad(self, **kwargs):
"""Repeat fit if fit was above edm"""
fmin = self._m.get_fmin()
if not self._m.migrad_ok() and fmin['is_above_max_edm']:
logging.warning(
'Migrad did not converge, is above max edm. Increasing tol.'
)
tol = self._m.tol
self._m.tol *= self._m.edm /(self._m.tol * self._m.errordef ) * kwargs['tol_increase']
logging.info('New tolerance : {0}'.format(self._m.tol))
if self._m.tol >= kwargs['max_tol_increase']:
logging.warning(
                    'New tolerance too large for required precision'
)
else:
self._m.migrad(
ncall = kwargs['ncall'])#,
#precision = kwargs['precision']
#)
logging.info(
'Migrad status after second try: {0}'.format(
self._m.migrad_ok()
)
)
self._m.tol = tol
return
@setDefault(passed_kwargs = minuit_def)
def fit(self,tmin = None, tmax = None, function = 'tesresponse',
minos = 1., parscan = 'none', fmax = 1e6, norder = 3,
dvdt_thr = -25., maxcomp = 3, v_thr=0.,
**kwargs):
"""
Fit the time series
{options}
:param tmin: float
Minimum time in time series. If None, don't use a limit. Default: None
:param tmax:
Maximum time in time series. If None, don't use a limit. Default: None
:param function: str
either "tesresponse" or "expflare". Determines the function that is fit to the data.
Default: "tesresponse"
:param minos: float
Confidence level in sigma for which minos errors are calculated. Default: 1.
:param parscan: str
either 'none' or name of parameter. If name of parameter, the likelihood is profiled
for this parameter and the profile likelihood is returned. Default: 'none'
:param fmax: float
Maximum frequency for filter (used to find trigger time)
:param norder: int
Order of filter (used to find trigger time)
:param dvdt_thr: float
threshold to find trigger from derivative in mV / micro sec. Default: -25.
:param v_thr: float
threshold to find trigger from time series in V. Default: 0.
:param maxcomp: int
Maximum pulse components allowed in one trigger window.
:param kwargs:
Additional key word arguments passed to minuit.
:return:
Dictionary with results
"""
if np.isscalar(tmin) and np.isscalar(tmax):
m = (self.t >= tmin) & (self.t < tmax)
elif np.isscalar(tmax):
m = (self.t < tmax)
elif np.isscalar(tmin):
m = (self.t >= tmin)
        if np.isscalar(tmin) or np.isscalar(tmax):
self._t = self._t[m]
self._v = self._v[m]
self._dv = self._dv[m]
t0s, _, _ = build_trigger_windows(self._t / 1e6, self._v / 1e3,
self._fSample,
thr=dvdt_thr,
thr_v=v_thr,
fmax=fmax,
norder=norder)
t0s = np.array(t0s) * 1e6 # convert back to micro s
ntrigger = t0s.size
logging.info("Within window, found {0:n} point(s) satisfying trigger criteria".format(ntrigger))
t1 = time.time()
aic, chi2, nPars, vals, errs, fitargs, dof = [], [], [], [], [], [], []
# loop through trigger times
#for i, t0 in enumerate(t0s):
i = 0
while i < ntrigger and i < maxcomp:
f = TimeLine(numcomp=i+1, function=function)
kwargs['pinit']['t0_{0:03n}'.format(i)] = t0s[i]
kwargs['limits']['t0_{0:03n}'.format(i)] = [self._t.min(), self._t.max()]
if i > 0:
if self._m.migrad_ok():
kwargs['pinit'].update({k : self._m.values[k] for k in kwargs['pinit'].keys() \
if k in self._m.values.keys()})
for k in ['td', 'tr', 'A']:
kwargs['pinit']['{0:s}_{1:03n}'.format(k, i)] = kwargs['pinit']['{0:s}_000'.format(k)]
kwargs['limits']['{0:s}_{1:03n}'.format(k, i)] = kwargs['limits']['{0:s}_000'.format(k)]
kwargs['fix'] = {k: False for k in kwargs['pinit'].keys()}
kwargs['islog'] = {k: False for k in kwargs['pinit'].keys()}
self._f = lambda t, **params : f(t, **params)
fitarg = self.fill_fitarg(**kwargs)
logging.debug(fitarg)
self.run_migrad(fitarg, **kwargs)
npar = np.sum([np.invert(self._m.fitarg[k]) for k in self._m.fitarg.keys() if 'fix' in k])
try:
self._m.hesse()
logging.debug("Hesse matrix calculation finished")
except RuntimeError as e:
logging.warning(
"*** Hesse matrix calculation failed: {0}".format(e)
)
logging.debug(self._m.fval)
self.__repeat_migrad(**kwargs)
logging.debug(self._m.fval)
fmin = self._m.get_fmin()
if not fmin.hesse_failed:
try:
self.corr = self._m.np_matrix(correlation=True)
except:
self.corr = -1
logging.debug(self._m.values)
if self._m.migrad_ok():
if parscan in self.parnames:
                    scan_values, llh, bf, ok = self.llhscan(parscan,
bounds = kwargs['scan_bound'],
steps = kwargs['steps'],
log = False
)
self._m.fitarg['fix_{0:s}'.format(parscan)] = False
if np.min(llh) < self._m.fval:
idx = np.argmin(llh)
if ok[idx]:
logging.warning("New minimum found in objective function scan!")
fitarg = deepcopy(self._m.fitarg)
for k in self.parnames:
fitarg[k] = bf[idx][k]
fitarg['fix_{0:s}'.format(parscan)] = True
kwargs['scipy'] = False
self.run_migrad(fitarg, **kwargs)
if minos:
for k in self._m.values.keys():
if kwargs['fix'][k]:
continue
self._m.minos(k,minos)
logging.debug("Minos finished")
else:
self.__print_failed_fit()
# terminate if we have tested all components
if i == t0s.size - 1 or i == maxcomp - 1:
return dict(chi2 = self._m.fval,
value = dict(self._m.values),
error = dict(self._m.errors),
aic = 2. * npar + self._m.fval,
dof = self._t.size - npar,
npar = npar,
fitarg = self._m.fitarg,
numcomp = i + 1,
fit_ok = False
)
vals.append(dict(self._m.values))
errs.append(dict(self._m.errors))
fitargs.append(self._m.fitarg)
nPars.append(npar)
aic.append(2. * npar + self._m.fval)
dof.append(self._t.size - nPars[-1])
chi2.append(self._m.fval)
i+=1
# bad fit, add additional trigger time
#if chi2[-1] / dof[-1] > 2.5 and i >= ntrigger:
if pvalue(dof[-1], chi2[-1]) < 0.01 and i >= ntrigger:
logging.info("bad fit and all trigger times added, adding additional component")
idresmax = np.argmax(np.abs((self._v - self._f(self._t, **self._m.fitarg))/self._dv))
t0s = np.append(t0s, self._t[idresmax])
# select best fit
ibest = np.argmin(aic)
logging.info('fit took: {0}s'.format(time.time() - t1))
logging.info("Best AIC = {0:.2f} for {1:n} components".format(aic[ibest], ibest + 1))
for k in vals[ibest].keys():
if kwargs['fix'][k]:
err = np.nan
else:
err = fitargs[ibest]['error_{0:s}'.format(k)]
val = fitargs[ibest]['{0:s}'.format(k)]
logging.info('best fit {0:s}: {1:.5e} +/- {2:.5e}'.format(k,val,err))
result = dict(chi2 = chi2[ibest],
value = vals[ibest],
error = errs[ibest],
aic = aic[ibest],
dof = dof[ibest],
npar = nPars[ibest],
fitarg = fitargs[ibest],
numcomp = ibest + 1,
fit_ok = True
)
if minos:
result['minos'] = self._m.merrors
if parscan in self.parnames:
            result['parscan'] = (scan_values, llh)
return result
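    # Illustrative sketch (not part of the original class), using made-up
    # arrays and sampling rate, assuming time in micro s and voltage in mV:
    #
    #     ftl = FitTimeLine(t, v, dv, fSample=2e9)
    #     res = ftl.fit(function='tesresponse', minos=1.)
    #     print(res['chi2'], res['value'], res['fit_ok'])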
def llhscan(self, parname, bounds, steps, log = False):
"""
Perform a manual scan of the objective function for one parameter
(inspired by mnprofile)
Parameters
----------
parname: str
parameter that is scanned
bounds: list or tuple
scan bounds for parameter
steps: int
number of scanning steps
{options}
log: bool
if true, use logarithmic scale
Returns
-------
tuple of 4 lists containing the scan values, likelihood values,
best fit values at each scanning step, migrad_ok status
"""
llh, pars, ok = [],[],[]
if log:
            values = np.logspace(np.log10(bounds[0]), np.log10(bounds[1]), steps)
else:
values = np.linspace(bounds[0], bounds[1], steps)
for i,v in enumerate(values):
fitarg = deepcopy(self._m.fitarg)
fitarg[parname] = v
fitarg['fix_{0:s}'.format(parname)] = True
string_args = ", ".join(self.parnames)
global f # needs to be global for eval to find it
f = lambda *args: self.__calcObjFunc(*args)
cmd_string = "lambda %s: f(%s)" % (string_args, string_args)
minimize_f = eval(cmd_string, globals(), locals())
m = minuit.Minuit(minimize_f,
print_level=0, forced_parameters=self._m.parameters,
pedantic=False, **fitarg)
m.migrad()
llh.append(m.fval)
pars.append(m.values)
ok.append(m.migrad_ok())
return values, np.array(llh), pars, ok
|
python
|
import BaseHTTPServer
import time
import sys
import SocketServer, os
import md5
try:
import json
except:
import simplejson as json
HOST_NAME = ''
PORT_NUMBER = 8000
SERVER_VERSION = '1.0.1'
SSDP_PORT = 1900
SSDP_MCAST_ADDR = '239.255.255.250'
savedDescription = {}
delay = {}
counter = {}
userAgentHash = {}
def keep_running():
return True
class ThreadingHTTPServer(SocketServer.ThreadingMixIn,
SocketServer.TCPServer, BaseHTTPServer.HTTPServer):
pass
class RedirectHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
path = self.path[1:]
try:
path.index('get_description')
except ValueError:
try:
path.index('get_counter')
except ValueError:
try:
path.index('version')
except ValueError:
self.send_response(200)
self.send_header('Content-Type','text/plain; charset="utf-8"')
self.send_header('Content-Length',str(0))
self.end_headers()
print('no stuff given')
else:
self.send_response(200)
self.send_header('Access-Control-Allow-Origin','*')
self.send_header('Content-Length',str(len(SERVER_VERSION)))
self.end_headers()
self.wfile.write(SERVER_VERSION)
self.wfile.close()
print('version check: ',SERVER_VERSION)
else:
if(path[:path.index('?')] == 'get_counter'):
id = int(path[path.index('?')+1:])
try:
self.send_response(200)
self.send_header('Access-Control-Allow-Origin','*')
DATA = str(counter[id])
self.send_header('Content-Length',str(len(DATA)))
self.end_headers()
self.wfile.write(DATA)
self.wfile.close()
except KeyError:
self.send_response(404)
self.send_header('Access-Control-Allow-Origin','*')
self.send_header('Content-Length',str(len('wrong id!')))
self.end_headers()
self.wfile.write('wrong id!')
self.wfile.close()
else:
if(path[:path.index('?')] == 'get_description'):
id = int(path[path.index('?')+1:])
try:
self.send_response(200)
if(delay[id] > 0):
time.sleep(delay[id])
print('message delayed: ', delay[id])
self.send_header('Access-Control-Allow-Origin','*')
self.send_header('Content-Length',str(len(savedDescription[id])))
self.end_headers()
self.wfile.write(savedDescription[id])
self.wfile.close()
if(userAgentHash[id] == md5.md5(str(self.client_address[0]) + str(self.headers['user-agent'])).hexdigest()):
global counter
counter[id] = counter[id] + 1
print('description given',id,counter[id])
except KeyError:
print('404: ',path)
self.send_response(404)
self.send_header('Access-Control-Allow-Origin','*')
self.send_header('Content-Length',str(len('wrong id!')))
self.end_headers()
self.wfile.write('wrong id!')
self.wfile.close()
else:
self.send_response(404)
self.send_header('Content-Length',str(0))
self.end_headers()
print('no stuff given')
def do_POST(self):
self.send_response(200)
self.send_header('Access-Control-Allow-Origin','*')
self.end_headers()
length = int(self.headers.getheader('Content-Length'))
if(length > 0):
res = self.rfile.read(length)
try:
params = json.loads(res)
except ValueError, err:
print 'ERROR:', err
params = {}
else:
params = {}
if(self.path == '/set_description'):
try:
params['description']
params['id']
except KeyError:
print('description error - no proper param!')
pass
else:
global savedDescription
global counter
global userAgentHash
try:
savedDescription[params['id']]
except KeyError:
counter[params['id']] = 0
else:
if(savedDescription[params['id']] != params['description']):
counter[params['id']] = 0
savedDescription[params['id']] = params['description']
userAgentHash[params['id']] = md5.md5(str(self.client_address[0]) + str(self.headers['user-agent'])).hexdigest()
try:
params['delay']
except KeyError:
delay[params['id']] = 0
else:
delay[params['id']] = float(params['delay'])/1000.0
pass
elif(self.path == '/delete_description'):
try:
params['id']
except KeyError:
print('id error - no proper param!')
pass
else:
global savedDescription
del savedDescription[params['id']]
                print('saved_description cleared')
pass
elif(self.path == '/broadcast_msg'):
if(not params['data']):
self.wfile.write('error with params: no "data"!')
self.wfile.close()
print('broadcast_msg error - no proper param!')
pass
else:
self.wfile.write('message sent')
self.wfile.close()
ssdp_multicast(params['data'], self.request.getsockname())
pass
def ssdp_multicast(buf, interface_address):
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
print("IP_MULTICAST_IF: " + str(interface_address))
s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF, socket.inet_aton(interface_address[0]))
s.sendto(buf, (SSDP_MCAST_ADDR, SSDP_PORT))
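# Illustrative sketch (not part of the original script): the `data` posted to
# /broadcast_msg is multicast verbatim, so a typical payload would be a plain
# SSDP request such as (CRLF line endings, blank line at the end):
#
#   M-SEARCH * HTTP/1.1
#   HOST: 239.255.255.250:1900
#   MAN: "ssdp:discover"
#   MX: 2
#   ST: ssdp:all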
if __name__ == '__main__':
server_class = ThreadingHTTPServer
httpd = server_class((HOST_NAME, PORT_NUMBER), RedirectHandler)
print time.asctime(), "Server Starts - %s:%s" % (HOST_NAME, PORT_NUMBER)
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
s.bind(('', SSDP_PORT))
import struct
mreqn = struct.pack(
'4s4si',
socket.inet_aton(SSDP_MCAST_ADDR),
socket.inet_aton('0.0.0.0'),
0)
s.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreqn)
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
print time.asctime(), "Server Stops - %s:%s" % (HOST_NAME, PORT_NUMBER)
|
python
|
'''
In this exercise, you'll use the Baby Names Dataset (from data.gov) again. This time, both DataFrames names_1981 and names_1881 are loaded without specifying an Index column (so the default Indexes for both are RangeIndexes).
You'll use the DataFrame .append() method to make a DataFrame combined_names. To distinguish rows from the original two DataFrames, you'll add a 'year' column to each with the year (1881 or 1981 in this case). In addition, you'll specify ignore_index=True so that the index values are not used along the concatenation axis. The resulting axis will instead be labeled 0, 1, ..., n-1, which is useful if you are concatenating objects where the concatenation axis does not have meaningful indexing information.
'''
# Add 'year' column to names_1881 and names_1981
names_1881['year'] = 1881
names_1981['year'] = 1981
# Append names_1981 after names_1881 with ignore_index=True: combined_names
combined_names = names_1881.append(names_1981, ignore_index=True)
# Print shapes of names_1981, names_1881, and combined_names
print(names_1981.shape)
print(names_1881.shape)
print(combined_names.shape)
# Print all rows that contain the name 'Morgan'
print(combined_names.loc[combined_names['name'] == 'Morgan'])
|
python
|
#!/usr/bin/env python3
import os
telamon_root = os.path.realpath("../../")
tuning_path = os.path.realpath(".")
setting_path = tuning_path + "/settings/"
spec = {
"log_file": str,
"num_workers": int,
"stop_bound": float,
"timeout": float,
"distance_to_best": float,
"algorithm": {
"type": ("bandit", ),
"new_nodes_order": ("api", "random", "bound", "weighted_random"),
"old_nodes_order": ("bound", "bandit", "weighted_random"),
"threshold": int,
"delta": float,
"monte_carlo": bool,
},
}
def check(value, *, path=(), spec=spec):
"""Check that a value adheres to a specification.
Entries in the specification can be:
- A tuple of allowed values: the provided `value` must be one of these
- A type: the provided `value` must have that type
- A dict: the provided `value` must be a dict which has only keys defined
in the `spec` (some of the `spec` keys may be missing from the `value`
dict). Each provided value in the dict will be checked recursively with
the corresponding entry in the spec.
All entries in `value` are optional (i.e. can be `None`), unless the
corresponding entry in the specification is a `dict`.
Args:
value: The value to check.
path: The path in the toplevel object leading to this
value. This is used to make more legible error messages.
spec: The specification to check the value against. See above
for explanations on the format.
Raises:
ValueError: When the value does not match the specification.
"""
if isinstance(spec, dict):
if not isinstance(value, dict):
raise ValueError(
"Key {} should be a dict; got {}".format(
".".join(path), value))
invalid = set(value.keys()) - set(spec.keys())
if invalid:
invalid_keys = sorted(['.'.join(path + (key, )) for key in invalid])
raise ValueError(
"Key{} {} {} invalid".format(
"" if len(invalid_keys) == 1 else "s",
' and '.join(filter(None, [
', '.join(invalid_keys[:-1]),
invalid_keys[-1],
])),
"is" if len(invalid_keys) == 1 else "are"))
for key, spec_value in spec.items():
check(value.get(key), path=path + (key, ), spec=spec_value)
elif value is None:
pass
elif isinstance(spec, type):
if not isinstance(value, spec):
raise ValueError(
"Key {} should be a {}; got {!r}".format(
".".join(path), spec.__name__, value))
elif isinstance(spec, tuple):
if value not in spec:
raise ValueError(
"Key {} should be one of {}; got {!r}".format(
".".join(path), ", ".join(map(repr, spec)), value))
else:
raise AssertionError(
"Invalid spec: {}".format(spec))
def serialize_value(value):
"""Serialize a single value.
This is used instead of a single-shot `.format()` call because some values
    need special treatment when serialized to TOML; notably, booleans must
    be written as lowercase strings, and float exponents must not start with a
0.
"""
if isinstance(value, bool):
return repr(value).lower()
elif isinstance(value, float):
return "{0:.16}".format(value).replace("e+0", "e+").replace("e-0", "e-")
else:
return repr(value)
def serialize(f, key, value):
"""Serialize a (key, value) pair into a file."""
if isinstance(value, dict):
f.write("[{}]\n".format(key))
for k, v in value.items():
serialize(f, k, v)
elif value is not None:
f.write("{} = {}\n".format(key, serialize_value(value)))
def create_setting_file(options_dict, filename):
try:
check(options_dict)
except ValueError as e:
print("Invalid options dict: {}".format(e))
return
with open(filename, "w+") as f:
for key, value in options_dict.items():
serialize(f, key, value)
def clear_directory(folder):
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
except Exception as e:
print(e)
filename = "test_py.log"
opts = {
"num_workers": 24,
"timeout": 150.,
"algorithm": {
"type": "bandit",
"monte_carlo": True,
}
}
if __name__ == "__main__":
if not os.path.exists(setting_path):
os.makedirs(setting_path)
clear_directory(setting_path)
for i in range(8):
opts["algorithm"]["delta"] = pow(2, i) * 0.00001
for j in range(1, 4):
opts["algorithm"]["threshold"] = j * 10
filename = ("d" + "-" + "{:3e}".format(opts["algorithm"]["delta"]) + "_" + "t"
+ "{}".format(opts["algorithm"]["threshold"]) +".toml")
create_setting_file(opts, setting_path + filename)
|
python
|
#-*-coding:utf-8-*-
from pyspark.sql.types import IntegerType, TimestampType
from pyspark.sql.functions import *
from base import spark
from utils import uuidsha
columns = [
col('docu_dk').alias('alrt_docu_dk'),
col('docu_nr_mp').alias('alrt_docu_nr_mp'),
col('docu_orgi_orga_dk_responsavel').alias('alrt_orgi_orga_dk'),
col('elapsed').alias('alrt_dias_referencia')
]
columns_alias = [
col('alrt_docu_dk'),
col('alrt_docu_nr_mp'),
col('alrt_orgi_orga_dk'),
col('alrt_dias_referencia')
]
key_columns = [
col('alrt_docu_dk')
]
def alerta_prcr(options):
    # The fact date (docu_dt_fato) is used for most of the calculations
    # If the fact date is NULL, or later than the registration date, use the registration date as the fact date
    # Only PIP package codes are considered
    # The original fact date (even if NULL or later than the registration date) is kept for the calculation metadata table
doc_pena = spark.sql("""
SELECT docu_dk, docu_nr_mp, docu_nr_externo, docu_tx_etiqueta, docu_dt_fato as docu_dt_fato_original,
CASE WHEN docu_dt_fato < docu_dt_cadastro THEN docu_dt_fato ELSE docu_dt_cadastro END as docu_dt_fato,
docu_dt_cadastro, docu_orgi_orga_dk_responsavel,
cls.id as cldc_dk,
cls.cod_mgp as cldc_ds_classe,
cls.hierarquia as cldc_ds_hierarquia,
tpa.id,
tpa.artigo_lei,
tpa.max_pena,
tpa.nome_delito,
tpa.multiplicador,
tpa.abuso_menor
FROM documentos_ativos
LEFT JOIN {0}.mmps_classe_docto cls ON cls.id = docu_cldc_dk
JOIN {1}.mcpr_assunto_documento ON docu_dk = asdo_docu_dk
JOIN {0}.tb_penas_assuntos tpa ON tpa.id = asdo_assu_dk
JOIN {0}.atualizacao_pj_pacote ON docu_orgi_orga_dk_responsavel = id_orgao
WHERE docu_dt_cadastro >= '2010-01-01'
AND max_pena IS NOT NULL
AND cod_pct IN (200, 201, 202, 203, 204, 205, 206, 207, 208, 209)
        AND asdo_dt_fim IS NULL -- drop subjects (assuntos) that have ended
""".format(options['schema_exadata_aux'], options['schema_exadata'])
)
doc_pena.createOrReplaceTempView('DOC_PENA')
    # Compute limitation periods (tempos de prescrição) from the maximum penalties
    # If one of the subjects is a multiplier, multiply the penalties by its factor
doc_prescricao = spark.sql("""
WITH PENA_FATORES AS (
SELECT docu_dk, EXP(SUM(LN(max_pena))) AS fator_pena, concat_ws(', ', collect_list(nome_delito)) AS delitos_multiplicadores
FROM DOC_PENA
WHERE multiplicador = 1
GROUP BY docu_dk
)
SELECT *,
CASE
WHEN max_pena_fatorado < 1 THEN 3
WHEN max_pena_fatorado < 2 THEN 4
WHEN max_pena_fatorado < 4 THEN 8
WHEN max_pena_fatorado < 8 THEN 12
WHEN max_pena_fatorado < 12 THEN 16
ELSE 20 END AS tempo_prescricao
FROM (
SELECT
P.*,
CASE WHEN fator_pena IS NOT NULL THEN max_pena * fator_pena ELSE max_pena END AS max_pena_fatorado,
fator_pena,
delitos_multiplicadores
FROM DOC_PENA P
LEFT JOIN PENA_FATORES F ON F.docu_dk = P.docu_dk
WHERE multiplicador = 0
) t
""")
doc_prescricao.createOrReplaceTempView('DOC_PRESCRICAO')
    # If the accused is under 21 or at least 70, either at the fact date or today, multiply tempo_prescricao by 0.5
doc_prescricao_fatorado = spark.sql("""
WITH PRESCRICAO_FATORES AS (
SELECT docu_dk, investigado_pess_dk, investigado_nm,
CASE WHEN NOT (dt_compare >= dt_21 AND current_timestamp() < dt_70) THEN 0.5 ELSE NULL END AS fator_prescricao
FROM (
SELECT DISTINCT
docu_dk,
pesf_pess_dk as investigado_pess_dk,
pesf_nm_pessoa_fisica as investigado_nm,
add_months(pesf_dt_nasc, 21 * 12) AS dt_21,
add_months(pesf_dt_nasc, 70 * 12) AS dt_70,
docu_dt_fato AS dt_compare
FROM DOC_PRESCRICAO
JOIN {0}.mcpr_personagem ON pers_docu_dk = docu_dk
JOIN {0}.mcpr_pessoa_fisica ON pers_pesf_dk = pesf_pess_dk
WHERE pers_tppe_dk IN (290, 7, 21, 317, 20, 14, 32, 345, 40, 5, 24)
AND pesf_nm_pessoa_fisica != 'MP'
) t
)
SELECT P.*,
CASE WHEN fator_prescricao IS NOT NULL THEN tempo_prescricao * fator_prescricao ELSE tempo_prescricao END AS tempo_prescricao_fatorado,
fator_prescricao IS NOT NULL AS investigado_maior_70_menor_21,
investigado_pess_dk,
investigado_nm
FROM DOC_PRESCRICAO P
LEFT JOIN PRESCRICAO_FATORES F ON F.docu_dk = P.docu_dk
""".format(options['schema_exadata']))
doc_prescricao_fatorado.createOrReplaceTempView('DOC_PRESCRICAO_FATORADO')
    # Compute the initial date of the limitation period
    # When a non-prosecution agreement (ANPP) was rescinded, the initial date is the date of that procedural step
spark.sql("""
SELECT vist_docu_dk, pcao_dt_andamento
FROM vista
JOIN {0}.mcpr_andamento ON vist_dk = pcao_vist_dk
JOIN {0}.mcpr_sub_andamento ON stao_pcao_dk = pcao_dk
WHERE stao_tppr_dk = 7920
AND year_month >= 201901
""".format(options['schema_exadata'])
).createOrReplaceTempView('DOCS_ANPP')
    # For child-abuse cases, the initial date is the date the minor turns 18,
    # when the minor was under 18 at the fact/registration date
    # Priority for the initial date: 18th-birthday date (child-abuse case), ANPP rescission date, dt_fato
dt_inicial = spark.sql("""
WITH DOCS_ABUSO_MENOR AS (
SELECT docu_dk, MAX(dt_18_anos) AS dt_18_anos
FROM (
SELECT docu_dk, CASE WHEN dt_18_anos > docu_dt_fato THEN dt_18_anos ELSE NULL END AS dt_18_anos
FROM DOC_PRESCRICAO_FATORADO P
JOIN {0}.mcpr_personagem ON pers_docu_dk = docu_dk
JOIN (
SELECT
PF.*,
cast(add_months(pesf_dt_nasc, 18*12) as timestamp) AS dt_18_anos
FROM {0}.mcpr_pessoa_fisica PF
) t ON pers_pesf_dk = pesf_pess_dk
WHERE abuso_menor = 1
AND pers_tppe_dk IN (3, 13, 18, 6, 248, 290)
) t2
GROUP BY docu_dk
)
SELECT P.*,
CASE
WHEN dt_18_anos IS NOT NULL AND abuso_menor = 1 THEN dt_18_anos
WHEN pcao_dt_andamento IS NOT NULL THEN pcao_dt_andamento
ELSE docu_dt_fato END AS dt_inicial_prescricao,
dt_18_anos AS vitima_menor_mais_jovem_dt_18_anos,
pcao_dt_andamento AS dt_acordo_npp
FROM DOC_PRESCRICAO_FATORADO P
LEFT JOIN DOCS_ANPP ON vist_docu_dk = docu_dk
LEFT JOIN DOCS_ABUSO_MENOR M ON M.docu_dk = P.docu_dk
""".format(options['schema_exadata']))
dt_inicial.createOrReplaceTempView('DOCS_DT_INICIAL_PRESCRICAO')
    # Limitation date = initial limitation date + limitation period
resultado = spark.sql("""
SELECT
D.*,
cast(add_months(dt_inicial_prescricao, tempo_prescricao_fatorado * 12) as timestamp) AS data_prescricao
FROM DOCS_DT_INICIAL_PRESCRICAO D
""").\
withColumn("elapsed", lit(datediff(current_date(), 'data_prescricao')).cast(IntegerType()))
resultado.createOrReplaceTempView('TEMPO_PARA_PRESCRICAO')
spark.catalog.cacheTable("TEMPO_PARA_PRESCRICAO")
    # Alert detail table, used in the alert overlay and also for debugging
spark.sql("""
SELECT
docu_dk AS adpr_docu_dk,
investigado_pess_dk as adpr_investigado_pess_dk,
investigado_nm as adpr_investigado_nm,
nome_delito as adpr_nome_delito,
id as adpr_id_assunto,
artigo_lei as adpr_artigo_lei,
abuso_menor as adpr_abuso_menor,
max_pena as adpr_max_pena,
delitos_multiplicadores as adpr_delitos_multiplicadores,
fator_pena as adpr_fator_pena,
max_pena_fatorado as adpr_max_pena_fatorado,
tempo_prescricao as adpr_tempo_prescricao,
investigado_maior_70_menor_21 as adpr_investigado_prescricao_reduzida,
tempo_prescricao_fatorado as adpr_tempo_prescricao_fatorado,
vitima_menor_mais_jovem_dt_18_anos as adpr_dt_18_anos_menor_vitima,
dt_acordo_npp as adpr_dt_acordo_npp,
docu_dt_fato_original as adpr_docu_dt_fato,
docu_dt_cadastro as adpr_docu_dt_cadastro,
cast(dt_inicial_prescricao as string) as adpr_dt_inicial_prescricao,
data_prescricao as adpr_dt_final_prescricao,
elapsed as adpr_dias_prescrito
FROM TEMPO_PARA_PRESCRICAO
""").write.mode('overwrite').saveAsTable('{}.{}'.format(
options['schema_alertas'],
options['prescricao_tabela_detalhe']
)
)
LIMIAR_PRESCRICAO_PROXIMA = -options['prescricao_limiar']
subtipos = spark.sql("""
SELECT T.*,
CASE
                WHEN elapsed > 0 THEN 2 -- time-barred
                WHEN elapsed <= {LIMIAR_PRESCRICAO_PROXIMA} THEN 0 -- neither time-barred nor close to it
                ELSE 1 -- close to becoming time-barred
END AS status_prescricao
FROM TEMPO_PARA_PRESCRICAO T
""".format(
LIMIAR_PRESCRICAO_PROXIMA=LIMIAR_PRESCRICAO_PROXIMA)
)
max_min_status = subtipos.groupBy(columns[:-1]).agg(min('status_prescricao'), max('status_prescricao'), min('elapsed')).\
withColumnRenamed('max(status_prescricao)', 'max_status').\
withColumnRenamed('min(status_prescricao)', 'min_status').\
withColumnRenamed('min(elapsed)', 'alrt_dias_referencia')
max_min_status.createOrReplaceTempView('MAX_MIN_STATUS')
    # The WHEN clauses must be evaluated in the order PRCR1, 2, 3 and only then 4!
resultado = spark.sql("""
SELECT T.*,
CASE
                WHEN min_status = 2 THEN 'PRCR1' -- all time-barred
                WHEN min_status = 1 THEN 'PRCR2' -- all close to becoming time-barred
                WHEN max_status = 2 THEN 'PRCR3' -- min=0 is implied here, so some time-barred (but not all close)
                WHEN max_status = 1 THEN 'PRCR4' -- min=0 is implied, so none time-barred, but some close (not all)
                ELSE NULL -- matches no case (will be filtered out)
END AS alrt_sigla,
CASE
WHEN min_status = 2 THEN 'Todos os crimes prescritos'
WHEN min_status = 1 THEN 'Todos os crimes próximos de prescrever'
WHEN max_status = 2 THEN 'Algum crime prescrito'
WHEN max_status = 1 THEN 'Algum crime próximo de prescrever'
ELSE NULL
END AS alrt_descricao
FROM MAX_MIN_STATUS T
""")
resultado = resultado.filter('alrt_sigla IS NOT NULL').select(columns_alias + ['alrt_sigla', 'alrt_descricao'])
resultado = resultado.withColumn('alrt_key', uuidsha(*key_columns))
return resultado
|
python
|
from .base import BaseCLItest
class TestCliPush(BaseCLItest):
"""
askanna push
We expect to initiate a push action of our code to the AskAnna server
"""
verb = "push"
def test_command_push_base(self):
assert "push" in self.result.output
self.assertIn("push", self.result.output)
self.assertNotIn("noop", self.result.output)
|
python
|
"""Class performing under-sampling based on the neighbourhood cleaning rule."""
# Authors: Guillaume Lemaitre <[email protected]>
# Christos Aridas
# License: MIT
from __future__ import division, print_function
from collections import Counter
import numpy as np
from ..base import BaseMulticlassSampler
from ..utils import check_neighbors_object
class NeighbourhoodCleaningRule(BaseMulticlassSampler):
"""Class performing under-sampling based on the neighbourhood cleaning
rule.
Parameters
----------
return_indices : bool, optional (default=False)
Whether or not to return the indices of the samples randomly
selected from the majority class.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by np.random.
size_ngh : int, optional (default=None)
Size of the neighbourhood to consider to compute the average
distance to the minority point samples.
NOTE: size_ngh is deprecated from 0.2 and will be replaced in 0.4
Use ``n_neighbors`` instead.
n_neighbors : int or object, optional (default=3)
If int, size of the neighbourhood to consider in order to make
the comparison between each samples and their NN.
If object, an estimator that inherits from
`sklearn.neighbors.base.KNeighborsMixin` that will be used to find
the k_neighbors.
n_jobs : int, optional (default=1)
The number of threads to open if possible.
Attributes
----------
min_c_ : str or int
The identifier of the minority class.
max_c_ : str or int
The identifier of the majority class.
stats_c_ : dict of str/int : int
        A dictionary in which the number of occurrences of each class is
reported.
X_shape_ : tuple of int
Shape of the data `X` during fitting.
Notes
-----
    This class supports multi-class problems.
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from imblearn.under_sampling import \
NeighbourhoodCleaningRule # doctest: +NORMALIZE_WHITESPACE
>>> X, y = make_classification(n_classes=2, class_sep=2,
... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
>>> print('Original dataset shape {}'.format(Counter(y)))
Original dataset shape Counter({1: 900, 0: 100})
>>> ncr = NeighbourhoodCleaningRule(random_state=42)
>>> X_res, y_res = ncr.fit_sample(X, y)
>>> print('Resampled dataset shape {}'.format(Counter(y_res)))
Resampled dataset shape Counter({1: 889, 0: 100})
References
----------
.. [1] J. Laurikkala, "Improving identification of difficult small classes
by balancing class distribution," Springer Berlin Heidelberg, 2001.
"""
def __init__(self,
return_indices=False,
random_state=None,
size_ngh=None,
n_neighbors=3,
n_jobs=1):
super(NeighbourhoodCleaningRule, self).__init__(
random_state=random_state)
self.return_indices = return_indices
self.size_ngh = size_ngh
self.n_neighbors = n_neighbors
self.n_jobs = n_jobs
def fit(self, X, y):
"""Find the classes statistics before to perform sampling.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Matrix containing the data which have to be sampled.
y : ndarray, shape (n_samples, )
Corresponding label for each sample in X.
Returns
-------
self : object,
Return self.
"""
super(NeighbourhoodCleaningRule, self).fit(X, y)
self.nn_ = check_neighbors_object('n_neighbors', self.n_neighbors)
# set the number of jobs
self.nn_.set_params(**{'n_jobs': self.n_jobs})
return self
def _sample(self, X, y):
"""Resample the dataset.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Matrix containing the data which have to be sampled.
y : ndarray, shape (n_samples, )
Corresponding label for each sample in X.
Returns
-------
X_resampled : ndarray, shape (n_samples_new, n_features)
The array containing the resampled data.
y_resampled : ndarray, shape (n_samples_new)
The corresponding label of `X_resampled`
idx_under : ndarray, shape (n_samples, )
            If `return_indices` is `True`, an array containing the indices of
            the samples that have been selected will be returned.
"""
# Start with the minority class
X_min = X[y == self.min_c_]
y_min = y[y == self.min_c_]
# All the minority class samples will be preserved
X_resampled = X_min.copy()
y_resampled = y_min.copy()
# If we need to offer support for the indices
if self.return_indices:
idx_under = np.flatnonzero(y == self.min_c_)
# Fit the whole dataset
self.nn_.fit(X)
idx_to_exclude = []
        # Loop over the other classes
for key in self.stats_c_.keys():
# Get the sample of the current class
sub_samples_x = X[y == key]
# Get the samples associated
idx_sub_sample = np.flatnonzero(y == key)
# Find the NN for the current class
nnhood_idx = self.nn_.kneighbors(
sub_samples_x, return_distance=False)
            # Get the labels corresponding to the neighbour indices
nnhood_label = (y[nnhood_idx] == key)
            # Check which ones have the same label as the current class
            # using an AND operation across the neighbours
nnhood_bool = np.logical_not(np.all(nnhood_label, axis=1))
            # If this is the minority class, remove the majority samples
if key == self.min_c_:
# Get the index to exclude
idx_to_exclude += nnhood_idx[np.nonzero(np.logical_not(
nnhood_label[np.flatnonzero(nnhood_bool)]))].tolist()
else:
# Get the index to exclude
idx_to_exclude += idx_sub_sample[np.nonzero(
nnhood_bool)].tolist()
idx_to_exclude = np.unique(idx_to_exclude)
# Create a vector with the sample to select
sel_idx = np.ones(y.shape)
sel_idx[idx_to_exclude] = 0
        # Also exclude the minority samples since they will be
        # concatenated later
sel_idx[y == self.min_c_] = 0
# Get the samples from the majority classes
sel_x = X[np.flatnonzero(sel_idx), :]
sel_y = y[np.flatnonzero(sel_idx)]
# If we need to offer support for the indices selected
if self.return_indices:
idx_tmp = np.flatnonzero(sel_idx)
idx_under = np.concatenate((idx_under, idx_tmp), axis=0)
X_resampled = np.concatenate((X_resampled, sel_x), axis=0)
y_resampled = np.concatenate((y_resampled, sel_y), axis=0)
self.logger.info('Under-sampling performed: %s', Counter(y_resampled))
# Check if the indices of the samples selected should be returned too
if self.return_indices:
# Return the indices of interest
return X_resampled, y_resampled, idx_under
else:
return X_resampled, y_resampled
|
python
|
import unittest
import numpy as np
from pavlidis import pavlidis
class TestPavlidis(unittest.TestCase):
def test_pixel(self):
case = np.zeros((3, 4), np.uint8)
case[1, 2] = True
result = pavlidis(case, 1, 2)
self.assertEqual(len(result), 1)
self.assertEqual(result[0, 0], 1)
self.assertEqual(result[0, 1], 2)
def test_edge(self):
# Test a 2x2 square within a 2x2 grid so that it might run off
# the edges
# This checks turning too
case = np.ones((2, 2), np.uint8)
result = pavlidis(case, 1, 0)
self.assertEqual(len(result), 4)
self.assertEqual(result[0, 0], 1)
self.assertEqual(result[0, 1], 0)
self.assertEqual(result[1, 0], 0)
self.assertEqual(result[1, 1], 0)
self.assertEqual(result[2, 0], 0)
self.assertEqual(result[2, 1], 1)
self.assertEqual(result[3, 0], 1)
self.assertEqual(result[3, 1], 1)
def test_bool_cast(self):
case = np.zeros((3, 3), bool)
case[1, 1] = True
result = pavlidis(case, 1, 1)
self.assertEqual(len(result), 1)
self.assertEqual(result[0, 0], 1)
self.assertEqual(result[0, 1], 1)
def test_interior_raises(self):
case = np.ones((3, 3), bool)
self.assertRaises(BaseException, pavlidis, case, 1, 1)
# The test names check the p1, p2 and p3 cases for the directions,
# xn (going towards -x), yn, xp and yp
#
def test_p1xn(self):
#
# 2 0
# | /
# 1
case = np.zeros((4, 4), np.uint8)
case[1, 1:3] = True
case[2, 1] = True
result = pavlidis(case, 1, 2)
self.assertEqual(len(result), 3)
self.assertEqual(result[0, 0], 1)
self.assertEqual(result[0, 1], 2)
self.assertEqual(result[1, 0], 2)
self.assertEqual(result[1, 1], 1)
def test_p2xn(self):
#
# 2
# |
# 1 - 0
case = np.zeros((4, 4), np.uint8)
case[2, 1:3] = True
case[1, 1] = True
result = pavlidis(case, 2, 2)
self.assertEqual(len(result), 3)
self.assertEqual(result[0, 0], 2)
self.assertEqual(result[0, 1], 2)
self.assertEqual(result[1, 0], 2)
self.assertEqual(result[1, 1], 1)
def test_p3xn(self):
#
# 1--2
# \
# 0
#
case = np.zeros((4, 4), np.uint8)
case[2, 2] = True
case[1, 1:3] = True
result = pavlidis(case, 2, 2)
self.assertEqual(len(result), 3)
self.assertEqual(result[0, 0], 2)
self.assertEqual(result[0, 1], 2)
self.assertEqual(result[1, 0], 1)
self.assertEqual(result[1, 1], 1)
def test_p1yn(self):
#
# 2--3
# \
# 1
# |
# 0
case = np.zeros((4, 4), np.uint8)
case[0:3, 2] = True
case[0, 1] = True
result = pavlidis(case, 2, 2)
self.assertEqual(len(result), 5)
self.assertEqual(result[1, 0], 1)
self.assertEqual(result[1, 1], 2)
self.assertEqual(result[2, 0], 0)
self.assertEqual(result[2, 1], 1)
def test_p2yn(self):
#
# 2 -- 3
# |
# 1
# |
# 0
case = np.zeros((4, 4), np.uint8)
case[1:, 1] = True
case[1, 2] = True
result = pavlidis(case, 3, 1)
self.assertEqual(len(result), 5)
self.assertEqual(result[1, 0], 2)
self.assertEqual(result[1, 1], 1)
self.assertEqual(result[2, 0], 1)
self.assertEqual(result[2, 1], 1)
def test_p3yn(self):
#
# 2
# /
# 1
# |
# 0
case = np.zeros((4, 4), np.uint8)
case[2:, 1] = True
case[1, 2] = True
result = pavlidis(case, 3, 1)
self.assertEqual(len(result), 4)
self.assertEqual(result[1, 0], 2)
self.assertEqual(result[1, 1], 1)
self.assertEqual(result[2, 0], 1)
self.assertEqual(result[2, 1], 2)
def test_p1xp(self):
# 3
# /|
# 1 - 2 4
# /
# 0
case = np.zeros((4, 4), np.uint8)
case[3, 0] = True
case[2, 1:4] = True
case[1, 3] = True
result = pavlidis(case, 3, 0)
self.assertEqual(len(result), 7)
self.assertEqual(result[2, 0], 2)
self.assertEqual(result[2, 1], 2)
self.assertEqual(result[3, 0], 1)
self.assertEqual(result[3, 1], 3)
def test_p2xp(self):
#
# 1 - 2 - 3
# /
# 0
case = np.zeros((4, 4), np.uint8)
case[3, 0] = True
case[2, 1:4] = True
result = pavlidis(case, 3, 0)
self.assertEqual(len(result), 6)
self.assertEqual(result[2, 0], 2)
self.assertEqual(result[2, 1], 2)
self.assertEqual(result[3, 0], 2)
self.assertEqual(result[3, 1], 3)
def test_p3xp(self):
#
# 1 - 2
# / \
# 0 3
case = np.zeros((4, 4), np.uint8)
case[3, 0] = True
case[2, 1:3] = True
case[3, 3] = True
result = pavlidis(case, 3, 0)
self.assertEqual(len(result), 6)
self.assertEqual(result[2, 0], 2)
self.assertEqual(result[2, 1], 2)
self.assertEqual(result[3, 0], 3)
self.assertEqual(result[3, 1], 3)
def test_p1yp(self):
#
# 1 - 2
# / |
# 0 3
# \
# 5--4
case = np.zeros((4, 4), np.uint8)
case[2, 0] = True
case[1, 1] = True
case[1, 2] = True
case[2, 2] = True
case[3, 2:] = True
result = pavlidis(case, 2, 0)
self.assertEqual(result[3, 0], 2)
self.assertEqual(result[3, 1], 2)
self.assertEqual(result[4, 0], 3)
self.assertEqual(result[4, 1], 3)
def test_p2yp(self):
#
# 1 - 2
# / |
# 0 3
# |
# 5 -- 4
case = np.zeros((4, 4), np.uint8)
case[2, 0] = True
case[1, 1] = True
case[1, 2] = True
case[2, 2] = True
case[3, 1:3] = True
result = pavlidis(case, 2, 0)
self.assertEqual(result[3, 0], 2)
self.assertEqual(result[3, 1], 2)
self.assertEqual(result[4, 0], 3)
self.assertEqual(result[4, 1], 2)
def test_p3yp(self):
#
# 1 - 2
# / |
# 0 3
# |
# 5 -- 4
case = np.zeros((4, 4), np.uint8)
case[2, 0] = True
case[1, 1] = True
case[1, 2] = True
case[2, 2] = True
case[3, 1] = True
result = pavlidis(case, 2, 0)
self.assertEqual(result[3, 0], 2)
self.assertEqual(result[3, 1], 2)
self.assertEqual(result[4, 0], 3)
self.assertEqual(result[4, 1], 1)
def test_issue1_regression(self):
small = np.zeros((4, 4))
small[1, 1] = True
small[2, 1] = True
self.assertEqual(len(pavlidis(small, 1, 1)), 2)
def test_issue2_regression(self):
small = np.zeros((10, 10))
small[4, 5:] = True
small[5, 4:] = True
small[6, 3:] = True
small[7, 2:] = True
self.assertGreater(len(pavlidis(small, 4, 5)), 4)
if __name__ == '__main__':
unittest.main()
|
python
|
import urllib.parse
from agent import source, pipeline
from agent.pipeline.config.stages.source.jdbc import JDBCSource
class SolarWindsScript(JDBCSource):
JYTHON_SCRIPT = 'solarwinds.py'
SOLARWINDS_API_ADDRESS = '/SolarWinds/InformationService/v3/Json/Query'
def get_config(self) -> dict:
with open(self.get_jython_file_path()) as f:
return {
'scriptConf.params': [
{'key': 'PIPELINE_NAME', 'value': self.pipeline.name},
{'key': 'QUERY', 'value': pipeline.jdbc.query.SolarWindsBuilder(self.pipeline).build()},
{
'key': 'SOLARWINDS_API_URL',
'value': urllib.parse.urljoin(
self.pipeline.source.config[source.SolarWindsSource.URL],
self.SOLARWINDS_API_ADDRESS
)
},
{'key': 'API_USER', 'value': self.pipeline.source.config[source.SolarWindsSource.USERNAME]},
{'key': 'API_PASSWORD', 'value': self.pipeline.source.config[source.SolarWindsSource.PASSWORD]},
{'key': 'INTERVAL_IN_SECONDS', 'value': str(self.pipeline.interval)},
{'key': 'DELAY_IN_SECONDS', 'value': str(self.pipeline.delay)},
{'key': 'DAYS_TO_BACKFILL', 'value': self.pipeline.days_to_backfill},
{'key': 'QUERY_TIMEOUT', 'value': str(self.pipeline.source.query_timeout)},
{
'key': 'MONITORING_URL',
'value': urllib.parse.urljoin(
self.pipeline.streamsets.agent_external_url,
f'/monitoring/source_http_error/{self.pipeline.name}/'
)
},
{'key': 'VERIFY_SSL', 'value': '1' if self.pipeline.source.config.get('verify_ssl', True) else ''},
],
'script': f.read()
}
|
python
|
from logging import getLogger
import multiprocessing
from pathlib import Path
import random
import traceback
from typing import Union
import networkx as nx
from remake.remake_exceptions import RemakeError
from remake.special_paths import SpecialPaths
from remake.task import Task
from remake.task_control import TaskControl
from remake.task_query_set import TaskQuerySet
from remake.setup_logging import setup_stdout_logging
logger = getLogger(__name__)
class Remake:
"""Core class. A remakefile is defined by creating an instance of Remake.
Acts as an entry point to running all tasks via python, and retrieving information about the state of any task.
Contains a list of all tasks added by any `TaskRule`.
A remakefile must contain:
>>> demo = Remake()
This must be near the top of the file - after the imports but before any `TaskRule` is defined.
"""
remakes = {}
current_remake = {}
def __init__(self, name: str = None, config: dict = None, special_paths: SpecialPaths = None):
"""Constructor.
:param name: name to use for remakefile (defaults to its filename)
:param config: configuration for executors
:param special_paths: special paths to use for all input/output filenames
"""
setup_stdout_logging('INFO', colour=True)
if not name:
stack = next(traceback.walk_stack(None))
frame = stack[0]
name = frame.f_globals['__file__']
# This is needed for when MultiprocExecutor makes its own Remakes in worker procs.
if multiprocessing.current_process().name == 'MainProcess':
if name in Remake.remakes:
# Can happen on ipython run remakefile.
# Can also happen on tab completion of Remake obj.
# e.g. in remake/examples/ex1.py:
# ex1.Bas<tab>
# traceback.print_stack()
logger.info(f'Remake {name} added twice')
Remake.remakes[name] = self
else:
logger.debug(f'Process {multiprocessing.current_process().name}')
logger.debug(Remake.current_remake)
logger.debug(Remake.remakes)
Remake.current_remake[multiprocessing.current_process().name] = self
self.config = config
if not special_paths:
special_paths = SpecialPaths()
self.special_paths = special_paths
self.task_ctrl = TaskControl(name, config, special_paths)
self.rules = []
self.tasks = TaskQuerySet(task_ctrl=self.task_ctrl)
@property
def name(self):
return self.task_ctrl.name
@property
def pending_tasks(self):
return self.task_ctrl.pending_tasks
@property
def remaining_tasks(self):
return self.task_ctrl.remaining_tasks
@property
def completed_tasks(self):
return self.task_ctrl.completed_tasks
def task_status(self, task: Task) -> str:
"""Get the status of a task.
:param task: task to get status for
:return: status
"""
return self.task_ctrl.statuses.task_status(task)
def rerun_required(self):
"""Rerun status of this Remake object.
:return: True if any tasks remain to be run
"""
assert self.finalized
return self.task_ctrl.rescan_tasks or self.task_ctrl.pending_tasks
def configure(self, print_reasons: bool, executor: str, display: str):
"""Allow Remake object to be configured after creation.
:param print_reasons: print reason for running individual task
:param executor: name of which `remake.executor` to use
:param display: how to display task status after each task is run
"""
self.task_ctrl.print_reasons = print_reasons
self.task_ctrl.set_executor(executor)
if display == 'print_status':
self.task_ctrl.display_func = self.task_ctrl.__class__.print_status
elif display == 'task_dag':
from remake.experimental.networkx_displays import display_task_status
self.task_ctrl.display_func = display_task_status
elif display:
raise Exception(f'display {display} not recognized')
def short_status(self, mode='logger.info'):
"""Log/print a short status line.
:param mode: 'logger.info' or 'print'
"""
if mode == 'logger.info':
displayer = logger.info
elif mode == 'print':
displayer = print
else:
raise ValueError(f'Unrecognized mode: {mode}')
displayer(f'Status (complete/rescan/pending/remaining/cannot run): '
f'{len(self.completed_tasks)}/{len(self.task_ctrl.rescan_tasks)}/'
f'{len(self.pending_tasks)}/{len(self.remaining_tasks)}/{len(self.task_ctrl.cannot_run_tasks)}')
def display_task_dag(self):
"""Display all tasks as a Directed Acyclic Graph (DAG)"""
from remake.experimental.networkx_displays import display_task_status
import matplotlib.pyplot as plt
display_task_status(self.task_ctrl)
plt.show()
def run_all(self, force=False):
"""Run all tasks.
:param force: force rerun of each task
"""
self.task_ctrl.run_all(force=force)
def run_one(self):
"""Run the next pending task"""
all_pending = list(self.task_ctrl.rescan_tasks + self.task_ctrl.statuses.ordered_pending_tasks)
if all_pending:
task = all_pending[0]
self.task_ctrl.run_requested([task], force=False)
def run_random(self):
"""Run a random task (pot luck out of pending)"""
task = random.choice(list(self.task_ctrl.pending_tasks))
self.run_requested([task], force=False)
def run_requested(self, requested, force=False, handle_dependencies=False):
"""Run requested tasks.
:param requested:
:param force: force rerun of each task
:param handle_dependencies: add all ancestor tasks to ensure given tasks can be run
"""
# Work out whether it's possible to run requested tasks.
ancestors = self.all_ancestors(requested)
rerun_required_ancestors = ancestors & (self.pending_tasks |
self.remaining_tasks)
missing_tasks = rerun_required_ancestors - set(requested)
if missing_tasks:
logger.debug(f'{len(missing_tasks)} need to be added')
if not handle_dependencies:
logger.error('Impossible to run requested tasks')
raise RemakeError('Cannot run with requested tasks. Use --handle-dependencies to fix.')
else:
requested = list(rerun_required_ancestors)
requested = self.task_ctrl.rescan_tasks + requested
self.task_ctrl.run_requested(requested, force=force)
def list_rules(self):
"""List all rules"""
return self.rules
def find_task(self, task_path_hash_key: Union[Task, str]):
"""Find a task from its path_hash_key.
:param task_path_hash_key: key of task
:return: found task
"""
if isinstance(task_path_hash_key, Task):
return task_path_hash_key
else:
return self.find_tasks([task_path_hash_key])[0].task
def find_tasks(self, task_path_hash_keys):
"""Find all tasks given by their path hash keys
:param task_path_hash_keys: list of path hash keys
:return: all found tasks
"""
tasks = TaskQuerySet([], self.task_ctrl)
for task_path_hash_key in task_path_hash_keys:
if len(task_path_hash_key) == 40:
tasks.append(self.task_ctrl.task_from_path_hash_key[task_path_hash_key])
else:
# TODO: Make less bad.
# I know this is terribly inefficient!
_tasks = []
for k, v in self.task_ctrl.task_from_path_hash_key.items():
if k[:len(task_path_hash_key)] == task_path_hash_key:
_tasks.append(v)
if len(_tasks) == 0:
raise KeyError(task_path_hash_key)
elif len(_tasks) > 1:
raise KeyError(f'{task_path_hash_key} matches multiple keys')
tasks.append(_tasks[0])
return tasks
def list_tasks(self, tfilter=None, rule=None, requires_rerun=False,
uses_file=None, produces_file=None,
ancestor_of=None, descendant_of=None):
"""List all tasks subject to requirements.
:param tfilter: dict of key/value pairs to filter tasks on
:param rule: rule that tasks belongs to
:param requires_rerun: whether tasks require rerun
:param uses_file: whether tasks use a given file
:param produces_file: whether tasks produce a given file
:param ancestor_of: whether tasks are an ancestor of this task (path hash key)
:param descendant_of: whether tasks are a descendant of this task (path hash key)
:return: all matching tasks
"""
tasks = TaskQuerySet([t for t in self.tasks], self.task_ctrl)
if tfilter:
tasks = tasks.filter(cast_to_str=True, **tfilter)
if rule:
tasks = tasks.in_rule(rule)
if uses_file:
uses_file = Path(uses_file).absolute()
tasks = [t for t in tasks if uses_file in t.inputs.values()]
if produces_file:
produces_file = Path(produces_file).absolute()
tasks = [t for t in tasks if produces_file in t.outputs.values()]
if ancestor_of:
ancestor_of = self.find_task(ancestor_of)
ancestor_tasks = self.ancestors(ancestor_of)
tasks = sorted(ancestor_tasks & set(tasks), key=self.task_ctrl.sorted_tasks.get)
if descendant_of:
descendant_of = self.find_task(descendant_of)
descendant_tasks = self.descendants(descendant_of)
tasks = sorted(descendant_tasks & set(tasks), key=self.task_ctrl.sorted_tasks.get)
if requires_rerun:
tasks = [t for t in tasks
if self.task_ctrl.statuses.task_status(t) in ['pending', 'remaining']]
return TaskQuerySet(tasks, self.task_ctrl)
def all_descendants(self, tasks):
"""Find all descendants of tasks
:param tasks: tasks to start from
:return: all descendants
"""
descendants = set()
for task in tasks:
if task in descendants:
continue
descendants |= self.descendants(task)
return descendants
def all_ancestors(self, tasks):
"""Find all ancestors of tasks
:param tasks: tasks to start from
:return: all ancestors
"""
ancestors = set()
for task in tasks:
if task in ancestors:
continue
ancestors |= self.ancestors(task)
return ancestors
def descendants(self, task):
"""All descendants of a given task.
:param task: task to start from
:return: all descendants
"""
return set(nx.bfs_tree(self.task_ctrl.task_dag, task))
def ancestors(self, task):
"""All ancestors of a given task.
:param task: task to start from
:return: all ancestors
"""
return set(nx.bfs_tree(self.task_ctrl.task_dag, task, reverse=True))
def list_files(self, filetype=None, exists=False,
produced_by_rule=None, used_by_rule=None,
produced_by_task=None, used_by_task=None):
"""List all files subject to criteria.
:param filetype: one of input_only, output_only, input, output, inout
:param exists: whether file exists
:param produced_by_rule: whether file is produced by rule
:param used_by_rule: whether file is used by rule
:param produced_by_task: whether file is produced by task (path hash key)
:param used_by_task: whether file is used by task (path hash key)
:return: all matching files
"""
input_paths = set(self.task_ctrl.input_task_map.keys())
output_paths = set(self.task_ctrl.output_task_map.keys())
input_only_paths = input_paths - output_paths
output_only_paths = output_paths - input_paths
inout_paths = input_paths & output_paths
files = input_paths | output_only_paths
if filetype is None:
files = sorted(files)
elif filetype == 'input_only':
files = sorted(input_only_paths)
elif filetype == 'output_only':
files = sorted(output_only_paths)
elif filetype == 'input':
files = sorted(input_paths)
elif filetype == 'output':
files = sorted(output_paths)
elif filetype == 'inout':
files = sorted(inout_paths)
else:
raise ValueError(f'Unknown filetype: {filetype}')
if exists:
files = [f for f in files if f.exists()]
if used_by_rule:
_files = set()
for f in files:
if f not in self.task_ctrl.input_task_map:
continue
for t in self.task_ctrl.input_task_map[f]:
if t.__class__.__name__ == used_by_rule:
_files.add(f)
files = sorted(_files)
if produced_by_rule:
_files = set()
for f in files:
if f not in self.task_ctrl.output_task_map:
continue
t = self.task_ctrl.output_task_map[f]
if t.__class__.__name__ == produced_by_rule:
_files.add(f)
files = sorted(_files)
if used_by_task:
used_by_task = self.find_task(used_by_task)
_files = set()
for f in files:
if f not in self.task_ctrl.input_task_map:
continue
for t in self.task_ctrl.input_task_map[f]:
if t is used_by_task:
_files.add(f)
files = sorted(_files)
if produced_by_task:
produced_by_task = self.find_task(produced_by_task)
_files = set()
for f in files:
if f not in self.task_ctrl.output_task_map:
continue
t = self.task_ctrl.output_task_map[f]
if t is produced_by_task:
_files.add(f)
files = sorted(_files)
filelist = []
for file in files:
if file in input_only_paths:
ftype = 'input-only'
elif file in output_only_paths:
ftype = 'output-only'
elif file in inout_paths:
ftype = 'inout'
filelist.append((file, ftype, file.exists()))
return filelist
def task_info(self, task_path_hash_keys):
"""Task info for all given tasks.
:param task_path_hash_keys: task hash keys for tasks
:return: dict containing all info
"""
assert self.finalized
info = {}
tasks = self.find_tasks(task_path_hash_keys)
for task_path_hash_key, task in zip(task_path_hash_keys, tasks):
task_md = self.task_ctrl.metadata_manager.task_metadata_map[task]
status = self.task_ctrl.statuses.task_status(task)
info[task_path_hash_key] = (task, task_md, status)
return info
def file_info(self, filenames):
"""File info for all given files.
:param filenames: filenames to get info for
:return: dict containing all info
"""
info = {}
for filepath in (Path(fn).absolute() for fn in filenames):
if filepath in self.task_ctrl.input_task_map:
used_by_tasks = self.task_ctrl.input_task_map[filepath]
else:
used_by_tasks = []
if filepath in self.task_ctrl.output_task_map:
produced_by_task = self.task_ctrl.output_task_map[filepath]
else:
produced_by_task = None
if used_by_tasks or produced_by_task:
path_md = self.task_ctrl.metadata_manager.path_metadata_map[filepath]
else:
path_md = None
info[filepath] = path_md, produced_by_task, used_by_tasks
return info
@property
def finalized(self):
"""Is this finalized (i.e. ready to be run)?"""
return self.task_ctrl.finalized
def reset(self):
"""Reset the internal state"""
self.task_ctrl.reset()
return self
def finalize(self):
"""Finalize this Remake object"""
self.task_ctrl.finalize()
Remake.current_remake[multiprocessing.current_process().name] = None
return self
|
python
|
#!/usr/bin/env python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Eric Bidelman <ebidel@>'
import os
import sys
import jinja2
import webapp2
import http2push as http2
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__))
)
# TODO(ericbidelman): investigate + remove
# ATM, this is necessary to only add the vulcanized bundle URL when it's actually
# being requested by the browser. There appears to be a bug in GAE s.t. other
# files won't be pushed if one of the URLs is never requested by the browser.
def fixup_for_vulcanize(vulcanize, urls):
"""Replaces element.html URL with a vulcanized version or
elements.vulcanize.html with the unvulcanized version.
Args:
vulcanize: True if the URL should be replaced by the vulcanized version.
urls: A dict of url: priority mappings.
Returns:
An updated dict of URL mappings.
"""
# TODO: don't hardcode adding the vulcanized import bundle.
UNVULCANIZED_FILE = 'elements.html'
VULCANIZED_FILE = 'elements.vulcanize.html'
push_urls = {}
for k,v in urls.iteritems():
url = k
if vulcanize is not None:
if k.endswith(UNVULCANIZED_FILE):
url = k.replace(UNVULCANIZED_FILE, VULCANIZED_FILE)
else:
if k.endswith(VULCANIZED_FILE):
url = k.replace(VULCANIZED_FILE, UNVULCANIZED_FILE)
push_urls[url] = v
return push_urls
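# Illustrative behaviour sketch (hypothetical URLs and priorities):
#   fixup_for_vulcanize(True, {'/elements.html': 1, '/app.js': 1})
# yields {'/elements.vulcanize.html': 1, '/app.js': 1}, while passing
# vulcanize=None swaps '/elements.vulcanize.html' back to '/elements.html'.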
class MainHandler(http2.PushHandler):
def get(self):
vulcanize = self.request.get('vulcanize', None)
# TODO: Remove (see above).
push_urls = self.push_urls
noextras = self.request.get('noextras', None)
if noextras is not None:
push_urls = fixup_for_vulcanize(vulcanize, self.push_urls)
# HTTP2 server push resources?
if self.request.get('nopush', None) is None:
# Send Link: <URL>; rel=preload header.
header = self._generate_link_preload_headers(push_urls)
self.response.headers.add_header('Link', header)
template = JINJA_ENVIRONMENT.get_template('static/index.html')
return self.response.write(template.render({
'vulcanize': vulcanize is not None
}))
# # Example - decorators.
# class MainHandler(http2.PushHandler):
# @http2.push('push_manifest.json')
# def get(self):
# vulcanize = self.request.get('vulcanize', None)
# # TODO: Remove (see above).
# fixup_for_vulcanize(vulcanize, self.push_urls)
# path = os.path.join(os.path.dirname(__file__), 'static/index.html')
# return self.response.write(template.render(path, {
# 'vulcanize': vulcanize is not None
# }))
app = webapp2.WSGIApplication([
('/', MainHandler),
], debug=True)
|
python
|
# Copyright (c) 2012 Qumulo, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import qumulo.lib.request as request
V1_SETTINGS_FIELDS = set((
'assigned_by',
'ip_ranges',
'floating_ip_ranges',
'netmask',
'gateway',
'dns_servers',
'dns_search_domains',
'mtu',
'bonding_mode'
))
@request.request
def get_cluster_network_config(conninfo, credentials):
method = "GET"
uri = "/v1/network/settings"
return request.rest_request(conninfo, credentials, method, uri)
@request.request
def modify_cluster_network_config(conninfo, credentials, **kwargs):
method = "PATCH"
uri = "/v1/network/settings"
config = { }
for key, value in kwargs.items():
assert key in V1_SETTINGS_FIELDS
if value is not None:
config[key] = value
if set(kwargs.keys()) == V1_SETTINGS_FIELDS:
method = "PUT"
return request.rest_request(conninfo, credentials, method, uri, body=config)
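# Design note (descriptive commentary, not from the API docs): the modify_*
# helpers in this module default to PATCH, i.e. a partial update of only the
# supplied fields, and switch to PUT (a full replacement) once every known
# field has been given. A hypothetical partial update could therefore look like
#   modify_cluster_network_config(conninfo, credentials,
#                                 dns_servers=['10.0.0.2'], mtu=1500)
# which sends only those two keys in the request body.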
@request.request
def list_network_status(conninfo, credentials):
method = "GET"
uri = "/v1/network/status/"
return request.rest_request(conninfo, credentials, method, uri)
@request.request
def get_network_status(conninfo, credentials, node):
method = "GET"
uri = "/v1/network/status/{}".format(node)
return request.rest_request(conninfo, credentials, method, uri)
@request.request
def list_interfaces(conninfo, credentials):
method = "GET"
uri = "/v2/network/interfaces/"
return request.rest_request(conninfo, credentials, method, uri)
@request.request
def get_interface(conninfo, credentials, interface_id):
method = "GET"
uri = "/v2/network/interfaces/{}".format(interface_id)
return request.rest_request(conninfo, credentials, method, uri)
@request.request
def list_networks(conninfo, credentials, interface_id):
method = "GET"
uri = "/v2/network/interfaces/{}/networks/".format(interface_id)
return request.rest_request(conninfo, credentials, method, uri)
@request.request
def get_network(conninfo, credentials, interface_id, network_id):
method = "GET"
uri = "/v2/network/interfaces/{}/networks/{}".format(
interface_id, network_id)
return request.rest_request(conninfo, credentials, method, uri)
# Don't allow setting interface ID and name.
V2_INTERFACE_FIELDS = set((
'bonding_mode',
'default_gateway',
'mtu',
))
@request.request
def modify_interface(conninfo, credentials, interface_id, **kwargs):
# Always patch and don't allow setting interface ID and name.
method = "PATCH"
uri = "/v2/network/interfaces/{}".format(interface_id)
config = { }
for key, value in kwargs.items():
assert key in V2_INTERFACE_FIELDS
if value is not None:
config[key] = value
if set(config.keys()) == V2_INTERFACE_FIELDS:
method = "PUT"
return request.rest_request(conninfo, credentials, method, uri, body=config)
V2_NETWORK_FIELDS = set((
'assigned_by',
'ip_ranges',
'floating_ip_ranges',
'netmask',
'dns_servers',
'dns_search_domains',
'mtu',
'vlan_id',
))
@request.request
def modify_network(conninfo, credentials, interface_id, network_id, **kwargs):
method = "PATCH"
uri = "/v2/network/interfaces/{}/networks/{}".format(
interface_id, network_id)
config = {}
for key, value in kwargs.items():
assert key in V2_NETWORK_FIELDS
if value is not None:
config[key] = value
if set(config.keys()) == V2_NETWORK_FIELDS:
method = "PUT"
return request.rest_request(conninfo, credentials, method, uri, body=config)
@request.request
def list_network_status_v2(conninfo, credentials, interface_id):
method = "GET"
uri = "/v2/network/interfaces/{}/status/".format(interface_id)
return request.rest_request(conninfo, credentials, method, uri)
@request.request
def get_network_status_v2(conninfo, credentials, interface_id, node_id):
method = "GET"
uri = "/v2/network/interfaces/{}/status/{}".format(interface_id, node_id)
return request.rest_request(conninfo, credentials, method, uri)
@request.request
def get_static_ip_allocation(conninfo, credentials,
try_ranges=None, try_netmask=None, try_floating_ranges=None):
method = "GET"
uri = "/v1/network/static-ip-allocation"
query_params = []
if try_ranges:
query_params.append("try={}".format(try_ranges))
if try_netmask:
query_params.append("netmask={}".format(try_netmask))
if try_floating_ranges:
query_params.append("floating={}".format(try_floating_ranges))
if query_params:
uri = uri + "?" + "&".join(query_params)
return request.rest_request(conninfo, credentials, method, uri)
@request.request
def get_floating_ip_allocation(conninfo, credentials):
method = "GET"
uri = "/v1/network/floating-ip-allocation"
return request.rest_request(conninfo, credentials, method, uri)
|
python
|
GRAPH_ENDPOINT = 'https://graph.facebook.com/v10.0/'
INSIGHTS_CSV = 'insta_insights.csv'
HTML_FILE = 'index.html'
CSS_FILE = 'style.css'
HTML_TEMPLATE = 'index.html.template'
ID_COL = 'id'
IMPRESSIONS_COL = 'impressions'
ENGAGEMENT_COL = 'engagement'
REACH_COL = 'reach'
TIMESTAMP_COL = 'timestamp'
HOUR_COL = 'hour'
DEFAULT_COLUMNS = [ID_COL, IMPRESSIONS_COL, ENGAGEMENT_COL, REACH_COL, TIMESTAMP_COL, HOUR_COL]
METRICS_COLUMNS = [IMPRESSIONS_COL, ENGAGEMENT_COL, REACH_COL]
|
python
|
"""Module with git related utilities."""
import git
class GitRepoVersionInfo:
"""
Provides application versions information based on the tags and commits in the repo
"""
def __init__(self, path: str):
"""
Create an instance of GitRepoVersionInfo
:param path: The path to search for git information. It searches for '.git' in this folder or any parent
folder.
"""
self._is_repo = False
try:
self._repo = git.Repo(path, search_parent_directories=True)
self._is_repo = True
except git.exc.InvalidGitRepositoryError:
self._repo = None
@property
def is_git_repo(self) -> bool:
"""
Checks if the path given in constructor is a sub-path of a valid git repo.
:return: Boolean true, if repo was found.
"""
return self._is_repo
def get_git_version(self, strip_v_in_version: bool = True) -> str:
"""
Gets application version in the format [last-tag]-[last-commit-sha].
:param strip_v_in_version: If the version tag starts with 'v' (like 'v1.2.3'),
this chooses if the 'v' should be stripped, so the resulting tag is '1.2.3'.
If there's a "-", "." or "_" separator after "v", it is removed as well.
:return: The version string
"""
if not self._is_repo:
raise git.exc.InvalidGitRepositoryError()
tags = sorted(self._repo.tags, key=lambda t: t.commit.committed_date)
latest_tag = None if len(tags) == 0 else tags[-1]
ver = "0.0.0" if latest_tag is None else latest_tag.name
if strip_v_in_version and ver.startswith("v"):
txt_ver = ver.lstrip("v")
txt_ver = txt_ver.lstrip("-_.")
else:
txt_ver = ver
sha = self._repo.head.commit.hexsha
if latest_tag is not None and sha == latest_tag.commit.hexsha:
return txt_ver
return f"{txt_ver}-{sha}"
|
python
|
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.views.generic.list_detail import object_list
from django.views.generic.create_update import *
from app.models import Carrier
def index(request):
return object_list(request, Carrier.all().order('-listed'))
def create_carrier(request):
return create_object(request, Carrier, post_save_redirect='/admin')
def update_carrier(request, id):
return update_object(request, Carrier, object_id=id, post_save_redirect='/admin')
def delete_carrier(request, id):
return delete_object(request, Carrier, object_id=id, post_delete_redirect='/admin')
|
python
|
import tweepy
import requests
import json
import os
import sys
import random
import datetime
from time import sleep
# Import relevant files
import twitter
import retquote as rq
import tweetq as tq
#Insert tweet in database
def insert_tweet(tweet,client):
"""
Inserting tweets in a different collection just for logging purposes
can be skipped if wanted.
"""
db = client.tweetbot
coll = db.tweets
twin = {
"tweetid": tweet.id,
"tweetText": tweet.full_text,
"createdDate": tweet.created_at}
coll.insert_one(twin)
print(f"{rq.current_time()}Successfully inserted in secondary collection!")
#Get quote from API and parse and format it
def getquote():
URL = "https://api.forismatic.com/api/1.0/?method=getQuote&lang=en&format=json"
raw = requests.get(url=URL)
if raw.status_code != 200:
print(f"{rq.current_time()}Cannot get the quote (HTTP {raw.status_code}): {raw.text}\nAPI may be down!")
sleep(120)
return getquote()
try:
quote = json.loads(raw.text.replace("\\",""))
except Exception as e:
print(f"{rq.current_time()}\n{raw.text}\nException:\n{e}\nRetrying again...")
sleep(5)
return getquote()
if quote["quoteText"].strip()=="":
sleep(5)
return getquote()
if quote["quoteAuthor"].strip()=="":
quote["quoteAuthor"] = "Unknown"
author = "–"+quote["quoteAuthor"].strip()
# author= textmanup(author,typem="bold")
tweettopublish=quote["quoteText"].strip()+"\n"+author
print(f"{rq.current_time()}Returned quote from API-:\n{tweettopublish}")
return tweettopublish
# Follow back every user
def follow_followers(api):
"""
Check the followers list and see if the user is being
followed by the bot if not follow the user.
"""
for follower in tweepy.Cursor(api.followers).items():
if not follower.following:
print(f"{rq.current_time()}Following {follower.name}")
follower.follow()
# Make the text bold, italic or bolditalic
def textmanup(input_text,typem="bold"):
"""
Twitter does not support formatting text directly, so to format the text
we replace letters with certain unicode chars that render as bold/italic
glyphs. Note that these unicode chars may increase the length of the text.
"""
chars = "QWERTYUIOPASDFGHJKLZXCVBNMqwertyuiopasdfghjklzxcvbnm0123456789"
bold_chars = "𝗤𝗪𝗘𝗥𝗧𝗬𝗨𝗜𝗢𝗣𝗔𝗦𝗗𝗙𝗚𝗛𝗝𝗞𝗟𝗭𝗫𝗖𝗩𝗕𝗡𝗠𝗾𝘄𝗲𝗿𝘁𝘆𝘂𝗶𝗼𝗽𝗮𝘀𝗱𝗳𝗴𝗵𝗷𝗸𝗹𝘇𝘅𝗰𝘃𝗯𝗻𝗺𝟬𝟭𝟮𝟯𝟰𝟱𝟲𝟳𝟴𝟵"
itlaics_chars ="𝘘𝘞𝘌𝘙𝘛𝘠𝘜𝘐𝘖𝘗𝘈𝘚𝘋𝘍𝘎𝘏𝘑𝘒𝘓𝘡𝘟𝘊𝘝𝘉𝘕𝘔𝘲𝘸𝘦𝘳𝘵𝘺𝘶𝘪𝘰𝘱𝘢𝘴𝘥𝘧𝘨𝘩𝘫𝘬𝘭𝘻𝘹𝘤𝘷𝘣𝘯𝘮0123456789"
bold_italics_chars = "𝙌𝙒𝙀𝙍𝙏𝙔𝙐𝙄𝙊𝙋𝘼𝙎𝘿𝙁𝙂𝙃𝙅𝙆𝙇𝙕𝙓𝘾𝙑𝘽𝙉𝙈𝙦𝙬𝙚𝙧𝙩𝙮𝙪𝙞𝙤𝙥𝙖𝙨𝙙𝙛𝙜𝙝𝙟𝙠𝙡𝙯𝙭𝙘𝙫𝙗𝙣𝙢0123456789"
output = ""
for character in input_text:
if character in chars:
if typem=="bold":
output += bold_chars[chars.index(character)]
elif typem=="italic":
output += itlaics_chars[chars.index(character)]
elif typem=="bolditalic":
output += bold_italics_chars[chars.index(character)]
else:
output += character
return output
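# Illustrative behaviour of textmanup (the exact glyphs depend on the unicode
# tables above): only characters present in `chars` are substituted, so spaces
# and punctuation pass through untouched, e.g.
#   textmanup("Quote 42", typem="bold")    # bold letters and digits
#   textmanup("Quote 42", typem="italic")  # italic letters, digits unchanged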
def main():
"""
Connect with the twitter API.
Connect with the Mongo Atlas Instance when using db for getting quotes.
Load the log files from the replit db when logging is on and platform is replit.
Replit db only stores the log files as they cannot be stored directly
in the filesystem as it is not persistent.
Execute keep_alive for showing twitter profile and displaying logs
The keep_alive function along with Uptime Robot helps keep the replit running.
See https://bit.ly/3h5ZS09 for more details!
If logging is on then redirect the default stdout to a log file,
according to the day and delete any log files older than 14 days.
After tweeting insert the tweets in a secondary collection
in the db and print/log those tweets.
Reset the stdout to default if previously changed.
Update the current day's log/key in replit db and sleep.
"""
try:
api= twitter.create_api()
if os.environ.get("quote_method","db")=="db":
from pymongo import MongoClient
client = MongoClient(os.environ.get("database_uri"))
if os.environ.get("platform_type","local")=="replit":
from keep_alive import keep_alive
keep_alive()
if os.environ.get("logging","off")=="on":
import saveindb as sdb
sdb.load_files()
except Exception as e:
print(f"{rq.current_time()}Exception encountered in connecting with Database or Twitter.Check the credentials again!\n{rq.current_time()}{e}")
sys.exit()
# Keep tweeting every hour until forever
while True:
del_old = False
if os.environ.get("logging","off")=="on":
log_date = datetime.datetime.now(datetime.timezone(datetime.timedelta(hours=5,minutes=30))) # Get date for IST
old_log = f"Logs/log_{(log_date-datetime.timedelta(days=15)).strftime('%d_%m_%y')}.txt"
curr_log = f"Logs/log_{log_date.strftime('%d_%m_%y')}.txt"
sys.stdout = open(curr_log,"a",encoding="utf-8")
if os.path.isfile(old_log):
os.remove(old_log)
if os.environ.get("platform_type","local")=="replit":
sdb.clean_db(old_log)
del_old = True
print(f"\n{rq.current_time()}New tweet session!")
if del_old:
print(f"{rq.current_time()}Removed old log! {old_log}")
try:
follow_followers(api)
except tweepy.TweepError:
pass
try:
if os.environ.get("quote_method","db")=="db":
quote = rq.main(client)
tweet,t2 = tq.tweet_quote(api,quote)
else:
quote = getquote()
tweet,t2 = tq.tweet_dbdown(api,quote)
except Exception as e:
print(f"{rq.current_time()}Problem getting Quote! DB may be down. Using API for Quote.\n{rq.current_time()}{e}")
quote = getquote()
tweet,t2 = tq.tweet_dbdown(api,quote)
try:
if os.environ.get("quote_method","db")=="db":
insert_tweet(tweet,client)
print(f"{rq.current_time()}Tweet Sent-:\nTweetId: {tweet.id}\n{tweet.full_text}")
if t2 is not None:
if os.environ.get("quote_method","db")=="db":
insert_tweet(t2,client)
print(f"{rq.current_time()}2nd Tweet Sent-:\nTweetId: {t2.id}\n{t2.full_text}")
except Exception as e:
print(f"{rq.current_time()}Error inserting in tweet collections!\n{rq.current_time()}{e}")
sleep_time = random.randint(60*52, 60*58)
if os.environ.get("logging","off")=="on":
sys.stdout.flush()
os.close(sys.stdout.fileno())
sys.stdout = sys.__stdout__
if os.environ.get("platform_type","local")=="replit":
sdb.save_file(curr_log)
sleep(sleep_time)
if __name__ == "__main__":
main()
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 11 22:14:51 2021
@author: Allectus
"""
import os
import re
import copy
import pandas as pd
import tkinter as tk
import plotly.io as pio
import plotly.express as px
from tkinter import filedialog
from lxml import etree
#==============================================================================
def parse_asset_file(xmlfile, taglist, convert=True, collapse_diffs=True):
#Parses X4:Foundations asset xml files
#
#xmlfile: file path to desired input asset file
#taglist: XML asset property tag to collect attributes for
#convert: If True attributes will be converted to floats
xtree = etree.parse(xmlfile)
result = {}
for attr in taglist:
attr_element = xtree.find('//' + str(attr))
if attr_element is not None:
attr_path = xtree.getpath(attr_element)
if collapse_diffs:
attr_path = re.sub(r'/diff/(replace|add)', '', attr_path)
if attr_element is None:
attr_dict = {}
else:
attr_dict = {str(attr_path) + '/' + str(k):v for k,v in attr_element.attrib.items()}
if convert:
attr_dict = {k:float(v) for k,v in attr_dict.items()}
else:
attr_dict = {}
result.update(attr_dict)
return(result)
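# Illustrative output shape (hypothetical file name, xpath and values): for a
# shield macro containing <recharge max="1000" rate="25" delay="1.0"/>, calling
#   parse_asset_file('shield_arg_s_standard_01_mk1_macro.xml', ['recharge'])
# would yield something like
#   {'/macros/macro/properties/recharge/max': 1000.0,
#    '/macros/macro/properties/recharge/rate': 25.0,
#    '/macros/macro/properties/recharge/delay': 1.0}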
#------------------------------------------------------------------------------
def export_asset_xml_diff(outfilepath, attributes):
#Exports X4:Foundations asset diff xml files
#
#outfilepath: file path to desired output file
#attributes: dict of xpath:value to be exported in the diff file
outstr = '\n'.join(['<?xml version="1.0" encoding="utf-8"?>',
'<diff>',
' <replace sel="' +
'\n <replace sel="'.join([str(xpath)[:str(xpath).rfind('/') + 1] + '@' +
str(xpath)[str(xpath).rfind('/') + 1:] + '">' +
str(round(val,2)) + '</replace>'
for xpath,val in attributes.items()]),
'</diff>'])
os.makedirs(os.path.dirname(outfilepath), exist_ok=True)
with open(outfilepath, 'w') as outfile:
outfile.write(outstr)
return(True)
#------------------------------------------------------------------------------
def parse_resources(resources, asset_path, file_pattern, taglist):
#Collects and parses relevant X4:Foundations asset files based upon input filters
#
#resources: pd.DataFrame of available unpacked input directories, contains resources root
#asset_path: path to relevant directory for the specific asset, relative to resource root
#file_pattern: regex pattern to id files in asset path to retain
#taglist: tags to extract from the identified input files
loc_resources = copy.deepcopy(resources)
#Find game files
loc_resources['assetdir'] = loc_resources.root.apply(lambda x: os.path.join(x, asset_path))
loc_resources['filelist'] = loc_resources.assetdir.apply(os.listdir)
loc_resources = loc_resources.explode('filelist', ignore_index=True)
#Filter out unwanted files (only keep appropriate xml files)
loc_resources.rename(columns={'filelist':'basefilename'}, inplace=True)
loc_resources['keep'] = loc_resources.basefilename.apply(lambda x: os.path.splitext(x)[1] == '.xml') & loc_resources.basefilename.str.contains(file_pattern)
loc_resources = loc_resources[loc_resources.keep].reset_index(drop=True)
loc_resources = loc_resources.drop('keep', axis=1)
loc_resources['fullpath'] = loc_resources.apply(lambda x: os.path.join(x['assetdir'], x['basefilename']), axis=1)
#Parse the discovered files
loc_resources = pd.concat([loc_resources, pd.DataFrame(list(loc_resources['fullpath'].apply(
lambda x: parse_asset_file(x, taglist=taglist, convert=True, collapse_diffs=True))))], axis=1)
return(loc_resources)
#------------------------------------------------------------------------------
def update_shields(resources, asset_path = 'assets/props/SurfaceElements/macros',
file_pattern=r'^shield.*', taglist = ['recharge']):
#Identifies and modifies X4: Foundations shield files
#
#resources: pd.DataFrame of available unpacked input directories, contains resources root
#asset_path: path to relevant directory for the specific asset, relative to resource root
#file_pattern: regex pattern to id files in asset path to retain
#taglist: tags to extract from the identified input files
shield_resources = parse_resources(resources=resources, asset_path=asset_path,
file_pattern=file_pattern, taglist=taglist)
#capture owner/size/type from filename
shield_metadata = shield_resources.basefilename.str.extract(r'(shield_)(.*)(_)(s|m|l|xl)(_)(.*)(_.*)(mk.)(.*)', expand=True)
shield_metadata = shield_metadata.rename(columns={1:'race', 3:'size', 5:'type', 7:'mk'})
shield_resources = pd.concat([shield_resources, shield_metadata[['race', 'size', 'type', 'mk']]], axis=1)
#colname look up table (to retain xpath in colname so we don't have to reshape to long format)
#gives 'tag_attrib': xpath
modified_cols = {}
cnm_init = {}
for tag in taglist:
colpattern = r'.*(/' + str(tag) + r'/).*'
cnm_init.update({str(tag)+'_'+str(c)[str(c).rfind('/')+1:] :c for c in shield_resources.columns if re.match(colpattern, c)})
vro_results = shield_resources[(shield_resources['source'] == 'vro')].reset_index()
base_results = shield_resources[(shield_resources['source'] == 'base')].reset_index()
modified = pd.merge(vro_results, base_results, how='left', on=['race', 'size', 'type', 'mk'], suffixes=['_vro', '_base'])
#update colname map
cnm = copy.deepcopy(cnm_init)
cnm.update({str(k)+'_base':str(v)+'_base' for k, v in cnm_init.items()})
cnm.update({str(k)+'_vro':str(v)+'_vro' for k, v in cnm_init.items()})
#modify values
max_factors = modified.groupby(['size', 'mk']).apply(lambda x: (x[cnm['recharge_max_vro']] / x[cnm['recharge_max_base']]).mean()).reset_index()
max_factors.rename(columns={0:'max_factor'}, inplace=True)
modified = modified.merge(max_factors, how='left', on=['size', 'mk'])
modified[cnm['recharge_max']] = modified[cnm['recharge_max_base']] * modified['max_factor']
modified.loc[(modified['race'].isin(['kha'])) | (modified[cnm['recharge_max']].isna()), cnm['recharge_max']] = modified[cnm['recharge_max_vro']]
modified_cols.update({'recharge_max': cnm['recharge_max']})
modified[cnm['recharge_delay']] = modified[cnm['recharge_delay_base']] * (3/2)
modified.loc[(modified['race'].isin(['kha'])) | (~modified['size'].isin(['s'])) | (modified[cnm['recharge_delay']].isna()), cnm['recharge_delay']] = modified[cnm['recharge_delay_vro']]
modified_cols.update({'recharge_delay': cnm['recharge_delay']})
recharge_factors = modified.groupby(['size', 'mk']).apply(lambda x: (x[cnm['recharge_rate_vro']] / x[cnm['recharge_rate_base']]).mean()).reset_index()
recharge_factors.rename(columns={0:'recharge_factor'}, inplace=True)
modified = modified.merge(recharge_factors, how='left', on=['size', 'mk'])
modified[cnm['recharge_rate']] = modified[cnm['recharge_rate_base']] * modified['recharge_factor']
modified.loc[modified['size'].isin(['s']), cnm['recharge_rate']] = modified[cnm['recharge_rate_base']] * 0.9
modified.loc[modified['size'].isin(['m']), cnm['recharge_rate']] = modified[cnm['recharge_rate_base']] * modified['recharge_factor'] * 1.25
modified.loc[(modified['race'].isin(['kha'])) | (modified[cnm['recharge_rate']].isna()), cnm['recharge_rate']] = modified[cnm['recharge_rate_vro']]
modified_cols.update({'recharge_rate':cnm['recharge_rate']})
return(modified, modified_cols)
#------------------------------------------------------------------------------
def update_engines(resources, asset_path = 'assets/props/Engines/macros',
file_pattern=r'^engine.*', taglist = ['thrust', 'boost', 'travel']):
#Identifies and modifies X4: Foundations engine files
#
#resources: pd.DataFrame of available unpacked input directories, contains resources root
#asset_path: path to relevant directory for the specific asset, relative to resource root
#file_pattern: regex pattern to id files in asset path to retain
#taglist: tags to extract from the identified input files
engine_resources = parse_resources(resources=resources, asset_path=asset_path,
file_pattern=file_pattern, taglist=taglist)
#capture owner/size/type from filename
engine_metadata = engine_resources.basefilename.str.extract(r'(engine_)(.*)(_)(s|m|l|xl)(_)(.*)(_.*)(mk.)(.*)', expand=True)
engine_metadata = engine_metadata.rename(columns={1:'race', 3:'size', 5:'type', 7:'mk'})
engine_resources = pd.concat([engine_resources, engine_metadata[['race', 'size', 'type', 'mk']]], axis=1)
#colname look up table (to retain xpath in colname so we don't have to reshape to long format)
#gives 'tag_attrib': xpath
modified_cols = {}
cnm_init = {}
for tag in taglist:
colpattern = r'.*(/' + str(tag) + r'/).*'
cnm_init.update({str(tag)+'_'+str(c)[str(c).rfind('/')+1:] :c for c in engine_resources.columns if re.match(colpattern, c)})
#Further filter observations to only those with travel stats (eliminate thrusters etc)
engine_resources = engine_resources[~engine_resources[cnm_init['travel_thrust']].isna()].reset_index(drop=True)
engine_resources['eff_boost_thrust'] = engine_resources[cnm_init['thrust_forward']] * engine_resources[cnm_init['boost_thrust']]
engine_resources['eff_travel_thrust'] = engine_resources[cnm_init['thrust_forward']] * engine_resources[cnm_init['travel_thrust']]
vro_results = engine_resources[(engine_resources['source'] == 'vro')].reset_index()
base_results = engine_resources[(engine_resources['source'] == 'base')].reset_index()
modified = pd.merge(vro_results, base_results, how='left', on=['race', 'size', 'type', 'mk'], suffixes=['_vro', '_base'])
#update colname map
cnm = copy.deepcopy(cnm_init)
cnm.update({str(k)+'_base':str(v)+'_base' for k, v in cnm_init.items()})
cnm.update({str(k)+'_vro':str(v)+'_vro' for k, v in cnm_init.items()})
#modify values
#Calculate average conversion factors for vro <-> base thrust to allow us to normalize new engines
thrust_factors = modified.groupby(['size', 'mk', 'type']).apply(lambda x: (x[cnm['thrust_forward_vro']] / x[cnm['thrust_forward_base']]).mean()).reset_index()
thrust_factors.rename(columns={0:'thrust_factor'}, inplace=True)
modified = modified.merge(thrust_factors, how='left', on=['size', 'mk', 'type'])
attack_factors = modified.groupby(['size', 'mk', 'type']).apply(lambda x: (x[cnm['travel_attack_vro']] / x[cnm['travel_attack_base']]).mean()).reset_index()
attack_factors.rename(columns={0:'attack_factor'}, inplace=True)
modified = modified.merge(attack_factors, how='left', on=['size', 'mk', 'type'])
#Calculate effective normalized thrust values
modified['thrust_forward_pre'] = modified[cnm['thrust_forward_vro']]
modified['boost_thrust_pre'] = modified['eff_boost_thrust_base'] / modified['thrust_forward_pre']
modified.loc[modified['boost_thrust_pre'].isna(), 'boost_thrust_pre'] = modified['eff_boost_thrust_vro'] / ( modified[cnm['thrust_forward_vro']] / modified['thrust_factor'])
modified['travel_thrust_pre'] = modified['eff_travel_thrust_base'] / modified['thrust_forward_pre']
modified.loc[modified['travel_thrust_pre'].isna(), 'travel_thrust_pre'] = modified['eff_travel_thrust_vro'] / ( modified[cnm['thrust_forward_vro']] / modified['thrust_factor'])
modified['eff_boost_thrust_pre'] = modified['thrust_forward_pre'] * modified['boost_thrust_pre']
modified['eff_travel_thrust_pre'] = modified['thrust_forward_pre'] * modified['travel_thrust_pre']
#Create initial boost and travel thrust rankings so we can match them later
modified.sort_values(['size', 'thrust_forward_pre'], inplace=True)
modified['travel_rank'] = modified.groupby('size')['eff_travel_thrust_pre'].rank(axis=1, ascending=False, method='first')
modified['boost_rank'] = modified.groupby('size')['eff_boost_thrust_pre'].rank(axis=1, ascending=False, method='first')
modified = pd.merge(modified, modified, left_on=['size', 'travel_rank'], right_on=['size', 'boost_rank'], suffixes=['_original', '_ranked'] )
#update name mapping
cnm.update({str(k)+'_base_original':str(v)+'_base_original' for k, v in cnm_init.items()})
cnm.update({str(k)+'_base_ranked':str(v)+'_base_ranked' for k, v in cnm_init.items()})
cnm.update({str(k)+'_vro_original':str(v)+'_vro_original' for k, v in cnm_init.items()})
cnm.update({str(k)+'_vro_ranked':str(v)+'_vro_ranked' for k, v in cnm_init.items()})
#-------------------------------------------------------------------------
#calculate final engine params based upon relative base boost and travel rank
modified[cnm['thrust_forward']] = modified[cnm['thrust_forward_vro_original']]
modified_cols.update({'thrust_forward': cnm['thrust_forward']})
modified[cnm['thrust_reverse']] = modified[cnm['thrust_reverse_base_original']] * modified['thrust_factor_original']
modified.loc[modified[cnm['thrust_reverse']].isna(), cnm['thrust_reverse']] = modified[cnm['thrust_reverse_vro_original']]
modified_cols.update({'thrust_reverse': cnm['thrust_reverse']})
modified['eff_boost_thrust'] = modified['eff_boost_thrust_pre_original']
modified[cnm['boost_thrust']] = modified['eff_boost_thrust'] / modified[cnm['thrust_forward']]
modified_cols.update({'boost_thrust': cnm['boost_thrust']})
modified[cnm['boost_duration']] = modified[cnm['boost_duration_base_original']]
modified.loc[modified[cnm['boost_duration']].isna(), cnm['boost_duration']] = modified[cnm['boost_duration_vro_original']] / modified['attack_factor_original']
modified_cols.update({'boost_duration': cnm['boost_duration']})
modified[cnm['boost_attack']] = modified[cnm['boost_attack_base_original']]
modified.loc[modified[cnm['boost_attack']].isna(), cnm['boost_attack']] = modified[cnm['boost_attack_vro_original']] / modified['attack_factor_original']
modified_cols.update({'boost_attack': cnm['boost_attack']})
modified[cnm['boost_release']] = modified[cnm['boost_release_base_original']]
modified.loc[modified[cnm['boost_release']].isna(), cnm['boost_release']] = modified[cnm['boost_release_vro_original']] / modified['attack_factor_original']
modified_cols.update({'boost_release': cnm['boost_release']})
modified.loc[(modified['race_original'].isin(['par'])) & (modified['size'].isin(['l', 'xl'])), cnm['boost_duration']] = modified[cnm['boost_duration']] * 2
modified.loc[(modified['race_original'].isin(['spl'])) & (modified['size'].isin(['l', 'xl'])) , cnm['boost_attack']] = modified[cnm['boost_attack']] * 0.5
modified.loc[(modified['race_original'].isin(['spl'])) & (modified['size'].isin(['l', 'xl'])) , cnm['boost_release']] = modified[cnm['boost_release']] * 0.5
modified.loc[(modified['race_original'].isin(['arg', 'tel'])) & (modified['size'].isin(['l', 'xl'])), cnm['boost_duration']] = modified[cnm['boost_duration']] * 1.33
modified.loc[(modified['race_original'].isin(['arg', 'tel'])) & (modified['size'].isin(['l', 'xl'])) , cnm['boost_attack']] = modified[cnm['boost_attack']] * 0.75
modified.loc[(modified['race_original'].isin(['arg', 'tel'])) & (modified['size'].isin(['l', 'xl'])) , cnm['boost_release']] = modified[cnm['boost_release']] * 0.75
modified['eff_travel_thrust'] = modified['eff_travel_thrust_pre_original']
modified.loc[modified['size'].isin(['s', 'm']), 'eff_travel_thrust'] = modified['eff_boost_thrust_pre_ranked']
modified.loc[modified['size'].isin(['l', 'xl']), 'eff_travel_thrust'] = modified['eff_travel_thrust'] * (5/3)
modified[cnm['travel_thrust']] = modified['eff_travel_thrust'] / modified[cnm['thrust_forward']]
modified_cols.update({'travel_thrust': cnm['travel_thrust']})
modified[cnm['travel_charge']] = modified[cnm['travel_charge_base_original']]
modified.loc[modified[cnm['travel_charge']].isna(), cnm['travel_charge']] = modified[cnm['travel_charge_vro_original']] / modified['attack_factor_original']
modified_cols.update({'travel_charge': cnm['travel_charge']})
modified[cnm['travel_attack']] = modified[cnm['travel_attack_base_original']]
modified.loc[modified[cnm['travel_attack']].isna(), cnm['travel_attack']] = modified[cnm['travel_attack_vro_original']] / modified['attack_factor_original']
modified_cols.update({'travel_attack': cnm['travel_attack']})
modified[cnm['travel_release']] = modified[cnm['travel_release_base_original']]
modified.loc[modified[cnm['travel_release']].isna(), cnm['travel_release']] = modified[cnm['travel_release_vro_original']] / modified['attack_factor_original']
modified_cols.update({'travel_release': cnm['travel_release']})
modified.loc[(modified['race_original'].isin(['ter'])) & (modified['size'].isin(['l', 'xl'])), cnm['travel_charge']] = modified[cnm['travel_charge']] * 0.75
return(modified, modified_cols)
#==============================================================================
if __name__ == "__main__":
pd.options.plotting.backend = "plotly"
pio.renderers.default ='browser'
#Params
#Output (mod) directories
sum_outdir = '.'
mod_shields = True
outdir_shields = 'F:/Steam/steamapps/common/X4 Foundations/extensions/al_shieldmod_vro'
mod_engines = True
outdir_engines = 'F:/Steam/steamapps/common/X4 Foundations/extensions/al_travelmod_vro'
#Unpacked root directories
base_root = 'F:/Games/Mods/x4_extracted'
vro_root = 'F:/Games/Mods/x4_extracted/extensions/vro'
#List of expansions to consider
resource_list = ['base', 'split', 'terran', 'vro_base']
#Hardcoded inputs for convenience
resources = pd.DataFrame(resource_list, columns=['resource'])
resources.loc[resources.resource == 'base', 'root'] = base_root
resources.loc[resources.resource == 'split', 'root'] = 'F:/Games/Mods/x4_extracted/extensions/ego_dlc_split'
resources.loc[resources.resource == 'terran', 'root'] = 'F:/Games/Mods/x4_extracted/extensions/ego_dlc_terran'
resources.loc[resources.resource == 'vro_base', 'root'] = vro_root
#Gather inputs for all expansions interactively if not hardcoded above
root = tk.Tk()
root.withdraw()
for grp in resource_list:
missingvals = resources.loc[resources['resource'] == grp, 'root'].isna()
if any(missingvals) or len(missingvals) == 0:
resources.loc[resources['resource'] == grp, 'root'] = filedialog.askdirectory(title=str(grp) + " dir")
#provide paths to vro expansion files given base game expansions identified in resource_list
for grp in resource_list:
vro_grp = 'vro_' + str(grp)
if (grp not in ['base', 'vro_base']) and (vro_grp not in resources.resource.unique()):
resources = resources.append({'resource':vro_grp,
'root':os.path.join(resources.loc[resources.resource=='vro_base', 'root'].values[0],
'extensions',
os.path.split(resources.loc[resources.resource==grp, 'root'].values[0])[1])},
ignore_index=True)
#Set source base vs vro input metadata
resources['source'] = 'base'
resources.loc[resources.resource.str.contains(r'^vro_.*'), 'source'] = 'vro'
#--------------------------------------------------------------------------
#Modify shield parameters
if mod_shields:
modified_shields, modified_shields_colmap = update_shields(resources=resources)
modified_shields['fullpath_final'] = modified_shields['fullpath_vro'].str.replace(vro_root, outdir_shields)
#Export diff files
modified_shields.apply(lambda x: export_asset_xml_diff(outfilepath = x['fullpath_final'],
attributes = x[modified_shields_colmap.values()].to_dict()),
axis=1)
#Validation
shields_fig = px.scatter(modified_shields,
x=modified_shields_colmap['recharge_delay'],
y=modified_shields_colmap['recharge_rate'],
text='basefilename_vro')
shields_fig.update_traces(textposition='top center')
shields_fig.update_layout(
height=800,
title_text='Recharge delay vs rate'
)
shields_fig.show()
shields_fig.write_image(os.path.join(sum_outdir, 'modified_shields.png'))
modified_shields.to_csv(os.path.join(sum_outdir, 'modified_shields.csv'))
#--------------------------------------------------------------------------
#Modify engine parameters
if mod_engines:
modified_engines, modified_engines_colmap = update_engines(resources=resources)
modified_engines['fullpath_final'] = modified_engines['fullpath_vro_original'].str.replace(vro_root, outdir_engines)
#Export diff files
modified_engines.apply(lambda x: export_asset_xml_diff(outfilepath = x['fullpath_final'],
attributes = x[modified_engines_colmap.values()].to_dict()),
axis=1)
#Validation
engines_fig = px.scatter(modified_engines,
x='eff_boost_thrust',
y='eff_travel_thrust',
text='basefilename_vro_original')
engines_fig.update_traces(textposition='top center')
engines_fig.update_layout(
height=800,
title_text='Boost vs travel thrust'
)
engines_fig.show()
engines_fig.write_image(os.path.join(sum_outdir, 'modified_engines.png'))
engines_fig_s = px.scatter(modified_engines[modified_engines['size'].isin(['s'])],
x='eff_boost_thrust',
y='eff_travel_thrust',
text='basefilename_vro_original')
engines_fig_s.update_traces(textposition='top center')
engines_fig_s.update_layout(
height=800,
title_text='Boost vs travel thrust, S ships'
)
engines_fig_s.show()
engines_fig_s.write_image(os.path.join(sum_outdir, 'modified_engines_s.png'))
modified_engines.to_csv(os.path.join(sum_outdir, 'modified_engines.csv'))
#--------------------------------------------------------------------------
|
python
|
import requests
import json
def heapify(n, i, ll = []): #heapify function for heapsort
smallest = i
left = 2*i+1
right = 2*i+2
if left < n and ll[smallest][1] > ll[left][1]:
smallest = left
if right < n and ll[smallest][1] > ll[right][1]:
smallest = right
if i != smallest:
ll[i], ll[smallest] = ll[smallest], ll[i]
heapify(n, smallest, ll)
def heapsort(ll = []): #heapsort function
n = len(ll)
for i in range(n//2-1, -1, -1):
heapify(n, i, ll)
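# Note on the approach (added commentary): heapify/heapsort maintain a MIN-heap
# keyed on fork count, so ll[0] is always the least-forked repository currently
# kept. The main loop below keeps only the top n repositories by replacing that
# root whenever a repository with more forks appears, and the final
# swap-and-heapify pass leaves the list sorted in descending fork order.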
def call_api(apicall, **kwargs): #recursion function to iterate through all the pages received.
data = kwargs.get('page', [])
resp = requests.get(apicall)
data += resp.json()
if len(data) > 4000: #Increase this number if repository count is more than 4000
return (data)
if 'next' in resp.links.keys():
return (call_api(resp.links['next']['url'], page=data))
return (data)
n, m, c = 6, 4, 0 # n = Top repos of organization, m = Top contributors of a particular repository
organization = 'microsoft' # organization = The organization we want to get result
s1 = 'https://api.github.com/orgs/'
s2 = '/repos?per_page=100'
api_get_repos = s1+organization+s2 #final api_call = https://api.github.com/orgs/:organization/repos?per_page=100
data = call_api(api_get_repos) #call of recursion function to receive the list of all repositories
l = [] #list for storing the result
for xx in data:
if xx["private"] != "false": #omit this statement if we want to list private repository also
x = xx["name"] #name of repository
y = xx["forks"] #forks count of particular repository
if c < n:
l.append((x, y)) #tuple of (repository_name, forks_count)
c += 1
if c == n:
heapsort(l)
elif y > l[0][1]:
l[0] = (x, y)
heapify(c, 0, l)
length_of_repos = len(l)
for xx in range(length_of_repos-1, 0, -1):
l[0], l[xx] = l[xx], l[0]
heapify(xx, 0, l)
print "List of", n, "most popular repositories of", organization, "on the basis of number of forks and their top", m, "contributors are:"
print ""
count = 1
for i in range(0, length_of_repos): #loop to iterate through top repository recieved
s3 = 'https://api.github.com/repos/'
s4 = '/contributors?per_page=100'
top_contributors = s3+organization+'/'+l[i][0]+s4 #final api_call = https://api.github.com/repos/:organization/:repo_name/contributors?per_page=100
print count,"=> Repository_name:", l[i][0]," || Forks_count: ", l[i][1]
count += 1
print(" Top", m, "contributors of repository", l[i][0], "are:")
c = 0
contributors_data = requests.get(top_contributors).json() #call to retrieve list of top m contributors of respective repository
for j in contributors_data: #loop all the top contributors of the particular respository
print(" ", c+1, "->", "Login_name:", j["login"], " || Contributions: ", j["contributions"])
c += 1
if c >= m:
break
print ""
print "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
print ""
|
python
|
class JobError(RuntimeError):
def __init__(self, jobId):
message = "Job Failed: " + jobId
super(JobError, self).__init__(message)
|
python
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import List, Tuple, Union
import torch
from pytext.config import ConfigBase
from pytext.models.module import create_module
from .bilstm_doc_slot_attention import BiLSTMDocSlotAttention
from .jointcnn_rep import JointCNNRepresentation
from .representation_base import RepresentationBase
from .seq_rep import SeqRepresentation
class ContextualIntentSlotRepresentation(RepresentationBase):
"""
Representation for a contextual intent slot model
The inputs are two embeddings: word level embedding containing dictionary features,
sequence (contexts) level embedding. See following diagram for the representation
implementation that combines the two embeddings. Seq_representation is concatenated
with word_embeddings.
::
+-----------+
| word_embed|--------------------------->+ +--------------------+
+-----------+ | | doc_representation |
+-----------+ +-------------------+ |-->+--------------------+
| seq_embed |-->| seq_representation|--->+ | word_representation|
+-----------+ +-------------------+ +--------------------+
joint_representation
"""
class Config(RepresentationBase.Config):
seq_representation: SeqRepresentation.Config = SeqRepresentation.Config()
joint_representation: Union[
BiLSTMDocSlotAttention.Config, JointCNNRepresentation.Config
] = BiLSTMDocSlotAttention.Config()
def __init__(self, config: Config, embed_dim: Tuple[int, ...]) -> None:
super().__init__(config)
assert len(embed_dim) == 2
self.seq_rep = create_module(config.seq_representation, embed_dim=embed_dim[1])
self.seq_representation_dim = self.seq_rep.representation_dim
self.joint_rep = create_module(
config.joint_representation,
embed_dim=embed_dim[0] + self.seq_representation_dim,
)
self.doc_representation_dim = self.joint_rep.doc_representation_dim
self.word_representation_dim = self.joint_rep.word_representation_dim
def forward(
self,
word_seq_embed: Tuple[torch.Tensor, torch.Tensor],
word_lengths: torch.Tensor,
seq_lengths: torch.Tensor,
*args,
) -> List[torch.Tensor]:
# Every batch is sorted in descending order of word_lengths.
# We need to sort seq_lengths and seq_embed first before passing
# to seq_rep, then unsort the output of seq_rep so it aligns with batch order
(word_embed, seq_embed) = word_seq_embed
# sort seq_lengths and seq_embed
seq_lengths, sort_idx = torch.sort(seq_lengths, descending=True)
_, unsort_idx = torch.sort(sort_idx)
seq_embed = seq_embed[sort_idx]
seq_rep = self.seq_rep(embedded_seqs=seq_embed, seq_lengths=seq_lengths)
# unsort seq_out
seq_out = seq_rep[0][unsort_idx]
bsz, max_seq_len, dim = word_embed.size()
seq_rep_expand = seq_out.view(bsz, 1, -1).expand(-1, max_seq_len, -1)
new_embed = torch.cat([seq_rep_expand, word_embed], 2)
return self.joint_rep(new_embed, word_lengths)
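# Added note on the sort/unsort pattern used in forward() above: packed-sequence
# LSTMs expect batches ordered by descending length, and `unsort_idx` (obtained
# by sorting `sort_idx`) is the inverse permutation that restores the original
# batch order. A small illustration with made-up lengths:
#   lengths = torch.tensor([2, 5, 3])
#   sorted_len, sort_idx = torch.sort(lengths, descending=True)  # [5, 3, 2]
#   _, unsort_idx = torch.sort(sort_idx)
#   assert torch.equal(sorted_len[unsort_idx], lengths)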
|
python
|
from collections import namedtuple, defaultdict, Counter
import dbm
from datetime import datetime, timedelta
import json
from wit import Wit
from darksky import forecast
from apis.utils import load_parameter
# time is a unix timestamp
CacheKey = namedtuple('CacheKey', ['lat', 'lon', 'time', 'granularity'])
def cache_key_to_string(ck):
return ','.join(map(str, ck))
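# Illustrative cache-key layout (made-up coordinates and timestamp): a cached
# hourly block would be keyed as
#   CacheKey(lat='40.7', lon='-74.0', time=1609459200, granularity='hour')
# and cache_key_to_string(...) renders that as '40.7,-74.0,1609459200,hour',
# which would also suit a string-keyed store such as dbm if the TODO below about
# persisting the cache is ever implemented.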
class WeatherProvider():
def __init__(self, darksky_key, wit_key):
self.darksky_key = darksky_key
self.wit_key = wit_key
self.wit_client = Wit(self.wit_key)
# Create new database
# TODO: persist cache to and from file OR make this a generic class
# that this can extend
self.cache = {}
def _cache_forecast(self, cache, forecast_result):
# Push current forecast into both hourly and daily forecasts
current = forecast_result.currently
ck = CacheKey(str(forecast_result.latitude), str(
forecast_result.longitude), current.time, 'minute')
cache[ck] = json.dumps(current._data)
ck = CacheKey(str(forecast_result.latitude), str(
forecast_result.longitude), current.time, 'hour')
cache[ck] = json.dumps(current._data)
ck = CacheKey(str(forecast_result.latitude), str(
forecast_result.longitude), current.time, 'day')
cache[ck] = json.dumps(current._data)
if hasattr(forecast_result, 'minutely'):
# include minutely forecast because people ask things like "what
# will the weather be in an hour?"
for minute in forecast_result.minutely:
ck = CacheKey(str(forecast_result.latitude), str(
forecast_result.longitude), minute.time, 'minute')
cache[ck] = json.dumps(minute._data)
for hour in forecast_result.hourly:
ck = CacheKey(str(forecast_result.latitude), str(
forecast_result.longitude), hour.time, 'hour')
cache[ck] = json.dumps(hour._data)
for day in forecast_result.daily:
ck = CacheKey(str(forecast_result.latitude), str(
forecast_result.longitude), day.time, 'day')
cache[ck] = json.dumps(day._data)
def transform_forecast(self, report, units='us'):
'''
Put data in a format ready to be displayed to the user. E.g. 13 -> 13°F, and 0.123 -> 10%
Everything should be logged in a consistent system of units
Takes a python object or DataPoint object
Returns a list of {name, value} objects
'''
try: # convert DataPoint to python object
report = report._data
except AttributeError:
pass
# TODO: units should be a user setting. https://xkcd.com/2292/
if units not in ('us', 'metric'):
raise ValueError('Unrecognized units', units)
# Map machine-readable summary to human-readable summary
icon_converter = defaultdict(lambda: 'unknown',
{
'clear-day': 'clear',
'clear-night': 'clear',
'rain': 'rain',
'snow': 'snow',
'sleet': 'sleet',
'wind': 'wind',
'fog': 'fog',
'cloudy': 'cloudy',
'partly-cloudy-day': 'partly cloudy',
'partly-cloudy-night': 'partly cloudy',
})
relevant_keys = [
{'name': 'icon', 'transform': lambda icon: icon_converter[icon]},
{'name': 'temperature', 'transform_us': '{:.0f}°F'.format,
'transform_metric': lambda t: '{:.0f}°C'.format((t - 32) * 5 / 9)},
{'name': 'temperatureLow', 'transform_us': '{:.0f}°F'.format,
'transform_metric': lambda t: '{:.0f}°C'.format((t - 32) * 5 / 9)},
{'name': 'temperatureHigh', 'transform_us': '{:.0f}°F'.format,
'transform_metric': lambda t: '{:.0f}°C'.format((t - 32) * 5 / 9)},
{'name': 'precipProbability',
'transform': lambda p: '{:.0%}'.format(round(p, 1))},
{'name': 'precipAccumulation', 'transform_us': '{:.1f} in'.format,
'transform_metric': lambda p: '{:.1f} cm'.format(p * 2.54)},
{'name': 'precipTotal', 'transform_us': '{:.1f} in'.format,
'transform_metric': lambda p: '{:.1f} cm'.format(p * 2.54)},
{'name': 'precipIntensity',
'transform_us': lambda p: '{:.2f} in/hr'.format(float(p)),
'transform_metric': lambda p: '{:.1f} cm/hr'.format(p * 2.54)},
{'name': 'precipType'},
{'name': 'humidity',
'transform': lambda h: '{:.0%}'.format(round(h, 1))},
{'name': 'windSpeed', 'transform_us': '{:.1f} mph'.format,
'transform_metric': lambda s: '{:.1f} km/h'.format(s * 1.61)},
{'name': 'uvIndex'},
{'name': 'visibility', 'transform_us': '{:.1f} mi'.format,
'transform_metric': lambda d: '{:.1f} km'.format(d * 1.61)},
]
# transform strings
result = []
for key in relevant_keys:
if key['name'] in report:
key_name = key['name']
if 'transform' in key:
result.append({
'name': key_name,
'value': key['transform'](report[key_name])
})
elif 'transform_us' in key and units == 'us':
result.append({
'name': key_name,
'value': key['transform_us'](report[key_name])
})
elif 'transform_metric' in key and units == 'metric':
result.append({
'name': key_name,
'value': key['transform_metric'](report[key_name])
})
else:
result.append({
'name': key_name,
'value': report[key_name]
})
return result
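# Illustrative input/output sketch for transform_forecast (hypothetical report
# values, not real API output): a report such as
#   {'icon': 'rain', 'temperature': 52.6, 'precipProbability': 0.34}
# would, with units='us', come back as
#   [{'name': 'icon', 'value': 'rain'},
#    {'name': 'temperature', 'value': '53°F'},
#    {'name': 'precipProbability', 'value': '30%'}]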
def current_weather(self, lat, lon):
now = datetime.now()
forecast_result = forecast(self.darksky_key, lat, lon)
current_report = forecast_result.currently._data # dicts
daily_report = forecast_result.daily[0]._data
daily_report.update(current_report)
return self.transform_forecast(daily_report)
def aggregate_forecast(self, fc_range):
'''
fc_range - a list of darksky.data.DataPoint objects or of dicts
returns - a dict
'''
def most_frequent(fc_range, key):
'''return the most frequent value of fc_range[i][key] for all i'''
c = Counter(
block[key] for block in fc_range if hasattr(
block, key)).most_common(1)
if c:
return c[0][0]
else:
return None
def get_mean(fc_range, key):
arr = [getattr(block, key, 0) for block in fc_range]
if not arr:
return None
return sum(arr) / len(arr)
result = {
'icon': most_frequent(fc_range, 'icon'),
'temperatureHigh': max(max(getattr(block, 'temperatureHigh', -1000), getattr(block, 'temperature', -1000)) for block in fc_range),
'temperatureLow': min(min(getattr(block, 'temperatureLow', 1000), getattr(block, 'temperature', 1000)) for block in fc_range),
# this isn't actually how probability works, but good enough
'precipProbability': max(block['precipProbability'] for block in fc_range),
'precipAccumulation': sum(getattr(block, 'precipAccumulation', 0) for block in fc_range),
'precipTotal': sum(getattr(block, 'precipIntensity', 0) for block in fc_range) / float(len(fc_range)) * (fc_range[-1].time - fc_range[0].time) / 60 / 60,
'precipType': most_frequent(fc_range, 'precipType'),
'humidity': get_mean(fc_range, 'humidity'),
'windSpeed': get_mean(fc_range, 'windSpeed'),
'visibility': get_mean(fc_range, 'visibility'),
'uvIndex': get_mean(fc_range, 'uvIndex')
}
# Delete keys with unset values
result = {k: v for k, v in result.items() if v is not None}
return result
def weather_at_time(self, lat, lon, time_query):
'''
Fetch the weather at a specified location and time (between now and 7 days from now).
The future limit is because Darksky's forecast only extends that far.
It doesn't support times in the past because 1) it's not a common use case, and 2) implementing it for ranges of >1 day (e.g. last week) would require making multiple requests and more code
If desired, look at the time='whatever' parameter in darkskylib
lat, lon - floats
time_query - a string with a relative time. Something like 'tonight at 8' is fine
note: you should ensure that wit.ai app's timezone is set to GMT
darksky returns forecasts in local times without a timezone
'''
# find date/time with wit.ai and duckling
response = self.wit_client.message(time_query)['entities']
if not response['datetime']:
raise ValueError('could not parse time, <{}>'.format(time_query))
else:
# assume that time_query only contains one datetime object, but
# duckling is pretty good about handling ranges
time_instance = response['datetime'][0]
# parse a wit.ai datetime object, which should look like:
# {
# 'type': 'value',
# 'grain': 'second' | minute' | 'hour' | 'day' | 'week' | 'year'
# 'value': '2020-01-01T12:30:00.000+00:00'
# }
# OR an interval containing a start and end that each look like the
# above
is_interval = False
if time_instance['type'] == 'interval':
# for queries like "this weekend", the result will be a range.
# just use the start time.
start_dt = datetime.strptime(
time_instance['from']['value'],
"%Y-%m-%dT%H:%M:%S.%f+00:00")
end_dt = datetime.strptime(
time_instance['to']['value'],
"%Y-%m-%dT%H:%M:%S.%f+00:00")
grain = time_instance['from']['grain']
is_interval = True
elif time_instance['type'] == 'value':
start_dt = datetime.strptime(
time_instance['value'],
"%Y-%m-%dT%H:%M:%S.%f+00:00")
grain = time_instance['grain']
if grain == 'week':
end_dt = start_dt + timedelta(days=7)
is_interval = True
else:
raise Exception('unrecognized type', time_instance['type'])
# Get the full forecast that corresponds to the time granularity
fc = forecast(self.darksky_key, lat, lon, extend='hourly')
if grain == 'second' or grain == 'minute':
try:
# minutely forecasts are only available in some parts of the
# world. test if they work
fc_range = fc.minutely
# they also don't always include temperature, so test this too
_temperature_test = fc.minutely[0].temperature
except AttributeError:
fc_range = fc.hourly
grain = 'hour'
elif grain == 'hour':
fc_range = fc.hourly
elif grain == 'day' or grain == 'week' or grain == 'month' or grain == 'year':
fc_range = fc.daily
try:
summary = fc_range.summary
except AttributeError:
summary = fc_range[0].summary
# if we parsed an interval, create an aggregate forecast
if is_interval:
# trim the ends of the range. note: darksky only provides hourly
# forecasts for the next 48 hours, and daily forecasts for the next
# week
fc_filtered_range = [block for block in fc_range if start_dt.timestamp(
) <= block.time <= end_dt.timestamp()]
aggregate_fc = self.aggregate_forecast(fc_filtered_range)
transformed_forecast = self.transform_forecast(aggregate_fc)
return transformed_forecast + [
{'name': 'summary', 'value': summary},
{'name': 'start_date',
'value': start_dt.strftime('%a, %b %d')},
{'name': 'start_time', 'value': start_dt.strftime('%H:%M')},
{'name': 'end_date', 'value': end_dt.strftime('%a, %b %d')},
{'name': 'end_time', 'value': end_dt.strftime('%H:%M')},
{'name': 'grain', 'value': grain},
]
else:
# return the forecast for that time
transformed_forecast = self.transform_forecast(fc.currently)
return transformed_forecast + [
{'name': 'summary', 'value': summary},
{'name': 'date', 'value': start_dt.strftime('%a, %b %d')},
{'name': 'time', 'value': start_dt.strftime('%H:%M')},
{'name': 'grain', 'value': grain},
]
def rate_satisfaction(self):
return []
def __repr__(self):
'''For executable logs to work, `repr` must generate an executable initialization of this class'''
return 'WeatherProvider(keys["darksky"], keys["wit_date"])'
|
python
|
#!/usr/bin/env python3
# Copyright 2019 Christian Henning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
@title :test_train.py
@author :ch
@contact :[email protected]
@created :08/13/2019
@version :1.0
@python_version :3.6.8
The major goal of these test cases is to ensure that the performance of the
toy regression does not change while this repo is under development.
"""
# Do not delete the following import for all executable scripts!
import __init__ # pylint: disable=unused-import
import unittest
import sys
import tempfile
import os
import shutil
import contextlib
import time
from toy_example import train
#from tests. test_utils import nostdout
from tests.test_utils import unittest_verbosity
class TrainTestCase(unittest.TestCase):
def setUp(self):
pass # Nothing to setup.
def test_cl_hnet_setup(self):
"""This method tests whether the CL capabilities of the 3 polynomials
toy regression remain as reported in the readme of the corresponding
folder."""
verbosity_level = unittest_verbosity()
targets = [0.004187723621726036, 0.002387890825048089,
0.006071540527045727]
# Without timestamp, test would get stuck/fail if someone mistakenly
# starts the test case twice.
timestamp = int(time.time() * 1000)
out_dir = os.path.join(tempfile.gettempdir(),
'test_cl_hnet_setup_%d' % timestamp)
my_argv = ['foo', '--no_plots', '--no_cuda', '--beta=0.005',
'--emb_size=2', '--n_iter=4001', '--lr_hyper=1e-2',
'--data_random_seed=42', '--out_dir=%s' % out_dir]
sys.argv = list(my_argv)
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
if verbosity_level == 2:
fmse, _, _ = train.run()
else:
#with nostdout():
with open(os.devnull, 'w') as devnull:
with contextlib.redirect_stdout(devnull):
fmse, _, _ = train.run()
shutil.rmtree(out_dir)
self.assertEqual(len(fmse), len(targets))
for i in range(len(fmse)):
self.assertAlmostEqual(fmse[i], targets[i], places=3)
def tearDown(self):
pass # Nothing to clean up.
if __name__ == '__main__':
unittest.main()
|
python
|
"""manage
BaoAI Backend Main File
PROJECT: BaoAI Backend
VERSION: 2.0.0
AUTHOR: henry <[email protected]>
WEBSITE: http://www.baoai.co
COPYRIGHT: Copyright © 2016-2020 广州源宝网络有限公司 Guangzhou Yuanbao Network Co., Ltd. ( http://www.ybao.org )
LICENSE: Apache-2.0
"""
import os
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager, Shell, Server, Command
from flask_script.commands import Clean, ShowUrls
from app import create_app, db
# app,celery = create_app()
app = create_app()
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
from app.modules.admin.model import Admin
return dict(app=app, db=db, Admin=Admin)
# Get BaoAI version and URL # 获取BaoAI版本及官方URL
@manager.command
def baoai():
print('BaoAI v2.0.0 - http://www.baoai.co')
manager.add_command("runserver", Server(host='0.0.0.0', port=5000))
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command("db", MigrateCommand) # Database Manage # 数据库管理
manager.add_command("clean", Clean()) # Clean Cache File # 清理缓存文件
manager.add_command("url", ShowUrls()) # Print All URL # 打印所有URL
if __name__ == "__main__":
manager.run()
|
python
|
r"""
================================================
CLPT (:mod:`compmech.stiffener.models`)
================================================
.. currentmodule:: compmech.stiffener.models
"""
module_names = [
'bladestiff1d_clt_donnell_bardell',
'bladestiff2d_clt_donnell_bardell',
'tstiff2d_clt_donnell_bardell',
]
for module_name in module_names:
exec('from . import {0}'.format(module_name))
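# Equivalent exec-free form of the loop above (illustrative sketch):
#   import importlib
#   for module_name in module_names:
#       globals()[module_name] = importlib.import_module(
#           '.' + module_name, package=__package__)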
|
python
|
# Copyright (c) 2019 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..types import XMLBase
from .name import Name
from .sdf import SDF
from .description import Description
from .author import Author
from .version import Version
class Model(XMLBase):
_NAME = 'model'
_TYPE = 'sdf_config'
_CHILDREN_CREATORS = dict(
name=dict(creator=Name, default=['model']),
sdf=dict(creator=SDF, default=['1.5'], n_elems='+', optional=True),
description=dict(creator=Description, default=['none']),
author=dict(creator=Author, n_elems='+', optional=True),
version=dict(creator=Version, default=['0.1.0'], optional=True)
)
def __init__(self):
super(Model, self).__init__()
self.reset()
@property
def name(self):
return self._get_child_element('name')
@name.setter
def name(self, value):
self._add_child_element('name', value)
@property
def version(self):
return self._get_child_element('version')
@version.setter
def version(self, value):
self._add_child_element('version', value)
@property
def description(self):
return self._get_child_element('description')
@description.setter
def description(self, value):
self._add_child_element('description', value)
@property
def sdfs(self):
return self._get_child_element('sdf')
@property
def authors(self):
return self._get_child_element('author')
def add_sdf(self, sdf=None):
if sdf is not None:
self._add_child_element('sdf', sdf)
else:
sdf = SDF()
self._add_child_element('sdf', sdf)
def add_author(self, author=None):
if author is not None:
self._add_child_element('author', author)
else:
author = Author()
self._add_child_element('author', author)
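# Illustrative usage sketch (not part of the original module); it only exercises the
# accessors defined above, and model.config serialization is left to XMLBase:
#
#   model = Model()
#   model.name = 'my_model'              # hypothetical model name
#   model.version = '1.0.0'
#   model.description = 'A short description for the model.config file'
#   model.add_sdf()                      # appends a default <sdf> element (version 1.5)
#   model.add_author()                   # appends a default <author> element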
|
python
|
"""This module contains the general information for SuggestedStorageControllerSecurityKey ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class SuggestedStorageControllerSecurityKeyConsts:
pass
class SuggestedStorageControllerSecurityKey(ManagedObject):
"""This is SuggestedStorageControllerSecurityKey class."""
consts = SuggestedStorageControllerSecurityKeyConsts()
naming_props = set([])
mo_meta = {
"classic": MoMeta("SuggestedStorageControllerSecurityKey", "suggestedStorageControllerSecurityKey", "suggested-sec-key", VersionMeta.Version209c, "OutputOnly", 0xf, [], ["admin", "read-only", "user"], [u'storageController'], [], ["Get"]),
"modular": MoMeta("SuggestedStorageControllerSecurityKey", "suggestedStorageControllerSecurityKey", "suggested-sec-key", VersionMeta.Version303a, "OutputOnly", 0xf, [], ["admin", "read-only", "user"], [u'storageController'], [], ["Get"])
}
prop_meta = {
"classic": {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version209c, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version209c, MoPropertyMeta.READ_ONLY, 0x2, 0, 255, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version209c, MoPropertyMeta.READ_ONLY, 0x4, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version209c, MoPropertyMeta.READ_ONLY, 0x8, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"suggested_security_key": MoPropertyMeta("suggested_security_key", "suggestedSecurityKey", "string", VersionMeta.Version209c, MoPropertyMeta.READ_ONLY, None, 1, 33, None, [], []),
},
"modular": {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version303a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, 0x2, 0, 255, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, 0x4, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, 0x8, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"suggested_security_key": MoPropertyMeta("suggested_security_key", "suggestedSecurityKey", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, None, 1, 33, None, [], []),
},
}
prop_map = {
"classic": {
"childAction": "child_action",
"dn": "dn",
"rn": "rn",
"status": "status",
"suggestedSecurityKey": "suggested_security_key",
},
"modular": {
"childAction": "child_action",
"dn": "dn",
"rn": "rn",
"status": "status",
"suggestedSecurityKey": "suggested_security_key",
},
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.status = None
self.suggested_security_key = None
ManagedObject.__init__(self, "SuggestedStorageControllerSecurityKey", parent_mo_or_dn, **kwargs)
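# Illustrative retrieval sketch (not part of the generated module). It assumes the
# standard imcsdk handle API; endpoint address and credentials are placeholders:
#   from imcsdk.imchandle import ImcHandle
#   handle = ImcHandle("192.0.2.10", "admin", "password")
#   handle.login()
#   for mo in handle.query_classid("suggestedStorageControllerSecurityKey"):
#       print(mo.dn, mo.suggested_security_key)
#   handle.logout()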
|
python
|
from django.shortcuts import render, render_to_response, redirect
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect, HttpResponse
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.core.context_processors import csrf
from .forms import UserForm, DocForm, DocumentSearchForm, DocumentForm  # DocumentForm assumed to be defined in .forms; course_form below needs it
from .models import Document, Course
from haystack.query import SearchQuerySet
def user_login(request):
context = RequestContext(request)
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user:
if user.is_active:
login(request,user)
return HttpResponseRedirect('/main/')
else:
return HttpResponse("Your account is disabled.")
else:
print "Invalid login details: {0}, {1}".format(username,password)
return HttpResponse("Invalid login details supplied.")
else:
return render_to_response('login.html',{}, context)
def register(request):
if request.method == "POST":
form = UserForm(request.POST)
if form.is_valid():
new_user = User.objects.create_user(**form.cleaned_data)
return HttpResponseRedirect('/main/thank_you/')
else:
form = UserForm()
return render(request, 'register.html', {'form': form})
def thank_you(request):
return render(request, 'thank_you.html')
def main(request):
if request.user.is_authenticated():
return render(request,'logged_in.html')
return render(request, 'main.html')
def logout_view(request):
logout(request)
return render(request, 'logged_out.html')
def course_form(request):
if request.method == 'POST':
document_form = DocumentForm(request.POST, request.FILES)
if document_form.is_valid():
newdoc = Document(file = request.FILES['file'],
document_subject = request.POST['document_subject'],
)
newdoc.save()
return HttpResponseRedirect('/main/thank_you/')
else:
document_form = DocumentForm()
return render(request,'course.html', {'document_form':document_form})
def doc_form(request):
if request.method == 'POST':
doc_form = DocForm(request.POST,request.FILES)
if doc_form.is_valid():
newdoc = Document.objects.create(**doc_form.cleaned_data)
newdoc.save()
return HttpResponseRedirect('/main/thank_you/')
else:
doc_form = DocForm()
return render(request,'doc_form.html',{'doc_form':doc_form})
def search(request):
documents = SearchQuerySet().filter(content_auto=request.POST.get('search_text', ''))
return render_to_response('ajax_search.html', {'documents' : documents})
def document(request, document_id=1):
return render(request, 'document.html',
{'document': Document.objects.get(id=document_id) })
def more_info(request):
return render(request,'more_info.html')
|
python
|
#
## https://leetcode.com/problems/palindrome-number/
#
## -2147483648 <= x <= 2147483647
#
class Solution(object):
    def isPalindrome(self, x):
        """
        :type x: int
        :rtype: bool
        """
        # Negative numbers are never palindromes (the leading '-' has no mirror).
        if x < 0:
            return False
        # Compare x with its digit-reversed value. The 32-bit bound check mirrors
        # the stated constraint; in Python it is redundant, since an out-of-range
        # reversal can never equal x anyway.
        reversed_x = int(str(x)[::-1])
        return reversed_x == x and reversed_x <= 2147483647
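# Minimal sanity check (illustrative, not part of the LeetCode submission):
if __name__ == '__main__':
    s = Solution()
    assert s.isPalindrome(121) is True
    assert s.isPalindrome(-121) is False   # the leading '-' breaks the mirror
    assert s.isPalindrome(10) is False     # trailing zero cannot be mirrored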
|
python
|
from socket import *
def main():
    # Host and port for the echo server
    host = ""
    port = 5000
    # Create a UDP socket and bind it to the chosen port
    server = socket(AF_INET, SOCK_DGRAM)
    server.bind((host, port))
    # Indicate that the server has started
    print("Server started")
    # Server loop
    while True:
        # Receive the data and the address of the sender
        data, endereço = server.recvfrom(1024)
        # Print the connection information
        print("Message received from", str(endereço))
        print("Received from the client:", str(data))
        # Echo the message back to the sender
        resposta = "Echo=> " + data.decode()
        server.sendto(resposta.encode(), endereço)
    # Never reached: the loop above runs forever
    server.close()
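# Illustrative companion client (a sketch, not part of the original file): sends a
# single datagram to the server above on localhost:5000 and prints the echoed reply.
def example_client(message="hello"):
    client = socket(AF_INET, SOCK_DGRAM)
    client.settimeout(2.0)
    client.sendto(message.encode(), ("127.0.0.1", 5000))
    reply, _ = client.recvfrom(1024)
    print("Reply from server:", reply.decode())
    client.close()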
if __name__ == '__main__':
main()
|