import boto3
from aws_cdk import (
core as cdk,
aws_events as events,
aws_events_targets as events_targets,
aws_glue as glue,
aws_iam as iam,
aws_lambda as lmb,
aws_lambda_python as lambda_python,
aws_logs as logs,
aws_s3 as s3,
aws_s3_notifications as s3_notifications,
aws_kinesisfirehose as kinesisfirehose,
custom_resources
)
from os import path
class SecurityHub(cdk.Construct):
"""Security Hub Contruct designed to act like an L2 CDK Construct"""
def __init__(self, scope: cdk.Construct, identifier: str):
super().__init__(scope, identifier)
self.this_dir = path.dirname(__file__)
enable_disable_function = lmb.Function(self, 'EnableSHFunction',
code=lmb.Code.from_asset(path.join(self.this_dir,
'../assets/lambdas/enable_security_hub_resource')),
handler='index.handler',
runtime=lmb.Runtime.PYTHON_3_8)
enable_disable_function.add_to_role_policy(iam.PolicyStatement(
effect=iam.Effect.ALLOW,
actions=[
'securityhub:EnableSecurityHub',
'securityhub:DisableSecurityHub'
],
resources=['*']
))
enable_provider = custom_resources.Provider(self, 'EnableSHProvider',
on_event_handler=enable_disable_function,
log_retention=logs.RetentionDays.ONE_DAY)
cdk.CustomResource(self, 'EnableSH',
service_token=enable_provider.service_token,
removal_policy=cdk.RemovalPolicy.RETAIN)
self.__enabled = True
@property
def is_enabled(self):
return self.__enabled
def stream_raw_findings_to_s3(self,
bucket_name: str,
bucket_arn: str,
bucket_region=None,
raw_prefix='raw/firehose'):
if bucket_region is None:
bucket_region = cdk.Aws.REGION
target_bucket = s3.Bucket.from_bucket_attributes(self, 'TargetBucket',
bucket_name=bucket_name,
bucket_arn=bucket_arn,
region=bucket_region
)
role = iam.Role(self, 'DeliveryRole',
assumed_by=iam.ServicePrincipal('firehose.amazonaws.com'))
target_bucket.grant_read_write(role)
delivery_stream = kinesisfirehose.CfnDeliveryStream(self, 'SHDeliveryStream',
delivery_stream_type='DirectPut',
extended_s3_destination_configuration=kinesisfirehose.CfnDeliveryStream.ExtendedS3DestinationConfigurationProperty(
role_arn=role.role_arn,
bucket_arn=target_bucket.bucket_arn,
buffering_hints=kinesisfirehose.CfnDeliveryStream.BufferingHintsProperty(
interval_in_seconds=900,
size_in_m_bs=128
),
compression_format='UNCOMPRESSED',
prefix=raw_prefix
))
stream_rule = events.Rule(self, 'StreamFromKinesisToS3',
event_pattern=events.EventPattern(
source=['aws.securityhub'],
detail_type=['Security Hub Findings - Imported'],
))
target = events_targets.KinesisFirehoseStream(
stream=delivery_stream,
)
stream_rule.add_target(target)
def enable_import_findings_for_product(self, product_arn):
enable_disable_function = lmb.Function(self, 'EnableSHImportFunction',
code=lmb.Code.from_asset(path.join(self.this_dir,
'../assets/lambdas/enable_import_prowler_findings')),
handler='index.handler',
runtime=lmb.Runtime.PYTHON_3_8)
enable_provider = custom_resources.Provider(self, 'EnableSHImportProvider',
on_event_handler=enable_disable_function,
log_retention=logs.RetentionDays.ONE_DAY)
cdk.CustomResource(self, 'EnableSHImport',
service_token=enable_provider.service_token,
properties={
'product_arn': product_arn
},
removal_policy=cdk.RemovalPolicy.RETAIN)
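# Usage sketch (stack and bucket names are placeholders, not from this file):
# instantiate the construct inside a CDK stack and stream raw findings to an
# existing S3 bucket.
class ExampleStack(cdk.Stack):
    def __init__(self, scope, identifier, **kwargs):
        super().__init__(scope, identifier, **kwargs)
        hub = SecurityHub(self, 'SecurityHub')
        if hub.is_enabled:
            hub.stream_raw_findings_to_s3(
                bucket_name='example-findings-bucket',
                bucket_arn='arn:aws:s3:::example-findings-bucket')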
|
python
|
import matplotlib.pyplot as plt
from typing import List, Tuple, Union
from mathplotlib.base import BaseElement, Curve2D
from mathplotlib.style import Style
from mathplotlib.utils import update_with_default
class Text(BaseElement):
"""
Draws a bit of text
"""
on_curve_params = dict(
horizontal_alignment="center",
backgroundcolor=None,
outlined=True,
strokecolor="white",
strokewidth=5,
)
def __init__(
self,
x: float,
y: float,
text: str,
size: int = 12,
rotation: int = 0,
horizontal_alignment: str = "center",
vertical_alignment: str = "center",
**kwargs,
):
super().__init__("Text", nolegend=True)
self.x, self.y = x, y
self.size, self.rotation = size, rotation
self.horizontal_alignment = horizontal_alignment
self.vertical_alignment = vertical_alignment
self.text = text
self.style = Style(**kwargs)
def __repr__(self) -> str:
return f"Text @ ({self.x:.2f}, {self.y:.2f})"
def draw(self, ax: plt.Axes):
text_actor = ax.text(
self.x,
self.y,
self.text,
size=self.size,
rotation=self.rotation,
ha=self.horizontal_alignment,
va=self.vertical_alignment,
color=self.style.textcolor,
alpha=self.style.alpha,
bbox=dict(
pad=2,
color=self.style.backgroundcolor,
joinstyle="round",
alpha=0.95,
),
fontweight=self.style.fontweight,
zorder=self.style.zorder,
)
if self.style.backgroundcolor is None:
text_actor.set_bbox(dict(alpha=0))
if self.style.outlined:
self.outline(text_actor, lw=2)
@classmethod
def on_curve(cls, curve: Curve2D, text: str, at: float = 1, **kwargs):
"""
Adds a text on a curve at a given X position along it
"""
try:
y_func = curve.y_func # type: ignore
except AttributeError:
raise AttributeError(
                f'The curve object {curve} has no "y_func" method, cannot reconstruct text position'
)
# compute the angle of the curve at the point
curve_angle = curve.angle_at_point(at)
rotation = kwargs.pop("rotation", curve_angle)
# get the color based on the curve
color = kwargs.pop("textcolor", curve.style.linecolor)
kwargs = update_with_default(kwargs, Text.on_curve_params)
return Text(
at, y_func(at), text, rotation=rotation, textcolor=color, **kwargs
)
class Annotation(BaseElement):
_default_arrow_params = dict(
arrowstyle="-|>",
connectionstyle="arc3,rad=-0.25",
shrinkA=4,
shrinkB=4,
lw=2,
fc="w",
mutation_scale=20,
)
def __init__(
self,
x: float,
y: float,
text: str,
x_shift: float = 1,
y_shift: float = 1,
size: Union[int, str] = "medium",
textcoords: str = "data",
arrow_params: dict = None,
additional_points: List[Tuple[float, float]] = None,
**kwargs,
):
super().__init__("Annotation", nolegend=True)
self.style = Style(**kwargs)
# get/set arrow paramters
if arrow_params is None:
arrow_params = self._default_arrow_params.copy()
arrow_params = update_with_default(
arrow_params, self._default_arrow_params
)
arrow_params["color"] = kwargs.pop("textcolor", self.style.textcolor)
self.x, self.y = x, y
self.x_shift, self.y_shift = x_shift, y_shift
self.size = size
self.textcoords = textcoords
self.arrow_params = arrow_params
self.text = text
self.arrow_params["color"] = self.arrow_params.pop(
"color", self.style.textcolor
)
self.additional_points = additional_points
def draw(self, ax: plt.Axes):
# draw arrow + add text
actors = [
ax.annotate(
self.text,
(self.x, self.y),
size=self.size,
color=self.style.textcolor,
xytext=(self.x + self.x_shift, self.y + self.y_shift),
textcoords=self.textcoords,
arrowprops=self.arrow_params,
zorder=self.style.zorder,
)
]
# add additional arrows
if self.additional_points is not None:
for xy in self.additional_points:
actors.append(
ax.annotate(
self.text,
xy,
size=self.size,
color=self.style.textcolor,
xytext=(self.x + self.x_shift, self.y + self.y_shift),
textcoords=self.textcoords,
arrowprops=self.arrow_params,
fontweight=0, # make the text invisible
)
)
if self.style.outlined:
for actor in actors:
self.outline(actor, lw=2)
@classmethod
def at_curve(cls, curve: BaseElement, text: str, at: float = 1, **kwargs):
"""
Draws an annotation pointing at a point along a curve
"""
try:
y_func = curve.y_func # type: ignore
except AttributeError:
raise AttributeError(
                f'The curve object {curve} has no "y_func" method, cannot reconstruct text position'
)
# get color
color = kwargs.pop("textcolor", curve.style.linecolor)
return Annotation(at, y_func(at), text, textcolor=color, **kwargs)
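# Usage sketch (assumes Style() supplies defaults for textcolor, alpha, outlined,
# fontweight and zorder, as the rest of the package suggests): draw a label and an
# annotation directly onto a matplotlib Axes.
if __name__ == "__main__":
    fig, ax = plt.subplots()
    Text(0.5, 0.5, "a label", size=14).draw(ax)
    Annotation(0.2, 0.2, "look here", x_shift=0.3, y_shift=0.3).draw(ax)
    plt.show()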
|
python
|
import argparse
import math
import PIL.Image
import PIL.ImageDraw
import sys
def choose_guideline_style(guideline_mod):
if guideline_mod % 16 == 0:
return ('#1f32ff', 3)
if guideline_mod % 8 == 0:
return ('#80f783', 2)
if guideline_mod % 4 == 0:
return ('#f4bffb', 1)
def in_ellipsoid(x, y, z, rad_x, rad_y, rad_z, center_x=None, center_y=None, center_z=None):
'''
Given a point (x, y, z), return whether that point lies inside the
ellipsoid defined by (x/a)^2 + (y/b)^2 + (z/c)^2 = 1
'''
if center_x is None: center_x = rad_x
if center_y is None: center_y = rad_y
if center_z is None: center_z = rad_z
#print(x, y, z, rad_x, rad_y, rad_z, center_x, center_y, center_z)
x = ((x - center_x) / rad_x) ** 2
y = ((y - center_y) / rad_y) ** 2
z = ((z - center_z) / rad_z) ** 2
distance = x + y + z
#print(distance)
return distance < 1
def voxelspheregenerator(WIDTH, HEIGH, DEPTH, WALL_THICKNESS=None, specific=None):
ODD_W = WIDTH % 2 == 1
ODD_H = HEIGH % 2 == 1
ODD_D = DEPTH % 2 == 1
RAD_X = WIDTH / 2
RAD_Y = HEIGH / 2
RAD_Z = DEPTH / 2
if WALL_THICKNESS:
INNER_RAD_X = RAD_X - WALL_THICKNESS
INNER_RAD_Y = RAD_Y - WALL_THICKNESS
INNER_RAD_Z = RAD_Z - WALL_THICKNESS
X_CENTER = {WIDTH // 2} if ODD_W else {WIDTH // 2, (WIDTH // 2) - 1}
Y_CENTER = {HEIGH // 2} if ODD_H else {HEIGH // 2, (HEIGH // 2) - 1}
Z_CENTER = {DEPTH // 2} if ODD_D else {DEPTH // 2, (DEPTH // 2) - 1}
layer_digits = len(str(DEPTH))
filename_form = '{w}x{h}x{d}w{wall}-{{layer:0{digits}}}.png'
filename_form = filename_form.format(
w=WIDTH,
h=HEIGH,
d=DEPTH,
wall=WALL_THICKNESS if WALL_THICKNESS else 0,
digits=layer_digits,
)
dot_highlight = PIL.Image.open('dot_highlight.png')
dot_normal = PIL.Image.open('dot_normal.png')
dot_corner = PIL.Image.open('dot_corner.png')
pixel_scale = dot_highlight.size[0]
# Space between each pixel
PIXEL_MARGIN = 7
# Space between the pixel area and the canvas
PIXELSPACE_MARGIN = 2
# Space between the canvas area and the image edge
CANVAS_MARGIN = 2
LABEL_HEIGH = 20
FINAL_IMAGE_SCALE = 1
PIXELSPACE_WIDTH = (WIDTH * pixel_scale) + ((WIDTH - 1) * PIXEL_MARGIN)
PIXELSPACE_HEIGH = (HEIGH * pixel_scale) + ((HEIGH - 1) * PIXEL_MARGIN)
CANVAS_WIDTH = PIXELSPACE_WIDTH + (2 * PIXELSPACE_MARGIN * pixel_scale)
CANVAS_HEIGH = PIXELSPACE_HEIGH + (2 * PIXELSPACE_MARGIN * pixel_scale)
IMAGE_WIDTH = CANVAS_WIDTH + (2 * CANVAS_MARGIN * pixel_scale)
IMAGE_HEIGH = CANVAS_HEIGH + (2 * CANVAS_MARGIN * pixel_scale) + LABEL_HEIGH
CANVAS_START_X = CANVAS_MARGIN * pixel_scale
CANVAS_START_Y = CANVAS_MARGIN * pixel_scale
CANVAS_END_X = CANVAS_START_X + CANVAS_WIDTH
CANVAS_END_Y = CANVAS_START_Y + CANVAS_HEIGH
PIXELSPACE_START_X = CANVAS_START_X + (PIXELSPACE_MARGIN * pixel_scale)
PIXELSPACE_START_Y = CANVAS_START_Y + (PIXELSPACE_MARGIN * pixel_scale)
PIXELSPACE_END_X = PIXELSPACE_START_X + PIXELSPACE_WIDTH
PIXELSPACE_END_Y = PIXELSPACE_START_Y + PIXELSPACE_HEIGH
GUIDELINE_MOD_X = math.ceil(RAD_X)
GUIDELINE_MOD_Y = math.ceil(RAD_Y)
def pixel_coord(x, y):
x = PIXELSPACE_START_X + (x * pixel_scale) + (x * PIXEL_MARGIN)
y = PIXELSPACE_START_Y + (y * pixel_scale) + (y * PIXEL_MARGIN)
return (x, y)
def make_layer_matrix(z):
layer_matrix = [[None for y in range(math.ceil(RAD_Y))] for x in range(math.ceil(RAD_X))]
# Generate the upper left corner.
furthest_x = RAD_X
furthest_y = RAD_Y
for y in range(math.ceil(RAD_Y)):
for x in range(math.ceil(RAD_X)):
ux = x + 0.5
uy = y + 0.5
uz = z + 0.5
within = in_ellipsoid(ux, uy, uz, RAD_X, RAD_Y, RAD_Z)
if WALL_THICKNESS:
in_hole = in_ellipsoid(
ux, uy, uz,
INNER_RAD_X, INNER_RAD_Y, INNER_RAD_Z,
RAD_X, RAD_Y, RAD_Z
)
within = within and not in_hole
if within:
if x in X_CENTER or y in Y_CENTER:
if z in Z_CENTER:
dot = dot_normal
else:
dot = dot_highlight
else:
if z in Z_CENTER:
dot = dot_highlight
else:
dot = dot_normal
layer_matrix[x][y] = dot
furthest_x = min(x, furthest_x)
furthest_y = min(y, furthest_y)
#layer_image.paste(dot, box=(pixel_coord_x, pixel_coord_y))
# Mark the corner pieces
furthest_y = math.floor(furthest_y)
for y in range(furthest_y, math.ceil(RAD_Y-1)):
for x in range(furthest_x, math.ceil(RAD_X-1)):
is_corner = (
layer_matrix[x][y] is not None and
layer_matrix[x-1][y+1] is not None and
layer_matrix[x+1][y-1] is not None and
(
# Outer corners
(layer_matrix[x][y-1] is None and layer_matrix[x-1][y] is None) or
# Inner corners, if hollow
(layer_matrix[x][y+1] is None and layer_matrix[x+1][y] is None)
)
)
if is_corner:
layer_matrix[x][y] = dot_corner
return layer_matrix
def make_layer_image(layer_matrix):
layer_image = PIL.Image.new('RGBA', size=(IMAGE_WIDTH, IMAGE_HEIGH), color=(0, 0, 0, 0))
draw = PIL.ImageDraw.ImageDraw(layer_image)
# Plot.
LABEL_Y = (2 * math.ceil(RAD_Y))
for y in range(math.ceil(RAD_Y)):
bottom_y = (HEIGH - 1) - y
for x in range(math.ceil(RAD_X)):
right_x = (WIDTH - 1) - x
if layer_matrix[x][y] is not None:
layer_image.paste(layer_matrix[x][y], box=pixel_coord(x, y))
layer_image.paste(layer_matrix[x][y], box=pixel_coord(right_x, y))
layer_image.paste(layer_matrix[x][y], box=pixel_coord(x, bottom_y))
layer_image.paste(layer_matrix[x][y], box=pixel_coord(right_x, bottom_y))
# Draw the counting helpers along the bottom.
# Start at the center top of the circle and walk along the edge.
# Every time the walker 'falls' down, mark the distance.
def put_counterhelper(start_x, end_x, y):
if start_x > end_x:
return
y = (HEIGH + 1) - y
span = end_x - start_x
center = start_x + 1
draw.text(pixel_coord(center, y), str(span), fill='#000')
y = 0
x = math.floor(RAD_X) - 1
end_x = x
start_y = None
while x >= y and y < RAD_Y:
#print(x, y, start_y)
pixel = layer_matrix[x][y]
if pixel is None:
y += 1
if x != end_x:
put_counterhelper(x, end_x, y)
if start_y is None:
start_y = y
else:
put_counterhelper(x, end_x, start_y)
end_x = x
continue
x -= 1
y += 1
put_counterhelper(x, end_x, y)
# To draw the guidelines, start from
for x in range(GUIDELINE_MOD_X % 4, WIDTH + 4, 4):
# Vertical guideline
as_if = GUIDELINE_MOD_X - x
#print(x, as_if)
line_x = PIXELSPACE_START_X + (x * pixel_scale) + (x * PIXEL_MARGIN)
line_x = line_x - PIXEL_MARGIN + (PIXEL_MARGIN // 2)
if line_x >= PIXELSPACE_END_X:
continue
(color, width) = choose_guideline_style(as_if)
draw.line((line_x, CANVAS_START_Y, line_x, CANVAS_END_Y - 1), fill=color, width=width)
draw.text((line_x, CANVAS_END_X), str(x), fill='#000')
for y in range(GUIDELINE_MOD_Y % 4, HEIGH + 4, 4):
# Horizontal guideline
as_if = GUIDELINE_MOD_Y - y
#print(y, as_if)
line_y = PIXELSPACE_START_Y + (y * pixel_scale) + (y * PIXEL_MARGIN)
line_y = line_y - PIXEL_MARGIN + (PIXEL_MARGIN // 2)
if line_y >= PIXELSPACE_END_Y:
continue
(color, width) = choose_guideline_style(as_if)
draw.line((CANVAS_START_X, line_y, CANVAS_END_X - 1, line_y), fill=color, width=width)
draw.text((CANVAS_END_X, line_y), str(y), fill='#000')
draw.rectangle((CANVAS_START_X, CANVAS_START_Y, CANVAS_END_X - 1, CANVAS_END_Y - 1), outline='#000')
draw.text((CANVAS_START_X, IMAGE_HEIGH - LABEL_HEIGH), layer_filename, fill='#000')
print(layer_filename)
if FINAL_IMAGE_SCALE != 1:
layer_image = layer_image.resize((FINAL_IMAGE_SCALE * IMAGE_WIDTH, FINAL_IMAGE_SCALE * IMAGE_HEIGH))
return layer_image
if specific is None:
zrange = range(DEPTH)
elif isinstance(specific, int):
zrange = [specific]
else:
zrange = specific
layer_matrices = []
for z in zrange:
if z < math.ceil(RAD_Z):
layer_matrix = make_layer_matrix(z)
layer_matrices.append(layer_matrix)
else:
layer_matrix = layer_matrices[(DEPTH - 1) - z]
layer_filename = filename_form.format(layer=z)
layer_image = make_layer_image(layer_matrix)
layer_image.save(layer_filename)
def voxelsphere_argparse(args):
height_depth_match = bool(args.height) == bool(args.depth)
if not height_depth_match:
raise ValueError('Must provide both or neither of height+depth. Not just one.')
if (args.height is args.depth is None):
args.height = args.width
args.depth = args.width
wall_thickness = int(args.wall_thickness) if args.wall_thickness else None
specific = int(args.specific) if args.specific else None
voxelspheregenerator(
WIDTH=int(args.width),
HEIGH=int(args.height),
DEPTH=int(args.depth),
WALL_THICKNESS=wall_thickness,
specific=specific,
)
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument('width')
parser.add_argument('height', nargs='?', default=None)
parser.add_argument('depth', nargs='?', default=None)
parser.add_argument('--wall', dest='wall_thickness', default=None)
parser.add_argument('--specific', dest='specific', default=None)
parser.set_defaults(func=voxelsphere_argparse)
args = parser.parse_args(argv)
return args.func(args)
if __name__ == '__main__':
raise SystemExit(main(sys.argv[1:]))
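# Example invocations (the file name is assumed; flags follow the argparse setup above):
#   python voxelsphere.py 16                  -> 16x16x16 solid sphere, one PNG per layer
#   python voxelsphere.py 24 16 12 --wall 2   -> 24x16x12 hollow ellipsoid with 2-voxel walls
#   python voxelsphere.py 16 --specific 8     -> render only layer z=8
# dot_highlight.png, dot_normal.png and dot_corner.png must be in the working directory.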
|
python
|
class GQL:
# Client -> Server message types.
CONNECTION_INIT = "connection_init"
START = "start"
STOP = "stop"
CONNECTION_TERMINATE = "connection_terminate"
# Server -> Client message types.
CONNECTION_ERROR = "connection_error"
CONNECTION_ACK = "connection_ack"
DATA = "data"
ERROR = "error"
COMPLETE = "complete"
CONNECTION_KEEP_ALIVE = "ka"
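# Usage sketch: these constants are the message types of the graphql-ws
# (subscriptions-transport-ws) protocol; a client would serialize them like this.
import json

init_message = json.dumps({"type": GQL.CONNECTION_INIT, "payload": {}})
start_message = json.dumps({
    "id": "1",
    "type": GQL.START,
    "payload": {"query": "subscription { ping }"},
})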
|
python
|
# -*- coding:utf-8 -*-
import time
from tqdm import tqdm
import requests
from lxml import etree
headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gb2312,utf-8',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0',
'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
'Connection': 'Keep-alive'
}
def run(url):
try:
res_text = requests.get(url=url, headers=headers)
res = etree.HTML(res_text.text)
        # Extract the article links from the list page and crawl each article
article_urls = res.xpath('//div[@class="article-list"]/div/h4/a/@href')
for article_url in article_urls:
article_text = requests.get(url=article_url, headers=headers)
article_result = etree.HTML(article_text.text)
title = article_result.xpath('//h1[@class="title-article"]/text()')[0]
publish_time = article_result.xpath('//div[@class="bar-content"]/span[@class="time"]/text()')[0]
print(publish_time, title)
except:
pass
if __name__ == '__main__':
start = time.time()
    for i in range(1, 10):  # build the list-page URLs to crawl
url = 'https://blog.csdn.net/cui_yonghua/article/list/{}'.format(i)
run(url=url)
print('time cost:{}'.format(time.time()-start))
|
python
|
# coding: utf-8
"""
Eclipse Kapua REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class DataMetricsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def data_metric_count(self, scope_id, body, **kwargs): # noqa: E501
"""Counts the MetricInfos # noqa: E501
Counts the MetricInfos with the given MetricInfoQuery parameter returning the number of matching MetricInfos # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.data_metric_count(scope_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId in which to search results (required)
:param MetricInfoQuery body: The MetricInfoQuery to use to filter count results (required)
:return: CountResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.data_metric_count_with_http_info(scope_id, body, **kwargs) # noqa: E501
else:
(data) = self.data_metric_count_with_http_info(scope_id, body, **kwargs) # noqa: E501
return data
def data_metric_count_with_http_info(self, scope_id, body, **kwargs): # noqa: E501
"""Counts the MetricInfos # noqa: E501
Counts the MetricInfos with the given MetricInfoQuery parameter returning the number of matching MetricInfos # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.data_metric_count_with_http_info(scope_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId in which to search results (required)
:param MetricInfoQuery body: The MetricInfoQuery to use to filter count results (required)
:return: CountResult
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scope_id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method data_metric_count" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'scope_id' is set
if ('scope_id' not in params or
params['scope_id'] is None):
raise ValueError("Missing the required parameter `scope_id` when calling `data_metric_count`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `data_metric_count`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope_id' in params:
path_params['scopeId'] = params['scope_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/xml', 'application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/xml', 'application/json']) # noqa: E501
# Authentication setting
auth_settings = ['kapuaAccessToken'] # noqa: E501
return self.api_client.call_api(
'/{scopeId}/data/metrics/_count', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CountResult', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def data_metric_find(self, scope_id, metric_info_id, **kwargs): # noqa: E501
"""Gets an MetricInfo # noqa: E501
Gets the MetricInfo specified by the metricInfoId path parameter # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.data_metric_find(scope_id, metric_info_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the requested MetricInfo. (required)
:param str metric_info_id: The id of the requested MetricInfo (required)
:return: MetricInfo
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.data_metric_find_with_http_info(scope_id, metric_info_id, **kwargs) # noqa: E501
else:
(data) = self.data_metric_find_with_http_info(scope_id, metric_info_id, **kwargs) # noqa: E501
return data
def data_metric_find_with_http_info(self, scope_id, metric_info_id, **kwargs): # noqa: E501
"""Gets an MetricInfo # noqa: E501
Gets the MetricInfo specified by the metricInfoId path parameter # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.data_metric_find_with_http_info(scope_id, metric_info_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the requested MetricInfo. (required)
:param str metric_info_id: The id of the requested MetricInfo (required)
:return: MetricInfo
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scope_id', 'metric_info_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method data_metric_find" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'scope_id' is set
if ('scope_id' not in params or
params['scope_id'] is None):
raise ValueError("Missing the required parameter `scope_id` when calling `data_metric_find`") # noqa: E501
# verify the required parameter 'metric_info_id' is set
if ('metric_info_id' not in params or
params['metric_info_id'] is None):
raise ValueError("Missing the required parameter `metric_info_id` when calling `data_metric_find`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope_id' in params:
path_params['scopeId'] = params['scope_id'] # noqa: E501
if 'metric_info_id' in params:
path_params['metricInfoId'] = params['metric_info_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['kapuaAccessToken'] # noqa: E501
return self.api_client.call_api(
'/{scopeId}/data/metrics/{metricInfoId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='MetricInfo', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def data_metric_query(self, scope_id, body, **kwargs): # noqa: E501
"""Queries the MetricInfos # noqa: E501
Queries the MetricInfos with the given MetricInfoQuery parameter returning all matching MetricInfos # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.data_metric_query(scope_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId in which to search results (required)
:param MetricInfoQuery body: The MetricInfoQuery to use to filter results (required)
:return: MetricInfoListResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.data_metric_query_with_http_info(scope_id, body, **kwargs) # noqa: E501
else:
(data) = self.data_metric_query_with_http_info(scope_id, body, **kwargs) # noqa: E501
return data
def data_metric_query_with_http_info(self, scope_id, body, **kwargs): # noqa: E501
"""Queries the MetricInfos # noqa: E501
Queries the MetricInfos with the given MetricInfoQuery parameter returning all matching MetricInfos # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.data_metric_query_with_http_info(scope_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId in which to search results (required)
:param MetricInfoQuery body: The MetricInfoQuery to use to filter results (required)
:return: MetricInfoListResult
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scope_id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method data_metric_query" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'scope_id' is set
if ('scope_id' not in params or
params['scope_id'] is None):
raise ValueError("Missing the required parameter `scope_id` when calling `data_metric_query`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `data_metric_query`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope_id' in params:
path_params['scopeId'] = params['scope_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/xml', 'application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/xml', 'application/json']) # noqa: E501
# Authentication setting
auth_settings = ['kapuaAccessToken'] # noqa: E501
return self.api_client.call_api(
'/{scopeId}/data/metrics/_query', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='MetricInfoListResult', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def data_metric_simple_query(self, scope_id, **kwargs): # noqa: E501
"""Gets the MetricInfo list in the scope # noqa: E501
Returns the list of all the metricInfos associated to the current selected scope. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.data_metric_simple_query(scope_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId in which to search results (required)
:param str client_id: The client id to filter results
:param str channel: The channel to filter results. It allows '#' wildcard in last channel level
:param str name: The metric name to filter results
:param int offset: The result set offset
:param int limit: The result set limit
:return: MetricInfoListResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.data_metric_simple_query_with_http_info(scope_id, **kwargs) # noqa: E501
else:
(data) = self.data_metric_simple_query_with_http_info(scope_id, **kwargs) # noqa: E501
return data
def data_metric_simple_query_with_http_info(self, scope_id, **kwargs): # noqa: E501
"""Gets the MetricInfo list in the scope # noqa: E501
Returns the list of all the metricInfos associated to the current selected scope. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.data_metric_simple_query_with_http_info(scope_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId in which to search results (required)
:param str client_id: The client id to filter results
:param str channel: The channel to filter results. It allows '#' wildcard in last channel level
:param str name: The metric name to filter results
:param int offset: The result set offset
:param int limit: The result set limit
:return: MetricInfoListResult
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scope_id', 'client_id', 'channel', 'name', 'offset', 'limit'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method data_metric_simple_query" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'scope_id' is set
if ('scope_id' not in params or
params['scope_id'] is None):
raise ValueError("Missing the required parameter `scope_id` when calling `data_metric_simple_query`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope_id' in params:
path_params['scopeId'] = params['scope_id'] # noqa: E501
query_params = []
if 'client_id' in params:
query_params.append(('clientId', params['client_id'])) # noqa: E501
if 'channel' in params:
query_params.append(('channel', params['channel'])) # noqa: E501
if 'name' in params:
query_params.append(('name', params['name'])) # noqa: E501
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['kapuaAccessToken'] # noqa: E501
return self.api_client.call_api(
'/{scopeId}/data/metrics', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='MetricInfoListResult', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
python
|
import datetime
import sys
import pathlib
current_dir = pathlib.Path(__file__).resolve().parent
sys.path.append( str(current_dir) + '/../' )
from lib.config import get_camera_config
from service.camera import take_pictures
"""
Run by cron every minute.
Sends a capture request to every camera whose timer is set for the current minute.
python3 cameras.py
"""
def main():
now = datetime.datetime.now()
hour, minute = now.hour, now.minute
cameras = get_camera_config()
target_camera_ids = []
    # Check every camera
for camera in cameras:
        # Skip cameras that have no timer configured
if 'timer' not in camera:
continue
timers = camera['timer']
        # Check whether this camera should take a picture right now
for timer in timers:
if timer['hour'] == hour and timer['minute'] == minute:
target_camera_ids.append(camera['camera_id'])
                break  # this camera is already scheduled; move on to the next one
take_pictures(target_camera_ids)
if __name__ == '__main__':
main()
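# For reference, the shape of the configuration this script expects from
# get_camera_config() (keys inferred from the loop above; values are made up):
#
# [
#     {
#         "camera_id": "cam-01",
#         "timer": [
#             {"hour": 7, "minute": 0},
#             {"hour": 19, "minute": 30},
#         ],
#     },
#     {"camera_id": "cam-02"},   # no timer -> skipped
# ]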
|
python
|
"""Dataset specification for hit graphs using pytorch_geometric formulation"""
# System imports
import os
# External imports
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset, random_split
import torch_geometric
def load_graph(filename):
with np.load(filename) as f:
x, y = f['X'], f['y']
Ri_rows, Ri_cols = f['Ri_rows'], f['Ri_cols']
Ro_rows, Ro_cols = f['Ro_rows'], f['Ro_cols']
n_edges = Ri_cols.shape[0]
edge_index = np.zeros((2, n_edges), dtype=int)
edge_index[0, Ro_cols] = Ro_rows
edge_index[1, Ri_cols] = Ri_rows
return x, edge_index, y
def load_sparse(filename):
with np.load(filename, allow_pickle=True) as f:
x, edge_index, y = f['X'], f['e'], f['y']
return x, edge_index, y
class HitGraphDataset(Dataset):
"""PyTorch dataset specification for hit graphs"""
def __init__(self, input_dir=None, filelist=None, n_samples=None, real_weight=1.0):
if filelist is not None:
self.metadata = pd.read_csv(os.path.expandvars(filelist))
filenames = self.metadata.file.values
elif input_dir is not None:
input_dir = os.path.expandvars(input_dir)
filenames = sorted([os.path.join(input_dir, f) for f in os.listdir(input_dir)
if f.startswith('event') and not f.endswith('_ID.npz')])
else:
raise Exception('Must provide either input_dir or filelist to HitGraphDataset')
self.filenames = filenames if n_samples is None else filenames[:n_samples]
self.real_weight = real_weight
self.fake_weight = 1 #real_weight / (2 * real_weight - 1)
def __getitem__(self, index):
""" We choose to load an already sparsified graph """
# x, edge_index, y = load_graph(self.filenames[index])
# print(self.filenames[index])
x, edge_index, y = load_sparse(self.filenames[index])
# Compute weights
w = y * self.real_weight + (1-y) * self.fake_weight
return torch_geometric.data.Data(x=torch.from_numpy(x),
edge_index=torch.from_numpy(edge_index),
y=torch.from_numpy(y), w=torch.from_numpy(w),
i=index)
def get_filelist(self):
return self.filenames
def __len__(self):
return len(self.filenames)
def get_datasets(n_train, n_valid, input_dir=None, filelist=None, real_weight=1.0):
data = HitGraphDataset(input_dir=input_dir, filelist=filelist,
n_samples=n_train+n_valid, real_weight=real_weight)
# Split into train and validation
train_data, valid_data = random_split(data, [n_train, n_valid])
return train_data, valid_data
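# Usage sketch (paths and sizes are placeholders): build train/validation splits from a
# directory of pre-sparsified .npz graphs (keys 'X', 'e', 'y') and batch the training
# split. Older PyTorch Geometric releases expose DataLoader as torch_geometric.data.DataLoader;
# newer ones moved it to torch_geometric.loader.
if __name__ == '__main__':
    train_data, valid_data = get_datasets(n_train=800, n_valid=200,
                                          input_dir='/path/to/hitgraphs')
    loader = torch_geometric.data.DataLoader(train_data, batch_size=32, shuffle=True)
    for batch in loader:
        print(batch.x.shape, batch.edge_index.shape)
        break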
|
python
|
"""
author: Rene Pickhardt ([email protected])
Date: 15.1.2020
License: MIT
Checks which nodes are currently online by establishing a connection to those nodes. Results can later be studied with `lightning-cli listpeers` or when `jq` is installed with `lcli getinfo | jq ".num_peers"`
This tool is intended to help make routing decisions by choosing a path to the destination on which all hops are online. This should reduce failed routing attempts and latency.
=== Support:
If you like my work consider a donation at https://patreon.com/renepickhardt or https://tallyco.in/s/lnbook
"""
from lightning import LightningRpc, RpcError
from multiprocessing import Process
from time import sleep
rpc = LightningRpc("/home/rpickhardt/.lightning/bitcoin/lightning-rpc")
def connect(nodeid):
try:
res = rpc.connect(nodeid)
print(nodeid, res)
except RpcError as e:
print("could not connect to", nodeid, str(e))
nodes = rpc.listnodes()["nodes"]
potential_nodes = []
for node in nodes:
if "nodeid" in node:
nodeid = node["nodeid"]
if "addresses" not in node:
continue
addresses = node["addresses"]
for addr in addresses:
if addr["type"] == "ipv4":
potential_nodes.append(nodeid)
print("known nodes:", len(nodes), "nodes with ipv4 addr:", len(potential_nodes))
for r, nodeid in enumerate(potential_nodes):
p = Process(target=connect, args=(nodeid,))
print("go", nodeid)
p.start()
sleep(0.2)
print(r)
|
python
|
"""Helper methods."""
from typing import Tuple, Optional
from rest_framework import status
from rest_framework.response import Response
def validate_request_body(
request,
) -> Tuple[Optional[Response], Optional[dict]]:
"""
Validate the json body of a request.
:param request: django request
:return: None or error response, None or json dict
"""
try:
return None, request.data
except Exception:
return (
Response(
data={"message": "Sent data is not valid"},
status=status.HTTP_400_BAD_REQUEST,
),
None,
)
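# Usage sketch (the view name and route are hypothetical): short-circuit a DRF view
# when the request body cannot be parsed.
from rest_framework.decorators import api_view


@api_view(["POST"])
def example_view(request):
    error_response, payload = validate_request_body(request)
    if error_response is not None:
        return error_response
    return Response(data={"received": payload}, status=status.HTTP_200_OK)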
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 3 20:07:19 2020
https://zhuanlan.zhihu.com/p/78452993
@author: lenovo
"""
import torch
from torch.nn import Sequential as Seq, Linear as Lin, ReLU
from torch_geometric.nn import MessagePassing
from torch_geometric.datasets import TUDataset
# dataset = TUDataset(root='/tmp/ENZYMES', name='ENZYMES')
class EdgeConv(MessagePassing):
def __init__(self, F_in, F_out):
super(EdgeConv, self).__init__(aggr='max') # "Max" aggregation.
self.mlp = Seq(Lin(2 * F_in, F_out), ReLU(), Lin(F_out, F_out))
def forward(self, x, edge_index):
# x has shape [N, F_in]
# edge_index has shape [2, E]
return self.propagate(edge_index, x=x) # shape [N, F_out]
def message(self, x_i, x_j):
# x_i has shape [E, F_in]
# x_j has shape [E, F_in]
edge_features = torch.cat([x_i, x_j - x_i], dim=1) # shape [E, 2 * F_in]
return self.mlp(edge_features) # shape [E, F_out]
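# Usage sketch: run the EdgeConv layer on a tiny 5-node graph.
# Shapes follow the comments in forward() and message() above.
if __name__ == "__main__":
    conv = EdgeConv(F_in=3, F_out=16)
    x = torch.randn(5, 3)                                        # [N, F_in]
    edge_index = torch.tensor([[0, 1, 2, 3],
                               [1, 2, 3, 4]], dtype=torch.long)  # [2, E]
    out = conv(x, edge_index)                                    # [N, F_out] == [5, 16]
    print(out.shape)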
|
python
|
# Derive from Exception (not BaseException) so that generic `except Exception`
# handlers still catch these client errors.
class Web3ClientException(Exception):
    pass
class MissingParameter(Web3ClientException):
pass
class TransactionTooExpensive(Web3ClientException):
pass
class NetworkNotFound(Web3ClientException):
pass
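# Usage sketch: callers can catch the shared base class to handle any client error.
def check_params(params: dict) -> None:
    if "gas_price" not in params:
        raise MissingParameter("gas_price is required")


try:
    check_params({})
except Web3ClientException as exc:
    print(f"client error: {exc}")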
|
python
|
import gdo
def f():
import time
time.sleep(10)
gdo.concurrent(
gdo.RunGraph(
"slee", "sleep 2",
"slle2", "sleep 5",
"pysleep", f,
"true", "true")
.req("slee", "true")
)
|
python
|
#!/usr/bin/python
#PIN 0-8 3v3 pull-up default, 9-27 pull-down default
# Pin # for relay connected to heating element (Note: GPIO pin#)
he_pin = 26
brew_pin = 22
steam_pin = 27
led_pin = 13
# Default goal temperature
set_temp = 96.
set_steam_temp = 145.
#Use Fahrenheit?
use_fahrenheit = False
# Default alarm time
snooze = '07:00'
# Pressure gauge
pressure_enable = True
# Circuit breaker time: 20 minutes, converted to seconds
circuitBreakerTime = 20 * 60
# Temperature low/high bounds in Celsius (_b = brew, _s = steam)
low_temp_b = 0
high_temp_b = 110
low_temp_s = 130
high_temp_s = 160
# Main loop sample rate in seconds
sample_time = 0.1
# PID Proportional, Integral, and Derivative value
P = 10
I = 1.5
D = 20.0
#Web/REST Server Options
host = '0.0.0.0'
port = 8080
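# Illustrative only (not part of this settings module): a minimal sketch of how the
# P, I, D constants and sample_time above are typically combined in one PID update.
def pid_step(error, integral, prev_error, dt=sample_time):
    """One PID update step: returns (output, new_integral)."""
    integral += error * dt
    derivative = (error - prev_error) / dt
    output = P * error + I * integral + D * derivative
    return output, integral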
|
python
|
# -*- coding: utf-8 -*-
import os
from codecs import open
from setuptools import setup
here = os.path.abspath(os.path.dirname(__file__))
# load the package's __version__.py module as a dictionary
about = {}
with open(os.path.join(here, "profiler", "__version__.py"), "r", "utf-8") as f:
exec(f.read(), about)
try:
with open("README.md", "r") as f:
readme = f.read()
except FileNotFoundError:
readme = about["__description__"]
packages = ["profiler"]
requires = ["tox==3.24.4", "coverage-badge==1.1.0", "scapy==2.4.5", "manuf==1.1.3"]
setup(
name=about["__title__"],
version=about["__version__"],
description=about["__description__"],
long_description=readme,
long_description_content_type="text/markdown",
author=about["__author__"],
author_email=about["__author_email__"],
url=about["__url__"],
    python_requires="~=3.7",
license=about["__license__"],
classifiers=[
"Natural Language :: English",
"Development Status :: 3 - Alpha",
"Programming Language :: Python :: 3.7",
"Intended Audience :: System Administrators",
"Topic :: Utilities",
],
packages=packages,
project_urls={
"Documentation": "https://docs.wlanpi.com",
"Source": "https://github.com/wlan-pi/profiler",
},
include_package_data=True,
install_requires=requires,
entry_points={"console_scripts": ["profiler=profiler.__main__:main"]},
)
|
python
|
"""recode entities
Revision ID: 4212acfa7aec
Revises: 235fd19bb942
Create Date: 2016-12-01 10:24:07.638773
"""
import logging
# from pprint import pprint
from alembic import op
import sqlalchemy as sa
import uuid
log = logging.getLogger('migrate')
revision = '4212acfa7aec'
down_revision = '235fd19bb942'
SCHEMA = {
'/entity/person.json#': 'Person',
'/entity/organization.json#': 'Organization',
'/entity/entity.json#': 'LegalEntity',
'/entity/company.json#': 'Company'
}
def upgrade():
op.alter_column('document', 'collection_id', existing_type=sa.INTEGER(), nullable=False) # noqa
op.add_column('entity', sa.Column('collection_id', sa.Integer, nullable=True)) # noqa
op.create_index(op.f('ix_entity_collection_id'), 'entity', ['collection_id'], unique=False) # noqa
op.create_foreign_key(None, 'entity', 'collection', ['collection_id'], ['id']) # noqa
op.create_table('entity_identity',
sa.Column('id', sa.Integer, nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('entity_id', sa.Unicode(255), nullable=True),
sa.Column('identity', sa.Unicode(255), nullable=True),
sa.PrimaryKeyConstraint('id')
)
bind = op.get_bind()
meta = sa.MetaData()
meta.bind = bind
meta.reflect()
entity_table = meta.tables['entity']
entity_identity_table = meta.tables['entity_identity']
document_table = meta.tables['document']
collection_entity_table = meta.tables['collection_entity']
reference_table = meta.tables['reference']
permission_table = meta.tables['permission']
alert_table = meta.tables['alert']
q = sa.select([entity_table])
rp = bind.execute(q)
entities_all = rp.fetchall()
for i, entity in enumerate(entities_all):
log.info("Process [%s: %s]: %s", i, entity.id, entity.name)
if entity.deleted_at is not None:
cq = sa.delete(alert_table)
cq = cq.where(alert_table.c.entity_id == entity.id)
bind.execute(cq)
cq = sa.delete(collection_entity_table)
cq = cq.where(collection_entity_table.c.entity_id == entity.id)
bind.execute(cq)
cq = sa.delete(reference_table)
cq = cq.where(reference_table.c.entity_id == entity.id)
bind.execute(cq)
cq = sa.delete(entity_table)
cq = cq.where(entity_table.c.id == entity.id)
bind.execute(cq)
continue
data = entity['data']
data.pop('identifiers', None)
data['country'] = data.pop('jurisdiction_code', None)
data['birthDate'] = data.pop('birth_date', None)
data['deathDate'] = data.pop('death_date', None)
data['alias'] = []
for on in data.pop('other_names', []):
name = on.get('name')
if name is None:
continue
data['alias'].append(name)
        # Iterate over a copy so empty values can be dropped while looping
        for k, v in list(data.items()):
            if v is None or v == '':
                data.pop(k)
schema = SCHEMA.get(entity.type)
cq = sa.select([alert_table])
cq = cq.where(alert_table.c.entity_id == entity.id)
alerts = bind.execute(cq).fetchall()
cq = sa.select([reference_table, document_table.c.collection_id])
cq = cq.select_from(reference_table.join(document_table, reference_table.c.document_id == document_table.c.id)) # noqa
cq = cq.where(reference_table.c.entity_id == entity.id)
references = bind.execute(cq).fetchall()
cq = sa.select([collection_entity_table])
cq = cq.where(collection_entity_table.c.entity_id == entity.id)
colls = bind.execute(cq).fetchall()
identity = uuid.uuid4().hex
for i, coll in enumerate(colls):
coll_id = coll.collection_id
eid = entity.id
if i == 0:
q = sa.update(entity_table)
q = q.where(entity_table.c.id == entity.id)
q = q.values(type=schema, data=data, collection_id=coll_id)
bind.execute(q)
else:
eid = uuid.uuid4().hex
ent = {
'id': eid,
'name': entity.name,
'state': entity.state,
'type': schema,
'data': data,
'collection_id': coll_id,
'created_at': entity.created_at,
'updated_at': entity.updated_at
}
q = sa.insert(entity_table).values(ent)
bind.execute(q)
if len(colls) > 1:
q = sa.insert(entity_identity_table).values({
'created_at': entity.updated_at,
'updated_at': entity.updated_at,
'entity_id': eid,
'identity': identity
})
bind.execute(q)
for alert in alerts:
cq = sa.select([permission_table])
cq = cq.where(permission_table.c.collection_id == coll_id)
cq = cq.where(permission_table.c.role_id == alert.role_id)
cq = cq.where(permission_table.c.read == True) # noqa
perm = bind.execute(cq).fetchone()
if perm is None and eid == entity.id:
q = sa.delete(alert_table)
q = q.where(alert_table.c.id == alert.id)
bind.execute(q)
if perm is not None and eid != entity.id:
ad = dict(alert)
ad.pop('id', None)
ad['entity_id'] = eid
q = sa.insert(alert_table).values(ad)
bind.execute(q)
for ref in references:
refdata = dict(ref)
collection_id = refdata.pop('collection_id')
if entity.state == 'pending' and coll_id == collection_id:
q = sa.update(reference_table)
q = q.where(reference_table.c.id == ref.id)
q = q.values(entity_id=eid)
bind.execute(q)
if entity.state == 'active' and eid != ref.entity_id:
refdata.pop('id', None)
refdata['entity_id'] = eid
q = sa.insert(reference_table).values(refdata)
bind.execute(q)
op.drop_table('collection_document')
op.drop_table('collection_entity')
# op.alter_column('entity', 'collection_id', nullable=False) # noqa
def downgrade():
pass
|
python
|
import os
# Reserves disk space by writing binary zeros to a file of a given size
class DiskSpaceReserver:
def __init__(self, path: str, size: int):
self.path = path
self.size = size
def reserve(self):
with open(self.path, 'wb') as f:
f.write(b'\0' * self.size)
def release(self):
try:
os.remove(self.path)
except OSError:
pass
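# Usage sketch: reserve 100 MiB up front, release it when no longer needed.
if __name__ == '__main__':
    reserver = DiskSpaceReserver('/tmp/reserved.bin', 100 * 1024 * 1024)
    reserver.reserve()
    # ... do work that relies on the space being available ...
    reserver.release()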
|
python
|
import uuid
from core import db, logging, plugin, model
from core.models import conduct, trigger, webui
from plugins.occurrence.models import action
class _occurrence(plugin._plugin):
version = 5.0
def install(self):
# Register models
model.registerModel("occurrence","_occurrence","_action","plugins.occurrence.models.action")
model.registerModel("occurrence clean","_occurrenceClean","_action","plugins.occurrence.models.action")
model.registerModel("occurrenceUpdate","_occurrenceUpdate","_action","plugins.occurrence.models.action")
# Finding conduct
foundConducts = conduct._conduct().query(query={"name" : "occurrenceCore" })["results"]
if len(foundConducts) == 0:
# Install
c = conduct._conduct().new("occurrenceCore")
c = conduct._conduct().get(c.inserted_id)
elif len(foundConducts) == 1:
# Reinstall
c = conduct._conduct().get(foundConducts[0]["_id"])
else:
# Count invalid
return False
# Finding trigger
foundTriggers = trigger._trigger(False).query(query={"name" : "occurrenceCore" })["results"]
if len(foundTriggers) == 0:
# Install
t = trigger._trigger().new("occurrenceCore")
t = trigger._trigger().get(t.inserted_id)
elif len(foundTriggers) == 1:
# Reinstall
t = trigger._trigger().get(foundTriggers[0]["_id"])
else:
# Count invalid
return False
# Finding action
foundActions = action._occurrenceClean().query(query={"name" : "occurrenceCore" })["results"]
if len(foundActions) == 0:
# Install
a = action._occurrenceClean().new("occurrenceCore")
a = action._occurrenceClean().get(a.inserted_id)
elif len(foundActions) == 1:
# Reinstall
a = action._occurrenceClean().get(foundActions[0]["_id"])
else:
# Count invalid
return False
c.triggers = [t._id]
flowTriggerID = str(uuid.uuid4())
flowActionID = str(uuid.uuid4())
c.flow = [
{
"flowID" : flowTriggerID,
"type" : "trigger",
"triggerID" : t._id,
"next" : [
{"flowID": flowActionID, "logic": True }
]
},
{
"flowID" : flowActionID,
"type" : "action",
"actionID" : a._id,
"next" : []
}
]
webui._modelUI().new(c._id,{ "ids":[ { "accessID":"0","delete": True,"read": True,"write": True } ] },flowTriggerID,0,0,"")
webui._modelUI().new(c._id,{ "ids":[ { "accessID":"0","delete": True,"read": True,"write": True } ] },flowActionID,100,0,"")
c.acl = { "ids":[ { "accessID":"0","delete": True,"read": True,"write": True } ] }
c.enabled = True
c.update(["triggers","flow","enabled","acl"])
t.acl = { "ids":[ { "accessID":"0","delete": True,"read": True,"write": True } ] }
t.schedule = "60-90s"
t.enabled = True
t.update(["schedule","enabled","acl"])
a.acl = { "ids":[ { "accessID":"0","delete": True,"read": True,"write": True } ] }
a.enabled = True
a.update(["enabled","acl"])
# Hide Created Models
temp = model._model().getAsClass(query={ "name" : "occurrence clean" })
if len(temp) == 1:
temp = temp[0]
temp.hidden = True
temp.update(["hidden"])
return True
def uninstall(self):
# deregister models
model.deregisterModel("occurrence","_occurrence","_action","plugins.occurrence.models.action")
model.deregisterModel("occurrence clean","_occurrenceClean","_action","plugins.occurrence.models.action")
model.deregisterModel("occurrenceUpdate","_occurrenceUpdate","_action","plugins.occurrence.models.action")
conduct._conduct().api_delete(query={"name" : "occurrenceCore" })
trigger._trigger().api_delete(query={"name" : "occurrenceCore" })
action._occurrenceClean().api_delete(query={"name" : "occurrenceCore" })
return True
def upgrade(self,LatestPluginVersion):
if self.version < 5:
pass
return True
|
python
|
#!/usr/bin/env python
import imp
import io
import os
from setuptools import setup, find_packages
def read(*filenames, **kwargs):
encoding = kwargs.get("encoding", "utf-8")
sep = kwargs.get("sep", "\n")
buf = []
for filename in filenames:
with io.open(filename, encoding=encoding) as f:
buf.append(f.read())
return sep.join(buf)
name = 'nengolib'
root = os.path.dirname(os.path.realpath(__file__))
version_module = imp.load_source(
'version', os.path.join(root, name, 'version.py'))
deps = [ # https://github.com/nengo/nengo/issues/508
"nengo>=2.2.0,<3.0",
"numpy>=1.13",
"scipy>=0.19.0",
]
download_url = (
'https://github.com/arvoelke/nengolib/archive/v%s.tar.gz' % (
version_module.version))
setup(
name=name,
version=version_module.version,
author="Aaron R. Voelker",
author_email="[email protected]",
description="Tools for robust dynamics in Nengo",
long_description=read("README.rst", "CHANGES.rst"),
url="https://github.com/arvoelke/nengolib/",
download_url=download_url,
license="Free for non-commercial use (see Nengo license)",
packages=find_packages(),
setup_requires=deps,
install_requires=deps,
keywords=[
'Neural Engineering Framework',
'Nengo',
'Dynamical Spiking Networks',
'Neural Dynamics',
'Reservoir Computing',
],
classifiers=[ # https://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 5 - Production/Stable',
'Framework :: Nengo',
'Intended Audience :: Science/Research',
'License :: Free for non-commercial use',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
]
)
|
python
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, nishta and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import getdate, validate_email_add, today
from frappe.model.document import Document
from planning.planning.myfunction import mail_format_pms,daily_summary_mail
import datetime
class NNTask(Document):
def autoname(self):
max_no_old="0"
max_no_result=frappe.db.sql("""select max(max_count) from `tabNNTask`""")
if(max_no_result):
max_no_old=max_no_result[0][0]
if max_no_old<=0:
max_no_old=0
max_no_new=int(max_no_old)+int(1)
count_zero=""
if max_no_new<1000:
count_zero="0"
if max_no_new<100:
count_zero="00"
if max_no_new<10:
count_zero="000"
self.max_count=max_no_new
new_naming=str("-")+str(count_zero)+str(max_no_new)
self.task=self.task+new_naming
self.name=self.task
def validate(self):
allocate_to_arr=[]
i=1
for d in self.assign_to:
if d.members in allocate_to_arr:
frappe.msgprint("Allocate to "+ str(d.members) +" Already Exists ( Row No : "+ str(i) +")",raise_exception=1)
else:
allocate_to_arr.append(d.members)
def after_insert(self):
task_name=self.task
mode=0
mail_format_pms(task_name,mode)
@frappe.whitelist()
def employee_values_load(naming_series=None):
return_values=frappe.db.sql("""select employee_name,hourly_rate from tabEmployee where employee=%s""",naming_series)
return return_values
|
python
|
import logging
from impala.dbapi import connect
from .settings import ImpalaConstants, NEED_CERTIFICATE
from .error import ImpalaConnectError, ImpalaQueryError
class ImpalaWrapper:
def __init__(self, host=ImpalaConstants.HOST, port=ImpalaConstants.PORT,
user=ImpalaConstants.USER, database=None, sql=None,
auth_required=NEED_CERTIFICATE):
self.host = host
self.port = int(port)
self.user = user
self.database = database
self.sql = "explain %s" % sql
self.auth_required = auth_required
def cursor(self):
if self.auth_required:
auth_mechanism = 'GSSAPI'
else:
auth_mechanism = 'NOSASL'
try:
return connect(self.host, self.port,
auth_mechanism=auth_mechanism).cursor()
except Exception as err:
logging.error(err)
raise ImpalaConnectError(message=str(err))
def explain(self):
cursor = self.cursor()
try:
cursor.execute("use %s" % self.database)
cursor.execute("set explain_level=2")
cursor.execute(self.sql)
except Exception as err:
logging.warning(err)
raise ImpalaQueryError(message=str(err))
else:
for line in cursor:
yield line[0]
finally:
cursor.close()
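# Usage sketch (host, database and SQL are placeholders): print an EXPLAIN plan line by line.
# Because this module uses relative imports, call it from code in the same package:
#
#     wrapper = ImpalaWrapper(database="default", sql="select count(*) from my_table")
#     for line in wrapper.explain():
#         print(line)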
|
python
|
from quixstreaming import QuixStreamingClient
from flask import Flask, request
from datetime import datetime
from waitress import serve
import os
import json
import hmac
import hashlib
# Quix injects credentials automatically to the client.
# Alternatively, you can always pass an SDK token manually as an argument.
client = QuixStreamingClient()
# Open the output topic where to write data out
output_topic = client.open_output_topic(os.environ["output"])
stream = output_topic.create_stream()
stream.properties.name = "Segment Data"
app = Flask("Segment Webhook")
# this is unauthenticated, anyone could post anything to you!
@app.route("/webhook", methods=['POST'])
def webhook():
# get the shared secret from environment variables
secret = os.environ["shared_secret"]
# convert to a byte array
secret_bytes = bytearray(secret, "utf-8")
# get the signature from the headers
header_sig = request.headers['x-signature']
# compute a hash-based message authentication code (HMAC)
hex_digest = hmac.new(secret_bytes, request.get_data(), hashlib.sha1).hexdigest()
# compare the HMAC to the header signature provided by Segment
if(header_sig != hex_digest):
        # if they don't match, it's no bueno
return "ERROR", 401
# if they do then fly me to the moon
stream.events.add_timestamp(datetime.now())\
.add_value(request.json["type"], json.dumps(request.json))\
.write()
return "OK", 200
print("CONNECTED!")
# you can use app.run for dev, but it's not secure, stable or particularly efficient
# app.run(debug=True, host="0.0.0.0", port=80)
# use waitress instead for production
serve(app, host='0.0.0.0', port=80)
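# Test sketch (URL and secret are placeholders): sign a payload the same way the handler
# verifies it above, so the request passes the HMAC check.
#
#     import hmac, hashlib, requests
#     body = b'{"type": "track", "event": "Test Event"}'
#     signature = hmac.new(b"my-shared-secret", body, hashlib.sha1).hexdigest()
#     requests.post("http://localhost:80/webhook", data=body,
#                   headers={"x-signature": signature,
#                            "Content-Type": "application/json"})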
|
python
|
import re
import json
from ..extractor.common import InfoExtractor
from ..utils import (
js_to_json
)
class sexixnetIE(InfoExtractor):
#http://www.txxx.com/videos/2631606/stepmom-seduces-teen-babe/
_VALID_URL = r'https?://(?:www\.)?sexix\.net'
def _real_extract(self, url):
webpage = self._download_webpage(url, url)
title = self._og_search_title(webpage, default=None) or self._html_search_regex(
r'(?s)<title>(.*?)</title>', webpage, 'video title',
default='video')
thumbnail = self._search_regex(r'image: \'([^\']+)', webpage, 'thumbnail', default=None)
vid = self._search_regex(r'<iframe src="http://sexix.net/v.php\?u=([^"]+)', webpage, 'emb')
embUrl = 'http://sexix.net/v.php?u=%s' % vid
headers = {'Referer': url}
webpage = self._download_webpage(embUrl, vid, headers=headers)
jw_config = self._parse_json(
self._search_regex(
r'(?s)jwplayer\(([\'"])(?:(?!\1).)+\1\)\.setup\s*\((?P<options>.+?)\);',
webpage, 'jw config', group='options'),
'', transform_source=js_to_json)
playlist_url = jw_config['playlist']
webpage = self._download_webpage(playlist_url, vid, headers=headers)
#<jwplayer:source file="http://porn96.xyz/?u=8pFvAZ3bC8jsfGLlJzaPUxZ%2BIL%2FLuJ8hSylcUIoCCQo%2FAyyZHVBvIS27YLs6U8UeKy6oYUwHCtJ6O0YFMAkOSg%3D%3D" type="mp4" label="480p"/>
sources = re.findall(r'file="(.+)"\s*type="(.+)"\s*label="([^"]+)p', webpage)
formats = []
for item in sources:
if item[0] != '':
try:
formats.append({
'url': item[0],
'height': int(item[2]),
'ext': item[1],
})
except (ValueError, IndexError):
pass
self._sort_formats(formats)
return {
'id': vid,
'title': title,
'thumbnail': thumbnail,
'formats': formats,
}
|
python
|
"""
https://www.hackerrank.com/challenges/no-idea
There is an array of n integers. There are also 2 disjoint sets, A and B, each containing m integers. You like all the integers in set A and dislike all the integers in set B. Your initial happiness is 0. For each integer i in the array, if i belongs to A, you add 1 to your happiness. If i belongs to B, you add -1 to your happiness. Otherwise, your happiness does not change. Output your final happiness at the end.
Note: Since A and B are sets, they have no repeated elements. However, the array might contain duplicate elements.
Constraints
Input Format
The first line contains integers n and m separated by a space.
The second line contains n integers, the elements of the array.
The third and fourth lines contain m integers each, the elements of sets A and B, respectively.
Output Format
Output a single integer, your total happiness.
Sample Input
3 2
1 5 3
3 1
5 7
Sample Output
1
Explanation
You gain 1 unit of happiness for elements 1 and 3, which are in set A. You lose 1 unit for 5, which is in set B. The element 7 in set B does not exist in the array, so it is not included in the calculation.
Hence, the total happiness is 2 - 1 = 1.
"""
#!/bin/python3
# Enter your code here. Read input from STDIN. Print output to STDOUT
def get_points(array, like_list, dislike_list):
points = 0
for number in array:
if number in like_list:
points += 1
if number in dislike_list:
points -= 1
return points
if __name__ == "__main__":
n,m = tuple(map(int, input().split()))
array = tuple(map(int, input().split()))
like_list = set(map(int, input().split()))
dislike_list = set(map(int, input().split()))
print(get_points(array, like_list, dislike_list))
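# Quick sanity check against the sample input above (illustrative, not part of
# the original solution):
#   get_points((1, 5, 3), {3, 1}, {5, 7}) -> +1 (for 1) + 1 (for 3) - 1 (for 5) = 1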
|
python
|
from entities import FeedEntity as FE
FEEDS = (
FE('rss', 'The Verge', 'https://www.theverge.com/rss/index.xml'),
FE('rss', 'VB', 'https://feeds.feedburner.com/venturebeat/SZYF'),
# FE('rss', 'TNW', 'https://thenextweb.com/feed/'),
FE('rss', 'ARS Technica', 'http://feeds.arstechnica.com/arstechnica/index'),
FE('rss', 'Wired', 'https://www.wired.com/feed/rss'),
FE('rss', 'The Atlantic', 'https://www.theatlantic.com/feed/all/.rss'),
# FE('rss', 'TechCrunch', 'http://feeds.feedburner.com/TechCrunch/'),
# FE('rss', 'addmeto (telegram)', 'https://addmeto.cc/rss/'),
FE('hn', 'Hacker News', 'https://news.ycombinator.com/', data={'max_news': 20}),
# FE('rss', 'BBC Tech', 'http://feeds.bbci.co.uk/news/technology/rss.xml'),
# FE('rss', 'NYT Tech', 'https://rss.nytimes.com/services/xml/rss/nyt/Technology.xml'),
FE('rss', 'Engadget', 'https://www.engadget.com/rss.xml'),
# FE('rss', 'WSJ Tech', 'https://feeds.a.dj.com/rss/RSSWSJD.xml'),
FE('rss', 'BBC Science & Environment', 'http://feeds.bbci.co.uk/news/science_and_environment/rss.xml'),
FE('rss', 'dev.by', 'https://dev.by/rss'),
# FE('rss', 'NYT Home Page', 'https://rss.nytimes.com/services/xml/rss/nyt/HomePage.xml'),
FE('rss', 'python PEP', 'https://www.python.org/dev/peps/peps.rss'),
FE('rss', 'tut.by', 'https://news.tut.by/rss/index.rss'),
)
|
python
|
class RoomAlreadyEmpty(Exception):
pass
class CannotAllocateRoom(Exception):
pass
|
python
|
a,b = [int(x) for x in input().split(' ')]
print(str(a+b))
|
python
|
# Source: https://stackoverflow.com/questions/9282967/how-to-open-a-file-using-the-open-with-statement
def filter(txt, oldfile, newfile):
'''\
Read a list of names from a file line by line into an output file.
If a line begins with a particular name, insert a string of text
after the name before appending the line to the output file.
'''
with open(newfile, 'w') as outfile, open(oldfile, 'r', encoding='utf-8') as infile:
for line in infile:
if line.startswith(txt):
line = line[0:len(txt)] + ' - Truly a great person!\n'
outfile.write(line)
# input the name you want to check against
text = input('Please enter the name of a great person: ')
letsgo = filter(text,'Spanish', 'Spanish2')
|
python
|
"""
SLM test for the cortex-to-hippocampus connectivity for individual subfields
usage: $ python s16_cortex_testSLM.py LSUB
"""
import os, sys
import h5py
import numpy as np
from numpy import genfromtxt
# define data directories
ddir = '../data/' # data dir
cordir = '../data/tout_cortex/'
odir = '../data/tout_group'
# final subject list after QC
subjlist = os.path.join(ddir, 'subjectListS900_QC_gr.txt'); # 709 subjects
f = open(subjlist); mylist = f.read().split("\n"); f.close()
mylist = mylist[:-1]
totnum = len(mylist)
labeling_file = '../data/tout_group/glasser.csv'
mylabel = genfromtxt(labeling_file)
print('We now have %i subjects... ' % totnum)
# subfield = 'LSUB'
subfield = sys.argv[1]
# here we go
C360_all = np.zeros((len(mylist), 360))
i = 0
for subjID in mylist:
subjsub= os.path.join(cordir, subjID + '_cortex_%s.h5' % (subfield))
with h5py.File(subjsub, "r") as f:
subjdata = np.array(f[subjID])
C360_all[i, :] = subjdata.T
i +=1
print(C360_all.shape, C360_all.mean(axis=0).max())
# labeling from 360 to 64k points
C64k_all = np.zeros((len(mylist), 64984))
for i in range(0, len(mylist)):
for j in range(1,360+1):
C64k_all[i, np.where(mylabel == j)] = C360_all[i,(j-1)]
print(C64k_all.shape, C64k_all.mean(axis=0).max())
from brainspace.datasets import load_conte69
from brainspace.mesh import mesh_elements
# load poly data for 64k surface (for the test & plotting)
surf_lh, surf_rh = load_conte69()
# write surface coordinates and triangles in a dictionary
lh_coord = np.array(mesh_elements.get_points(surf_lh)).T
rh_coord = np.array(mesh_elements.get_points(surf_rh)).T
lh_tri = np.array(mesh_elements.get_cells(surf_lh))
rh_tri = np.array(mesh_elements.get_cells(surf_rh))
D = {}
D['coord'] = np.concatenate((lh_coord, rh_coord), axis=1) # (3, 64984)
D['tri'] = np.concatenate((lh_tri, rh_tri + lh_coord.shape[1])) # (129960, 3)
# run slm
from brainstat.stats.terms import FixedEffect
from brainstat.stats.SLM import SLM
Y = C64k_all
contrast = np.ones((len(mylist),1))
term_ = FixedEffect(contrast)
model_ = 1 + term_
slm = SLM(model_, contrast = contrast)
slm.fit(Y)
Tvals = slm.t
print(Tvals.shape)
h = h5py.File(os.path.join(odir, 'Tvals_cortex709_%s.h5' % (subfield)), 'w')
h.create_dataset('data', data = Tvals)
h.close()
|
python
|
import csbuilder
from csbuilder.standard import Protocols, Roles, States
@csbuilder.protocols
class SFTProtocols(Protocols):
SFT = 8888
@csbuilder.roles(protocol=SFTProtocols.SFT)
class SFTRoles(Roles):
SENDER = 0
RECEIVER = 1
@csbuilder.states(SFTProtocols.SFT,SFTRoles.SENDER)
class SFTSenderStates(States):
IGNORE = 0
REQUEST = 1
INFO = 2
SEND = 3
DENY = 4
@csbuilder.states(SFTProtocols.SFT, SFTRoles.RECEIVER)
class SFTReceiverStates(States):
IGNORE = 0
ACCEPT = 1
DENY = 2
REQUIRE = 3
SUCCESS = 4
FAILURE = 5
REQUEST = 6
|
python
|
from django.core.management.base import BaseCommand
from django.db.models import Count
from project.pastebin.models import Country
class Command(BaseCommand):
help = 'countries statistics'
def handle(self, *args, **kwargs):
countries = Country.objects.annotate(
pastes_count=Count('users__pastes')).order_by(
'-pastes_count')[:5].values(*['id', 'pastes_count', 'title'])
for country in countries:
print(country)
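# Illustrative shape of each printed row (field values are hypothetical):
#   {'id': 3, 'pastes_count': 17, 'title': 'Canada'}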
|
python
|
"""Hass cmd."""
def breaking_change(number, cli=False):
"""Create breaking_change list for HA."""
import json
import requests
import os
from github import Github
comp_base = "https://www.home-assistant.io/components/"
pull_base = "https://github.com/home-assistant/home-assistant/pull/"
github = Github(os.environ["GHTOKEN"])
repo = github.get_repo("home-assistant/home-assistant.io")
posts = repo.get_dir_contents("source/_posts", "current")
this_post = None
for post in posts:
if "release" in post.path:
name = post.path.split("/")[-1].split(".")[0]
name = name.split("-")
rel_number = name[-1]
if rel_number == number:
this_post = post.html_url
if this_post is None:
print("Release for", number, "not found")
return
url = this_post
url_data = requests.get(url).text.split("\n")
raw_changes = []
changes = {}
changes["version"] = "0.{}.x".format(url.split(".markdown")[0].split("-")[-1])
changes["data"] = []
control = []
for line in url_data:
if "(breaking change)" in line:
raw_changes.append(line)
for change in raw_changes:
if change[0:3] == "<p>":
pass
else:
this = {}
try:
pull = str(change)
pull = pull.split("home-assistant/home-assistant/pull/")[1]
pull = pull.split('"')[0]
except Exception:
pull = None
if pull not in control and pull is not None:
prlink = "{}{}".format(pull_base, pull)
try:
split = '<a href="/home-assistant/home-assistant.io/blob/'
split += "current/components/"
component = str(change)
component = component.split(split)[1]
component = component.split('">')[0]
except Exception:
component = None
doclink = "{}{}".format(comp_base, component)
if len(change.split("<li>")) == 1:
desc = change.split("<li>")[0]
else:
desc = change.split("<li>")[1]
desc = desc.split("(<a ")[0]
desc = desc.replace("</code>", "")
desc = desc.replace('<code class="highlighter-rouge">', "")
desc = desc.replace("\u2019", "`")
desc = desc.replace("\u201c", "")
desc = desc.replace("\u201d", "")
this["pull_request"] = pull
this["prlink"] = prlink
this["component"] = component
this["doclink"] = doclink
this["description"] = desc
changes["data"].append(this)
control.append(pull)
if cli:
data = json.dumps(changes, sort_keys=True, indent=4, ensure_ascii=True)
print(data)
return changes
|
python
|
import logging
import boto3
import os
import pandas as pd
import argparse
from datetime import datetime
from dataactcore.models.domainModels import DUNS
from dataactcore.utils.parentDuns import sam_config_is_valid
from dataactcore.utils.duns import load_duns_by_row
from dataactvalidator.scripts.loader_utils import clean_data
from dataactvalidator.health_check import create_app
from dataactcore.interfaces.db import GlobalDB
from dataactcore.logging import configure_logging
from dataactcore.config import CONFIG_BROKER
import dataactcore.utils.parentDuns
logger = logging.getLogger(__name__)
# CSV column header name in DUNS file
column_headers = [
"awardee_or_recipient_uniqu", # DUNS Field
"registration_date", # Registration_Date
"expiration_date", # Expiration_Date
"last_sam_mod_date", # Last_Update_Date
"activation_date", # Activation_Date
"legal_business_name" # Legal_Business_Name
]
props_columns = {
'address_line_1': None,
'address_line_2': None,
'city': None,
'state': None,
'zip': None,
'zip4': None,
'country_code': None,
'congressional_district': None,
'business_types_codes': []
}
column_mappings = {x: x for x in column_headers + list(props_columns.keys())}
def remove_existing_duns(data, sess):
""" Remove rows from file that already have a entry in broker database. We should only update missing DUNS
Args:
data: dataframe representing a list of duns
sess: the database session
Returns:
a new dataframe with the DUNS removed that already exist in the database
"""
duns_in_file = ",".join(list(data['awardee_or_recipient_uniqu'].unique()))
sql_query = "SELECT awardee_or_recipient_uniqu " +\
"FROM duns where awardee_or_recipient_uniqu = ANY('{" + \
duns_in_file +\
"}')"
db_duns = pd.read_sql(sql_query, sess.bind)
missing_duns = data[~data['awardee_or_recipient_uniqu'].isin(db_duns['awardee_or_recipient_uniqu'])]
return missing_duns
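# Illustrative query produced above for two hypothetical DUNS values:
#   SELECT awardee_or_recipient_uniqu FROM duns
#   where awardee_or_recipient_uniqu = ANY('{123456789,987654321}')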
def clean_duns_csv_data(data):
""" Simple wrapper around clean_data applied just for duns
Args:
data: dataframe representing the data to be cleaned
Returns:
a dataframe cleaned and to be imported to the database
"""
return clean_data(data, DUNS, column_mappings, {})
def batch(iterable, n=1):
""" Simple function to create batches from a list
Args:
iterable: the list to be batched
n: the size of the batches
Yields:
the same list (iterable) in batches depending on the size of N
"""
length = len(iterable)
for ndx in range(0, length, n):
yield iterable[ndx:min(ndx + n, length)]
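# Illustrative behaviour of batch() (not part of the original module):
#   list(batch([1, 2, 3, 4, 5], n=2)) -> [[1, 2], [3, 4], [5]]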
def update_duns_props(df, client):
""" Returns same dataframe with address data updated"
Args:
df: the dataframe containing the duns data
client: the connection to the SAM service
Returns:
a merged dataframe with the duns updated with location info from SAM
"""
all_duns = df['awardee_or_recipient_uniqu'].tolist()
columns = ['awardee_or_recipient_uniqu'] + list(props_columns.keys())
duns_props_df = pd.DataFrame(columns=columns)
# SAM service only takes in batches of 100
for duns_list in batch(all_duns, 100):
duns_props_batch = dataactcore.utils.parentDuns.get_location_business_from_sam(client, duns_list)
# Adding in blank rows for DUNS where location data was not found
added_duns_list = []
if not duns_props_batch.empty:
added_duns_list = [str(duns) for duns in duns_props_batch['awardee_or_recipient_uniqu'].tolist()]
empty_duns_rows = []
for duns in (set(added_duns_list) ^ set(duns_list)):
empty_duns_row = props_columns.copy()
empty_duns_row['awardee_or_recipient_uniqu'] = duns
empty_duns_rows.append(empty_duns_row)
duns_props_batch = duns_props_batch.append(pd.DataFrame(empty_duns_rows))
duns_props_df = duns_props_df.append(duns_props_batch)
return pd.merge(df, duns_props_df, on=['awardee_or_recipient_uniqu'])
def run_duns_batches(file, sess, client, block_size=10000):
""" Updates DUNS table in chunks from csv file
Args:
file: path to the DUNS export file to use
sess: the database connection
client: the connection to the SAM service
block_size: the size of the batches to read from the DUNS export file.
"""
logger.info("Retrieving total rows from duns file")
start = datetime.now()
row_count = len(pd.read_csv(file, skipinitialspace=True, header=None, encoding='latin1', quotechar='"',
dtype=str, names=column_headers, skiprows=1))
logger.info("Retrieved row count of {} in {} s".format(row_count, (datetime.now()-start).total_seconds()))
duns_reader_obj = pd.read_csv(file, skipinitialspace=True, header=None, encoding='latin1', quotechar='"',
dtype=str, names=column_headers, iterator=True, chunksize=block_size, skiprows=1)
for duns_df in duns_reader_obj:
start = datetime.now()
# Remove rows where awardee_or_recipient_uniqu is null
duns_df = duns_df[duns_df['awardee_or_recipient_uniqu'].notnull()]
duns_to_load = remove_existing_duns(duns_df, sess)
duns_count = 0
# Only update database if there are DUNS from file missing in database
if not duns_to_load.empty:
duns_count = duns_to_load.shape[0]
# get address info for incoming duns
duns_to_load = update_duns_props(duns_to_load, client)
duns_to_load = clean_duns_csv_data(duns_to_load)
models = {}
load_duns_by_row(duns_to_load, sess, models, None)
sess.commit()
logger.info("Finished updating {} DUNS rows in {} s".format(duns_count,
(datetime.now()-start).total_seconds()))
def main():
""" Loads DUNS from the DUNS export file (comprised of DUNS pre-2014) """
parser = argparse.ArgumentParser(description='Adding historical DUNS to Broker.')
parser.add_argument('-size', '--block_size', help='Number of rows to batch load', type=int,
default=10000)
args = parser.parse_args()
sess = GlobalDB.db().session
client = sam_config_is_valid()
logger.info('Retrieving historical DUNS file')
start = datetime.now()
if CONFIG_BROKER["use_aws"]:
s3_client = boto3.client('s3', region_name=CONFIG_BROKER['aws_region'])
duns_file = s3_client.generate_presigned_url('get_object', {'Bucket': CONFIG_BROKER['archive_bucket'],
'Key': "DUNS_export_deduped.csv"}, ExpiresIn=10000)
else:
duns_file = os.path.join(CONFIG_BROKER["broker_files"], "DUNS_export_deduped.csv")
if not duns_file:
raise OSError("No DUNS_export_deduped.csv found.")
logger.info("Retrieved historical DUNS file in {} s".format((datetime.now()-start).total_seconds()))
try:
run_duns_batches(duns_file, sess, client, args.block_size)
except Exception as e:
logger.exception(e)
sess.rollback()
logger.info("Updating historical DUNS complete")
sess.close()
if __name__ == '__main__':
configure_logging()
with create_app().app_context():
main()
|
python
|
from datetime import datetime, timedelta
from cymepy.common import DATE_FORMAT
import math
import os
class Solver:
def __init__(self, cymepy, settings, logger):
self.Settings = settings
self._Logger = logger
self.cymepy = cymepy
self._mStepRes = settings['project']['time_step_min']
StartTimeMin = settings['project']['start_time']
EndTimeMin = settings['project']['end_time']
self._Time = datetime.strptime(StartTimeMin, DATE_FORMAT)
self._StartTime = self._Time
self._EndTime = datetime.strptime(EndTimeMin, DATE_FORMAT)
if settings['project']["simulation_type"] == "QSTS":
if self.Settings['profiles']["use_internal_profile_manager"]:
self.solverObj = cymepy.sim.LoadFlowWithProfiles()
self.solverObj.SetValue("SingleTimeMode", "Parameters.TimeParametersMode")
self.loadflowSettings(cymepy.sim.LoadFlow())
else:
self.solverObj = cymepy.sim.LoadFlow()
self.loadflowSettings(self.solverObj)
elif settings['project']["simulation_type"] == "Static":
self.solverObj = cymepy.sim.LoadFlow()
self.loadflowSettings(self.solverObj)
self._Logger.debug("Solver object created.")
return
def loadflowSettings(self, lf):
lf.SetValue('VoltageDropUnbalanced', 'ParametersConfigurations[0].AnalysisMode')
lf.SetValue(self.Settings['project']["max_iter"],
'ParametersConfigurations[0].MaximumIterations')
lf.SetValue(self.Settings['project']["error_tolerance"],
'ParametersConfigurations[0].VoltageTolerance')
return
def increment(self):
if self.Settings['project']["simulation_type"] == "QSTS":
if self.Settings['profiles']["use_profiles"]:
if self.Settings['profiles']["use_internal_profile_manager"]:
self.solverObj.SetValue(int(self._Time.timestamp()), "Parameters.SingleTime")
self.solverObj.Run()
#self._Logger.debug(f"CYME internal time: {self._Time}")
else:
self.solverObj.Run()
self._Time = self._Time + timedelta(minutes=self._mStepRes)
self._Logger.debug(f"CYMEPY time: {self._Time}")
elif self.Settings['project']["simulation_type"] == "Static":
raise Exception("'increment' method cannot be used in QSTS mode")
return
def resolve(self):
self.solverObj.Run()
self._Logger.debug(f"Resolving at time: {self._Time}")
def SimulationSteps(self):
Minutes = (self._EndTime - self._StartTime).total_seconds() / 60.0
Steps = math.ceil(Minutes / self._mStepRes)
return Steps, self._StartTime, self._EndTime
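# Illustrative example (assumed values): a 24-hour run with time_step_min=15
# gives ceil(1440 / 15) = 96 steps.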
def GetTotalSeconds(self):
return (self._Time - self._StartTime).total_seconds()
def GetDateTime(self):
return self._Time
|
python
|
from flask import Flask, render_template, request
from github_api import GithubUser
from pprint import pprint
app = Flask('git connect')
MATCHED_PROFILES = {}
userororg = 'user'
ghuser = None
@app.route('/')
def hello():
return render_template('app/index.html', err='')
@app.route('/login', methods=['GET', 'POST'])
def handle_data():
global ghuser
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
try:
ghuser = GithubUser(username, password)
except Exception:
return render_template('app/index.html', err='Invalid auth')
if MATCHED_PROFILES.get(username, None) is None:
MATCHED_PROFILES[username] = ([], [])
else:
ghuser.active_matches = MATCHED_PROFILES[username][1]
# , username=ghuser.username, location=ghuser.location, bio=ghuser.bio, repocount=ghuser.repo_count, l1=ghuser.lang_name[0], l2=ghuser.lang_name[1], l3=ghuser.lang_name[2]
return render_template('app/profile.html', user_name=ghuser.username, location=ghuser.location, bio=ghuser.bio, repo_count=ghuser.repo_count, lang0=ghuser.lang_name[0][0], lang1=ghuser.lang_name[1][0], lang2=ghuser.lang_name[2][0], match_count=len(MATCHED_PROFILES[ghuser.username][1]), avatar_url=ghuser.avatar)
elif request.method == 'GET':
return render_template('app/index.html')
else:
return 'Please try again, this time using GET or POST'
@app.route('/profile')
def user():
global ghuser
if ghuser is None:
return render_template('app/index.html', err='Please log in')
pprint(MATCHED_PROFILES)
if len(MATCHED_PROFILES[ghuser.username][1]) > 0:
match = MATCHED_PROFILES[ghuser.username][1][0]
match_url = 'https://github.com/%s' % MATCHED_PROFILES[ghuser.username][1][0]
else:
match = ''
match_url = '#'
return render_template('app/profile.html', match=match, match_url=match_url, user_name=ghuser.username, location=ghuser.location, bio=ghuser.bio, repo_count=ghuser.repo_count, lang0=ghuser.lang_name[0][0], lang1=ghuser.lang_name[1][0], lang2=ghuser.lang_name[2][0], match_count=len(MATCHED_PROFILES[ghuser.username][1]), avatar_url=ghuser.avatar)
@app.route('/explore', methods=['GET', 'POST'])
def dislike_love():
global ghuser
if ghuser is None:
return render_template('app/index.html', err='Please log in')
pprint(MATCHED_PROFILES)
if request.method == 'POST':
if request.form['submit'] == 'dislike':
pass # do nothing
elif request.form['submit'] == 'love':
# matched user
muser = ghuser.matches[ghuser.mindex-1]['login']
if MATCHED_PROFILES.get(muser, None) is None:
MATCHED_PROFILES[muser] = ([ghuser.username], [])
elif muser in MATCHED_PROFILES[ghuser.username][0]:
MATCHED_PROFILES[muser][1].append(ghuser.username)
MATCHED_PROFILES[ghuser.username][1].append(muser)
else:
return 'invalid'
match = ghuser.get_match()
murl = 'https://github.com/%s' % match['login']
return render_template("app/explore.html",murl=murl, followers=match['followers'], repos=match['repos'], avatar_url=match['avatar_url'], user_name=match['login'], location=ghuser.location)
elif request.method == 'GET':
match = ghuser.get_match()
murl = 'https://github.com/%s' % match['login']
return render_template("app/explore.html",murl=murl, followers=match['followers'], repos=match['repos'], avatar_url=match['avatar_url'], user_name=match['login'], location=ghuser.location)
else:
return 'Please try again using GET or POST'
if __name__ == '__main__':
app.run()
|
python
|
import urllib2
import logging
from random import choice, randint
from os.path import exists
from time import sleep
from os import getenv
logging.basicConfig(
format='[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s',
datefmt="%d/%b/%Y %H:%M:%S",
level=getenv('LOG_LEVEL', logging.DEBUG)
)
logger = logging.getLogger(__name__)
class EasyScrapper(object):
"""
Simple and fast scraper base object for implementing dataset scrapers.
It provides a set of methods to make scraping faster and better.
Support:
- User Agents
- Web Proxies
"""
DEFAULT_USER_AGENTS = [
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1',
'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
'Mozilla/5.0 (iPad; CPU OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5355d Safari/8536.25',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'
]
DEFAULT_PROXIES = [
'97.77.104.22:80',
'188.213.143.119:8118',
'47.88.137.179:8080',
'12.41.141.10:8080'
]
def __init__(self, proxies_filename='./proxies.txt', user_agents_filename='./user_agents.txt'):
"""
Initialize the object and all the needed resources.
:param proxies_filename filename of the proxy list, one proxy per line
:param user_agents_filename filename of the user agents list, one user agent per line
"""
self.proxies = self.load_data_list(proxies_filename, self.DEFAULT_PROXIES)
self.user_agents = self.load_data_list(user_agents_filename, self.DEFAULT_USER_AGENTS)
def load_data_list(self, filename, defaults):
"""
Load an array from a file that contains one value per line. For example:
```
a
b
c
```
will return
['a', 'b', 'c']
:param filename the filename containing the list
:param defaults the default list if the file does not exist.
:returns the array with all the loaded elements
"""
logger.info("Loading data from {}...".format(filename))
all_data = []
if exists(filename):
with open(filename, 'r+') as fp:
data = fp.read()
all_data = filter(None, data.split("\n"))
else:
return defaults
return all_data
def save_data_list(self, filename, data):
"""
Save a list into a folder, one element per line.
:param filename the filename to save/create/override
:param data the list to save
"""
logger.info("Saving data to {}...".format(filename))
with open(filename, 'w+') as fp:
fp.write("\n".join(data))
def sleep(self, seconds_from, seconds_to):
"""
Sleep a random number of seconds between seconds_from and seconds_to. For example:
self.sleep(2, 30) will sleep randomly between 2 and 30 seconds.
:param seconds_from lower limit for the seconds to sleep
:param seconds_to upper limit for the seconds to sleep
"""
time_to_sleep = randint(seconds_from, seconds_to)
logger.info("Going to sleep for {} seconds...".format(time_to_sleep))
sleep(time_to_sleep)
def download_data(self, url, referer='http://www.google.com/', use_proxy=False, retries=1):
"""
Download all the data from the url faking the referer and user-agent. This method has the
power to use proxies and perform retries if the download fails.
:param url the url of the file to download
:param referer the url to send as referer (Identifies the address of the webpage that linked to the resource being requested)
:param use_proxy if TRUE it will download the resource using a proxy listed in the proxies file, if FALSE it will download it directly.
:param retries is the number of retries to try to download the resource if fails.
:returns the url data
"""
iteration = 1
while iteration <= retries:
try:
the_proxy = choice(self.proxies)
if use_proxy:
logger.info("Downloading {} through {} and retry {}/{} times.".format(url, the_proxy, iteration, retries))
else:
logger.info("Downloading {} and retry {}/{} times.".format(url, iteration, retries))
if use_proxy:
# Enable Proxies
urllib2.install_opener(
urllib2.build_opener(
urllib2.ProxyHandler({'http': the_proxy})
)
)
req = urllib2.Request(url, headers={
'referer': referer,
'User-Agent': choice(self.user_agents)
})
data = urllib2.urlopen(req).read()
if use_proxy:
# Disable all proxies
urllib2.install_opener(
urllib2.build_opener(
urllib2.ProxyHandler({})
)
)
return data
except Exception:
iteration += 1
logger.error("Download failed. Retry: {}".format(iteration))
raise Exception("Download failed: {}".format(url))
def start(self, *args, **kwargs):
"""
Method to override and create all the needed logic you need.
"""
raise NotImplementedError
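# Illustrative subclass sketch (hypothetical names and URL, not part of the original):
#   class MyScraper(EasyScrapper):
#       def start(self, *args, **kwargs):
#           html = self.download_data('http://example.com/', use_proxy=True, retries=3)
#           self.sleep(2, 30)
#           return html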
|
python
|
"""
polarAWB.py
Copyright (c) 2022 Sony Group Corporation
This software is released under the MIT License.
http://opensource.org/licenses/mit-license.php
"""
import json
from pathlib import Path
import shutil
import numpy as np
from myutils.imageutils import MAX_16BIT, my_read_image, my_write_image
from myutils.datautils import macbeth_position_txt_parse, compute_gt_illum, calc_ang_error
import myutils.polarutils as plutil
import myutils.weighturils as weutil
import myutils.wbutils as wbutil
if __name__ == "__main__":
params = json.load(open("parameters.json", "r"))
input_path = Path("images").joinpath(params["input_folder"])
result_path = Path("results").joinpath(input_path.name)
result_path.mkdir(parents=True, exist_ok=True)
shutil.copy("parameters.json", result_path)
with open(input_path.joinpath("macbeth_position.txt"), "r") as f:
lines = f.readlines()
for line in lines:
scene_name, x, y, w, h = macbeth_position_txt_parse(line)
imean_path = input_path.joinpath("{}_imean.png".format(scene_name))
i000_path = input_path.joinpath("{}_i000.png".format(scene_name))
i045_path = input_path.joinpath("{}_i045.png".format(scene_name))
i090_path = input_path.joinpath("{}_i090.png".format(scene_name))
i135_path = input_path.joinpath("{}_i135.png".format(scene_name))
macbeth_path = input_path.joinpath("{}_macbeth.png".format(scene_name))
imean = my_read_image(imean_path) / MAX_16BIT
i000 = my_read_image(i000_path) / MAX_16BIT
i045 = my_read_image(i045_path) / MAX_16BIT
i090 = my_read_image(i090_path) / MAX_16BIT
i135 = my_read_image(i135_path) / MAX_16BIT
macbeth = my_read_image(macbeth_path)
s0, s1, s2 = plutil.calc_s0s1s2_from_fourPolar(i000, i045, i090, i135)
dolp = plutil.calc_dolp_from_s0s1s2(s0, s1, s2)
aolp = plutil.calc_aolp_from_s1s2(s1, s2)
# Weights
w_valid = weutil.valid_weight_fourPolar(i000, i045, i090, i135, th=params["valid_th"])
w_dolp = weutil.sigmoid(
np.mean(dolp, axis=2), alpha=params["w_dolp_a"], center=params["w_dolp_b"])
w_dolp_ach = weutil.rg_bg_sigmoid_weight_achromatic(
dolp, alpha=params["w_dolp_ach_a"], center=params["w_dolp_ach_b"], normalize=True)
w_aolp_ach = weutil.rg_bg_sigmoid_weight_achromatic_phase(
aolp, alpha=params["w_aolp_ach_a"], center=params["w_aolp_ach_b"])
w_dolp_ch = weutil.rg_bg_sigmoid_weight_chromatic(
dolp, alpha=params["w_dolp_ch_a"], center=params["w_dolp_ch_b"], normalize=True)
w_aolp_ch = weutil.rg_bg_sigmoid_weight_achromatic_phase(
aolp, alpha=params["w_aolp_ch_a"], center=params["w_aolp_ch_b"])
weight_achromatic = w_valid * w_dolp * w_dolp_ach * w_aolp_ach
weight_chromatic = w_valid * w_dolp * w_dolp_ch * w_aolp_ch
# WB.
illum_est = wbutil.polarAWB(dolp, imean, weight_achromatic, weight_chromatic, params["alpha"])
# Compute Error.
illum_gt = compute_gt_illum(macbeth, x, y, w, h)
err_deg = calc_ang_error(illum_est, illum_gt)
with open(result_path.joinpath("error.txt"), "a") as f2:
f2.write("{}'s Error: {:.3f}\n".format(scene_name, err_deg))
# Save White-balanced Images.
macbeth_wb = np.copy(imean)
polar_wb = np.copy(imean)
polar_wb[..., 0] /= illum_est[..., 0]
polar_wb[..., 2] /= illum_est[..., 2]
polar_wb = np.clip(polar_wb, 0, 1) * MAX_16BIT
my_write_image(result_path.joinpath("{}_PolarWB.png".format(scene_name)), polar_wb)
r_gain = illum_gt[1] / illum_gt[0]
b_gain = illum_gt[1] / illum_gt[2]
macbeth_wb[..., 0] *= r_gain
macbeth_wb[..., 2] *= b_gain
macbeth_wb = np.clip(macbeth_wb, 0, 1) * MAX_16BIT
my_write_image(result_path.joinpath("{}_MacbethWB.png".format(scene_name)), macbeth_wb)
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import OrderedDict, defaultdict
from traceback import print_exc
import wx
from wx import EVT_MENU
from .Controls import CheckBox, RadioButton, Row, StaticText
class FormDialog(wx.Dialog):
def __init__(
self,
parent,
panel=None,
title="Unnamed Dialog",
modal=False,
sizes=(-1, -1),
offset=None,
gap=3,
position=None,
**kwargs
):
wx.Dialog.__init__(
self, parent, -1, title, style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER
)
if panel is not None:
self.SetTitle(title)
self.panel = panel(self, gap=gap, **kwargs)
self.panel.SetSizeHints(*sizes)
ds = wx.GridBagSizer(self.panel.gap, self.panel.gap)
ds.Add(self.panel, (0, 0), (1, 1), wx.EXPAND | wx.ALL, self.panel.gap)
ds.Add(
wx.StaticLine(self),
(1, 0),
(1, 1),
wx.EXPAND | wx.RIGHT | wx.LEFT,
self.panel.gap,
)
if "AddButtons" in self.panel.form:
self.bs = wx.GridBagSizer()
self.bs.AddGrowableCol(0)
for col, (label, wx_id) in enumerate(self.panel.form["AddButtons"].items(), start=1):
button = wx.Button(self, label=label, id=wx_id)
self.bs.Add(button, (0, col))
if hasattr(self.panel, f"on{label}"):
self.Bind(wx.EVT_BUTTON, getattr(self.panel, f"on{label}"), id=wx_id)
else:
self.bs = self.CreateButtonSizer(
self.panel.form.get("Buttons", wx.OK | wx.CANCEL)
)
self.Bind(wx.EVT_BUTTON, self.panel.onOk, id=wx.ID_OK)
self.Bind(wx.EVT_BUTTON, self.panel.onClose, id=wx.ID_CANCEL)
ds.Add(self.bs, (2, 0), (1, 1), wx.ALIGN_RIGHT | wx.ALL, self.panel.gap)
ds.AddGrowableCol(0)
ds.AddGrowableRow(0)
self.SetSizerAndFit(ds)
if position is None:
self.Center()
else:
self.SetPosition(position)
if offset:
newpos = map(lambda x: x + offset, self.GetPosition())
self.SetPosition(wx.Point(*newpos))
for wrapper in self.panel.elements.values():
if not isinstance(wrapper, (RadioButton, CheckBox, StaticText)):
wrapper.SetFocus()
break
if modal:
self.res = self.ShowModal()
else:
self.Show()
def FocusNext(self):
for child in reversed(wx.GetTopLevelWindows()[0].GetChildren()):
if isinstance(child, FormDialog) and child is not self:
child.Raise()
break
self.Destroy()
class Form(wx.Panel):
# Flags for containers.
D = DEFAULT_FLAGS = 0
G = GROWABLE = 1
NC = NO_CONTAINER = 2
R = RIGHT_ALIGN = 4
VC = VERTICAL_ENTER = wx.EXPAND | wx.ALL
def __init__(
self, parent=None, id=-1, gap=3, sizes=(-1, -1), *args
): # @ReservedAssignment
wx.Panel.__init__(self, parent, id)
self.SetSizeHints(*sizes)
self.gap = gap
self.elements = OrderedDict([])
self.ATables = defaultdict(list)
if hasattr(self, "form"):
# Before building verify that several required sections exist in the form
# definition object.
if "Defaults" not in self.form:
self.form["Defaults"] = {}
if "Disabled" not in self.form:
self.form["Disabled"] = []
if "Validators" not in self.form:
self.form["Validators"] = {}
if "Options" not in self.form:
self.form["Options"] = {}
# Allow sub classes to add their own values or defaults.
self.loadDefaults()
self.loadOptions()
self.build()
if sizes == (-1, -1):
self.Parent.SetSize(self.Parent.GetBestVirtualSize())
if "Title" in self.form and hasattr(parent, "SetTitle"):
parent.SetTitle(self.form["Title"])
self.bind()
def __iter__(self):
return ((k, self[k]) for k in self.elements.keys())
def __getitem__(self, key):
try:
return self.h2m(key, self.elements[key].GetValue())
except:
return
def __setitem__(self, key, value=""):
try:
return self.elements[key].SetValue(self.m2h(key, value))
except:
print_exc()
def HumanToMachine(self, name, value=""):
if "Translations" in self.form:
if name in self.form["Translations"]:
value = self.form["Translations"][name][1].get(value, value)
return value
h2m = HumanToMachine
def MachineToHuman(self, name, value=""):
if "Translations" in self.form:
if name in self.form["Translations"]:
value = self.form["Translations"][name][0].get(value, value)
return value
m2h = MachineToHuman
def Bind(self, evtType, evtFunc, evtSrc, call=False, *args, **kwargs):
"""
I rewrote Bind a little bit to simplify binding events using the names
that you assign to individual elements. The call signature is the
same; the name-lookup behaviour only kicks in when you pass a string (an
element name) as the event source, so it shouldn't affect existing Bind calls.
"""
if isinstance(evtSrc, str):
evtSrc = self.elements[evtSrc]
# if isinstance(evtType, wx.CommandEvent):
evtSrc.Bind(evtType, evtFunc)
# else:
# super(Form, self).Bind(evtType, evtFunc, evtSrc, *args, **kwargs)
if call:
evtFunc()
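# Illustrative call (hypothetical element name and handler): binds by the name
# given to a form element rather than by widget reference:
#   self.Bind(wx.EVT_TEXT, self.onNameChanged, "Name")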
def Accel(self, key, func, elem, kind=wx.ACCEL_NORMAL):
"""
This convenience function is provided to simplify Accelerator Table
creation. It builds Accelerator Tables over repeated calls for
the windows indicated by `elem`. The tables will be set in the
bind method (the default behavior).
"""
self.ATables[elem].append((kind, key, func))
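# Illustrative call (hypothetical element name and handler): F5 triggers a
# refresh while the "Grid" element has focus; the table is installed in bind():
#   self.Accel(wx.WXK_F5, self.onRefresh, "Grid")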
def build(self):
"""
The Build Method automates sizer creation and element placement by parsing
a properly constructed object.
"""
# The Main Sizer for the Panel.
panelSizer = wx.BoxSizer(wx.VERTICAL)
# Pass the outermost Parts and the container to the OrderedDict Parser.
self.parseContainer(self.form["Parts"], panelSizer)
self.SetSizerAndFit(panelSizer)
def bind(self):
# Attempt to accommodate non-dialog parents.
if not isinstance(self.Parent, FormDialog):
self.Parent.Bind(wx.EVT_CLOSE, self.onClose)
for name, table in self.ATables.items():
if table:
at = []
for kind, key, func in table:
at.append((kind, key, key))
EVT_MENU(self.elements[name], key, func)
self.elements[name].SetAcceleratorTable(wx.AcceleratorTable(at))
def parseContainer(self, container, outerSizer, pos=None, span=None):
sectionSizer = wx.BoxSizer(wx.VERTICAL)
for section in container.items():
region, proportion = self.parseSection(section)
sectionSizer.Add(region, proportion, flag=Form.VC, border=self.gap)
if isinstance(outerSizer, wx.GridBagSizer):
outerSizer.Add(
sectionSizer, pos, span, border=self.gap, flag=wx.ALIGN_CENTER_VERTICAL
)
if proportion:
row, col = pos
outerSizer.AddGrowableRow(row)
outerSizer.AddGrowableCol(col)
else:
outerSizer.Add(sectionSizer, 1, flag=Form.VC, border=self.gap)
def parseSection(self, section):
container, blocks = section
if isinstance(container, tuple):
display, flags = container
else:
# String instead of tuple.
flags = Form.D
display = container
self.flags = flags
sizerProportion = 1 if flags & Form.G else 0
if flags & Form.NC:
sectionSizer = wx.BoxSizer(wx.VERTICAL)
else:
box = wx.StaticBox(self, -1, display)
sectionSizer = wx.StaticBoxSizer(box, wx.VERTICAL)
for block in blocks:
self.parseBlock(block, sectionSizer)
return sectionSizer, sizerProportion
def parseBlock(self, block, sectionSizer):
"""
The form structure is a list of rows (blocks) in the form. Each row
consists of a single element, a row of elements, or a sub-grid of
elements. These are represented by dictionaries, tuples, or lists,
respectively and are each processed differently.
"""
proportion = 0
if isinstance(block, OrderedDict):
return self.parseContainer(block, sectionSizer)
if isinstance(block, list):
item = self.makeGrid(block)
elif isinstance(block, (tuple, Row)):
proportion = getattr(block, "proportion", proportion)
item = self.makeRow(block)
else:
proportion = block.proportion
item = self.makeWidget(block)
sectionSizer.Add(item, proportion, flag=Form.VC, border=self.gap)
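# Illustrative block shapes handled above (element names are hypothetical):
#   OrderedDict   -> nested container, parsed recursively via parseContainer
#   list          -> grid of elements via makeGrid, e.g. [[name_lbl, name_txt], [age_lbl, age_txt]]
#   tuple / Row   -> one horizontal row via makeRow, e.g. (ok_btn, cancel_btn)
#   anything else -> a single widget via makeWidget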
def makeRow(self, fields):
"""
In the form structure a tuple signifies a row of elements. These items
will be arranged horizontally without dependency on other rows. Each
item may provide a proportion property which can cause that element to
expand horizontally to fill space.
"""
sizer = wx.BoxSizer(wx.HORIZONTAL)
for field in fields:
self.parseBlock(field, sizer)
return sizer
def makeGrid(self, rows):
"""
In the form structure a list signifies a grid of elements (equal width
columns, rows with similar numbers of elements, etc).
"""
sizer = wx.GridBagSizer(0, 0)
for row, fields in enumerate(rows):
for col, field in enumerate(fields):
# Each item may specify that its row or column 'grow' or expand to fill
# the available space in the form. Spans or specific positions are also
# possible.
flags = getattr(field, "flags", wx.ALL)
rowGrowable = getattr(field, "rowGrowable", False)
colGrowable = getattr(field, "colGrowable", True)
span = getattr(field, "span", (1, 1))
pos = (
getattr(field, "rowpos", row) or row,
getattr(field, "colpos", col) or col,
)
if isinstance(field, OrderedDict):
self.parseContainer(field, sizer, pos, span)
else:
element = self.makeWidget(field)
sizer.Add(
element,
pos,
span,
border=self.gap,
flag=wx.ALIGN_CENTER_VERTICAL | flags,
)
if (
rowGrowable
and row < sizer.GetRows()
and not sizer.IsRowGrowable(row)
):
sizer.AddGrowableRow(row)
if (
colGrowable
and col < sizer.GetCols()
and not sizer.IsColGrowable(col)
):
sizer.AddGrowableCol(col)
return sizer
def makeWidget(self, declarator):
"""
This function actually creates the widgets that make up the form.
Each element should provide a `make` method which takes as an argument
its parent, and returns a wx item (sizer, form element, etc).
Other methods for each widget (defined with placeholders on
the wxPlaceholder Class) are
GetValue
SetValue
SetValidator
SetOptions
"""
# Attach the elements container to the declarator.
declarator._elements = self.elements
element = declarator.make(self)
if declarator.name:
self.elements[declarator.name] = declarator
# Disable if requested.
if declarator.name in self.form["Disabled"]:
declarator.Enable(False)
# Options need to exist early.
if hasattr(declarator, "SetOptions"):
declarator.SetOptions(self.form["Options"].get(declarator.name, []))
# We need to use the existing value if there isn't one in defaults
# to prevent StaticText's from ending up blank.
value = self.form["Defaults"].get(declarator.name, declarator.GetValue())
# Assign or populate any fields requiring it.
declarator.SetValue(self.m2h(declarator.name, value))
declarator.SetValidator(self.form["Validators"].get(declarator.name, None))
return element
def loadDefaults(self):
pass
def loadOptions(self):
pass
def onOk(self, evt):
evt.Skip()
self.onClose(evt)
def onClose(self, evt):
evt.Skip()
if isinstance(self.Parent, FormDialog):
self.Parent.FocusNext()
def fieldValidate(self):
if "Validators" not in self.form:
return True
success, messages = True, []
for name, field in self.elements.items():
if name in self.form["Validators"]:
s, m = field.Validate()
if not s:
success = False
messages.extend(m)
if messages:
text = "\r\n".join(messages)
wx.MessageDialog(self, text, "Form Field Error", wx.OK).ShowModal()
return success
if __name__ == "__main__":
from src.pyform.Demos import (
DemoForm,
DemoFormGrowable,
DemoNested,
DemoNestedHorizontal,
ComplicatedDemo,
ComprehensiveDemo,
AlternateDeclaration,
GridDemos,
DemoLeftStacked,
NonDialog,
)
app = wx.PySimpleApp()
f = wx.Frame(None)
NonDialog(f)
f.Show()
FormDialog(parent=f, panel=DemoForm)
FormDialog(parent=f, panel=DemoFormGrowable)
FormDialog(parent=f, panel=DemoNested)
FormDialog(parent=f, panel=DemoNestedHorizontal)
FormDialog(parent=f, panel=ComplicatedDemo)
FormDialog(parent=f, panel=ComprehensiveDemo)
FormDialog(parent=f, panel=AlternateDeclaration)
FormDialog(parent=f, panel=GridDemos)
FormDialog(parent=f, panel=DemoLeftStacked, gap=1)
app.MainLoop()
|
python
|
'''
Integration Test for creating KVM VM with all nodes shutdown and recovered.
@author: Quarkonics
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header
import test_stub
import time
import os
vm = None
def test():
global vm
cmd = "init 0"
host_username = os.environ.get('nodeUserName')
host_password = os.environ.get('nodePassword')
zstack_ha_vip = os.environ.get('zstackHaVip')
node1_ip = os.environ.get('node1Ip')
test_util.test_logger("shutdown node: %s" % (node1_ip))
rsp = test_lib.lib_execute_ssh_cmd(node1_ip, host_username, host_password, cmd, 180)
node2_ip = os.environ.get('node2Ip')
test_util.test_logger("shutdown node: %s" % (node2_ip))
rsp = test_lib.lib_execute_ssh_cmd(node2_ip, host_username, host_password, cmd, 180)
test_util.test_logger("recover node: %s" % (node1_ip))
os.system('bash -ex %s %s' % (os.environ.get('nodeRecoverScript'), node1_ip))
test_util.test_logger("recover node: %s" % (node2_ip))
os.system('bash -ex %s %s' % (os.environ.get('nodeRecoverScript'), node2_ip))
test_util.test_dsc('Delete /var/lib/zstack/ha/ha.yaml, recover ha with zstack-ctl recover_ha, expect to fail')
cmd = "rm /var/lib/zstack/ha/ha.yaml"
rsp = test_lib.lib_execute_ssh_cmd(node1_ip, host_username, host_password, cmd, 180)
if not rsp:
rsp = test_lib.lib_execute_ssh_cmd(node2_ip, host_username, host_password, cmd, 180)
cmd = "zstack-ctl recover_ha"
rsp = test_lib.lib_execute_ssh_cmd(node1_ip, host_username, host_password, cmd, 180)
if not rsp:
rsp = test_lib.lib_execute_ssh_cmd(node2_ip, host_username, host_password, cmd, 180)
if rsp == False:
test_util.test_logger("Cannot recover ha without /var/lib/zstack/ha/ha.yaml when use zstack-ctl recover_ha, expect to False")
else:
test_util.test_fail('Expected failure, but zstack-ctl recover_ha succeeded without /var/lib/zstack/ha/ha.yaml')
test_util.test_dsc('Recover with zstack-ctl install_ha, expect to pass')
cmd = "zstack-ctl install_ha --host1-info %s:%s@%s --host2-info %s:%s@%s --vip %s --recovery-from-this-host" % \
(host_username, host_password, node1_ip, host_username, host_password, node2_ip, zstack_ha_vip)
rsp = test_lib.lib_execute_ssh_cmd(node1_ip, host_username, host_password, cmd, 180)
if not rsp:
rsp = test_lib.lib_execute_ssh_cmd(node2_ip, host_username, host_password, cmd, 180)
time.sleep(180)
test_stub.exercise_connection(600)
vm = test_stub.create_basic_vm()
vm.check()
vm.destroy()
test_util.test_pass('Create VM Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
global vm
if vm:
try:
vm.destroy()
except:
pass
|
python
|
notebook_list = list()
first_entry=int(input("Enter your first value in the list: "))
second_entry=int(input("Enter your second value in the list: "))
notebook_list.append(first_entry)
notebook_list.append(second_entry)
# for i in range(5):
# val=(int(input("enter a value ")))
# arr.append(val)
# print (arr)
choice = input("If you want to add an entry press 1, If you want to delete an entry press 2, If you want to update an entry press 3 ")
def addition(entry):
notebook_list.append(entry)
return print(notebook_list)
def removal(entry_index):
notebook_list.remove(notebook_list[entry_index])
return print(notebook_list)
def update(val_old, val_new, notebook_list):
for item in range(0, len(notebook_list)):
if notebook_list[item] == val_old:
notebook_list[item] = val_new
print(notebook_list)
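# Illustrative behaviour (assuming notebook_list == [1, 5, 3]):
#   update(5, 9, notebook_list) prints [1, 9, 3]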
if choise == "1":
new_entry=(int(input("Enter the entry you wish to add")))
addition(new_entry)
elif choise == "2":
entry_index=(int(input("Which entry number do you want to delete. NOTE: 0 is the first element")))
removal(entry_index)
elif choise == "3":
val_old = int(input("Type the entry you want to change!"))
val_new = int(input("What do you want to change it with?"))
update(val_old, val_new, notebook_list)
|
python
|
#!/usr/bin/python
#partially based on: http://john.nachtimwald.com/2009/08/15/qtextedit-with-line-numbers/ (MIT license)
from __future__ import print_function
import sys, os, subprocess
from ..share import (Share, Signal, dbg_print, QtCore, QtGui, QtSvg, temp_dir)
##LMY: from highlighter import PythonHighlighter
class Editor(QtGui.QPlainTextEdit):
headerText = 'Edit'
prevCursorPos = -1
currentLineColor = None
editBecomesActive = Signal()
specialSaveFileName = None
fileName = None
highlighter = None
pointSizeF = 11.0
cursorWidth = 8
def __init__(self, book=None, **kw):
self.book = book
QtGui.QPlainTextEdit.__init__(self, **kw)
self.lineNumberArea = self.LineNumberArea(self)
self.viewport().installEventFilter(self)
self.newDocument = True
self.path = ''
css = '''
QPlainTextEdit {
font-family: monospace;
font-size: 10;
color: black;
background-color: white;
selection-color: white;
selection-background-color: #437DCD;
}'''
self.setStyleSheet(css)
font = self.font()
font.setPointSize(self.pointSizeF)
self.setFont(font)
self.setCursorWidth(self.cursorWidth)
self.setWindowTitle('title')
self.textChanged.connect(self.handleTextChanged)
self.editBecomesActive.connect(self.handleTextChanged)
self.setLineWrapMode(QtGui.QPlainTextEdit.NoWrap)
self.cursorPositionChanged.connect(self.handleCursorMove)
self.originalText = None
self.haveLoadedFile = False
def Quote(self):
tC = self.textCursor()
c0 = '#' # dummy non-match!
while c0 not in "ABCDEFG":
tC.movePosition(tC.Left, tC.KeepAnchor)
sel = tC.selectedText()
c0 = sel[0]
tC.removeSelectedText()
tC.insertText('"'+ sel +'"')
def handleCursorMove(self):
self.book.counted = self.book.latency
return
def moveToRowCol(self, row=1, col=0):
block = self.document().findBlockByLineNumber (row-1)
desiredPosition = block.position() + col
dbg_print ('AbcEditor.moveToRowCol', row, col,
'desiredPosition', desiredPosition)
tc = self.textCursor()
tc.setPosition(desiredPosition)
self.setTextCursor(tc)
self.setFocus()
if self.highlighter:
self.highlighter.rehighlight()
def highlight(self, tc):
# n.b. unfortunate name - no relation to highlighter!
blockNumber = tc.blockNumber()
# Common.blockNumber = blockNumber
col0 = col = tc.positionInBlock()
l = tc.block().length()
dbg_print ("autoTrack", l)
blockText = tc.block().text()
if 0: # under review since new approach to syntax highlighting:
while col and ((col >= (l-1))
or not (str(blockText[col]).lower() in 'abcdefg^_=')):
col -= 1
dbg_print ('editor.highlight: row=%d, col=%d' %(blockNumber, col))
self.book.settledAt.emit(blockNumber+1, col)
if 0: # under review since new approach to syntax highlighting:
hi_selection = QtGui.QTextEdit.ExtraSelection()
hi_selection.format.setBackground(self.palette().alternateBase())
hi_selection.format.setProperty(QtGui.QTextFormat.FullWidthSelection,
True)
if self.currentLineColor is not None:
hi_selection.format.setBackground(self.currentLineColor)
#setFontUnderline(True)
hi_selection.cursor = tc
self.setExtraSelections([hi_selection])
hi_selection.cursor.clearSelection()
def handleTextChanged(self):
self.book.counted = self.book.latency
dbg_print ('handleTextChanged', self.book.counted)
def handleLull(self, force=False):
if force or self.document().isModified():
dbg_print ("autoSave")
split = os.path.split(self.fileName)
fileName = 'autosave_'.join(split)
self.saveFile(
fileName=temp_dir+ '/autosave_' + os.path.split(self.fileName)[1])
tc = self.textCursor()
position = tc.position()
if position != self.prevCursorPos:
self.prevCursorPos = position
self.highlight(tc)
if self.highlighter:
self.highlighter.rehighlight()
def newFile(self, fileName='new.abc'):
self.clear()
self.setFileName(fileName)
self.book.fileLoaded.emit(self, fileName)
def closeFile(self):
self.clear()
self.haveLoadedFile = False
def cloneAnyFile(self):
fileName = QtGui.QFileDialog.getOpenFileName(self,
"Choose a data file",
'', '*.abc')[0]
dbg_print ("cloneAnyFile 2", fileName)
self.loadFile(fileName, newInstance=True)
def restart(self):
self.loadFile(self.fileName)
sys.exit(0)
def loadFile(self, fileName, newInstance=None, row=1, col=0):
dbg_print ("Editor.loadFile", fileName, newInstance, row, col)
if newInstance is None:
newInstance = False # self.haveLoadedFile
if newInstance:
dbg_print("need to create new instance for", fileName)
sys.argv[1:] = fileName,
subprocess.Popen(sys.argv)
return
self.setFileName(fileName)
f = QtCore.QFile(fileName)
if not f.open(QtCore.QFile.ReadOnly | QtCore.QFile.Text):
return
self.highlighter = None # default, half-expecting to be overwritten by per-extension handler
self.book.fileLoaded.emit(self, fileName)
self.readAll(f)
f.close()
dbg_print ("Loaded %s" % fileName)
self.moveToRowCol(row, col) # primarily to gain focus!
# self.document().setModified(True) # force rewrite of Score
self.book.fileSaved.emit(fileName) # ???
def setFileName(self, fileName=None):
if fileName is not None:
self.fileName = fileName
title = "%s - %s" % (self.headerText, os.path.abspath(self.fileName))
dbg_print (title)
# self.book.dock.setWindowTitle(title)
self.haveLoadedFile = True
_dirname, _endname = os.path.split(self.fileName)
if _dirname:
os.chdir(_dirname)
def readAll(self, f):
dbg_print ('readAll', self, f)
stream = QtCore.QTextStream(f)
text = stream.readAll()
self.setPlainText(text)
def saveFile(self, fileName=None,):
self.specialSaveFileName = fileName # None if save is requested by user as opposed to temporary for score generation
if fileName is None:
fileName = self.fileName
if fileName is None:
return
#f = QtCore.QFile(fileName)
out = open(fileName, 'w')
if not out:
return
self.writeAll(out)
out.close()
dbg_print ("Saved %s " % fileName)
self.document().setModified(False)
self.book.fileSaved.emit(fileName)
return
def transpose(self):
semitones, ok = QtGui.QInputDialog.getInteger(self,
"Transpose (automatic clef change(s))",
"semitones (+/- for up/down:)", 0, -24, 24, 1)
if not ok:
return
newFileName, ok = QtGui.QFileDialog.getSaveFileName(self, "write tansposed to file",
"transposed.abc",
"(*.abc)")
if not ok:
return
transposedText = Share.abcRaft.abc2abc.process(self.fileName,
transpose=semitones)
with open(newFileName, 'w') as transposed_file:
transposed_file.write(transposedText)
self.book.openThemAll((newFileName,))
def writeAll(self, out):
text = self.toPlainText()
# dbg_print('len(text)=', len(text))
out.write(text)
def reloadFile(self):
dbg_print ("ReloadFile", self.fileName)
self.loadFile(self.fileName)
def saveFileAs(self, fileName=None, show=True):
"""
save the current panel contents to a new file.
"""
if fileName is None:
files = QtGui.QFileDialog.getSaveFileName(self,
"Save source to file as", '', '*.abc')
if not files:
return
fileName = files[0]
if show:
self.setFileName(fileName)
self.saveFile()
self.book.setTabText(self.book.currentIndex(), os.path.split(fileName)[1])
def resizeEvent(self,e):
self.lineNumberArea.setFixedHeight(self.height())
QtGui.QPlainTextEdit.resizeEvent(self,e)
def eventFilter(self, object, event):
if object is self.viewport():
self.lineNumberArea.update()
return False
return QtGui.QPlainTextEdit.eventFilter(self, object, event)
def keyPressEvent(self, event):
"""Reimplement Qt method"""
key = event.key()
# print (type(event))
meta = event.modifiers() & QtCore.Qt.MetaModifier
ctrl = event.modifiers() & QtCore.Qt.ControlModifier
shift = event.modifiers() & QtCore.Qt.ShiftModifier
plain = not (meta or ctrl or shift)
if key == QtCore.Qt.Key_Insert and plain:
self.setOverwriteMode(not self.overwriteMode())
if key == QtCore.Qt.Key_Tab and plain and self.highlighter:
return self.autoComplete(event)
else:
QtGui.QPlainTextEdit.keyPressEvent(self, event)
def autoComplete(self, event):
print ('autoComplete')
tc = self.textCursor()
snippet = self.highlighter.getSnippet(tc)
for i, piece in enumerate(snippet):
tc.insertText(piece)
if i==0:
pos = tc.position()
tc.setPosition(pos)
self.setTextCursor(tc)
def getSnippet(self, tc): #------ Drag and drop
col0 = col = tc.positionInBlock()
block = tc.block()
l = block.length()
print("ABC get snippet", l)
blockText = block.text()
while col and ((col >= (l - 1))
or not (str(blockText[col - 1]) in ' |!]')):
tc.deletePreviousChar()
col -= 1
key = blockText[col:col0]
print("autoComplete key %d:%d '%s'" % (col, col0, key))
return self.snippets.get(key, ("!%s!" % key,))
def dragEnterEvent(self, event):
"""Reimplement Qt method
Inform Qt about the types of data that the widget accepts"""
source = event.mimeData()
if source.hasUrls():
if 1: #mimedata2url(source, extlist=EDIT_EXT):
print ("dragEnterEvent", "hasUrls")
event.acceptProposedAction()
else:
event.ignore()
elif source.hasText():
print ("dragEnterEvent", "hasText")
event.acceptProposedAction()
else:
event.ignore()
def dragMoveEvent(self, event):
event.acceptProposedAction()
def dropEvent(self, event):
"""Reimplement Qt method
Unpack dropped data and handle it"""
source = event.mimeData()
if source.hasUrls():
#paths = map(filenameFromUrl, source.urls())
paths = [url.path() for url in source.urls()]
print ("dropEvent", "hasUrls", source.urls(), paths)
self.book.filenamesDropped.emit(paths)
elif source.hasText():
print ("dropEvent", "hasText")
#editor = self.get_current_editor()
#if editor is not None:
# editor.insert_text( source.text() )
event.acceptProposedAction()
def mousePressEvent(self, mouseEvent):
if (mouseEvent.button() in (QtCore.Qt.LeftButton, QtCore.Qt.RightButton)):
QtGui.QPlainTextEdit.mousePressEvent(self, mouseEvent)
print (mouseEvent.button() )
return
def wheelEvent(self, event):
modifiers = QtGui.QApplication.keyboardModifiers()
if modifiers != QtCore.Qt.ControlModifier:
return QtGui.QPlainTextEdit.wheelEvent(self, event)
dbg_print ("Editor.wheelEvent, delta = ", event.delta())
new_sizeF = self.pointSizeF + (event.delta() / 100.0)
if new_sizeF > 0:
self.pointSizeF = new_sizeF
font = self.font()
font.setPointSizeF(new_sizeF)
self.setFont(font)
event.accept()
class LineNumberArea(QtGui.QWidget):
def __init__(self, editor):
QtGui.QWidget.__init__(self, editor)
self.edit = editor
self.highest_line = 0
css = '''
QWidget {
font-family: monospace;
font-size: 10;
color: black;
}'''
self.setStyleSheet(css)
def update(self, *args):
width = QtGui.QFontMetrics(
self.edit.document().defaultFont()).width(
str(self.highest_line)) + 10
if self.width() != width:
self.setFixedWidth(width)
self.edit.setViewportMargins(width,0,0,0)
QtGui.QWidget.update(self, *args)
def paintEvent(self, event):
page_bottom = self.edit.viewport().height()
font_metrics = QtGui.QFontMetrics(
self.edit.document().defaultFont())
current_block = self.edit.document().findBlock(
self.edit.textCursor().position())
painter = QtGui.QPainter(self)
painter.fillRect(self.rect(), QtCore.Qt.lightGray)
block = self.edit.firstVisibleBlock()
viewport_offset = self.edit.contentOffset()
line_count = block.blockNumber()
painter.setFont(self.edit.document().defaultFont())
while block.isValid():
line_count += 1
# The top left position of the block in the document
position = self.edit.blockBoundingGeometry(block).topLeft() + viewport_offset
# Check if the position of the block is out side of the visible area
if position.y() > page_bottom:
break
# We want the line number for the selected line to be bold.
bold = False
x = self.width() - font_metrics.width(str(line_count)) - 3
y = round(position.y()) + font_metrics.ascent()+font_metrics.descent()-1
if block == current_block:
bold = True
font = painter.font()
font.setBold(True)
painter.setFont(font)
pen = painter.pen()
painter.setPen(QtCore.Qt.red)
painter.drawRect(0, y-14, self.width()-2, 20)
painter.setPen(pen)
# Draw the line number right justified at the y position of the
# line. 3 is a magic padding number. drawText(x, y, text).
painter.drawText(x, y, str(line_count))
# Remove the bold style if it was set previously.
if bold:
font = painter.font()
font.setBold(False)
painter.setFont(font)
block = block.next()
self.highest_line = line_count
painter.end()
QtGui.QWidget.paintEvent(self, event)
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import yaml
import json
import requests
from copy import deepcopy
from lxml import html
from dateutil.parser import ParserError, parse
# loading external configuration
CONFIG = yaml.safe_load(open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'config.yml')))
URL_TPL = "https://www.eliteprospects.com/search/player?q=%s"
PLR_TPL = "https://www.eliteprospects.com/player/"
DOB_URL_TPL = "dob=%s"
POS_URL_TPL = "position=%s"
def get_ep_info_for_player(plr):
"""
Gets information from Eliteprospects for specified player.
"""
full_name = " ".join((plr['first_name'], plr['last_name']))
# searching by full name and (optionally) player dob first
search_name = full_name.replace(" ", "+")
url = URL_TPL % search_name
# adding date of birth to search string (if available)
if 'dob' in plr and plr['dob']:
dob = parse(plr['dob']).date()
url = "&".join((url, DOB_URL_TPL % dob))
else:
dob = None
# adding position to search string (if available)
if 'position' in plr:
url = "&".join((url, POS_URL_TPL % plr['position'][0]))
trs = get_trs_from_ep_plr_search(url)
# alternatively searching by last name and date of birth
if not trs and dob:
url = URL_TPL % plr['last_name']
url = "&".join((url, DOB_URL_TPL % dob))
trs = get_trs_from_ep_plr_search(url)
if not trs:
print("\t-> No Eliteprospects candidate found for %s [%d]" % (full_name, plr['player_id']))
return None, None
if len(trs) > 1:
print("\t-> Multiple Eliteprospects candidates found for %s [%d]" % (full_name, plr['player_id']))
for tr in trs:
ep_id, ep_dob = get_ep_id_dob_from_tr(tr, plr, False)
print("\t\t-> %s (%s)" % (ep_id, ep_dob))
return None, None
ep_id, ep_dob = get_ep_id_dob_from_tr(trs.pop(0), plr)
return ep_id, ep_dob
def get_trs_from_ep_plr_search(url):
"""
Gets table rows of interest from Eliteprospects player search page.
"""
r = requests.get(url)
doc = html.fromstring(r.text)
res_tbl = doc.xpath("//table[@class='table table-condensed table-striped players ']").pop(0)
trs = res_tbl.xpath("tbody/tr/td[@class='name']/ancestor::tr")
return trs
def get_ep_id_dob_from_tr(tr, plr, verbose=True):
"""
Gets player id and date of birth from search result table row on Eliteprospects player search page.
"""
orig_full_name = " ".join((plr['first_name'], plr['last_name']))
name_and_pos = tr.xpath("td[@class='name']/span/a/text()").pop(0)
if verbose:
print("[%d]: %s (%s) -> %s" % (plr['player_id'], orig_full_name, plr['position'], name_and_pos))
ep_id = tr.xpath("td[@class='name']/span/a/@href").pop(0)
ep_id = ep_id.replace(PLR_TPL, "")
ep_dob = tr.xpath("td[@class='date-of-birth']/span[@class='hidden-xs']/text()").pop(0)
try:
ep_dob = parse(ep_dob).date()
except ParserError:
print("Unable to parse date of birth %s" % ep_dob)
ep_dob = None
return ep_id, ep_dob
if __name__ == '__main__':
all_players_src_path = os.path.join(CONFIG['tgt_processing_dir'], 'del_players.json')
players = json.loads(open(all_players_src_path).read())
print("%d players loaded from repository of all players" % len(players))
# loading possibly existing Eliteprospects data sets
# player ids
tgt_id_path = os.path.join(CONFIG['tgt_processing_dir'], 'ep_ids.json')
if os.path.isfile(tgt_id_path):
ep_ids = json.loads(open(tgt_id_path).read())
else:
ep_ids = dict()
# dates of birth
tgt_dob_path = os.path.join(CONFIG['tgt_processing_dir'], 'ep_dobs.json')
if os.path.isfile(tgt_dob_path):
ep_dobs = json.loads(open(tgt_dob_path).read())
else:
ep_dobs = dict()
for plr in list(players.values())[:]:
if str(plr['player_id']) in ep_ids:
continue
# retrieving player id and date of birth from Eliteprospects
ep_id, ep_dob = get_ep_info_for_player(plr)
if ep_id:
ep_ids[str(plr['player_id'])] = ep_id
if ep_dob and not 'dob' in plr:
ep_dobs[str(plr['player_id'])] = ep_dob
ep_ids = dict(sorted(ep_ids.items()))
ep_dobs = dict(sorted(ep_dobs.items()))
open(tgt_id_path, 'w').write(json.dumps(ep_ids, indent=2, default=str))
open(tgt_dob_path, 'w').write(json.dumps(ep_dobs, indent=2, default=str))
|
python
|
#!/usr/bin/env python3
from putarm_ur3e_moveit_config.srv import GoToObj,GoToObjResponse
import sys
import copy
import rospy
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
from math import pi
from std_msgs.msg import String
from moveit_commander.conversions import pose_to_list
import tf
import numpy as np
def all_close(goal, actual, tolerance):
"""
Convenience method for testing if a list of values are within a tolerance of their counterparts in another list
@param: goal A list of floats, a Pose or a PoseStamped
@param: actual A list of floats, a Pose or a PoseStamped
@param: tolerance A float
@returns: bool
"""
all_equal = True
if type(goal) is list:
for index in range(len(goal)):
if abs(actual[index] - goal[index]) > tolerance:
return False
elif type(goal) is geometry_msgs.msg.PoseStamped:
return all_close(goal.pose, actual.pose, tolerance)
elif type(goal) is geometry_msgs.msg.Pose:
return all_close(pose_to_list(goal), pose_to_list(actual), tolerance)
return True
class GotoObject(object):
def __init__(self):
super(GotoObject,self).__init__()
        moveit_commander.roscpp_initialize(sys.argv)
        self.robot = moveit_commander.RobotCommander()
        self.scene = moveit_commander.PlanningSceneInterface()
        self.planning_group_name = "manipulator"
        self.planning_move_group = moveit_commander.MoveGroupCommander(self.planning_group_name)
self.display_trajectory_publisher = rospy.Publisher('/move_group/display_planned_path',
moveit_msgs.msg.DisplayTrajectory,
queue_size=20)
self.planning_frame = self.planning_move_group.get_planning_frame()
self.eef_link = self.planning_move_group.get_end_effector_link()
self.group_names = self.robot.get_group_names()
s = rospy.Service('goto_object_service', GoToObj, self.goto_object)
rospy.loginfo("Ready to goto")
rospy.spin()
def plan_cartesian_path(self, scale=1):
        move_group = self.planning_move_group
waypoints = []
wpose = move_group.get_current_pose().pose
wpose.position.z -= scale * 0.1 # First move up (z)
wpose.position.y += scale * 0.2 # and sideways (y)
waypoints.append(copy.deepcopy(wpose))
wpose.position.x += scale * 0.1 # Second move forward/backwards in (x)
waypoints.append(copy.deepcopy(wpose))
wpose.position.y -= scale * 0.1 # Third move sideways (y)
waypoints.append(copy.deepcopy(wpose))
(plan, fraction) = move_group.compute_cartesian_path(
waypoints, # waypoints to follow
0.01, # eef_step
0.0 # jump_threshold
)
# Note: We are just planning, not asking move_group to actually move the robot yet:
return plan, fraction
def display_trajectory(self, plan):
robot = self.robot
display_trajectory_publisher = self.display_trajectory_publisher
display_trajectory = moveit_msgs.msg.DisplayTrajectory()
display_trajectory.trajectory_start = robot.get_current_state()
display_trajectory.trajectory.append(plan)
# Publish
display_trajectory_publisher.publish(display_trajectory)
def execute_plan(self, plan):
move_group = self.planning_move_group
move_group.execute(plan, wait=True)
def goto_object(self,req):
goal_position = req.pose.position
#goal_position.z -= 0.05
current_pose = self.planning_move_group.get_current_pose().pose
current_position = current_pose.position
no_samples = 50
x_linspace = np.linspace(current_position.x,goal_position.x,num=no_samples)
y_linspace = np.linspace(current_position.y,goal_position.y,num=no_samples)
z_linspace = np.linspace(current_position.z,goal_position.z,num=no_samples)
waypoints = []
new_pose = current_pose
new_pose.orientation = req.pose.orientation
for i in range(no_samples):
new_pose.position.x = x_linspace[i]
new_pose.position.y = y_linspace[i]
new_pose.position.z = z_linspace[i]
waypoints.append(copy.deepcopy(new_pose))
(plan, fraction) = self.planning_move_group.compute_cartesian_path(
waypoints, # waypoints to follow
0.01, # eef_step
0.0, # jump_threshold
avoid_collisions=True)
output = self.planning_move_group.execute(plan,wait=True)
return GoToObjResponse(output)
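# Hedged usage sketch (assumption, not part of the original file): starting the
# service node. The node name 'goto_object_server' is illustrative only.
if __name__ == '__main__':
    rospy.init_node('goto_object_server')
    GotoObject()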
|
python
|
from os import environ
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.parse import urlparse, parse_qs
import os
import json
from flask import Flask, jsonify, request
import requests
from flask_cors import CORS
def fetch_location():
"""
gets the geocode data for the searched location, returns it as a json object
"""
query = request.args.get('data')
GEOCODE_API_KEY = os.environ.get('GEOCODE_API_KEY')
url = f'https://maps.googleapis.com/maps/api/geocode/json?address={query}&key={GEOCODE_API_KEY}'
locations = requests.get(url).json()
new_location = Location(query, locations['results'][0])
return new_location
class Location():
def __init__(self, query, query_result):
self.search_query = query
self.formatted_query = query_result['formatted_address']
self.latitude = query_result['geometry']['location']['lat']
self.longitude = query_result['geometry']['location']['lng']
def serialize(self):
return vars(self)
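# Hedged usage sketch (assumption, not in the original file): a minimal route that
# exposes fetch_location(); the '/location' path and app wiring are illustrative.
app = Flask(__name__)
CORS(app)
@app.route('/location')
def location():
    # expects a ?data=<place name> query parameter, as read by fetch_location()
    return jsonify(fetch_location().serialize())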
|
python
|
def emulate_catchup(replica, ppSeqNo=100):
replica.on_catch_up_finished(last_caught_up_3PC=(replica.viewNo, ppSeqNo),
master_last_ordered_3PC=replica.last_ordered_3pc)
def emulate_select_primaries(replica):
replica.primaryName = 'SomeAnotherNode'
replica._setup_for_non_master_after_view_change(replica.viewNo)
def expect_suspicious(replica, suspicious_code):
def reportSuspiciousNodeEx(ex):
assert suspicious_code == ex.code
raise ex
replica.node.reportSuspiciousNodeEx = reportSuspiciousNodeEx
def register_pp_ts(replica, pp, sender):
tpcKey = (pp.viewNo, pp.ppSeqNo)
ppKey = (pp, sender)
replica.pre_prepare_tss[tpcKey][ppKey] = replica.get_time_for_3pc_batch()
|
python
|
""" A tomography library for fusion devices
See:
https://github.com/ToFuProject/datastock
"""
# Built-in
import os
import subprocess
from codecs import open
# ... setup tools
from setuptools import setup, find_packages
# ... local script
import _updateversion as up
# == Getting version =====================================================
_HERE = os.path.abspath(os.path.dirname(__file__))
version = up.updateversion()
print("")
print("Version for setup.py : ", version)
print("")
# =============================================================================
# Get the long description from the README file
# Get the readme file whatever its extension (md vs rst)
_README = [
ff
for ff in os.listdir(_HERE)
if len(ff) <= 10 and ff[:7] == "README."
]
assert len(_README) == 1
_README = _README[0]
with open(os.path.join(_HERE, _README), encoding="utf-8") as f:
long_description = f.read()
if _README.endswith(".md"):
long_description_content_type = "text/markdown"
else:
long_description_content_type = "text/x-rst"
# =============================================================================
# =============================================================================
# Compiling files
setup(
name="datastock",
version=f"{version}",
# Use scm to get code version from git tags
# cf. https://pypi.python.org/pypi/setuptools_scm
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
# The version is stored only in the setup.py file and read from it (option
# 1 in https://packaging.python.org/en/latest/single_source_version.html)
use_scm_version=False,
# Description of what library does
description="A python library for generic class and data handling",
long_description=long_description,
long_description_content_type=long_description_content_type,
# The project's main homepage.
url="https://github.com/ToFuProject/datastock",
# Author details
author="Didier VEZINET",
author_email="[email protected]",
# Choose your license
license="MIT",
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
"Development Status :: 4 - Beta",
# Indicate who your project is intended for
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Physics",
# Pick your license as you wish (should match "license" above)
"License :: OSI Approved :: MIT License",
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
        # In which language is most of the code written?
"Natural Language :: English",
],
# What does your project relate to?
keywords="data analysis class container generic interactive plot",
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(
exclude=[
"doc",
]
),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
"numpy",
"scipy",
"matplotlib",
],
python_requires=">=3.6",
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
"dev": [
"check-manifest",
"coverage",
"pytest",
"sphinx",
"sphinx-gallery",
"sphinx_bootstrap_theme",
]
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# # If any package contains *.txt, *.rst or *.npz files, include them:
# '': ['*.txt', '*.rst', '*.npz'],
# # And include any *.csv files found in the 'ITER' package, too:
# 'ITER': ['*.csv'],
# },
# package_data={},
# include_package_data=True,
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html
# installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# executable scripts can be declared here
# They can be python or non-python scripts
# scripts=[
# ],
# entry_points point to functions in the package
    # They are generally preferable over scripts because they provide
# cross-platform support and allow pip to create the appropriate form
# of executable for the target platform.
# entry_points={},
# include_dirs=[np.get_include()],
py_modules=['_updateversion'],
)
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 24 20:47:24 2019
@author: elif.ayvali
"""
import pandas as pd
import numpy as np
import matplotlib.collections as mc
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.patches import Rectangle
def create_uniform_grid(low, high, bins=(10, 10)):
"""Define a uniformly-spaced grid that can be used to discretize a space.
Parameters
----------
low : array_like
Lower bounds for each dimension of the continuous space.
high : array_like
Upper bounds for each dimension of the continuous space.
bins : tuple
Number of bins along each corresponding dimension.
Returns
-------
grid : list of array_like
A list of arrays containing split points for each dimension.
"""
grid = [np.linspace(low[dim], high[dim], bins[dim] + 1)[1:-1] for dim in range(len(bins))]
print(grid)
return grid
def discretize(sample, grid):
"""Discretize a sample as per given grid.
Parameters
----------
sample : array_like
A single sample from the (original) continuous space.
grid : list of array_like
A list of arrays containing split points for each dimension.
Returns
-------
discretized_sample : array_like
A sequence of integers with the same number of dimensions as sample.
"""
return list(int(np.digitize(s, g)) for s, g in zip(sample, grid)) # apply along each dimension
def discretize_tile(sample, grid):
"""Discretize a sample as per given grid.
Parameters
----------
sample : array_like
A single sample from the (original) continuous space.
grid : list of array_like
A list of arrays containing split points for each dimension.
Returns
-------
discretized_sample : array_like
A sequence of integers with the same number of dimensions as sample.
"""
return tuple(int(np.digitize(s, g)) for s, g in zip(sample, grid))
def visualize_samples(samples, discretized_samples, grid, low=None, high=None):
"""Visualize original and discretized samples on a given 2-dimensional grid."""
fig, ax = plt.subplots(figsize=(10, 10))
# Show grid
ax.xaxis.set_major_locator(plt.FixedLocator(grid[0]))
ax.yaxis.set_major_locator(plt.FixedLocator(grid[1]))
ax.grid(True)
# If bounds (low, high) are specified, use them to set axis limits
if low is not None and high is not None:
ax.set_xlim(low[0], high[0])
ax.set_ylim(low[1], high[1])
else:
# Otherwise use first, last grid locations as low, high (for further mapping discretized samples)
low = [splits[0] for splits in grid]
high = [splits[-1] for splits in grid]
# Map each discretized sample (which is really an index) to the center of corresponding grid cell
grid_extended = np.hstack((np.array([low]).T, grid, np.array([high]).T)) # add low and high ends
grid_centers = (grid_extended[:, 1:] + grid_extended[:, :-1]) / 2 # compute center of each grid cell
    locs = np.stack([grid_centers[i, discretized_samples[:, i]] for i in range(len(grid))]).T  # map discretized samples
ax.plot(samples[:, 0], samples[:, 1], 'o') # plot original samples
ax.plot(locs[:, 0], locs[:, 1], 's') # plot discretized samples in mapped locations
ax.add_collection(mc.LineCollection(list(zip(samples, locs)), colors='orange')) # add a line connecting each original-discretized sample
ax.legend(['original', 'discretized'])
def visualize_tilings(tilings):
"""Plot each tiling as a grid."""
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
linestyles = ['-', '--', ':']
legend_lines = []
fig, ax = plt.subplots(figsize=(10, 10))
for i, grid in enumerate(tilings):
for x in grid[0]:
l = ax.axvline(x=x, color=colors[i % len(colors)], linestyle=linestyles[i % len(linestyles)], label=i)
for y in grid[1]:
l = ax.axhline(y=y, color=colors[i % len(colors)], linestyle=linestyles[i % len(linestyles)])
legend_lines.append(l)
    ax.grid(False)
ax.legend(legend_lines, ["Tiling #{}".format(t) for t in range(len(legend_lines))], facecolor='white', framealpha=0.9)
ax.set_title("Tilings")
return ax # return Axis object to draw on later, if needed
def create_tiling_grid(low, high, bins=(10, 10), offsets=(0.0, 0.0)):
"""Define a uniformly-spaced grid that can be used for tile-coding a space.
Parameters
----------
low : array_like
Lower bounds for each dimension of the continuous space.
high : array_like
Upper bounds for each dimension of the continuous space.
bins : tuple
Number of bins or tiles along each corresponding dimension.
offsets : tuple
Split points for each dimension should be offset by these values.
Returns
-------
grid : list of array_like
A list of arrays containing split points for each dimension.
Example
-------
if low = [-1.0, -5.0], high = [1.0, 5.0], bins = (10, 10), and offsets = (-0.1, 0.5),
then return a list of 2 NumPy arrays (2 dimensions) each containing the following split points (9 split points per dimension):
[array([-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7]),
array([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5, 4.5])]
Notice how the split points for the first dimension are offset by -0.1, and for the second dimension are offset by +0.5.
"""
#grid = [np.linspace(low[dim]+offsets[dim], high[dim]+offsets[dim], bins[dim] + 1)[1:-1] for dim in range(len(bins))]
grid = [np.linspace(low[dim], high[dim], bins[dim] + 1)[1:-1] + offsets[dim] for dim in range(len(bins))]
print("Tiling: [<low>, <high>] / <bins> + (<offset>) => <splits>")
for l, h, b, o, splits in zip(low, high, bins, offsets, grid):
print(" [{}, {}] / {} + ({}) => {}".format(l, h, b, o, splits))
return grid
def create_tilings(low, high, tiling_specs):
"""Define multiple tilings using the provided specifications.
Parameters
----------
low : array_like
Lower bounds for each dimension of the continuous space.
high : array_like
Upper bounds for each dimension of the continuous space.
tiling_specs : list of tuples
A sequence of (bins, offsets) to be passed to create_tiling_grid().
Returns
-------
tilings : list
A list of tilings (grids), each produced by create_tiling_grid().
"""
return [create_tiling_grid(low, high, bins, offsets) for bins, offsets in tiling_specs]
def tile_encode(sample, tilings, flatten=False):
"""Encode given sample using tile-coding.
Parameters
----------
sample : array_like
A single sample from the (original) continuous space.
tilings : list
A list of tilings (grids), each produced by create_tiling_grid().
flatten : bool
If true, flatten the resulting binary arrays into a single long vector.
Returns
-------
encoded_sample : list or array_like
A list of binary vectors, one for each tiling, or flattened into one.
"""
encoded_sample = [discretize_tile(sample, grid) for grid in tilings]
return np.concatenate(encoded_sample) if flatten else encoded_sample
def visualize_encoded_samples(samples, encoded_samples, tilings, low=None, high=None):
"""Visualize samples by activating the respective tiles."""
samples = np.array(samples) # for ease of indexing
# Show tiling grids
ax = visualize_tilings(tilings)
# If bounds (low, high) are specified, use them to set axis limits
if low is not None and high is not None:
ax.set_xlim(low[0], high[0])
ax.set_ylim(low[1], high[1])
else:
# Pre-render (invisible) samples to automatically set reasonable axis limits, and use them as (low, high)
ax.plot(samples[:, 0], samples[:, 1], 'o', alpha=0.0)
low = [ax.get_xlim()[0], ax.get_ylim()[0]]
high = [ax.get_xlim()[1], ax.get_ylim()[1]]
# Map each encoded sample (which is really a list of indices) to the corresponding tiles it belongs to
tilings_extended = [np.hstack((np.array([low]).T, grid, np.array([high]).T)) for grid in tilings] # add low and high ends
tile_centers = [(grid_extended[:, 1:] + grid_extended[:, :-1]) / 2 for grid_extended in tilings_extended] # compute center of each tile
tile_toplefts = [grid_extended[:, :-1] for grid_extended in tilings_extended] # compute topleft of each tile
tile_bottomrights = [grid_extended[:, 1:] for grid_extended in tilings_extended] # compute bottomright of each tile
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
for sample, encoded_sample in zip(samples, encoded_samples):
for i, tile in enumerate(encoded_sample):
# Shade the entire tile with a rectangle
topleft = tile_toplefts[i][0][tile[0]], tile_toplefts[i][1][tile[1]]
bottomright = tile_bottomrights[i][0][tile[0]], tile_bottomrights[i][1][tile[1]]
ax.add_patch(Rectangle(topleft, bottomright[0] - topleft[0], bottomright[1] - topleft[1],
color=colors[i], alpha=0.33))
# In case sample is outside tile bounds, it may not have been highlighted properly
if any(sample < topleft) or any(sample > bottomright):
# So plot a point in the center of the tile and draw a connecting line
cx, cy = tile_centers[i][0][tile[0]], tile_centers[i][1][tile[1]]
ax.add_line(Line2D([sample[0], cx], [sample[1], cy], color=colors[i]))
ax.plot(cx, cy, 's', color=colors[i])
# Finally, plot original samples
ax.plot(samples[:, 0], samples[:, 1], 'o', color='r')
ax.margins(x=0, y=0) # remove unnecessary margins
ax.set_title("Tile-encoded samples")
return ax
class QTable:
"""Simple Q-table."""
def __init__(self, state_size, action_size):
"""Initialize Q-table.
Parameters
----------
state_size : tuple
Number of discrete values along each dimension of state space.
action_size : int
Number of discrete actions in action space.
"""
self.state_size = state_size
self.action_size = action_size
self.q_table = np.zeros(shape=(self.state_size + (self.action_size,)))
print("QTable(): size =", self.q_table.shape)
class TiledQTable:
"""Composite Q-table with an internal tile coding scheme."""
def __init__(self, low, high, tiling_specs, action_size):
"""Create tilings and initialize internal Q-table(s).
Parameters
----------
low : array_like
Lower bounds for each dimension of state space.
high : array_like
Upper bounds for each dimension of state space.
tiling_specs : list of tuples
A sequence of (bins, offsets) to be passed to create_tilings() along with low, high.
action_size : int
Number of discrete actions in action space.
"""
self.tilings = create_tilings(low, high, tiling_specs)
self.state_sizes = [tuple(len(splits)+1 for splits in tiling_grid) for tiling_grid in self.tilings]
self.action_size = action_size
self.q_tables = [QTable(state_size, self.action_size) for state_size in self.state_sizes]
print("TiledQTable(): no. of internal tables = ", len(self.q_tables))
def get(self, state, action):
"""Get Q-value for given <state, action> pair.
Parameters
----------
state : array_like
Vector representing the state in the original continuous space.
action : int
Index of desired action.
Returns
-------
value : float
Q-value of given <state, action> pair, averaged from all internal Q-tables.
"""
# Encode state to get tile indices
encoded_state = tile_encode(state, self.tilings)
# Retrieve q-value for each tiling, and return their average
value = 0.0
for idx, q_table in zip(encoded_state, self.q_tables):
value += q_table.q_table[tuple(idx + (action,))]
value /= len(self.q_tables)
return value
def update(self, state, action, value, alpha=0.1):
"""Soft-update Q-value for given <state, action> pair to value.
Instead of overwriting Q(state, action) with value, perform soft-update:
Q(state, action) = alpha * value + (1.0 - alpha) * Q(state, action)
Parameters
----------
state : array_like
Vector representing the state in the original continuous space.
action : int
Index of desired action.
value : float
Desired Q-value for <state, action> pair.
alpha : float
Update factor to perform soft-update, in [0.0, 1.0] range.
"""
# Encode state to get tile indices
encoded_state = tile_encode(state, self.tilings)
# Update q-value for each tiling by update factor alpha
for idx, q_table in zip(encoded_state, self.q_tables):
value_ = q_table.q_table[tuple(idx + (action,))] # current value
q_table.q_table[tuple(idx + (action,))] = alpha * value + (1.0 - alpha) * value_
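# Hedged usage sketch (not part of the original file): build a small tiled Q-table
# over a 2-D state space and read/update one <state, action> value. Bounds, offsets
# and the sample state below are illustrative only.
if __name__ == '__main__':
    specs = [((10, 10), (-0.066, -0.33)),
             ((10, 10), (0.0, 0.0)),
             ((10, 10), (0.066, 0.33))]
    tq = TiledQTable(low=[-1.0, -5.0], high=[1.0, 5.0], tiling_specs=specs, action_size=4)
    state, action = (0.25, -1.91), 0
    print("Q before:", tq.get(state, action))
    tq.update(state, action, 1.0, alpha=0.5)
    print("Q after :", tq.get(state, action))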
|
python
|
import sys
from je_web_runner import get_desired_capabilities
from je_web_runner import get_desired_capabilities_keys
from je_web_runner import get_webdriver_manager
try:
print(get_desired_capabilities_keys())
for keys in get_desired_capabilities_keys():
print(get_desired_capabilities(keys))
driver_wrapper = get_webdriver_manager("firefox", capabilities=get_desired_capabilities("firefox"))
driver_wrapper.quit()
except Exception as error:
print(repr(error), file=sys.stderr)
sys.exit(1)
|
python
|
# Generated by Django 3.1.8 on 2021-04-07 15:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('unievents', '0014_auto_20210407_1416'),
]
operations = [
migrations.RemoveField(
model_name='event_tag',
name='event_tag_name',
),
migrations.AddField(
model_name='event_tag',
name='text',
field=models.TextField(db_column='text', default=None),
preserve_default=False,
),
]
|
python
|
from . import db
from flask import current_app
from flask_login import UserMixin, AnonymousUserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from datetime import datetime
import hashlib, os
import markdown
class User(UserMixin, db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), unique=True, index=True)
email = db.Column(db.String(64), unique=True, index=True)
password_hash = db.Column(db.String(128))
member_since = db.Column(db.DateTime(), default=datetime.utcnow)
last_seen = db.Column(db.DateTime(), default=datetime.utcnow)
status = db.Column(db.Boolean, default=False)
role = db.Column(db.Boolean, default=False)
articles = db.relationship('Article', backref='author', lazy='dynamic')
@property
def password(self):
        raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password=password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password=password)
def is_admin(self):
return self.role
def ping(self):
self.last_seen = datetime.utcnow()
db.session.add(self)
def is_author(self):
return Article.query.filter_by(author_id=self.id).first()
def __repr__(self):
return '<User %r>' % self.username
class AnonymousUser(AnonymousUserMixin):
def is_admin(self):
return False
class Category(db.Model):
__tablename__ = 'category'
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(64),unique=True,index=True)
name = db.Column(db.String(64), unique=True, index=True)
desp = db.Column(db.String(300))
articles = db.relationship('Article', backref='category', lazy='dynamic')
def __repr__(self):
return '<Name %r>' % self.name
article_tag = db.Table('article_tag',
db.Column('article_id',db.Integer,db.ForeignKey('article.id'),primary_key=True),
db.Column('tag_id',db.Integer,db.ForeignKey('tag.id'),primary_key=True))
class Tag(db.Model):
__tablename__ = 'tag'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.String(64),nullable=False, unique=True, index=True)
def __repr__(self):
return '<Name %r>' % self.name
class Article(db.Model):
__tablename__ = 'article'
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(120), index=True)
name = db.Column(db.String(64),index=True,unique=True)
content = db.Column(db.Text)
content_html = db.Column(db.Text)
summary = db.Column(db.String(300))
thumbnail = db.Column(db.String(200))
state = db.Column(db.Integer,default=0)
vc = db.Column(db.Integer,default=0)
timestamp = db.Column(db.DateTime, index=True, default=datetime.now)
author_id = db.Column(db.Integer, db.ForeignKey('user.id'))
category_id = db.Column(db.Integer, db.ForeignKey('category.id'))
tags = db.relationship('Tag',secondary=article_tag,backref=db.backref('articles',lazy='dynamic'),lazy='dynamic')
def content_to_html(self):
return markdown.markdown(self.content, extensions=[
'markdown.extensions.extra',
'markdown.extensions.codehilite',
])
@property
def author(self):
"""返回作者对象"""
return User.query.get(self.author_id)
@property
def category(self):
"""返回文章分类对象"""
return Category.query.get(self.category_id)
@property
def category_name(self):
"""返回文章分类名称,主要是为了使用 flask-wtf 的 obj 返回对象的功能"""
return Category.query.get(self.category_id).name
@property
def previous(self):
"""用于分页显示的上一页"""
a = self.query.filter(Article.state==1,Article.id < self.id). \
order_by(Article.timestamp.desc()).first()
return a
@property
def next(self):
"""用于分页显示的下一页"""
a = self.query.filter(Article.state==1,Article.id > self.id). \
order_by(Article.timestamp.asc()).first()
return a
@property
def tag_names(self):
"""返回文章的标签的字符串,用英文‘, ’分隔,主要用于修改文章功能"""
tags = []
for tag in self.tags:
tags.append(tag.name)
return ', '.join(tags)
@property
    def thread_key(self):  # used by the comment plugin
        return hashlib.md5(str(self.id).encode('utf-8')).hexdigest()
def __repr__(self):
return '<Title %r>' % self.title
class Recommend(db.Model):
'''
    Recommendation
'''
__tablename__ = 'recommend'
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(120))
img = db.Column(db.String(200))
url = db.Column(db.String(200))
sn = db.Column(db.Integer,default=0)
state = db.Column(db.Integer, default=1)
timestamp = db.Column(db.DateTime, default=datetime.now)
class AccessLog(db.Model):
'''
    Request log
'''
__tablename__ = 'access_log'
id = db.Column(db.Integer, primary_key=True)
ip = db.Column(db.String(20))
url = db.Column(db.String(120))
timestamp = db.Column(db.DateTime, default=datetime.now)
remark = db.Column(db.String(32))
class Picture(db.Model):
'''
    Picture
'''
__tablename__ = 'picture'
id = db.Column(db.Integer, primary_key = True)
name = db.Column(db.String(64))
timestamp = db.Column(db.DateTime, default=datetime.now)
url = db.Column(db.String(120))
remark = db.Column(db.String(32))
class InvitationCode(db.Model):
'''
    Invitation code
'''
__tablename__ = 'invitation_code'
id = db.Column(db.Integer, primary_key = True)
code = db.Column(db.String(64),unique = True, nullable=False)
user = db.Column(db.String(64))
state = db.Column(db.Boolean, default=True)
class OnlineTool(db.Model):
'''
在线工具
'''
__tablename__ = 'online_tool'
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(120))
desp = db.Column(db.String(120))
img = db.Column(db.String(200))
url = db.Column(db.String(200))
sn = db.Column(db.Integer,default=0)
state = db.Column(db.Integer, default=1)
timestamp = db.Column(db.DateTime, default=datetime.now)
|
python
|
import colander
from cryptography.fernet import Fernet
class EncryptedExportField(colander.String):
"""
Serialize non-encrypted appstruct into encrypted cstruct.
"""
def __init__(self, fernet_key, *args, **kwargs):
self.fernet_key = fernet_key
self.fernet = Fernet(fernet_key)
super().__init__(*args, **kwargs)
def serialize(self, node, appstruct):
v = super().serialize(node, appstruct)
if v is colander.null:
return v
if v.strip():
if v == "data":
raise Exception()
return self.fernet.encrypt(v.encode("utf8")).decode("utf8")
return colander.null
def deserialize(self, node, cstruct):
v = super().deserialize(node, cstruct)
if v is colander.null:
return v
        # decrypt
if v.strip():
v = self.fernet.decrypt(v.encode("utf8")).decode("utf8")
return v
return colander.null
class EncryptedStoreField(colander.String):
"""
Deserialize non-encrypted cstruct into encrypted appstruct.
"""
def __init__(self, fernet_key, *args, **kwargs):
self.fernet_key = fernet_key
self.fernet = Fernet(fernet_key)
super().__init__(*args, **kwargs)
def serialize(self, node, appstruct):
""" Decrypt appstruct """
v = super().serialize(node, appstruct)
if v is colander.null:
return v
if v.strip():
v = self.fernet.decrypt(v.encode("utf8")).decode("utf8")
return v
return colander.null
    def deserialize(self, node, cstruct):
        """ Encrypt cstruct """
        v = super().deserialize(node, cstruct)
        if v is colander.null:
            return v
        if v.strip():
            return self.fernet.encrypt(v.encode("utf8")).decode("utf8")
        return colander.null
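# Hedged usage sketch (not part of the original module): a schema node that encrypts
# its value on serialize. The key is generated on the fly, for illustration only.
if __name__ == "__main__":
    key = Fernet.generate_key()
    node = colander.SchemaNode(EncryptedExportField(fernet_key=key), name="secret")
    token = node.serialize("hello")
    print(node.typ.fernet.decrypt(token.encode("utf8")).decode("utf8"))  # -> hello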
|
python
|
import torch
import torch.nn as nn
from packaging import version
from mmcv.cnn import kaiming_init, normal_init
from .registry import INPUT_MODULES
from .utils import build_norm_layer
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
@INPUT_MODULES.register_module
class Conv1x1Block(nn.Module):
"""
Conv1x1 => Batch Norm => RELU input module
"""
def __init__(self, in_channels, out_channels):
super(Conv1x1Block, self).__init__()
self.net = nn.Sequential(
conv1x1(in_channels, out_channels),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
def init_weights(self, init_linear='normal'):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.SyncBatchNorm, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
return self.net(x)
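# Hedged usage sketch (not part of the original file): push a dummy batch through the
# block; channel sizes and input shape are illustrative. Running this directly still
# requires the package context because of the relative imports above.
if __name__ == '__main__':
    block = Conv1x1Block(in_channels=64, out_channels=128)
    block.init_weights()
    out = block(torch.randn(2, 64, 7, 7))
    print(out.shape)  # expected: torch.Size([2, 128, 7, 7])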
|
python
|
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Copyright © 2010, RedJack, LLC.
# All rights reserved.
#
# Please see the LICENSE.txt file in this distribution for license
# details.
# ----------------------------------------------------------------------
import unittest
from ipset.c import *
IPV4_ADDR_1 = \
"\xc0\xa8\x01\x64"
IPV6_ADDR_1 = \
"\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x1e\xc2\xff\xfe\x9f\xe8\xe1"
class TestSet(unittest.TestCase):
def test_set_starts_empty(self):
s = ipset.ipset_new()
self.assert_(ipset.ipset_is_empty(s))
ipset.ipset_free(s)
def test_empty_sets_equal(self):
s1 = ipset.ipset_new()
s2 = ipset.ipset_new()
self.assert_(ipset.ipset_is_equal(s1, s2))
ipset.ipset_free(s1)
ipset.ipset_free(s2)
def test_ipv4_insert(self):
s = ipset.ipset_new()
ipset.ipset_ipv4_add(s, IPV4_ADDR_1)
self.assertFalse(ipset.ipset_is_empty(s))
ipset.ipset_free(s)
def test_ipv4_insert_network(self):
s = ipset.ipset_new()
ipset.ipset_ipv4_add_network(s, IPV4_ADDR_1, 24)
self.assertFalse(ipset.ipset_is_empty(s))
ipset.ipset_free(s)
def test_ipv6_insert(self):
s = ipset.ipset_new()
ipset.ipset_ipv6_add(s, IPV6_ADDR_1)
self.assertFalse(ipset.ipset_is_empty(s))
ipset.ipset_free(s)
def test_ipv6_insert_network(self):
s = ipset.ipset_new()
ipset.ipset_ipv6_add_network(s, IPV6_ADDR_1, 32)
self.assertFalse(ipset.ipset_is_empty(s))
ipset.ipset_free(s)
class TestMap(unittest.TestCase):
def test_map_starts_empty(self):
s = ipset.ipmap_new(0)
self.assert_(ipset.ipmap_is_empty(s))
ipset.ipmap_free(s)
def test_empty_maps_equal(self):
s1 = ipset.ipmap_new(0)
s2 = ipset.ipmap_new(0)
self.assert_(ipset.ipmap_is_equal(s1, s2))
ipset.ipmap_free(s1)
ipset.ipmap_free(s2)
def test_ipv4_insert(self):
s = ipset.ipmap_new(0)
ipset.ipmap_ipv4_set(s, IPV4_ADDR_1, 1)
self.assertFalse(ipset.ipmap_is_empty(s))
ipset.ipmap_free(s)
def test_ipv4_insert_network(self):
s = ipset.ipmap_new(0)
ipset.ipmap_ipv4_set_network(s, IPV4_ADDR_1, 24, 1)
self.assertFalse(ipset.ipmap_is_empty(s))
ipset.ipmap_free(s)
def test_ipv6_insert(self):
s = ipset.ipmap_new(0)
ipset.ipmap_ipv6_set(s, IPV6_ADDR_1, 1)
self.assertFalse(ipset.ipmap_is_empty(s))
ipset.ipmap_free(s)
def test_ipv6_insert_network(self):
s = ipset.ipmap_new(0)
ipset.ipmap_ipv6_set_network(s, IPV6_ADDR_1, 32, 1)
self.assertFalse(ipset.ipmap_is_empty(s))
ipset.ipmap_free(s)
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from os import path
# dirs
BASE_DIR = path.dirname(path.realpath(__file__)) + '/'
MODULES_DIR = BASE_DIR + 'modules/'
AUDIO_DIR = BASE_DIR + 'audio/'
# api url (server)
SERVER_API_URL = 'http://localhost:3000/'
# voice lang
LANG = 'en-EN'
|
python
|
""" Main entrypoint for starttls-policy CLI tool """
import argparse
import os
from starttls_policy_cli import configure
GENERATORS = {
"postfix": configure.PostfixGenerator,
}
def _argument_parser():
parser = argparse.ArgumentParser(
description="Generates MTA configuration file according to STARTTLS-Everywhere policy",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-g", "--generate",
choices=GENERATORS,
help="The MTA you want to generate a configuration file for.",
dest="generate", required=True)
# TODO: decide whether to use /etc/ for policy list home
parser.add_argument("-d", "--policy-dir",
help="Policy file directory on this computer.",
default="/etc/starttls-policy/", dest="policy_dir")
parser.add_argument("-e", "--early-adopter",
help="Early Adopter mode. Processes all \"testing\" domains in policy list "
"same way as domains in \"enforce\" mode, effectively requiring strong TLS "
"for domains in \"testing\" mode too. This mode is useful for participating"
" in tests of recently added domains with real communications and earlier "
"security hardening at the cost of increased probability of delivery "
"degradation. Use this mode with awareness about all implications.",
action="store_true",
dest="early_adopter")
return parser
def _ensure_directory(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def _generate(arguments):
_ensure_directory(arguments.policy_dir)
config_generator = GENERATORS[arguments.generate](arguments.policy_dir,
arguments.early_adopter)
config_generator.generate()
config_generator.manual_instructions()
def main():
""" Entrypoint for CLI tool. """
parser = _argument_parser()
_generate(parser.parse_args())
if __name__ == "__main__":
main() # pragma: no cover
|
python
|
import numpy as np
class ValueLog():
"""Implemements a key/value aggregating dictionary log with optional
grouping/precision and custom aggregation modes"""
def __init__(self):
self.log_values = {}
def log(self, key, val, agg="mean", scope="get", group=None,
precision=None):
"""Logs a value
Args:
key: The key for this value, this will be the key in the resulting
log dictionary
val: The value to log
agg: How to aggregate all the values received, should be the name
of a valid numpy operation like mean/max/sum etc...
scope: Scope over which to aggregate/reset the values for this key.
Valid values are:
get: Aggregate and reset each time get() is called
None: Never reset (Aggregate all values received from the
start)
<number>: Aggregate the last <number> values received
group: Optionally place this key in a sub-key called 'group'. Can
set a nested group using '->', e.g. "training->general"
precision: Precision to round the final value to after aggregation
Note: agg/scope/precision must be the same for each value logged with
the same key+group
"""
dest = self.log_values
if group is not None:
for subkey in group.split("->"):
if subkey not in dest:
dest[subkey] = {}
dest = dest[subkey]
if key not in dest:
dest[key] = {
"data": [],
"scope": scope,
"agg": agg,
"precision": precision
}
else:
assert(dest[key]['agg'] == agg)
assert(dest[key]['precision'] == precision)
assert(dest[key]['scope'] == scope)
dest[key]['data'].append(val)
scope = dest[key]['scope']
# If scope is a number, leave only that last amount in the history
if isinstance(scope, int):
dest[key]['data'] = dest[key]['data'][-int(scope):]
def log_dict(self, source, agg="auto", group=None):
"""Logs values from a given dictionary in the same group/key structure
"""
for key, val in source.items():
if isinstance(val, dict):
sub_group = key if group is None else group+"->"+key
self.log_dict(val, agg=agg, group=sub_group)
else:
self.log(key, val, group=group, agg=agg)
def _get_aggregator_for_key(self, key, agg_mode):
if agg_mode == "auto":
# 'auto' uses mean unless one of the supported modes is in
# the key name (Example 'reward_max' will use max)
supported_modes = ['min', 'mean', 'median', 'max', 'std', 'sum']
# Example auto-keys might be 'reward_max', or just 'max'
mode = key.split("_")[-1]
if mode not in supported_modes:
agg_mode = "mean"
else:
agg_mode = mode
return getattr(np, agg_mode)
def _aggregate_log_values(self, source, dest):
"""Aggregates the log values recursively from source->dest"""
remove = []
for key, item in source.items():
if "data" not in item:
# Assume it's a sub-group
dest[key] = {}
self._aggregate_log_values(item, dest[key])
else:
aggregator = self._get_aggregator_for_key(key, item['agg'])
value = aggregator(item['data'])
if item['precision'] is not None:
value = round(value, item['precision'])
dest[key] = value
if item['scope'] == 'get':
remove.append(key)
for key in remove:
del source[key]
def get(self):
res = {}
self._aggregate_log_values(self.log_values, res)
return res
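# Hedged usage sketch (not part of the original file): log a few values with different
# aggregation modes and groups, then collect the aggregated snapshot.
if __name__ == "__main__":
    vlog = ValueLog()
    for step, reward in enumerate([1.0, 2.0, 4.0]):
        vlog.log("reward", reward, agg="mean", group="training", precision=2)
        vlog.log("reward_max", reward, agg="auto", group="training")
        vlog.log("steps", step, agg="sum", scope=None)
    print(vlog.get())  # e.g. {'training': {'reward': 2.33, 'reward_max': 4.0}, 'steps': 3}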
|
python
|
from time import sleep
import threading
import datetime
import paho.mqtt.client as mqtt
#### CONSTANTS ####
#MQTTServer="home.bodhiconnolly.com"
MQTTServer="192.168.1.100"
MQTTPort=1882
waitTime=datetime.timedelta(milliseconds=50)
ledTopic="room/lights/strips/"
functionTopic="room/function/#"
systemTopic="system/functions/room"
lastTime=datetime.datetime.now()
#### MQTT SENDING ####
def sendMQTT(topic,message):
client.publish(topic,message)
def setRGB(r=None,g=None,b=None):
if not (r==None):
sendMQTT(ledTopic+"r",r)
if not (g==None):
sendMQTT(ledTopic+"g",g)
if not (b==None):
sendMQTT(ledTopic+"b",b)
def setRGBWait(r=None,g=None,b=None):
global lastTime
if datetime.datetime.now()-lastTime>waitTime:
setRGB(r,g,b)
lastTime=datetime.datetime.now()
def updateStatus(function,wake=None):
pass
#### THREAD FUNCTIONS ####
class StoppableThread(threading.Thread):
def __init__(self):
super(StoppableThread, self).__init__()
self._stop = threading.Event()
#print "Thread Started"
def stop(self):
self._stop.set()
class FadeThread(StoppableThread):
def __init__(self,fadeSpeed):
super(FadeThread, self).__init__()
self.fadeSpeed=fadeSpeed/255
def run(self):
updateStatus("fade")
print "Starting fade"
setRGB(0,0,255)
while not self._stop.isSet():
self.fade(self.fadeSpeed)
def fade(self,fadespeed):
lastTime=datetime.datetime.now()
for i in range(0,256,1):
if not self._stop.isSet():
setRGBWait(r=i)
sleep(fadespeed)
else:
break
for i in range(255,-1,-1):
if not self._stop.isSet():
setRGBWait(b=i)
sleep(fadespeed)
else:
break
for i in range(0,256,1):
if not self._stop.isSet():
setRGBWait(g=i)
sleep(fadespeed)
else:
break
for i in range(255,-1,-1):
if not self._stop.isSet():
setRGBWait(r=i)
sleep(fadespeed)
else:
break
for i in range(0,256,1):
if not self._stop.isSet():
setRGBWait(b=i)
sleep(fadespeed)
else:
break
for i in range(255,-1,-1):
if not self._stop.isSet():
setRGBWait(g=i)
sleep(fadespeed)
else:
break
    def setSpeed(self, fadeSpeed):
        self.fadeSpeed=fadeSpeed/255
class SleepThread(StoppableThread):
def __init__(self,sleepTime):
super(SleepThread, self).__init__()
self.sleepTime=sleepTime
def run(self):
updateStatus("sleep")
print "Starting sleep"
self.ledSleep(self.sleepTime)
def ledSleep(self,sleepTime):
sleepDelay=(sleepTime)/255
for i in range(255,100,-1):
if not self._stop.isSet():
setRGBWait(i,i,i)
sleep(sleepDelay*0.2)
else:
break
for i in range(100,50,-1):
if not self._stop.isSet():
setRGBWait(i,i,i)
sleep(sleepDelay*1)
else:
break
for i in range(50,10,-1):
if not self._stop.isSet():
setRGBWait(i,i,i)
sleep(sleepDelay*2)
else:
break
for i in range(10,-1,-1):
if not self._stop.isSet():
setRGBWait(i,i,i)
sleep(sleepDelay*10)
else:
break
updateStatus("none","asleep")
class WakeThread(StoppableThread):
def __init__(self,wakeTime):
super(WakeThread, self).__init__()
self.wakeTime=wakeTime
def run(self):
print "Starting wake"
updateStatus("wake")
self.wake(self.wakeTime)
def wake(self,sleepTime):
wakeDelay=(self.wakeTime)/255
for i in range(1,11,1):
if not self._stop.isSet():
setRGBWait(i,i,i)
sleep(wakeDelay*10)
else:
break
for i in range(11,51,1):
if not self._stop.isSet():
setRGBWait(i,i,i)
sleep(wakeDelay*2)
else:
break
for i in range(51,101,1):
if not self._stop.isSet():
setRGBWait(i,i,i)
sleep(wakeDelay*1)
else:
break
for i in range(101,255,1):
if not self._stop.isSet():
setRGBWait(i,i,i)
sleep(wakeDelay*0.2)
else:
break
setRGB(255,255,255)
updateStatus("none","awake")
class FastWakeThread(StoppableThread):
def __init__(self,wakeTime):
super(FastWakeThread, self).__init__()
self.wakeTime=wakeTime
def run(self):
print "Starting fast wake"
updateStatus("wake")
self.wake(self.wakeTime)
def wake(self,sleepTime):
wakeDelay=(self.wakeTime)/255
for i in range(1,256,1):
if not self._stop.isSet():
setRGBWait(i,i,i)
sleep(wakeDelay)
else:
break
setRGB(255,255,255)
updateStatus("none","awake")
#### CONTROLLING OBJECT ####
class ledController(object):
def __init__(self):
self.fadeThread=FadeThread(1)
self.sleepThread=SleepThread(1)
self.wakeThread=WakeThread(1)
self.fastwakeThread=FastWakeThread(1)
def stopThreads(self):
self.fadeThread.stop()
self.sleepThread.stop()
self.wakeThread.stop()
self.fastwakeThread.stop()
def on_connect(self, client, userdata, flags, rc):
print("Connected with result code "+str(rc))
client.subscribe(functionTopic)
client.publish(systemTopic,"Function Controller On")
def parseMessage(self, client, userdata, msg):
print msg.topic+" "+str(msg.payload)
topic=msg.topic.split("/")
payload=msg.payload
if topic[0]=='room' and topic[1]=='function':
self.stopThreads()
updateStatus("none")
if topic[2]=='sleep':
self.sleepThread=SleepThread(float(payload))
self.sleepThread.start()
elif topic[2]=='wake':
self.wakeThread=WakeThread(float(payload))
self.wakeThread.start()
elif topic[2]=='fastwake':
self.fastwakeThread=FastWakeThread(float(payload))
self.fastwakeThread.start()
elif topic[2]=='fade':
self.fadeThread=FadeThread(float(payload))
self.fadeThread.start()
elif topic[2]=='stop':
pass
else:
print "Not a valid function: " + str(topic)
#### RUNTIME ####
if __name__ == "__main__":
l=ledController()
client = mqtt.Client()
client.on_connect = l.on_connect
client.on_message = l.parseMessage
client.connect(MQTTServer, MQTTPort, 60)
client.loop_forever()
|
python
|
import json
import string
import random
import os
import httplib2
import requests
# Flask Imports
from flask import Flask, render_template, request, redirect, url_for, jsonify
from flask import abort, g, flash, Response, make_response
from flask import session as login_session
from flask_httpauth import HTTPBasicAuth
# SQLAlchemy imports
from models import Items, Users, Base
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy import create_engine
from sqlalchemy import desc
auth = HTTPBasicAuth()
engine = create_engine('sqlite:///items.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
db = DBSession()
app = Flask(__name__)
def redirect_url(default='index'):
return request.args.get('next') or request.referrer or url_for(default)
#############################
# User Login and Registration
############################
@auth.verify_password
def verify_password(username, password):
user = db.query(Users).filter_by(username=username).first()
if not user or not user.verify_password(password):
return False
g.user = user
return True
def createUser(login_session):
newUser = Users(username=login_session['username'])
db.add(newUser)
db.commit()
user = db.query(Users).filter_by(username=login_session['username']).first()
return user.id
def getUserInfo(user_id):
user = db.query(Users).filter_by(id=user_id).first()
return user
@app.route('/signup', methods=['GET', 'POST'])
def signup():
if request.method == 'GET':
return render_template('signup.html',
login_session=login_session)
elif request.method == 'POST':
username = request.form.get('username')
password = request.form.get('password')
verifyPass = request.form.get('verifyPassword')
if username is None or password is None or password != verifyPass:
flash('You must enter a valid username and password')
return render_template('signup.html')
# Check if user is already in database
user = db.query(Users).filter_by(username=username).first()
if user:
flash('The user "%s" is already registered, please login to continue' % user.username)
return render_template('signup.html')
else:
user = Users(username=username)
user.hash_password(password)
db.add(user)
db.commit()
flash('User %s has been created, please login to continue' % user.username)
return redirect(url_for('index'))
else:
return redirect(url_for('index'))
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'GET':
return render_template('login.html',
login_session=login_session)
elif request.method == 'POST':
username = request.form.get('username')
password = request.form.get('password')
if verify_password(username, password):
user = db.query(Users).filter_by(username=username).first()
login_session['username'] = user.username
login_session['user_id'] = user.id
flash("Welcome, %s" % user.username)
g.user = user
return redirect(url_for('index'))
else:
flash('Wrong Username or Password')
return render_template('login.html')
else:
return redirect(url_for('index'))
@app.route('/logout')
def logout():
del login_session['username']
del login_session['user_id']
flash('You have been logged out')
return redirect(url_for('index'))
##################
# VIEWS
##################
@app.route('/')
def index():
if ('username' in login_session):
# Only show items added by this user
items = db.query(Items).filter_by(author_id=login_session['user_id']).all()
return render_template('home.html', items=items, login_session=login_session)
else:
# Don't show items when user is not logged in
return render_template('home.html',
login_session=login_session)
# Method for Setting/Adding new Key Value Pairs
@app.route('/add', methods=['GET', 'POST'])
def addItem():
if request.method == 'GET':
# Make sure only logged in users can access this page
if ('username' in login_session):
return render_template('addItem.html',
login_session=login_session)
else:
flash('Please login in order to add key/value pairs')
return redirect(url_for('login'))
elif request.method == 'POST':
# Make sure only logged in users are adding key/value pairs
if ('username' in login_session):
key = request.form.get('key')
value = request.form.get('value')
item = db.query(Items).filter_by(key=key).first()
# Make sure key is unique/not already added
if item:
flash('"%s" has already been added' % item.key)
return redirect(url_for('addItem'))
if key is not None and key != '':
item = Items(key=key)
if value is not None and value != '':
item.value = value
item.author = getUserInfo(login_session['user_id'])
else:
flash('You need to provide a proper Key/Value pair')
return redirect(url_for('addItem'))
db.add(item)
db.commit()
flash('Item Added!')
return redirect(url_for('index'))
else:
flash('Please login in order to add key/value pairs')
return redirect(url_for('login'))
else:
return redirect(url_for('index'))
@app.route('/edit/<item_key>', methods=['GET', 'POST'])
def editItem(item_key):
if request.method == 'GET':
if ('username' in login_session):
# find key/value pair that we want to edit
item = db.query(Items).filter_by(key=item_key).first()
# Make sure user is editing only their key/value pair
if (item.author.username == login_session['username']):
return render_template('editItem.html',
item=item,
login_session=login_session)
else:
flash('Please login to edit key/value pairs')
return redirect(url_for('login'))
elif request.method == 'POST':
# Make sure only a logged in user is requesting edit
if ('username' in login_session):
key = request.form.get('key')
value = request.form.get('value')
item = db.query(Items).filter_by(key=item_key).first()
# Make sure only user that added this item can edit this
if (item.author_id != login_session['user_id']):
flash('You are not allowed to edit this')
return redirect(url_for('index'))
# Update the Key/Value pair
if key is not None and key != '':
item.key = key
if value is not None and value != '':
item.value = value
# Commit changes to the Database
db.add(item)
db.commit()
flash('Key/value pair has been updated')
return redirect(url_for('index'))
else:
flash('Please login to edit key/value pairs')
return redirect(url_for('login'))
else:
return redirect(url_for('index'))
@app.route('/delete/<item_key>', methods=['GET', 'POST'])
def deleteItem(item_key):
if request.method == 'GET':
if ('username' in login_session):
# find key/value pair that we want to edit
item = db.query(Items).filter_by(key=item_key).first()
# Make sure user is deleting only their key/value pair
if (item.author.username == login_session['username']):
return render_template('deleteItem.html',
item=item,
login_session=login_session)
else:
flash('Please login to delete key/value pairs')
return redirect(url_for('login'))
if request.method=='POST':
item = db.query(Items).filter_by(key=item_key).first()
# Make sure the right user is requesting the delete
if login_session['username'] != item.author.username:
flash('You do not have the permission to delete that')
return redirect(url_for('index'))
# Delete item and commit changes
db.delete(item)
db.commit()
flash('Key/Value pair deleted')
return redirect(url_for('index'))
else:
return redirect(url_for('index'))
if __name__ == '__main__':
app.secret_key = 'super_secret_key'
app.debug = True
app.run(host='0.0.0.0', port=3000)
|
python
|
# BaseOperator.py
#
# Base class for all machines and human operators.
#
# Attributes:
# name
# states: a list of states that this operator can be in, at any time. For example: ["busy", idle"]
# start_time: the time at which the behavior starts.
#
# Member functions:
# methods to change state, and print the fraction of time spent in each state.
#
# Author: Neha Karanjkar
# Date: 20 Nov 2017
import random
import simpy
class BaseOperator(object):
def __init__(self, env, name):
self.env=env
self.name=name
#start_time
self.start_time=0
#default states:
self.states = ["none"]
#power rating of the machine/operator for each state
self.power_ratings = [0.0]
self.time_spent_in_state = [0.0 for s in self.states]
# current state
self.current_state = "none"
# variable to remember the time instant
# at which the last state change occured.
self.state_change_timestamp = 0.0
# function (to be called inside the constructor of all derived classes)
# to define the set of states for a particular type of machine.
# Optionally the power (in watts) for each state can also be specified.
def define_states(self,states, start_state):
self.states = states
self.time_spent_in_state = [0.0 for s in states]
assert(start_state in states)
self.current_state=start_state
self.power_ratings = [0.0 for s in states]
def set_power_ratings(self, power_ratings):
assert (len(power_ratings)==len(self.states))
for p in power_ratings:
assert(p>0)
self.power_ratings = power_ratings
# function to record the time spent in the current state
# since the last timestamp
def update_time_spent_in_current_state(self):
i = self.states.index(self.current_state)
self.time_spent_in_state[i] += self.env.now - self.state_change_timestamp
# change state
def change_state(self, new_state):
prev_state = self.current_state
self.update_time_spent_in_current_state()
self.current_state = new_state
self.state_change_timestamp=self.env.now
if(new_state!=prev_state):
print("T=", self.env.now+0.0, self.name, "changed state to ",new_state)
def get_utilization(self):
utilization = []
self.update_time_spent_in_current_state()
total_time = sum(self.time_spent_in_state)
assert (total_time>0)
for i in range(len(self.states)):
t = self.time_spent_in_state[i]
t_percent = self.time_spent_in_state[i]/total_time*100.0
utilization.append(t_percent)
return utilization
# print time spent in each state
def print_utilization(self):
u = self.get_utilization()
print(self.name,":",end=' ')
for i in range(len(self.states)):
print(self.states[i], "=",end=' ')
print("{0:.2f}".format(u[i])+"%",end=' ')
print("")
# calculate energy consumption (in joules)
# for each state that the machine was in.
def get_energy_consumption(self):
e = []
for i in range(len(self.states)):
e.append(self.power_ratings[i]*self.time_spent_in_state[i])
return e
# print energy consumption
def print_energy_consumption(self):
e = self.get_energy_consumption()
total_e = sum(e)
        denominator = max(total_e, 1.0)
print(self.name,": (",end=' ')
for i in range(len(self.states)):
print(self.states[i], "=",end=' ')
e_percent = e[i]/denominator*100.0
print("{0:.2f}".format(e_percent)+"%",end=' ')
print (") Total energy = ","{0:.2f}".format(total_e/1e3)," Kilo Joules.",end=' ')
print("")
|
python
|
# coding=utf-8
import os
import unittest
from parameterized import parameterized
from conans.client.conf import default_settings_yml
from conans.model.editable_cpp_info import EditableCppInfo
from conans.model.settings import Settings
def _make_abs(base_path, *args):
p = os.path.join(*args)
if base_path:
p = os.path.join(base_path, p)
p = os.path.abspath(p)
return p
class WorkOnItemsTest(unittest.TestCase):
def test_empty(self):
self.assertEqual(".", EditableCppInfo._work_on_item("", None, None, None))
@parameterized.expand([(False,), (True,)])
def test_basic(self, make_abs):
base_path = os.path.dirname(__file__) if make_abs else None
self.assertIn(_make_abs(base_path, '.'),
EditableCppInfo._work_on_item(".", base_path, None, None))
self.assertIn(_make_abs(base_path, 'src', 'include'),
EditableCppInfo._work_on_item("src/include", base_path, None, None))
self.assertIn(_make_abs(base_path, '..', 'relative', 'include'),
EditableCppInfo._work_on_item("../relative/include", base_path, None, None))
self.assertIn(_make_abs(base_path, 'src', 'path with spaces', 'include'),
EditableCppInfo._work_on_item("src/path with spaces/include",
base_path, None, None))
self.assertIn(_make_abs(base_path, 'ending-slash', 'include'),
EditableCppInfo._work_on_item("ending-slash/include/", base_path, None, None))
@parameterized.expand([(False,), (True,)])
def test_windows(self, make_abs):
base_path = os.path.dirname(__file__) if make_abs else None
self.assertIn(os.path.join('C:' + os.sep, 'Windows-single-slash', 'include'),
EditableCppInfo._work_on_item("C:\Windows-single-slash\include",
base_path, None, None))
self.assertIn(os.path.join('D:' + os.sep, 'Windows-double-slash', 'include'),
EditableCppInfo._work_on_item("D:\\Windows-double-slash\\include",
base_path, None, None))
@parameterized.expand([(False,), (True,)])
def test_unix(self, make_abs):
base_path = os.path.dirname(__file__) if make_abs else None
self.assertIn(os.path.join(os.sep, 'abs', 'path', 'include'),
EditableCppInfo._work_on_item("/abs/path/include", base_path, None, None))
@parameterized.expand([(False,), (True,)])
def test_placeholders(self, make_abs):
base_path = os.path.dirname(__file__) if make_abs else None
settings = Settings.loads(default_settings_yml)
settings.compiler = 'Visual Studio'
settings.compiler.version = '14'
settings.build_type = 'Debug'
self.assertIn(_make_abs(base_path, 'src', 'Visual Studio14', 'Debug', 'include'),
EditableCppInfo._work_on_item("src/{settings.compiler}{settings.compiler.version}/{settings.build_type}/include",
base_path=base_path, settings=settings,
options=None))
self.assertIn(os.path.join('C:' + os.sep, 'Visual Studio', 'include'),
EditableCppInfo._work_on_item("C:\\{settings.compiler}\\include\\",
base_path=base_path, settings=settings,
options=None))
self.assertIn(os.path.join(os.sep, 'usr', 'path with spaces', 'Visual Studio', 'dir'),
EditableCppInfo._work_on_item("/usr/path with spaces/{settings.compiler}/dir",
base_path=base_path, settings=settings,
options=None))
|
python
|
from dags.spark_common import SparkJobCfg, spark_job, user_defined_macros, EntityPattern
from dags.spark_common import dag_schema_path, hadoop_options, LOCAL_INPUT, LOCAL_DATAWAREHOUSE
from datetime import timedelta
from airflow import DAG
args = {
'owner': 'alexey',
'start_date': '2021-06-10'
}
dag = DAG(
'spark_hudi',
schedule_interval=None,
dagrun_timeout=timedelta(minutes=60),
default_args=args,
user_defined_macros=user_defined_macros,
max_active_runs=1)
entity_patterns = [
EntityPattern("orders", "orders", "orderId", "last_update_time"),
]
cfg = SparkJobCfg(
input_path=LOCAL_INPUT,
output_path=LOCAL_DATAWAREHOUSE,
entity_patterns=entity_patterns,
reader_options=["header:true"],
hadoop_options=hadoop_options(),
partition_by=["year", "month", "day"],
input_schema_path=dag_schema_path,
output_format="hudi",
trigger_interval=-1
)
JAR_PATH = "{{fromjson(connection.etl_jobs_emr_jar.extra)['path']}}"
load_to_table = spark_job('load_to_table', cfg, 'etljobs.emr.HudiIngestor', dag, None, True, JAR_PATH)
load_to_table
if __name__ == "__main__":
dag.cli()
|
python
|
from .mesh import import_mesh
from .curve import import_curve
from .brep import import_brep
#from .default import import_default
|
python
|
#!/usr/bin/python
import sdk_common
# Block in charge of tagging the release
class SDKNewsAndTag(sdk_common.BuildStep):
def __init__(self, logger=None):
super(SDKNewsAndTag, self).__init__('SDK News & Tag', logger)
self.branch_name = self.common_config.get_config().get_branch_name()
self.github_token = self.common_config.get_config().get_github_token()
self.url_with_token = self.common_config.get_config().get_origin_url_combined_with_token()
self.version = self.common_config.get_config().get_version()
self.is_commit_already_tagged = self.common_config.get_config().is_commit_tagged()
self.should_tag = (
not self.is_commit_already_tagged) and self.common_config.get_config().is_for_release() and (
not self.common_config.get_config().is_from_private())
self.news_folder = self.common_config.get_config().get_news_folder()
self.changelog = self.common_config.get_config().get_changelog_file()
self.property_file = self.common_config.get_config().get_project_property_file()
def execute(self):
self.print_title()
try:
self.log_info("Generating the changelog")
if self.is_commit_already_tagged:
self.log_info(
"The commit was already tagged [%s]. No need to generate the changelog file" % self.fetch_commit_tag())
return True
if self.news_folder:
self.check_shell_command_output("towncrier --yes --name="" --version=%s" % self.version,
self.news_folder)
except:
self.log_error('Failed to generate the changelog file')
return False
try:
if not self.should_tag:
self.log_info("No need for tagging.")
if self.is_commit_already_tagged:
self.log_info(
"The commit was already tagged [%s]" % self.fetch_commit_tag())
return True
self.tag_github()
except:
self.log_error('Failed to tag the repository')
return False
self.log_info("Done.")
return True
def fetch_commit_tag(self):
return self.common_config.get_config().get_commit_tag()
def tag_github(self):
self.log_info("Committing the changelog")
if not self.common_config.get_config().get_user_name() or not self.common_config.get_config().get_user_email():
self.git_setup_env()
if not self.url_with_token:
if not self.github_token:
raise Exception("The GitHub token has not been set properly")
else:
raise Exception("The remote URL could not be resolved")
self.git_set_remote_url(self.url_with_token)
self.git_set_upstream_branch(self.branch_name)
if self.news_folder:
            self.git_add_folder(self.news_folder)
if self.changelog:
self.git_add_file(self.changelog)
if self.property_file:
self.git_add_file(self.property_file)
self.git_commit(':checkered_flag: Release %s' % self.version)
self.log_info("Tagging the project")
self.git_tag(self.version,'SDK Release')
self.log_info("Pushing changes back to GitHub")
self.git_push_and_follow_tags()
self.log_info("Marking this commit as latest")
self.git_soft_tag('latest')
self.git_force_push_tags()
|
python
|
from .ast_transformers import InvertGenerator, transformAstWith
from .descriptor_magic import \
wrapMethodAndAttachDescriptors, BindingExtensionDescriptor
import six
import inspect
def coroutine(func):
def start(*args, **kwargs):
g = func(*args, **kwargs)
six.next(g)
return g
return start
def _funcIsMethod(stackFromFunc):
""" Determine whether a function being decorated is actually a method of a
class, given the stack frames above the decorator invocation. """
funcFrame = stackFromFunc[0]
potentialClassName = funcFrame[3]
nextFrame = stackFromFunc[1]
return nextFrame[3] == '<module>' and \
nextFrame[4][0].startswith('class ' + potentialClassName)
def hasInvertibleMethods(cls):
""" Class decorator that transforms methods that have been marked with
"invertibleGenerator" """
#frames = inspect.stack()
#from pprint import PrettyPrinter
#globs = map(lambda frame: frame[0].f_globals, frames)
#locs = map(lambda frame: frame[0].f_locals, frames)
#pp = PrettyPrinter(indent=4)
#for (glob, loc) in zip(globs, locs):
#print "GLOBALS:"
#pp.pprint(glob)
#print "LOCALS:"
#pp.pprint(loc)
for name, method in six.iteritems(cls.__dict__):
if hasattr(method, "markForConversion"):
# TODO: transform and wrap
# But need globals/locals
pass
return cls
def _makeInvertibleUsingFrame(frame, func):
""" Add a co method to a generator function, that is the equivalent
coroutine. """
return coroutine(
transformAstWith(
frame[0].f_globals,
frame[0].f_locals,
[InvertGenerator])(func)
)
def invertibleGenerator(func):
""" Add a co method to a generator function, that is the equivalent
coroutine. """
frames = inspect.stack()
nextFrame = frames[1]
transformedFunc = _makeInvertibleUsingFrame(nextFrame, func)
if _funcIsMethod(frames[1:]):
# TODO: either remove, or use in class decorator
func.markForConversion = True
return wrapMethodAndAttachDescriptors({
'co': BindingExtensionDescriptor(transformedFunc)
})(func)
else:
func.co = transformedFunc
return func
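# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical demo of the "coroutine" primer defined above: it
# advances a generator past its first yield so callers can .send() into it
# immediately. Only the plain decorator is exercised here; invertibleGenerator
# depends on the AST transformation machinery and is not demonstrated.
if __name__ == "__main__":
    @coroutine
    def _printer():
        while True:
            received = yield
            print(received)

    _p = _printer()
    _p.send("hello")  # works without an explicit next()/send(None) first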
|
python
|
import re
import xmlsec
from lxml import etree
def parse_tbk_error_message(raw_message):
message_match = re.search(r'<!--(.+?)-->', raw_message)
if message_match:
message = message_match.group(1).strip()
match = re.search(r'(.+?)\((\d+?)\)', message)
if match:
error = match.group(1)
code = int(match.group(2))
return error, code
return message, -1
return raw_message, -1
def get_key_format_value(key_format):
try:
return getattr(xmlsec.KeyFormat, key_format)
except AttributeError:
raise ValueError("Key format {} unsupported".format(key_format))
def load_key_from_data(key_data, cert_data=None, password=None, key_format='PEM'):
key_format = get_key_format_value(key_format)
key = xmlsec.Key.from_memory(key_data, key_format, password)
if cert_data:
key.load_cert_from_memory(cert_data, key_format)
return key
def xml_to_string(tree):
return etree.tostring(tree).decode('utf-8')
def create_xml_element(tag_name, nsmap=None):
return etree.Element(tag_name, nsmap=nsmap)
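# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical example of parse_tbk_error_message: Transbank SOAP
# faults embed the human-readable message in an HTML comment, which the regex
# above extracts together with the numeric code. The raw message below is
# made up for illustration.
if __name__ == '__main__':
    raw = "<soap:Fault><!-- Transaccion rechazada(274) --></soap:Fault>"
    error, code = parse_tbk_error_message(raw)
    print(error, code)  # -> Transaccion rechazada 274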
|
python
|
import os
import djcelery
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'findingaids.settings')
os.environ['PYTHON_EGG_CACHE'] = '/tmp'
os.environ['VIRTUAL_ENV'] = '/home/httpd/findingaids/env/'
djcelery.setup_loader()
# from django.core.handlers.wsgi import WSGIHandler
# application = WSGIHandler()
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
python
|
from acme import Product
import random
ADJECTIVES = ['Awesome', 'Shiny', 'Impressive', 'Portable', 'Improved']
NOUNS = ['Anvil', 'Catapult', 'Disguise', 'Mousetrap', '???']
def generate_products(n=30, price_range=(5, 10), weight_range=(5, 100)):
"""Generate n number of products within a specified price and weight range"""
products = []
for i in range(1, n + 1):
name = random.choice(ADJECTIVES) + ' ' + random.choice(NOUNS)
price = random.randrange(price_range[0], price_range[1] + 1)
weight = random.randrange(weight_range[0], weight_range[1] + 1)
flammability = random.uniform(0.0, 2.5)
product = Product(name, price, weight, flammability)
products.append(product)
return products
def inventory_report(prod_list):
"""Creates an inventory report for a given product list"""
prod_list = list(set(prod_list))
x = 0
price = 0
weight = 0
flammability = 0
stealability = 0
for item in prod_list:
x += 1
price += item.price
weight += item.weight
flammability += item.flammability
        # count stealable items (assumes acme.Product exposes a stealability()
        # method that returns 'Not so stealable...' for non-stealable items)
        if item.stealability() != 'Not so stealable...':
            stealability += 1
avg_price = price / x
avg_weight = weight / x
avg_flammability = flammability / x
print(f'There are {x} unique products in this list. The average price is {avg_price}, '
f'average weight is {avg_weight},'
f'and the average flammability is {avg_flammability}.')
if stealability >= len(prod_list) / 2:
print('Many of these items are highly stealable!')
return avg_price, avg_weight, avg_flammability
if __name__ == '__main__':
inventory_report(generate_products())
|
python
|
import sys, os
sys.path.append('/Users/syrus/Proyectos/exercita/website/')
sys.path.append('/Users/syrus/Sites/exercita/')
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
TEMPLATE_EXAMPLE = '''<ul>
{% for gender in gender_list %}
<li>{{ gender.grouper }}
<ul>
{% for item in gender.list %}
<li>{{ item.first_name|default:a|join:", " }} {{ item.last_name|center:a }}</li>
{% endfor %}
</ul>
</li>
{% endfor %}
</ul>'''
TEMPLATE_EXAMPLE2 = '''{% extends "admin/base.html" %}
{% load i18n %}
{% block title %}{{ title }} | {% trans 'Django site admin'|default:'a' %}{% endblock %}
{% block branding %}
<h1 id="site-name">{% trans 'Django administration' %}</h1>
{% endblock %}
{% filter force_escape %}
Autoescapar
{% endfilter %}
{% autoescape on %}asd{% endautoescape %}
{% block nav-global %}{% endblock %}
{% trans 'hola' %}
{% for user in users %}
<li class="{% cycle 'odd' 'even' %}">{{ user }}</li>
<li class="{% cycle 'odd' 'eveojkn' as b %}">{{ user }}</li>
<li class="{% cycle b %}">{{ user }}b</li>
{% include "admin/base.html" with a='1' r='2' aa='1' %}
{% empty %}
Vacio
{% endfor %}
{% ifequal a b %}a{% endifequal %}
{% ifequal a b %}a{% else %}b{% endifequal %}
{% firstof a b 'hola' %}
{% if a|length > 0 or b and c|default:'b' <= c|default:'a' and not b%}
asd
{% endif %}
{% blocktrans with amount=article.price count years=i.length %}
That will cost $ {{ amount }} per year.
{% plural %}
That will cost $ {{ amount }} per {{ years }} years.
{% endblocktrans %}
{% regroup people|dictsort:"gender" by gender_by as gender_list %}
{% url path.to.view arg arg2 as the_url %}
{% url app_views.client client.id %}
'''
TEMPLATE_ADMIN = '''{% extends "admin/base_site.html" %}
{% load i18n %}
{% block breadcrumbs %}<div class="breadcrumbs"><a href="/">{% trans "Home" %}</a> › {% trans "Server error" %}</div>{% endblock %}
{% block title %}{% trans 'Server error (500)' %}{% endblock %}
{% block content %}
<h1>{% trans 'Server Error <em>(500)</em>' %}</h1>
<p>{% trans "There's been an error. It's been reported to the site administrators via e-mail and should be fixed shortly. Thanks for your patience." %}</p>
{% endblock %}
'''
#from django.template.loader import *
from django.template import Template
from djinja.template.utils import DjinjaAdapter
dj = DjinjaAdapter(Template(TEMPLATE_EXAMPLE2))
print(dj.process())
|
python
|
"""
My purpose in life is to take the NWS AWIPS Geodata Zones Shapefile and
dump them into the PostGIS database! I was bootstraped like so:
python ugcs_update.py z_16mr06 2006 03 16
python ugcs_update.py z_11mr07 2007 03 11
python ugcs_update.py z_31my07 2007 05 31
python ugcs_update.py z_01au07 2007 08 01
python ugcs_update.py z_5sep07 2007 09 05
python ugcs_update.py z_25sep07 2007 09 25
python ugcs_update.py z_01ap08 2008 04 01
python ugcs_update.py z_09se08 2008 09 09
python ugcs_update.py z_03oc08 2008 10 03
python ugcs_update.py z_07my09 2009 05 07
python ugcs_update.py z_15jl09 2009 07 15
python ugcs_update.py z_22jl09 2009 07 22
python ugcs_update.py z_04au11 2011 08 04
python ugcs_update.py z_13oc11 2011 10 13
python ugcs_update.py z_31my11 2011 05 31
python ugcs_update.py z_15de11 2011 12 15
python ugcs_update.py z_23fe12 2012 02 23
python ugcs_update.py z_03ap12 2012 04 03
python ugcs_update.py z_12ap12 2012 04 12
python ugcs_update.py z_07jn12 2012 06 07
python ugcs_update.py z_11oc12 2012 10 11
python ugcs_update.py z_03de13a 2013 12 03
python ugcs_update.py z_05fe14a 2014 02 05
"""
import sys
import os
import zipfile
import requests
import geopandas as gpd
from shapely.geometry import MultiPolygon
from pyiem.util import utc, logger
# Put the pywwa library into sys.path
sys.path.insert(
0, os.path.join(os.path.dirname(os.path.abspath(__file__)), "../parsers")
)
# pylint: disable=wrong-import-position
from pywwa.database import get_sync_dbconn # noqa: E402
LOG = logger()
# Change Directory to /tmp, so that we can rw
os.chdir("/tmp")
def do_download(zipfn):
"""Do the download steps"""
if not os.path.isfile(zipfn):
req = requests.get(
("https://www.weather.gov/source/gis/Shapefiles/%s/%s")
% ("County" if zipfn.startswith("c_") else "WSOM", zipfn)
)
LOG.info("Downloading %s ...", zipfn)
with open(zipfn, "wb") as fh:
fh.write(req.content)
LOG.info("Unzipping")
shpfn = None
with zipfile.ZipFile(zipfn, "r") as zipfp:
for name in zipfp.namelist():
LOG.info("Extracting %s", name)
with open(name, "wb") as fh:
fh.write(zipfp.read(name))
if name[-3:] == "shp":
shpfn = name
return shpfn
def new_poly(geo):
"""Sort and return new multipolygon"""
if geo.geom_type == "Polygon":
return geo
# This is tricky. We want to have our multipolygon have its
# biggest polygon first in the multipolygon.
# This will allow some plotting simplification
# later as we will only consider the first polygon
maxarea = 0
polys = []
for poly in geo:
area = poly.area
if area > maxarea:
maxarea = area
polys.insert(0, poly)
else:
polys.append(poly)
return MultiPolygon(polys)
def db_fixes(cursor, valid):
"""Fix some issues in the database"""
cursor.execute(
"update ugcs SET geom = st_makevalid(geom) where end_ts is null "
"and not st_isvalid(geom) and begin_ts = %s",
(valid,),
)
LOG.info("Fixed %s entries that were ST_Invalid()", cursor.rowcount)
cursor.execute(
"""
UPDATE ugcs SET simple_geom = ST_Multi(
ST_Buffer(ST_SnapToGrid(geom, 0.01), 0)
),
centroid = ST_Centroid(geom),
area2163 = ST_area( ST_transform(geom, 2163) ) / 1000000.0
WHERE begin_ts = %s or area2163 is null
""",
(valid,),
)
LOG.info(
"Updated simple_geom,centroid,area2163 for %s rows", cursor.rowcount
)
# Check the last step that we don't have empty geoms, which happened once
def _check():
"""Do the check."""
cursor.execute(
"""
SELECT end_ts from ugcs
where begin_ts = %s and (
simple_geom is null or
ST_IsEmpty(simple_geom) or
ST_Area(simple_geom) / ST_Area(geom) < 0.9
)
""",
(valid,),
)
_check()
if cursor.rowcount > 0:
LOG.info(
"%s rows with empty, too small simple_geom, decreasing tolerance",
cursor.rowcount,
)
cursor.execute(
"""
UPDATE ugcs
SET simple_geom = ST_Multi(
ST_Buffer(ST_SnapToGrid(geom, 0.0001), 0)
)
WHERE begin_ts = %s and (
simple_geom is null or
ST_IsEmpty(simple_geom) or
ST_Area(simple_geom) / ST_Area(geom) < 0.9
)
""",
(valid,),
)
_check()
if cursor.rowcount > 0:
LOG.info(
"Found %s rows with empty simple_geom, FIXME SOMEHOW!",
cursor.rowcount,
)
def truncate(cursor, valid, ugc, source):
"""Stop the bleeding."""
cursor.execute(
"UPDATE ugcs SET end_ts = %s WHERE ugc = %s and end_ts is null "
"and source = %s",
(valid, ugc, source),
)
return cursor.rowcount
def workflow(argv, pgconn, cursor):
"""Go Main Go"""
# NWS correspondence indicates the date on the website is assumed to be
# an implementation time at 18 z of that date.
valid = utc(int(argv[2]), int(argv[3]), int(argv[4]), 18)
zipfn = "%s.zip" % (argv[1],)
shpfn = do_download(zipfn)
# track domain
source = zipfn[:2].replace("_", "")
LOG.info("Processing, using '%s' as the database source", source)
df = gpd.read_file(shpfn)
# Ensure CRS is set
df["geometry"] = df["geometry"].set_crs("EPSG:4326", allow_override=True)
if df.empty:
LOG.info("Abort, empty dataframe from shapefile read.")
sys.exit()
# make all columns upper
df.columns = [x.upper() if x != "geometry" else x for x in df.columns]
# Compute the ugc column
if zipfn[:2] in ("mz", "oz", "hz"):
df["STATE"] = ""
df["ugc"] = df["ID"]
wfocol = "WFO"
elif zipfn.startswith("c_"):
geo_type = "C"
df["ugc"] = df["STATE"] + geo_type + df["FIPS"].str.slice(-3)
df["NAME"] = df["COUNTYNAME"]
wfocol = "CWA"
else:
geo_type = "Z"
df["ugc"] = df["STATE"] + geo_type + df["ZONE"]
wfocol = "CWA"
# Check that UGCs are not all null
if df["ugc"].isna().all():
LOG.info("Abort as all ugcs are null")
sys.exit()
postgis = gpd.read_postgis(
"SELECT * from ugcs where end_ts is null and source = %s",
pgconn,
params=(source,),
geom_col="geom",
index_col="ugc",
)
postgis["covered"] = False
LOG.info(
"Loaded %s '%s' type rows from the database",
len(postgis.index),
source,
)
# Compute the area and then sort to order duplicated UGCs :/
# Database stores as sq km
df["area2163"] = df["geometry"].to_crs(2163).area / 1e6
df.sort_values(by="area2163", ascending=False, inplace=True)
gdf = df.groupby("ugc").nth(0)
LOG.info(
"Loaded %s/%s unique entries from %s",
len(gdf.index),
len(df.index),
shpfn,
)
countnew = 0
countdups = 0
for ugc, row in gdf.iterrows():
if ugc in postgis.index:
postgis.at[ugc, "covered"] = True
# Some very small number, good enough
current = postgis.loc[ugc]
if isinstance(current, gpd.GeoDataFrame):
LOG.info("abort, more than one %s found in postgis", ugc)
sys.exit()
dirty = False
# arb size decision
if abs(row["area2163"] - current["area2163"]) > 0.2:
dirty = True
LOG.debug(
"%s updating sz diff %.2d -> %.2d",
ugc,
current["area2163"],
row["area2163"],
)
elif row["NAME"] != current["name"]:
dirty = True
LOG.debug(
"%s updating due to name change %s -> %s",
ugc,
current["name"],
row["NAME"],
)
elif row[wfocol] != current["wfo"]:
dirty = True
LOG.debug(
"%s updating due to wfo change %s -> %s",
ugc,
current["wfo"],
row[wfocol],
)
if not dirty:
countdups += 1
continue
res = truncate(cursor, valid, ugc, source)
LOG.info(
"%s creating new entry for %s",
"Truncating old" if res > 0 else "",
ugc,
)
# Finally, insert the new geometry
cursor.execute(
"INSERT into ugcs (ugc, name, state, begin_ts, wfo, geom, "
"source) VALUES (%s, %s, %s, %s, %s, "
"ST_Multi(ST_SetSRID(ST_GeomFromEWKT(%s),4326)), %s)",
(
ugc,
row["NAME"].strip(),
row["STATE"],
"1980-01-01" if res == 0 else valid,
row[wfocol],
new_poly(row["geometry"]).wkt,
source,
),
)
countnew += 1
for ugc, _row in postgis[~postgis["covered"]].iterrows():
LOG.info("%s not found in update, truncating.", ugc)
truncate(cursor, valid, ugc, source)
LOG.info("NEW: %s Dups: %s", countnew, countdups)
db_fixes(cursor, valid)
def main(argv):
"""Go Main Go"""
if len(argv) != 5:
LOG.info("ERROR: You need to specify the file date to download + date")
LOG.info("Example: python ugcs_update.py z_01dec10 2010 12 01")
sys.exit(0)
pgconn = get_sync_dbconn("postgis")
cursor = pgconn.cursor()
workflow(argv, pgconn, cursor)
cursor.close()
pgconn.commit()
pgconn.close()
LOG.info("Done!")
if __name__ == "__main__":
# Get the name of the file we wish to download
main(sys.argv)
|
python
|
import requests
import logging
from lxml import html
class HTDownloader():
def __init__(self, htid, res, i):
self.htid = htid
self.i = i
self.res = res
def get(self):
logging.debug("Download image: {}".format(self.i))
return down_img(self.htid, self.i, self.res)
def get_hathi_section_element(hid):
url = "https://babel.hathitrust.org/cgi/pt?id=" + hid
req = requests.get(url)
tree = html.fromstring(req.content)
return tree.xpath('.//section[@id="section"]')[0]
def down_img(htid, i, res):
rot = 0
url = "https://babel.hathitrust.org/cgi/imgsrv/image?id={htid};seq={seq};size={res};rotation={rot}".format(
htid=htid, seq=i, res=res, rot=rot)
req = requests.get(url)
content_type = req.headers['content-type']
logging.debug("Page {}: {}".format(i, content_type))
return req.content, content_type
def dl_images(htid, res=10000):
section_elem = get_hathi_section_element(htid)
maxseq = int(section_elem.attrib['data-total-seq'])
logging.debug("Num pages: {}".format(maxseq))
for i in range(1, maxseq + 1):
yield htid, i, HTDownloader(htid, res, i)
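# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical example of dl_images: the HathiTrust id, resolution
# and output file naming below are illustrative assumptions, and running this
# makes real HTTP requests to babel.hathitrust.org.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    for htid, seq, downloader in dl_images("mdp.39015012345678", res=200):
        content, content_type = downloader.get()
        ext = "jpg" if "jpeg" in content_type else "png"
        with open("{}_{:04d}.{}".format(htid.replace("/", "_"), seq, ext), "wb") as fh:
            fh.write(content)
        break  # fetch only the first page in this sketch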
|
python
|
import asyncio
from contextlib import asynccontextmanager
from sys import version_info
from typing import AsyncIterator
import pytest
from aioredis import create_redis_pool
from aiohttp_client_cache.backends.redis import DEFAULT_ADDRESS, RedisBackend, RedisCache
from aiohttp_client_cache.session import CachedSession
from test.integration import BaseBackendTest, BaseStorageTest
def is_db_running():
"""Test if a Redis server is running locally on the default port"""
async def get_db_info():
client = await create_redis_pool(DEFAULT_ADDRESS)
await client.info()
client.close()
await client.wait_closed()
try:
asyncio.run(get_db_info())
return True
except OSError:
return False
pytestmark = [
pytest.mark.asyncio,
pytest.mark.skipif(
version_info >= (3, 10) or not is_db_running(),
reason='Redis server required for integration tests',
),
]
class TestRedisCache(BaseStorageTest):
storage_class = RedisCache
picklable = True
class TestRedisBackend(BaseBackendTest):
backend_class = RedisBackend
@asynccontextmanager
async def init_session(self, **kwargs) -> AsyncIterator[CachedSession]:
async with super().init_session(**kwargs) as session:
yield session
await session.cache.close()
|
python
|
from django.contrib import admin
from .models import Book, Author, Publisher, Loaned
# Register your models here.
class BookAdmin(admin.ModelAdmin):
list_display = ('name', 'date_added')
search_fields = ["name"]
ordering = ["name"]
admin.site.register(Book, BookAdmin)
admin.site.register(Author)
admin.site.register(Publisher)
admin.site.register(Loaned)
|
python
|
from .manage import *
|
python
|
#!/usr/bin/env python
##
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the protocol parsing logic."""
from __future__ import absolute_import, division, print_function
import json
import unittest
import avro.protocol
import avro.schema
try:
unicode
except NameError:
unicode = str
try:
basestring # type: ignore
except NameError:
basestring = (bytes, unicode)
class TestProtocol(object):
"""A proxy for a protocol string that provides useful test metadata."""
def __init__(self, data, name='', comment=''):
if not isinstance(data, basestring):
data = json.dumps(data)
self.data = data
self.name = name or data
self.comment = comment
def parse(self):
return avro.protocol.parse(str(self))
def __str__(self):
return str(self.data)
class ValidTestProtocol(TestProtocol):
"""A proxy for a valid protocol string that provides useful test metadata."""
valid = True
class InvalidTestProtocol(TestProtocol):
"""A proxy for an invalid protocol string that provides useful test metadata."""
valid = False
HELLO_WORLD = ValidTestProtocol({
"namespace": "com.acme",
"protocol": "HelloWorld",
"types": [
{"name": "Greeting", "type": "record", "fields": [
{"name": "message", "type": "string"}]},
{"name": "Curse", "type": "error", "fields": [
{"name": "message", "type": "string"}]}
],
"messages": {
"hello": {
"request": [{"name": "greeting", "type": "Greeting" }],
"response": "Greeting",
"errors": ["Curse"]
}
}
})
EXAMPLES = [HELLO_WORLD, ValidTestProtocol({
"namespace": "org.apache.avro.test",
"protocol": "Simple",
"types": [
{"name": "Kind", "type": "enum", "symbols": ["FOO","BAR","BAZ"]},
{"name": "MD5", "type": "fixed", "size": 16},
{"name": "TestRecord", "type": "record", "fields": [
{"name": "name", "type": "string", "order": "ignore"},
{"name": "kind", "type": "Kind", "order": "descending"},
{"name": "hash", "type": "MD5"}
]},
{"name": "TestError", "type": "error", "fields": [{"name": "message", "type": "string"}]}
],
"messages": {
"hello": {
"request": [{"name": "greeting", "type": "string"}],
"response": "string"
}, "echo": {
"request": [{"name": "record", "type": "TestRecord"}],
"response": "TestRecord"
}, "add": {
"request": [{"name": "arg1", "type": "int"}, {"name": "arg2", "type": "int"}],
"response": "int"
}, "echoBytes": {
"request": [{"name": "data", "type": "bytes"}],
"response": "bytes"
}, "error": {
"request": [],
"response": "null",
"errors": ["TestError"]
}
}
}), ValidTestProtocol({
"namespace": "org.apache.avro.test.namespace",
"protocol": "TestNamespace",
"types": [
{"name": "org.apache.avro.test.util.MD5", "type": "fixed", "size": 16},
{"name": "TestRecord", "type": "record", "fields": [
{"name": "hash", "type": "org.apache.avro.test.util.MD5"}
]},
{"name": "TestError", "namespace": "org.apache.avro.test.errors", "type": "error",
"fields": [ {"name": "message", "type": "string"}]}
],
"messages": {
"echo": {
"request": [{"name": "record", "type": "TestRecord"}],
"response": "TestRecord"
}, "error": {
"request": [],
"response": "null",
"errors": ["org.apache.avro.test.errors.TestError"]
}
}
}), ValidTestProtocol({
"namespace": "org.apache.avro.test.namespace",
"protocol": "TestImplicitNamespace",
"types": [
{"name": "org.apache.avro.test.util.MD5", "type": "fixed", "size": 16},
{"name": "ReferencedRecord", "type": "record",
"fields": [ {"name": "foo", "type": "string"}]},
{"name": "TestRecord", "type": "record",
"fields": [{"name": "hash", "type": "org.apache.avro.test.util.MD5"},
{"name": "unqualified", "type": "ReferencedRecord"}]
},
{"name": "TestError", "type": "error", "fields": [{"name": "message", "type": "string"}]}
],
"messages": {
"echo": {
"request": [{"name": "qualified", "type": "org.apache.avro.test.namespace.TestRecord"}],
"response": "TestRecord"
}, "error": {
"request": [],
"response": "null",
"errors": ["org.apache.avro.test.namespace.TestError"]
}
}
}), ValidTestProtocol({
"namespace": "org.apache.avro.test.namespace",
"protocol": "TestNamespaceTwo",
"types": [
{"name": "org.apache.avro.test.util.MD5", "type": "fixed", "size": 16},
{"name": "ReferencedRecord", "type": "record",
"namespace": "org.apache.avro.other.namespace",
"fields": [{"name": "foo", "type": "string"}]},
{"name": "TestRecord", "type": "record",
"fields": [{"name": "hash", "type": "org.apache.avro.test.util.MD5"},
{"name": "qualified",
"type": "org.apache.avro.other.namespace.ReferencedRecord"}]
},
{"name": "TestError",
"type": "error", "fields": [{"name": "message", "type": "string"}]}],
"messages": {
"echo": {
"request": [{"name": "qualified", "type": "org.apache.avro.test.namespace.TestRecord"}],
"response": "TestRecord"
}, "error": {
"request": [],
"response": "null",
"errors": ["org.apache.avro.test.namespace.TestError"]
}
}
}), ValidTestProtocol({
"namespace": "org.apache.avro.test.namespace",
"protocol": "TestValidRepeatedName",
"types": [
{"name": "org.apache.avro.test.util.MD5", "type": "fixed", "size": 16},
{"name": "ReferencedRecord", "type": "record",
"namespace": "org.apache.avro.other.namespace",
"fields": [{"name": "foo", "type": "string"}]},
{"name": "ReferencedRecord", "type": "record",
"fields": [{"name": "bar", "type": "double"}]},
{"name": "TestError",
"type": "error", "fields": [{"name": "message", "type": "string"}]}],
"messages": {
"echo": {
"request": [{"name": "qualified", "type": "ReferencedRecord"}],
"response": "org.apache.avro.other.namespace.ReferencedRecord"},
"error": {
"request": [],
"response": "null",
"errors": ["org.apache.avro.test.namespace.TestError"]}
}
}), InvalidTestProtocol({
"namespace": "org.apache.avro.test.namespace",
"protocol": "TestInvalidRepeatedName",
"types": [
{"name": "org.apache.avro.test.util.MD5", "type": "fixed", "size": 16},
{"name": "ReferencedRecord", "type": "record",
"fields": [ {"name": "foo", "type": "string"}]},
{"name": "ReferencedRecord", "type": "record",
"fields": [ {"name": "bar", "type": "double"}]},
{"name": "TestError",
"type": "error", "fields": [{"name": "message", "type": "string"}]}],
"messages": {
"echo": {
"request": [{"name": "qualified", "type": "ReferencedRecord"}],
"response": "org.apache.avro.other.namespace.ReferencedRecord"
}, "error": {
"request": [],
"response": "null",
"errors": ["org.apache.avro.test.namespace.TestError"]
}
}
}),
ValidTestProtocol({
"namespace": "org.apache.avro.test",
"protocol": "BulkData",
"types": [],
"messages": {
"read": {
"request": [],
"response": "bytes"
}, "write": {
"request": [ {"name": "data", "type": "bytes"} ],
"response": "null"
}
}
}), ValidTestProtocol({
"protocol": "API",
"namespace": "xyz.api",
"types": [{
"type": "enum",
"name": "Symbology",
"namespace": "xyz.api.product",
"symbols": ["OPRA", "CUSIP", "ISIN", "SEDOL"]
}, {
"type": "record",
"name": "Symbol",
"namespace": "xyz.api.product",
"fields": [{"name": "symbology", "type": "xyz.api.product.Symbology"},
{"name": "symbol", "type": "string"}]
}, {
"type": "record",
"name": "MultiSymbol",
"namespace": "xyz.api.product",
"fields": [{"name": "symbols",
"type": {"type": "map", "values": "xyz.api.product.Symbol"}}]
}],
"messages": {}
}),
]
VALID_EXAMPLES = [e for e in EXAMPLES if e.valid]
class TestMisc(unittest.TestCase):
def test_inner_namespace_set(self):
print('')
print('TEST INNER NAMESPACE')
print('===================')
print('')
proto = HELLO_WORLD.parse()
self.assertEqual(proto.namespace, "com.acme")
greeting_type = proto.types_dict['Greeting']
self.assertEqual(greeting_type.namespace, 'com.acme')
def test_inner_namespace_not_rendered(self):
proto = HELLO_WORLD.parse()
self.assertEqual('com.acme.Greeting', proto.types[0].fullname)
self.assertEqual('Greeting', proto.types[0].name)
# but there shouldn't be 'namespace' rendered to json on the inner type
self.assertFalse('namespace' in proto.to_json()['types'][0])
class ProtocolParseTestCase(unittest.TestCase):
"""Enable generating parse test cases over all the valid and invalid example protocols."""
def __init__(self, test_proto):
"""Ignore the normal signature for unittest.TestCase because we are generating
many test cases from this one class. This is safe as long as the autoloader
ignores this class. The autoloader will ignore this class as long as it has
no methods starting with `test_`.
"""
super(ProtocolParseTestCase, self).__init__(
'parse_valid' if test_proto.valid else 'parse_invalid')
self.test_proto = test_proto
def parse_valid(self):
"""Parsing a valid protocol should not error."""
try:
self.test_proto.parse()
except avro.protocol.ProtocolParseException:
self.fail("Valid protocol failed to parse: {!s}".format(self.test_proto))
def parse_invalid(self):
"""Parsing an invalid schema should error."""
try:
self.test_proto.parse()
except (avro.protocol.ProtocolParseException, avro.schema.SchemaParseException):
pass
else:
self.fail("Invalid protocol should not have parsed: {!s}".format(self.test_proto))
class ErrorSchemaTestCase(unittest.TestCase):
"""Enable generating error schema test cases across all the valid test protocols."""
def __init__(self, test_proto):
"""Ignore the normal signature for unittest.TestCase because we are generating
many test cases from this one class. This is safe as long as the autoloader
ignores this class. The autoloader will ignore this class as long as it has
no methods starting with `test_`.
"""
super(ErrorSchemaTestCase, self).__init__('check_error_schema_exists')
self.test_proto = test_proto
def check_error_schema_exists(self):
"""Protocol messages should always have at least a string error schema."""
p = self.test_proto.parse()
for k, m in p.messages.items():
self.assertIsNotNone(m.errors, "Message {} did not have the expected implicit "
"string error schema.".format(k))
class RoundTripParseTestCase(unittest.TestCase):
"""Enable generating round-trip parse test cases over all the valid test protocols."""
def __init__(self, test_proto):
"""Ignore the normal signature for unittest.TestCase because we are generating
many test cases from this one class. This is safe as long as the autoloader
ignores this class. The autoloader will ignore this class as long as it has
no methods starting with `test_`.
"""
super(RoundTripParseTestCase, self).__init__('parse_round_trip')
self.test_proto = test_proto
def parse_round_trip(self):
"""The string of a Schema should be parseable to the same Schema."""
parsed = self.test_proto.parse()
round_trip = avro.protocol.parse(str(parsed))
self.assertEqual(parsed, round_trip)
def load_tests(loader, default_tests, pattern):
"""Generate test cases across many test schema."""
suite = unittest.TestSuite()
suite.addTests(loader.loadTestsFromTestCase(TestMisc))
    suite.addTests(ProtocolParseTestCase(ex) for ex in EXAMPLES)
    suite.addTests(ErrorSchemaTestCase(ex) for ex in VALID_EXAMPLES)
    suite.addTests(RoundTripParseTestCase(ex) for ex in VALID_EXAMPLES)
return suite
if __name__ == '__main__':
unittest.main()
|
python
|
import blocksci
import re
import sys
chain = blocksci.Blockchain("/home/hturki/bitcoin-blocksci.bak")
address_file = open("/home/hturki/stackoverflow_addr_raw.txt", "r").read()
addresses = address_file[1:-1].split("', '")
len(addresses)
blocksci_addresses = {}
bad_addresses = set({addresses[10], addresses[18], addresses[85], addresses[204], addresses[298], addresses[302], addresses[314], addresses[340], addresses[393], addresses[500], addresses[549], addresses[715], addresses[729], addresses[736], addresses[776], addresses[1033], addresses[1131], addresses[1136], addresses[1186]})
for address in addresses:
if len(address) != 34 or re.match(r"[a-zA-Z1-9]{27,35}$", address) is None:
print("%s not an address" % address)
elif (address in bad_addresses):
print("%s makes BlockSci segfault" % address)
else:
blocksci_addresses[address] = (blocksci.Address.from_string(address))
print("%s parsed correctly" % address)
sys.stdout.flush()
prefix = len("address_type.")
for address in blocksci_addresses:
blocksci_address = blocksci_addresses[address]
    if blocksci_address is not None:
print("%s,%d,%s" % (address, blocksci_address.address_num, str(blocksci_address.type)[prefix:]))
|
python
|
#!/usr/bin/env python
'''
We get the lidar point cloud and use it to determine if there are any obstacles ahead
Author:
Sleiman Safaoui
Email:
[email protected]
Github:
The-SS
Date:
Oct 3, 2018
'''
# python
from __future__ import print_function
import numpy as np
import copy
import math
from numpy import pi
# ROS
import rospy
from sensor_msgs.msg import LaserScan
class ScanSub:
'''
Subscribes to the lidar laser scan topic
'''
def __init__(self):
self.scan_data = []
        self.scan_sub = rospy.Subscriber("/scan", LaserScan, self.callback, queue_size=1)
def callback(self, data):
self.scan_data = data
def get_scan(self):
return self.scan_data
class ScanDetect:
'''
Uses the obtained laser scan to determine if there are any obstacles ahead
'''
def __init__(self, ang_range = 40.):
self.ang_range = ang_range #math.radians(ang_range) # range of angles to sweep in radian(about forward)
#self.ang_min = -float(self.ang_range)/2.0 # lower bound for ang_range
#self.ang_max = +float(self.ang_range)/2.0 # upper bound for ang_range
self.scan = [] # scan data
self.detected_points = [] # ranges detected in the area to scan
self.detected_points_ang = [] # angles of points detected in the area to scan
def scan_area(self, scan):
if scan == []: # no data
return [],[]
self.scan = scan
self.detected_points = [] # reset detected points
self.detected_points_ang = [] # reset detected points
if (scan.angle_min == scan.angle_max): # no lidar data
print("Lidar data invalid")
return [],[]
if (self.ang_range > 350):
self.detected_points = scan.ranges
self.detected_points_ang = np.arange(0, 360, scan.angle_increment).tolist()
return self.detected_points, self.detected_points_ang
half_ang = float(self.ang_range)/2.0
print(half_ang)
first_half_end = 0.0 + half_ang # first half angle interval: 0 --> first_half end
print(first_half_end)
second_half_start = math.degrees(2 * pi) - half_ang # second half angle interval: second_half_start --> 2*PI
print(second_half_start)
first_half_cnt = math.floor((first_half_end - 0.0) / math.degrees(scan.angle_increment)) + 1 # number of angle increments in first half
second_half_cnt = math.floor((math.degrees(2* pi) - second_half_start) / math.degrees(scan.angle_increment)) # number of angle increments in second half
if (len(scan.ranges) < (first_half_cnt + second_half_cnt)):
print ("Invalid increment count")
return [], []
for i in range(0, int(first_half_cnt)):
print(i)
self.detected_points.append(scan.ranges[i])
self.detected_points_ang.append(i * scan.angle_increment)
for i in range(int(math.ceil(second_half_start)), int(math.ceil(second_half_start) + second_half_cnt)):
print(i)
print(int(math.ceil(second_half_start)))
print(int(math.ceil(second_half_start) + second_half_cnt))
self.detected_points.append(scan.ranges[i])
self.detected_points_ang.append(i * scan.angle_increment)
return self.detected_points, self.detected_points_ang
'''
ang_min = self.ang_min
ang_max = self.ang_max
if (ang_min < scan.angle_min):
ang_min_idx = 0
ang_min = scan.angle_min
else:
ang_min_idx = math.ceil((ang_min-scan.angle_min)/scan.angle_increment) + 1 # number of increments between the lidar min. angle and the desired min. angle
ang_min = ang_min_idx * scan.angle_increment
if (ang_max > scan.angle_max):
ang_max_idx = len(scan.ranges)
ang_max = scan.angle_max
else:
ang_max_idx = math.floor((ang_max-scan.angle_min)/scan.angle_increment) + 1 # number of increments between the lidar min. angle and the desired max. angle
ang_max = ang_max_idx * scan.angle_increment
if ang_min_idx > ang_max_idx:
return [],[]
for i in range(int(ang_min_idx), int(ang_max_idx)+1):
self.detected_points.append(scan.ranges[i])
self.detected_points_ang.append(scan.angle_min + i * math.degrees(scan.angle_increment))
#return self.detected_points, self.detected_points_ang
return scan.ranges, []
'''
class ScanPub:
'''
Publishes data about lidar detection
'''
def __init__(self):
self.pub_data = 0.0
# self.scan_pub = rospy.
def main():
rospy.init_node("lidar_detect")
rate = rospy.Rate(15)
nodename = "/lidar_detect"
old_seq = -1
# Initialize nodes
scan_sub = ScanSub()
scan_detect = ScanDetect()
scan_pub = ScanPub()
while not rospy.is_shutdown():
scan = scan_sub.get_scan() #get laser scan
if (scan != []): # if scan was obtained
if (scan.header.seq != old_seq): # new data obtained
old_seq = scan.header.seq
#detect using scan
dists, angs = scan_detect.scan_area(scan)
print ('dists', dists)
print ('angs', angs)
# publish result data
if __name__ == "__main__":
try:
main()
except rospy.ROSInterruptException as e:
rospy.logfatal("ROS interrupt. Shutting down lidar_detect node")
print (e)
pass
|
python
|
from rdflib import Literal
from .namespaces import BRICK, TAG, OWL
parameter_definitions = {
"Parameter": {
"tags": [TAG.Point, TAG.Parameter],
"subclasses": {
"Delay_Parameter": {
"tags": [TAG.Point, TAG.Delay, TAG.Parameter],
"subclasses": {
"Alarm_Delay_Parameter": {
"tags": [TAG.Point, TAG.Alarm, TAG.Delay, TAG.Parameter],
},
},
},
"Humidity_Parameter": {
"tags": [TAG.Point, TAG.Humidity, TAG.Parameter],
"subclasses": {
"High_Humidity_Alarm_Parameter": {
"tags": [
TAG.Point,
TAG.High,
TAG.Humidity,
TAG.Alarm,
TAG.Parameter,
],
},
"Low_Humidity_Alarm_Parameter": {
"tags": [
TAG.Point,
TAG.Low,
TAG.Humidity,
TAG.Alarm,
TAG.Parameter,
],
},
},
},
"Load_Parameter": {
"tags": [TAG.Point, TAG.Load, TAG.Parameter],
"subclasses": {
"Max_Load_Setpoint": {
"tags": [
TAG.Point,
TAG.Max,
TAG.Load,
TAG.Parameter,
TAG.Setpoint,
],
},
},
},
"Temperature_Parameter": {
"tags": [TAG.Point, TAG.Temperature, TAG.Parameter],
"subclasses": {
"High_Temperature_Alarm_Parameter": {
"tags": [
TAG.Point,
TAG.High,
TAG.Temperature,
TAG.Alarm,
TAG.Parameter,
],
},
"Low_Temperature_Alarm_Parameter": {
"tags": [
TAG.Point,
TAG.Low,
TAG.Temperature,
TAG.Alarm,
TAG.Parameter,
],
},
"Low_Freeze_Protect_Temperature_Parameter": {
"tags": [
TAG.Point,
TAG.Low,
TAG.Freeze,
TAG.Protect,
TAG.Temperature,
TAG.Parameter,
],
},
"Lockout_Temperature_Differential_Parameter": {
"tags": [
TAG.Point,
TAG.Lockout,
TAG.Temperature,
TAG.Differential,
TAG.Sensor,
],
"subclasses": {
"Outside_Air_Lockout_Temperature_Differential_Parameter": {
"tags": [
TAG.Point,
TAG.Outside,
TAG.Air,
TAG.Lockout,
TAG.Temperature,
TAG.Differential,
TAG.Parameter,
],
"subclasses": {
"Low_Outside_Air_Lockout_Temperature_Differential_Parameter": {
"tags": [
TAG.Point,
TAG.Low,
TAG.Outside,
TAG.Air,
TAG.Lockout,
TAG.Temperature,
TAG.Differential,
TAG.Parameter,
],
},
"High_Outside_Air_Lockout_Temperature_Differential_Parameter": {
"tags": [
TAG.Point,
TAG.High,
TAG.Outside,
TAG.Air,
TAG.Lockout,
TAG.Temperature,
TAG.Differential,
TAG.Parameter,
],
},
},
},
},
},
},
},
"PID_Parameter": {
"tags": [TAG.Point, TAG.Parameter, TAG.PID],
"subclasses": {
"Gain_Parameter": {
"tags": [TAG.Point, TAG.Parameter, TAG.PID, TAG.Gain],
"subclasses": {
"Integral_Gain_Parameter": {
"tags": [
TAG.Point,
TAG.Parameter,
TAG.PID,
TAG.Gain,
TAG.Integral,
],
"subclasses": {
"Supply_Air_Integral_Gain_Parameter": {
"tags": [
TAG.Point,
TAG.Supply,
TAG.Air,
TAG.Integral,
TAG.Gain,
TAG.Parameter,
TAG.PID,
],
}
},
},
"Proportional_Gain_Parameter": {
"tags": [
TAG.Point,
TAG.Parameter,
TAG.PID,
TAG.Gain,
TAG.Proportional,
],
"subclasses": {
"Supply_Air_Proportional_Gain_Parameter": {
"tags": [
TAG.Point,
TAG.Parameter,
TAG.PID,
TAG.Gain,
TAG.Proportional,
TAG.Supply,
TAG.Air,
],
},
},
},
"Derivative_Gain_Parameter": {
"tags": [
TAG.Point,
TAG.Parameter,
TAG.PID,
TAG.Gain,
TAG.Derivative,
],
},
},
},
"Step_Parameter": {
"tags": [TAG.Point, TAG.Parameter, TAG.Step],
"subclasses": {
"Differential_Pressure_Step_Parameter": {
"subclasses": {
"Chilled_Water_Differential_Pressure_Step_Parameter": {
"tags": [
TAG.Point,
TAG.Chilled,
TAG.Water,
TAG.Differential,
TAG.Pressure,
TAG.Step,
TAG.Parameter,
],
}
},
"tags": [
TAG.Point,
TAG.Differential,
TAG.Pressure,
TAG.Step,
TAG.Parameter,
],
},
"Static_Pressure_Step_Parameter": {
"subclasses": {
"Air_Static_Pressure_Step_Parameter": {
"tags": [
TAG.Point,
TAG.Air,
TAG.Static,
TAG.Pressure,
TAG.Step,
TAG.Parameter,
],
"subclasses": {
"Discharge_Air_Static_Pressure_Step_Parameter": {
"tags": [
TAG.Point,
TAG.Discharge,
TAG.Air,
TAG.Static,
TAG.Pressure,
TAG.Step,
TAG.Parameter,
],
},
},
}
},
"tags": [
TAG.Point,
TAG.Static,
TAG.Pressure,
TAG.Step,
TAG.Parameter,
],
},
"Temperature_Step_Parameter": {
"subclasses": {
"Air_Temperature_Step_Parameter": {
"tags": [
TAG.Point,
TAG.Air,
TAG.Temperature,
TAG.Step,
TAG.Parameter,
],
"subclasses": {
"Discharge_Air_Temperature_Step_Parameter": {
"tags": [
TAG.Point,
TAG.Discharge,
TAG.Air,
TAG.Temperature,
TAG.Step,
TAG.Parameter,
],
},
"Supply_Air_Temperature_Step_Parameter": {
OWL.equivalentClass: BRICK[
"Discharge_Air_Temperature_Step_Parameter"
],
"tags": [
TAG.Point,
TAG.Supply,
TAG.Air,
TAG.Temperature,
TAG.Step,
TAG.Parameter,
],
},
},
}
},
"parents": [BRICK.Temperature_Parameter],
"tags": [
TAG.Point,
TAG.Temperature,
TAG.Step,
TAG.Parameter,
],
},
},
},
"Time_Parameter": {
"tags": [TAG.Point, TAG.Parameter, TAG.Time],
"subclasses": {
"Integral_Time_Parameter": {
"tags": [
TAG.Point,
TAG.Parameter,
TAG.PID,
TAG.Time,
TAG.Integral,
],
"subclasses": {
"Air_Temperature_Integral_Time_Parameter": {
"tags": [
TAG.Point,
TAG.Air,
TAG.Temperature,
TAG.Parameter,
TAG.PID,
TAG.Time,
TAG.Integral,
],
"parents": [BRICK.Temperature_Parameter],
"subclasses": {
"Cooling_Discharge_Air_Temperature_Integral_Time_Parameter": {
"tags": [
TAG.Point,
TAG.Cool,
TAG.Discharge,
TAG.Air,
TAG.Temperature,
TAG.Integral,
TAG.Time,
TAG.Parameter,
TAG.PID,
],
},
"Cooling_Supply_Air_Temperature_Integral_Time_Parameter": {
OWL.equivalentClass: BRICK[
"Cooling_Discharge_Air_Temperature_Integral_Time_Parameter"
],
"tags": [
TAG.Point,
TAG.Cool,
TAG.Supply,
TAG.Air,
TAG.Temperature,
TAG.Integral,
TAG.Time,
TAG.Parameter,
TAG.PID,
],
},
"Heating_Discharge_Air_Temperature_Integral_Time_Parameter": {
"tags": [
TAG.Point,
TAG.Heat,
TAG.Discharge,
TAG.Air,
TAG.Temperature,
TAG.Integral,
TAG.Time,
TAG.Parameter,
TAG.PID,
],
},
"Heating_Supply_Air_Temperature_Integral_Time_Parameter": {
OWL.equivalentClass: BRICK[
"Heating_Discharge_Air_Temperature_Integral_Time_Parameter"
],
"tags": [
TAG.Point,
TAG.Heat,
TAG.Supply,
TAG.Air,
TAG.Temperature,
TAG.Integral,
TAG.Time,
TAG.Parameter,
TAG.PID,
],
},
},
},
"Differential_Pressure_Integral_Time_Parameter": {
"tags": [
TAG.Point,
TAG.Differential,
TAG.Pressure,
TAG.Integral,
TAG.Time,
TAG.Parameter,
TAG.PID,
],
"subclasses": {
"Hot_Water_Differential_Pressure_Integral_Time_Parameter": {
"tags": [
TAG.Point,
TAG.Hot,
TAG.Water,
TAG.Differential,
TAG.Pressure,
TAG.Integral,
TAG.Time,
TAG.Parameter,
TAG.PID,
],
},
"Chilled_Water_Differential_Pressure_Integral_Time_Parameter": {
"tags": [
TAG.Point,
TAG.Chilled,
TAG.Water,
TAG.Differential,
TAG.Pressure,
TAG.Integral,
TAG.Time,
TAG.Parameter,
TAG.PID,
],
},
"Discharge_Water_Differential_Pressure_Integral_Time_Parameter": {
"tags": [
TAG.Point,
TAG.Discharge,
TAG.Water,
TAG.Differential,
TAG.Pressure,
TAG.Integral,
TAG.Time,
TAG.Parameter,
TAG.PID,
],
},
"Supply_Water_Differential_Pressure_Integral_Time_Parameter": {
"tags": [
TAG.Point,
TAG.Supply,
TAG.Water,
TAG.Differential,
TAG.Pressure,
TAG.Integral,
TAG.Time,
TAG.Parameter,
TAG.PID,
],
},
},
},
"Exhaust_Air_Flow_Integral_Time_Parameter": {
"subclasses": {
"Exhaust_Air_Stack_Flow_Integral_Time_Parameter": {
"tags": [
TAG.Point,
TAG.Exhaust,
TAG.Air,
TAG.Stack,
TAG.Flow,
TAG.Integral,
TAG.Time,
TAG.Parameter,
TAG.PID,
],
}
},
"tags": [
TAG.Point,
TAG.Exhaust,
TAG.Air,
TAG.Flow,
TAG.Integral,
TAG.Time,
TAG.Parameter,
TAG.PID,
],
},
"Static_Pressure_Integral_Time_Parameter": {
"tags": [
TAG.Point,
TAG.Static,
TAG.Pressure,
TAG.Integral,
TAG.Time,
TAG.Parameter,
TAG.PID,
],
"subclasses": {
"Discharge_Air_Static_Pressure_Integral_Time_Parameter": {
"tags": [
TAG.Point,
TAG.Discharge,
TAG.Air,
TAG.Static,
TAG.Pressure,
TAG.Integral,
TAG.Time,
TAG.Parameter,
TAG.PID,
],
},
"Supply_Air_Static_Pressure_Integral_Time_Parameter": {
OWL.equivalentClass: BRICK[
"Discharge_Air_Static_Pressure_Integral_Time_Parameter"
],
"tags": [
TAG.Point,
TAG.Supply,
TAG.Air,
TAG.Static,
TAG.Pressure,
TAG.Integral,
TAG.Time,
TAG.Parameter,
TAG.PID,
],
},
},
},
"Supply_Water_Differential_Pressure_Integral_Time_Parameter": {
"tags": [
TAG.Point,
TAG.Supply,
TAG.Water,
TAG.Differential,
TAG.Pressure,
TAG.Integral,
TAG.Time,
TAG.Parameter,
TAG.PID,
],
},
"Supply_Water_Temperature_Integral_Time_Parameter": {
"parents": [BRICK.Temperature_Parameter],
"tags": [
TAG.Point,
TAG.Supply,
TAG.Water,
TAG.Temperature,
TAG.Integral,
TAG.Time,
TAG.Parameter,
TAG.PID,
],
},
},
},
"Derivative_Time_Parameter": {
"tags": [
TAG.Point,
TAG.Parameter,
TAG.PID,
TAG.Time,
TAG.Derivative,
],
},
},
},
"Proportional_Band_Parameter": {
"tags": [
TAG.Point,
TAG.Parameter,
TAG.PID,
TAG.Proportional,
TAG.Band,
TAG.Parameter,
TAG.PID,
],
"subclasses": {
"Differential_Pressure_Proportional_Band": {
"tags": [
TAG.Point,
TAG.Differential,
TAG.Pressure,
TAG.Proportional,
TAG.Band,
TAG.PID,
],
"subclasses": {
"Hot_Water_Differential_Pressure_Proportional_Band_Parameter": {
"tags": [
TAG.Point,
TAG.Hot,
TAG.Water,
TAG.Differential,
TAG.Pressure,
TAG.Proportional,
TAG.Band,
TAG.Parameter,
TAG.PID,
],
},
"Chilled_Water_Differential_Pressure_Proportional_Band_Parameter": {
"tags": [
TAG.Point,
TAG.Chilled,
TAG.Water,
TAG.Differential,
TAG.Pressure,
TAG.Proportional,
TAG.Band,
TAG.Parameter,
TAG.PID,
],
},
"Discharge_Water_Differential_Pressure_Proportional_Band_Parameter": {
"tags": [
TAG.Point,
TAG.Discharge,
TAG.Water,
TAG.Differential,
TAG.Pressure,
TAG.Proportional,
TAG.Band,
TAG.Parameter,
TAG.PID,
],
},
"Supply_Water_Differential_Pressure_Proportional_Band_Parameter": {
"tags": [
TAG.Point,
TAG.Supply,
TAG.Water,
TAG.Differential,
TAG.Pressure,
TAG.Proportional,
TAG.Band,
TAG.Parameter,
TAG.PID,
],
},
},
},
"Discharge_Air_Temperature_Proportional_Band_Parameter": {
"tags": [
TAG.Point,
TAG.Discharge,
TAG.Air,
TAG.Temperature,
TAG.Proportional,
TAG.Band,
TAG.Parameter,
TAG.PID,
],
"parents": [BRICK.Temperature_Parameter],
"subclasses": {
"Heating_Discharge_Air_Temperature_Proportional_Band_Parameter": {
"tags": [
TAG.Point,
TAG.Heat,
TAG.Discharge,
TAG.Air,
TAG.Temperature,
TAG.Proportional,
TAG.Band,
TAG.Parameter,
TAG.PID,
],
},
"Cooling_Discharge_Air_Temperature_Proportional_Band_Parameter": {
"tags": [
TAG.Point,
TAG.Cool,
TAG.Discharge,
TAG.Air,
TAG.Temperature,
TAG.Proportional,
TAG.Band,
TAG.Parameter,
TAG.PID,
],
},
},
},
"Supply_Air_Temperature_Proportional_Band_Parameter": {
OWL.equivalentClass: BRICK[
"Discharge_Air_Temperature_Proportional_Band_Parameter"
],
"tags": [
TAG.Point,
TAG.Supply,
TAG.Air,
TAG.Temperature,
TAG.Proportional,
TAG.Band,
TAG.Parameter,
TAG.PID,
],
"parents": [BRICK.Temperature_Parameter],
"subclasses": {
"Cooling_Supply_Air_Temperature_Proportional_Band_Parameter": {
OWL.equivalentClass: BRICK[
"Cooling_Discharge_Air_Temperature_Proportional_Band_Parameter"
],
"tags": [
TAG.Point,
TAG.Cool,
TAG.Supply,
TAG.Air,
TAG.Temperature,
TAG.Proportional,
TAG.Band,
TAG.Parameter,
TAG.PID,
],
},
"Heating_Supply_Air_Temperature_Proportional_Band_Parameter": {
OWL.equivalentClass: BRICK[
"Heating_Discharge_Air_Temperature_Proportional_Band_Parameter"
],
"tags": [
TAG.Point,
TAG.Heat,
TAG.Supply,
TAG.Air,
TAG.Temperature,
TAG.Proportional,
TAG.Band,
TAG.Parameter,
TAG.PID,
],
},
},
},
"Exhaust_Air_Flow_Proportional_Band_Parameter": {
"tags": [
TAG.Point,
TAG.Exhaust,
TAG.Air,
TAG.Flow,
TAG.Proportional,
TAG.Band,
TAG.Parameter,
TAG.PID,
],
"subclasses": {
"Exhaust_Air_Stack_Flow_Proportional_Band_Parameter": {
"tags": [
TAG.Point,
TAG.Exhaust,
TAG.Air,
TAG.Stack,
TAG.Flow,
TAG.Proportional,
TAG.Band,
TAG.Parameter,
TAG.PID,
],
},
},
},
"Static_Pressure_Proportional_Band_Parameter": {
"subclasses": {
"Discharge_Air_Static_Pressure_Proportional_Band_Parameter": {
"tags": [
TAG.Point,
TAG.Discharge,
TAG.Air,
TAG.Static,
TAG.Pressure,
TAG.Proportional,
TAG.Band,
TAG.Parameter,
TAG.PID,
],
},
"Exhaust_Air_Static_Pressure_Proportional_Band_Parameter": {
"tags": [
TAG.Point,
TAG.Exhaust,
TAG.Air,
TAG.Static,
TAG.Pressure,
TAG.Proportional,
TAG.Band,
TAG.Parameter,
TAG.PID,
],
},
"Supply_Air_Static_Pressure_Proportional_Band_Parameter": {
OWL.equivalentClass: BRICK[
"Discharge_Air_Static_Pressure_Proportional_Band_Parameter"
],
"tags": [
TAG.Point,
TAG.Supply,
TAG.Air,
TAG.Static,
TAG.Pressure,
TAG.Proportional,
TAG.Band,
TAG.Parameter,
TAG.PID,
],
},
},
"tags": [
TAG.Point,
TAG.Static,
TAG.Pressure,
TAG.Proportional,
TAG.Band,
TAG.Parameter,
TAG.PID,
],
},
"Supply_Water_Temperature_Proportional_Band_Parameter": {
"parents": [BRICK.Temperature_Parameter],
"tags": [
TAG.Point,
TAG.Supply,
TAG.Water,
TAG.Temperature,
TAG.Proportional,
TAG.Band,
TAG.Parameter,
TAG.PID,
],
},
"Discharge_Water_Temperature_Proportional_Band_Parameter": {
"parents": [BRICK.Temperature_Parameter],
"tags": [
TAG.Point,
TAG.Discharge,
TAG.Water,
TAG.Temperature,
TAG.Proportional,
TAG.Band,
TAG.Parameter,
TAG.PID,
],
},
},
},
},
},
"Tolerance_Parameter": {
"tags": [TAG.Point, TAG.Tolerance, TAG.Parameter],
"subclasses": {
"Humidity_Tolerance_Parameter": {
"tags": [TAG.Point, TAG.Tolerance, TAG.Parameter, TAG.Humidity],
"parents": [BRICK.Humidity_Parameter],
},
"Temperature_Tolerance_Parameter": {
"parents": [BRICK.Temperature_Parameter],
"tags": [
TAG.Point,
TAG.Tolerance,
TAG.Parameter,
TAG.Temperature,
],
},
},
},
"Limit": {
"tags": [TAG.Point, TAG.Parameter, TAG.Limit],
"subclasses": {
"Close_Limit": {
"tags": [TAG.Point, TAG.Close, TAG.Parameter, TAG.Limit],
},
"Speed_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Speed,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
"subclasses": {
"Max_Speed_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Max,
TAG.Speed,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
"parents": [BRICK.Max_Limit],
},
"Min_Speed_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Min,
TAG.Speed,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
"parents": [BRICK.Min_Limit],
},
},
},
"Air_Temperature_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Air,
TAG.Temperature,
TAG.Limit,
TAG.Setpoint,
],
"parents": [BRICK.Temperature_Parameter],
"subclasses": {
"Discharge_Air_Temperature_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Discharge,
TAG.Air,
TAG.Temperature,
TAG.Limit,
TAG.Setpoint,
],
"subclasses": {
"Max_Discharge_Air_Temperature_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Max,
TAG.Discharge,
TAG.Air,
TAG.Temperature,
TAG.Limit,
TAG.Setpoint,
],
"parents": [
BRICK.Max_Temperature_Setpoint_Limit
],
},
"Min_Discharge_Air_Temperature_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Min,
TAG.Discharge,
TAG.Air,
TAG.Temperature,
TAG.Limit,
TAG.Setpoint,
],
"parents": [
BRICK.Min_Temperature_Setpoint_Limit
],
},
},
},
},
},
"Air_Flow_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Air,
TAG.Flow,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
"subclasses": {
"Max_Air_Flow_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Max,
TAG.Air,
TAG.Flow,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
"Min_Air_Flow_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Min,
TAG.Air,
TAG.Flow,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
},
},
"Current_Limit": {
"tags": [TAG.Point, TAG.Current, TAG.Limit, TAG.Parameter],
},
"Position_Limit": {
"tags": [TAG.Point, TAG.Position, TAG.Limit],
"subclasses": {
"Max_Position_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Max,
TAG.Position,
TAG.Limit,
TAG.Setpoint,
],
"parents": [BRICK.Max_Limit],
},
"Min_Position_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Min,
TAG.Position,
TAG.Limit,
TAG.Setpoint,
],
"parents": [BRICK.Min_Limit],
},
},
},
"Differential_Pressure_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Differential,
TAG.Pressure,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
"subclasses": {
"Max_Chilled_Water_Differential_Pressure_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Max,
TAG.Chilled,
TAG.Water,
TAG.Differential,
TAG.Pressure,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
"Min_Chilled_Water_Differential_Pressure_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Min,
TAG.Chilled,
TAG.Water,
TAG.Differential,
TAG.Pressure,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
"Max_Hot_Water_Differential_Pressure_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Max,
TAG.Hot,
TAG.Water,
TAG.Differential,
TAG.Pressure,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
"Min_Hot_Water_Differential_Pressure_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Min,
TAG.Hot,
TAG.Water,
TAG.Differential,
TAG.Pressure,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
},
},
"Fresh_Air_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Fresh,
TAG.Air,
TAG.Limit,
TAG.Setpoint,
],
"subclasses": {
"Min_Fresh_Air_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Min,
TAG.Fresh,
TAG.Air,
TAG.Limit,
TAG.Setpoint,
],
"parents": [BRICK.Min_Limit],
},
},
},
"Ventilation_Air_Flow_Ratio_Limit": {
"tags": [
TAG.Point,
TAG.Ventilation,
TAG.Air,
TAG.Ratio,
TAG.Limit,
],
},
"Static_Pressure_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Static,
TAG.Pressure,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
"subclasses": {
"Min_Static_Pressure_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Min,
TAG.Static,
TAG.Pressure,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
"Max_Static_Pressure_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Max,
TAG.Static,
TAG.Pressure,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
"High_Static_Pressure_Cutout_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.High,
TAG.Static,
TAG.Pressure,
TAG.Cutout,
TAG.Limit,
TAG.Setpoint,
],
},
},
},
"Max_Limit": {
"tags": [TAG.Point, TAG.Max, TAG.Limit, TAG.Parameter],
"subclasses": {
"Max_Speed_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Max,
TAG.Speed,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
"Max_Discharge_Air_Static_Pressure_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Max,
TAG.Discharge,
TAG.Air,
TAG.Static,
TAG.Pressure,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
"Max_Supply_Air_Static_Pressure_Setpoint_Limit": {
OWL.equivalentClass: BRICK[
"Max_Discharge_Air_Static_Pressure_Setpoint_Limit"
],
"tags": [
TAG.Point,
TAG.Max,
TAG.Supply,
TAG.Air,
TAG.Static,
TAG.Pressure,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
"Max_Chilled_Water_Differential_Pressure_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Max,
TAG.Chilled,
TAG.Water,
TAG.Differential,
TAG.Pressure,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
"Max_Hot_Water_Differential_Pressure_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Max,
TAG.Hot,
TAG.Water,
TAG.Differential,
TAG.Pressure,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
"Max_Static_Pressure_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Max,
TAG.Static,
TAG.Pressure,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
"subclasses": {
"Max_Discharge_Air_Static_Pressure_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Max,
TAG.Discharge,
TAG.Air,
TAG.Static,
TAG.Pressure,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
"Max_Supply_Air_Static_Pressure_Setpoint_Limit": {
OWL.equivalentClass: BRICK[
"Max_Discharge_Air_Static_Pressure_Setpoint_Limit"
],
"tags": [
TAG.Point,
TAG.Max,
TAG.Supply,
TAG.Air,
TAG.Static,
TAG.Pressure,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
},
},
"Max_Temperature_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Max,
TAG.Temperature,
TAG.Limit,
TAG.Setpoint,
],
"parents": [BRICK.Temperature_Parameter],
},
"Max_Air_Flow_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Max,
TAG.Air,
TAG.Flow,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
"subclasses": {
"Max_Cooling_Supply_Air_Flow_Setpoint_Limit": {
OWL.equivalentClass: BRICK[
"Max_Cooling_Discharge_Air_Flow_Setpoint_Limit"
],
"tags": [
TAG.Point,
TAG.Max,
TAG.Cool,
TAG.Supply,
TAG.Air,
TAG.Flow,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
"subclasses": {
"Max_Occupied_Cooling_Supply_Air_Flow_Setpoint_Limit": {
OWL.equivalentClass: BRICK[
"Max_Occupied_Cooling_Discharge_Air_Flow_Setpoint_Limit"
],
"tags": [
TAG.Point,
TAG.Max,
TAG.Occupied,
TAG.Cool,
TAG.Supply,
TAG.Air,
TAG.Flow,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
"Max_Unoccupied_Cooling_Supply_Air_Flow_Setpoint_Limit": {
OWL.equivalentClass: BRICK[
"Max_Unoccupied_Cooling_Discharge_Air_Flow_Setpoint_Limit"
],
"tags": [
TAG.Point,
TAG.Max,
TAG.Unoccupied,
TAG.Cool,
TAG.Supply,
TAG.Air,
TAG.Flow,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
},
},
"Max_Cooling_Discharge_Air_Flow_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Max,
TAG.Cool,
TAG.Discharge,
TAG.Air,
TAG.Flow,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
"subclasses": {
"Max_Occupied_Cooling_Discharge_Air_Flow_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Max,
TAG.Occupied,
TAG.Cool,
TAG.Discharge,
TAG.Air,
TAG.Flow,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
"Max_Unoccupied_Cooling_Discharge_Air_Flow_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Max,
TAG.Unoccupied,
TAG.Cool,
TAG.Discharge,
TAG.Air,
TAG.Flow,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
},
},
"Max_Heating_Supply_Air_Flow_Setpoint_Limit": {
OWL.equivalentClass: BRICK[
"Max_Heating_Discharge_Air_Flow_Setpoint_Limit"
],
"tags": [
TAG.Point,
TAG.Max,
TAG.Heat,
TAG.Supply,
TAG.Air,
TAG.Flow,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
"subclasses": {
"Max_Occupied_Heating_Supply_Air_Flow_Setpoint_Limit": {
OWL.equivalentClass: BRICK[
"Max_Occupied_Heating_Discharge_Air_Flow_Setpoint_Limit"
],
"tags": [
TAG.Point,
TAG.Max,
TAG.Occupied,
TAG.Heat,
TAG.Supply,
TAG.Air,
TAG.Flow,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
"Max_Unoccupied_Heating_Supply_Air_Flow_Setpoint_Limit": {
OWL.equivalentClass: BRICK[
"Max_Unoccupied_Heating_Discharge_Air_Flow_Setpoint_Limit"
],
"tags": [
TAG.Point,
TAG.Max,
TAG.Unoccupied,
TAG.Heat,
TAG.Supply,
TAG.Air,
TAG.Flow,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
},
},
"Max_Heating_Discharge_Air_Flow_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Max,
TAG.Heat,
TAG.Discharge,
TAG.Air,
TAG.Flow,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
"subclasses": {
"Max_Occupied_Heating_Discharge_Air_Flow_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Max,
TAG.Occupied,
TAG.Heat,
TAG.Discharge,
TAG.Air,
TAG.Flow,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
"Max_Unoccupied_Heating_Discharge_Air_Flow_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Max,
TAG.Unoccupied,
TAG.Heat,
TAG.Discharge,
TAG.Air,
TAG.Flow,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
},
},
},
},
},
},
"Min_Limit": {
"tags": [TAG.Point, TAG.Min, TAG.Limit, TAG.Parameter],
"subclasses": {
"Min_Speed_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Min,
TAG.Speed,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
"Min_Hot_Water_Differential_Pressure_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Min,
TAG.Hot,
TAG.Water,
TAG.Differential,
TAG.Pressure,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
"Min_Chilled_Water_Differential_Pressure_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Min,
TAG.Chilled,
TAG.Water,
TAG.Differential,
TAG.Pressure,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
"Min_Discharge_Air_Static_Pressure_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Min,
TAG.Discharge,
TAG.Air,
TAG.Static,
TAG.Pressure,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
"Min_Supply_Air_Static_Pressure_Setpoint_Limit": {
OWL.equivalentClass: BRICK[
"Min_Discharge_Air_Static_Pressure_Setpoint_Limit"
],
"tags": [
TAG.Point,
TAG.Min,
TAG.Supply,
TAG.Air,
TAG.Static,
TAG.Pressure,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
"Min_Temperature_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Min,
TAG.Temperature,
TAG.Limit,
TAG.Setpoint,
],
"parents": [BRICK.Temperature_Parameter],
},
"Min_Static_Pressure_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Min,
TAG.Static,
TAG.Pressure,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
"subclasses": {
"Min_Discharge_Air_Static_Pressure_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Min,
TAG.Discharge,
TAG.Air,
TAG.Static,
TAG.Pressure,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
"Min_Supply_Air_Static_Pressure_Setpoint_Limit": {
OWL.equivalentClass: BRICK[
"Min_Discharge_Air_Static_Pressure_Setpoint_Limit"
],
"tags": [
TAG.Point,
TAG.Min,
TAG.Supply,
TAG.Air,
TAG.Static,
TAG.Pressure,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
},
},
"Min_Air_Flow_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Min,
TAG.Air,
TAG.Flow,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
"subclasses": {
"Min_Outside_Air_Flow_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Min,
TAG.Outside,
TAG.Air,
TAG.Flow,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
"Min_Cooling_Supply_Air_Flow_Setpoint_Limit": {
OWL.equivalentClass: BRICK[
"Min_Cooling_Discharge_Air_Flow_Setpoint_Limit"
],
"tags": [
TAG.Point,
TAG.Min,
TAG.Cool,
TAG.Supply,
TAG.Air,
TAG.Flow,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
"subclasses": {
"Min_Occupied_Cooling_Supply_Air_Flow_Setpoint_Limit": {
OWL.equivalentClass: BRICK[
"Min_Occupied_Cooling_Discharge_Air_Flow_Setpoint_Limit"
],
"tags": [
TAG.Point,
TAG.Min,
TAG.Occupied,
TAG.Cool,
TAG.Supply,
TAG.Air,
TAG.Flow,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
"Min_Unoccupied_Cooling_Supply_Air_Flow_Setpoint_Limit": {
OWL.equivalentClass: BRICK[
"Min_Unoccupied_Cooling_Discharge_Air_Flow_Setpoint_Limit"
],
"tags": [
TAG.Point,
TAG.Min,
TAG.Unoccupied,
TAG.Cool,
TAG.Supply,
TAG.Air,
TAG.Flow,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
},
},
"Min_Cooling_Discharge_Air_Flow_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Min,
TAG.Cool,
TAG.Discharge,
TAG.Air,
TAG.Flow,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
"subclasses": {
"Min_Occupied_Cooling_Discharge_Air_Flow_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Min,
TAG.Occupied,
TAG.Cool,
TAG.Discharge,
TAG.Air,
TAG.Flow,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
"Min_Unoccupied_Cooling_Discharge_Air_Flow_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Min,
TAG.Unoccupied,
TAG.Cool,
TAG.Discharge,
TAG.Air,
TAG.Flow,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
},
},
"Min_Heating_Supply_Air_Flow_Setpoint_Limit": {
OWL.equivalentClass: BRICK[
"Min_Heating_Discharge_Air_Flow_Setpoint_Limit"
],
"tags": [
TAG.Point,
TAG.Min,
TAG.Heat,
TAG.Supply,
TAG.Air,
TAG.Flow,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
"subclasses": {
"Min_Occupied_Heating_Supply_Air_Flow_Setpoint_Limit": {
OWL.equivalentClass: BRICK[
"Min_Occupied_Heating_Discharge_Air_Flow_Setpoint_Limit"
],
"tags": [
TAG.Point,
TAG.Min,
TAG.Occupied,
TAG.Heat,
TAG.Supply,
TAG.Air,
TAG.Flow,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
"Min_Unoccupied_Heating_Supply_Air_Flow_Setpoint_Limit": {
OWL.equivalentClass: BRICK[
"Min_Unoccupied_Heating_Discharge_Air_Flow_Setpoint_Limit"
],
"tags": [
TAG.Point,
TAG.Min,
TAG.Unoccupied,
TAG.Heat,
TAG.Supply,
TAG.Air,
TAG.Flow,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
},
},
"Min_Heating_Discharge_Air_Flow_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Min,
TAG.Heat,
TAG.Discharge,
TAG.Air,
TAG.Flow,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
"subclasses": {
"Min_Occupied_Heating_Discharge_Air_Flow_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Min,
TAG.Occupied,
TAG.Heat,
TAG.Discharge,
TAG.Air,
TAG.Flow,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
"Min_Unoccupied_Heating_Discharge_Air_Flow_Setpoint_Limit": {
"tags": [
TAG.Point,
TAG.Min,
TAG.Unoccupied,
TAG.Heat,
TAG.Discharge,
TAG.Air,
TAG.Flow,
TAG.Limit,
TAG.Parameter,
TAG.Setpoint,
],
},
},
},
},
},
},
},
},
},
},
}
}
|
python
|
input = """
male(john). republican(john).
male(matt). republican(matt).
female(joana). republican(joana).
female(luise). democrat(luise).
moreMaleRepublicans :-
#count{X:republican(X), female(X)} < N,
#count{Y: republican(Y), male(Y)} = N.
"""
output = """
male(john). republican(john).
male(matt). republican(matt).
female(joana). republican(joana).
female(luise). democrat(luise).
moreMaleRepublicans :-
#count{X:republican(X), female(X)} < N,
#count{Y: republican(Y), male(Y)} = N.
"""
|
python
|
from auto_yolo import envs
from yolo_air_stage1 import durations, distributions, config
readme = "Running simple on addition task."
envs.run_experiment(
"addition-stage1", config, readme, alg="simple",
task="arithmetic2", durations=durations, distributions=distributions
)
|
python
|
from singledispatch import singledispatch
from sqlalchemy import types
from sqlalchemy.dialects import postgresql
from sqlalchemy.orm import interfaces
from graphene import (ID, Boolean, Dynamic, Enum, Field, Float, Int, List,
String)
from graphene.types.json import JSONString
try:
from sqlalchemy_utils import ChoiceType, JSONType, ScalarListType, TSVectorType
except ImportError:
ChoiceType = JSONType = ScalarListType = TSVectorType = object
def get_column_doc(column):
return getattr(column, "doc", None)
def is_column_nullable(column):
return bool(getattr(column, "nullable", True))
def convert_sqlalchemy_relationship(relationship, registry, connection_field_factory):
direction = relationship.direction
model = relationship.mapper.entity
def dynamic_type():
_type = registry.get_type_for_model(model)
if not _type:
return None
if direction == interfaces.MANYTOONE or not relationship.uselist:
return Field(_type)
elif direction in (interfaces.ONETOMANY, interfaces.MANYTOMANY):
if _type._meta.connection:
return connection_field_factory(relationship, registry)
return Field(List(_type))
return Dynamic(dynamic_type)
def convert_sqlalchemy_hybrid_method(hybrid_item):
return String(description=getattr(hybrid_item, "__doc__", None), required=False)
def convert_sqlalchemy_composite(composite, registry):
converter = registry.get_converter_for_composite(composite.composite_class)
if not converter:
try:
raise Exception(
"Don't know how to convert the composite field %s (%s)"
% (composite, composite.composite_class)
)
except AttributeError:
# handle fields that are not attached to a class yet (don't have a parent)
raise Exception(
"Don't know how to convert the composite field %r (%s)"
% (composite, composite.composite_class)
)
return converter(composite, registry)
def _register_composite_class(cls, registry=None):
if registry is None:
from .registry import get_global_registry
registry = get_global_registry()
def inner(fn):
registry.register_composite_converter(cls, fn)
return inner
convert_sqlalchemy_composite.register = _register_composite_class
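# Illustrative registration sketch (an assumption for documentation purposes, not part
# of this module): a project can map its own SQLAlchemy composite type to a Graphene
# field by decorating a converter with the hook installed above, e.g.
#
#   @convert_sqlalchemy_composite.register(CompositeFullName)  # hypothetical composite_class
#   def convert_full_name_composite(composite, registry):
#       return String(description=getattr(composite, "doc", None))
#
# Any composite column built from that hypothetical class would then be exposed as a String field.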
def convert_sqlalchemy_column(column, registry=None):
return convert_sqlalchemy_type(getattr(column, "type", None), column, registry)
@singledispatch
def convert_sqlalchemy_type(type, column, registry=None):
raise Exception(
"Don't know how to convert the SQLAlchemy field %s (%s)"
% (column, column.__class__)
)
@convert_sqlalchemy_type.register(types.Date)
@convert_sqlalchemy_type.register(types.Time)
@convert_sqlalchemy_type.register(types.String)
@convert_sqlalchemy_type.register(types.Text)
@convert_sqlalchemy_type.register(types.Unicode)
@convert_sqlalchemy_type.register(types.UnicodeText)
@convert_sqlalchemy_type.register(postgresql.UUID)
@convert_sqlalchemy_type.register(postgresql.INET)
@convert_sqlalchemy_type.register(postgresql.CIDR)
@convert_sqlalchemy_type.register(TSVectorType)
def convert_column_to_string(type, column, registry=None):
return String(
description=get_column_doc(column), required=not (is_column_nullable(column))
)
@convert_sqlalchemy_type.register(types.DateTime)
def convert_column_to_datetime(type, column, registry=None):
from graphene.types.datetime import DateTime
return DateTime(
description=get_column_doc(column), required=not (is_column_nullable(column))
)
@convert_sqlalchemy_type.register(types.SmallInteger)
@convert_sqlalchemy_type.register(types.Integer)
def convert_column_to_int_or_id(type, column, registry=None):
if column.primary_key:
return ID(
description=get_column_doc(column),
required=not (is_column_nullable(column)),
)
else:
return Int(
description=get_column_doc(column),
required=not (is_column_nullable(column)),
)
@convert_sqlalchemy_type.register(types.Boolean)
def convert_column_to_boolean(type, column, registry=None):
return Boolean(
description=get_column_doc(column), required=not (is_column_nullable(column))
)
@convert_sqlalchemy_type.register(types.Float)
@convert_sqlalchemy_type.register(types.Numeric)
@convert_sqlalchemy_type.register(types.BigInteger)
def convert_column_to_float(type, column, registry=None):
return Float(
description=get_column_doc(column), required=not (is_column_nullable(column))
)
@convert_sqlalchemy_type.register(types.Enum)
def convert_enum_to_enum(type, column, registry=None):
enum_class = getattr(type, 'enum_class', None)
if enum_class: # Check if an enum.Enum type is used
graphene_type = Enum.from_enum(enum_class)
else: # Nope, just a list of string options
items = zip(type.enums, type.enums)
graphene_type = Enum(type.name, items)
return Field(
graphene_type,
description=get_column_doc(column),
required=not (is_column_nullable(column)),
)
@convert_sqlalchemy_type.register(ChoiceType)
def convert_column_to_enum(type, column, registry=None):
name = "{}_{}".format(column.table.name, column.name).upper()
return Enum(name, type.choices, description=get_column_doc(column))
@convert_sqlalchemy_type.register(ScalarListType)
def convert_scalar_list_to_list(type, column, registry=None):
return List(String, description=get_column_doc(column))
@convert_sqlalchemy_type.register(postgresql.ARRAY)
def convert_postgres_array_to_list(_type, column, registry=None):
graphene_type = convert_sqlalchemy_type(column.type.item_type, column)
inner_type = type(graphene_type)
return List(
inner_type,
description=get_column_doc(column),
required=not (is_column_nullable(column)),
)
@convert_sqlalchemy_type.register(postgresql.HSTORE)
@convert_sqlalchemy_type.register(postgresql.JSON)
@convert_sqlalchemy_type.register(postgresql.JSONB)
def convert_json_to_string(type, column, registry=None):
return JSONString(
description=get_column_doc(column), required=not (is_column_nullable(column))
)
@convert_sqlalchemy_type.register(JSONType)
def convert_json_type_to_string(type, column, registry=None):
return JSONString(
description=get_column_doc(column), required=not (is_column_nullable(column))
)
|
python
|
#!/usr/bin/python
# TrayIcon
# Access to various monitoring capabilities (HIDS, dashboard, ip configuration, network recognition, etc.)
# Alerting plugin (IM notifications, irssi, OSSEC) that helps display and monitor notification information
# Daemon plugin (sort of tail -f over selected files, RSS gathering, correlation)
# Knowledge base: regex, xpath and pattern recognition for classification
# Correlation (as a help to the alerting plugin): some rules to notify additional messages (security, twitter)
# Social feeds integration (at least for Twitter), possibly with some lexicometric tricks on top, combined with correlation and alerting
# import sys
# from PyQt4 import QtGui
#
# class SystemTrayIcon(QtGui.QSystemTrayIcon):
# def __init__(self, icon, parent=None):
# QtGui.QSystemTrayIcon.__init__(self, icon, parent)
# self.menu = QtGui.QMenu(parent)
# exitAction = self.menu.addAction("Exit")
# self.setContextMenu(self.menu)
#
# def main():
# app = QtGui.QApplication(sys.argv)
# style = app.style()
# icon = QtGui.QIcon(style.standardPixmap(QtGui.QStyle.SP_FileIcon))
# trayIcon = SystemTrayIcon(icon)
#
# trayIcon.show()
# sys.exit(app.exec_())
#
# if __name__ == '__main__':
# main()
# TO BE CONTINUED... ?
|
python
|
"""
Pipeline object class for EmrActivity
"""
from .activity import Activity
from ..config import Config
from .schedule import Schedule
from ..utils import constants as const
from ..utils.exceptions import ETLInputError
config = Config()
MAX_RETRIES = config.etl.get('MAX_RETRIES', const.ZERO)
class EmrActivity(Activity):
"""EMR Activity class
"""
def __init__(self,
id,
resource,
schedule,
input_node,
emr_step_string,
output_node=None,
additional_files=None,
max_retries=None,
depends_on=None):
"""Constructor for the EmrActivity class
Args:
id(str): id of the object
resource(Ec2Resource / EMRResource): resource to run the activity on
            schedule(Schedule): schedule of the pipeline
            input_node(S3Node): input node for the emr job
            emr_step_string(list of str): command string to be executed
            output_node(S3Node): output node for the emr job
            additional_files(list of S3File): additional files required for emr
            max_retries(int): number of retries for the activity
            depends_on(list of activities): dependent pipeline steps
"""
# Validate inputs
if not isinstance(schedule, Schedule):
raise ETLInputError(
'Input schedule must be of the type Schedule')
# Set default values
if depends_on is None:
depends_on = []
if max_retries is None:
max_retries = MAX_RETRIES
super(EmrActivity, self).__init__(
id=id,
type='EmrActivity',
maximumRetries=max_retries,
dependsOn=depends_on,
runsOn=resource,
schedule=schedule,
step=emr_step_string,
output=output_node,
input=input_node,
)
self.add_additional_files(additional_files)
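# Construction sketch (illustrative assumption; the resource, schedule and S3 node
# objects below are hypothetical placeholders built elsewhere in this package):
#
#   activity = EmrActivity(
#       id='emr-step-1',
#       resource=emr_resource,          # an EMRResource instance
#       schedule=pipeline_schedule,     # a Schedule instance
#       input_node=input_s3_node,
#       emr_step_string=emr_step_strings,   # list of EMR step command strings
#       output_node=output_s3_node,
#   )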
|
python
|
from django.test import TestCase
from . models import Urls, Statistics
class UrlsTestClass(TestCase):
'''
    Class that tests the characteristics of the Urls objects and their methods
'''
def setUp(self):
'''
Method that runs at the beginning of each test
'''
self.url = Urls(short_id = 'hrhje', httpurl ='http://google.com')
def test_isinstance(self):
'''
        Test that determines whether an object is an instance of the class Urls
'''
self.assertTrue(isinstance(self.url,Urls))
def test_save_url(self):
'''
        Test the Urls class save_url method
'''
self.url.save_url()
saved_urls = Urls.objects.all()
self.assertTrue(len(saved_urls)>0)
def test_count_unique(self):
'''
Test count_unique method of the Urls class
'''
self.url.save()
all = Urls.count_unique('http://google.com')
self.assertTrue(all == 1)
def test_shortcode_exist(self):
'''
        Test the shortcode_exist method of the Urls class
'''
self.url.save()
is_exitent = Urls.shortcode_exist('hrhje')
self.assertTrue(is_exitent == True)
def test_url_exist(self):
'''
Method that tests url_exist method
'''
self.url.save()
is_exitent = Urls.url_exist('http://google.com')
self.assertTrue(is_exitent == True)
def test_code_generator(self):
'''
Test the code generator method of the Urls class
'''
self.url.save()
shortcode = self.url.code_generator()
is_exitent = self.url.shortcode_exist('hrhje')
self.assertEqual(is_exitent,True)
def test_get_url_by_shorcode(self):
'''
        Tests the get_url_by_shorcode method of the Urls class
'''
self.url.save()
url = Urls.get_url_by_shorcode('hrhje')
self.assertTrue(url.short_id == 'hrhje' )
def test_get_shortcode_by_url(self):
'''
Tests the get_shortcode_by_url method of the Urls class
'''
self.url.save()
requested_url = Urls.get_shortcode_by_url('http://google.com')
self.assertTrue(requested_url.short_id == 'hrhje')
class OtherFunctionsTestClass(TestCase):
'''
    Test class that tests the characteristics of other methods and
functionalities of the app
'''
def setUp(self):
'''
        Method that runs at the beginning of each test
'''
self.url = Urls(short_id = 'hrhje', httpurl ='http://google.com')
def test_Url_Validator(self):
pass
class StatisticsTestClass(TestCase):
'''
Tests the characteristics of the statistics class
'''
def setUp(self):
'''
        Method that runs at the beginning of each test
'''
self.statistic = Statistics(name='statistics')
def test_isinstance(self):
'''
        Method that tests whether an object is an instance of a given class
'''
self.assertTrue(isinstance(self.statistic,Statistics))
def test_get_total_clicks(self):
'''
        Method that tests the get_total_clicks method
'''
self.statistic.save()
self.statistic.total_clicks +=1
self.statistic.save()
self.assertTrue(Statistics.get_total_clicks() == 1)
def test_calculate_popularity(self):
'''
        Method that tests the calculate_popularity method
'''
self.statistic.save()
self.statistic.total_clicks +=2
calculated_index = Statistics.calculate_popularity(1)
self.statistic.save()
self.assertTrue(calculated_index == 2)
|
python
|
import os
from multiprocessing import Process, Queue, Lock
from moviepy.video.io.VideoFileClip import VideoFileClip as Video
from glob import glob
from tqdm import tqdm
def job(item):
fn, indir, outdir = item
outdir = os.path.splitext(fn.replace(indir, outdir))[0]
if not os.path.isdir(outdir):
os.makedirs(outdir)
vid = Video(fn)
vid.write_images_sequence(os.path.join(outdir, '%06d.bmp'), fps=8, verbose=False, logger=None)
vid.close()
def worker(inq, outq, lock):
for item in iter(inq.get, None):
job(item)
outq.put(0)
if __name__ == "__main__":
inq = Queue()
outq = Queue()
lock = Lock()
nproc = 40
#basepath = "PATH/TO/scenes"
basepath = "YOUR PATH HERE"
outdir = "YOUR PATH HERE"
data=glob(os.path.join(basepath, '**/*.mp4'), recursive=True)
for item in data:
inq.put((item, basepath, outdir))
for i in range(nproc):
inq.put(None)
for i in range(nproc):
Process(target=worker, args=(inq, outq, lock)).start()
for item in tqdm(data):
outq.get()
|
python
|
from biocrnpyler import *
kb, ku, ktx, ktl, kdeg = 100, 10, 3, 2, 1
parameters = {"kb": kb, "ku": ku, "ktx": ktx, "ktl": ktl, "kdeg": kdeg}
myMixture = BasicExtract(name="txtl", parameters=parameters)
A1 = DNAassembly(name="G1", promoter="pBest",
rbs="BCD2", transcript="T1", protein="GFP", initial_concentration=10, parameter_warnings = False)
# Note: Protein and Transcript strings (or chemical_reaction_network.specie objects) are optional parameters
# DNAassemblies default to using their name for their transcript and protein products.
myMixture.add_components(A1)
myCRN = myMixture.compile_crn()
print("\n" + repr(A1))
print("\n" + repr(myMixture))
print("\n" + repr(myCRN))
#print("\nmyMixture.parameters", myMixture.parameters)
#print("\ndna_assembly.parameters", A1.parameters)
file_name = "constitutive_expression_test.xml"
f = myCRN.write_sbml_file(file_name)
|
python
|
from django.db import models
from django.conf import settings
from django import forms
# Create your models here.
class Dataset(models.Model):
name = models.CharField(max_length=200, null=True, blank=True)
owner = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.PROTECT)
    columns = models.IntegerField()  # add validator: limit 10
data = models.FileField()
description = models.TextField(max_length=5000)
stars = models.IntegerField(default=0)
class Attribute(models.Model):
    dataset = models.ForeignKey(Dataset, on_delete=models.CASCADE)
    name = models.CharField(max_length=200)
    datatype = models.CharField(max_length=50)
# Forms
class DatasetForm(forms.ModelForm):
    class Meta:
        model = Dataset
        fields = ['name', 'columns', 'description']  # add validator to limit columns to 10
class DataForm(forms.Form):
    DATATYPE_CHOICES = [('image', 'image'), ('text', 'text'), ('integer', 'integer')]
    def __init__(self, columns, *args, **kwargs):
        super().__init__(*args, **kwargs)
        for i in range(columns):
            self.fields['name' + str(i)] = forms.CharField(max_length=200)
            self.fields['datatype' + str(i)] = forms.ChoiceField(choices=self.DATATYPE_CHOICES)
    def save(self, columns, dataset):
        # Create one Attribute row per declared column from the validated form data
        for i in range(columns):
            Attribute.objects.create(
                dataset=dataset,
                name=self.cleaned_data['name' + str(i)],
                datatype=self.cleaned_data['datatype' + str(i)]
            )
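# View-level usage sketch (assumption: called from a Django view after the Dataset
# row already exists; field names follow the dynamic names built in __init__ above):
#
#   form = DataForm(dataset.columns, request.POST)
#   if form.is_valid():
#       form.save(dataset.columns, dataset)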
|
python
|
"""
OpenVINO DL Workbench
Class for annotate dataset job
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from sqlalchemy.orm import Session
from config.constants import OPENVINO_ROOT_PATH
from wb.main.enumerates import JobTypesEnum
from wb.main.jobs.accuracy_analysis.annotate_datset.annotate_dataset_job import AnnotateDatasetJob
from wb.main.models import AnnotateDatasetJobModel
class LocalAnnotateDatasetJob(AnnotateDatasetJob):
job_type = JobTypesEnum.annotate_dataset
def _set_paths(self, session: Session):
"""
Set job paths for local and remote use-cases.
This method mutates `job_bundle_path`, `_openvino_path` and `_venv_path` fields
"""
accuracy_job_model: AnnotateDatasetJobModel = self.get_job_model(session)
self.job_bundle_path = str(self.get_dataset_annotation_path(accuracy_job_model))
self._openvino_path = OPENVINO_ROOT_PATH
self._venv_path = None
def collect_artifacts(self):
pass
|
python
|
from bs4 import BeautifulSoup
from contextlib import suppress
RUN_EXAMPLE = 2
class Match:
"""
    This class stores information about a match
"""
def __init__(self, team1: str, team2: str, state, _, score1: int, score2: int):
self.team1 = self._sanitize(team1)
self.team2 = self._sanitize(team2)
self.state = state
self.score1 = score1
self.score2 = score2
@staticmethod
def _sanitize(team_name: str):
return team_name[:-4] if team_name.endswith("GOAL") else team_name
def __str__(self):
return "{} vs {}, {} ({}, {})".format(self.team1, self.team2, self.state, self.score1, self.score2)
def __repr__(self):
return self.__str__()
def open_example_page():
"""
For testing purposes.
Reads the local bet365.html file and parses it.
Activate by using the flag --testing
:return:
"""
with open("bet365.html", "r", encoding="utf8") as file:
contents = file.read()
return contents
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def parse_bet365(page=RUN_EXAMPLE):
if page == RUN_EXAMPLE:
page = open_example_page()
else:
pass
#print("got\n " + page)
soup = BeautifulSoup(page, 'html.parser')
rows = soup.find_all('div')
items = []
for row in rows:
if row.has_attr('class'):
with suppress(IndexError):
if "ipo-TeamStack_Team" in row['class']:
items.append(row.text)
elif any(x.startswith("ipo-TeamPoints_TeamScore") for x in row['class']):
items.append(row.text)
return [Match(*x) for x in chunks(items, 6)]
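# Usage sketch (assumes a previously saved bet365.html sits next to this script,
# as described in open_example_page above):
if __name__ == "__main__":
    for match in parse_bet365():
        print(match)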
|
python
|
from django.apps import AppConfig
class AwewardsConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'awewards'
|
python
|
from setuptools import setup, find_packages
with open("requirements.txt") as f:
requirements = f.readlines()
long_description = "Automated tool to provision Greengrass 2.0"
setup(
name="ggv2_provisioner",
version="0.0.8",
author="Gavin Adams",
author_email="[email protected]",
url="https://github.com/gadams999/greengrassv2-provisioner",
description="Greengrass 2.0 command line provisioner",
long_description=long_description,
long_description_content_type="text/markdown",
license="Apache-2.0",
packages=find_packages(),
entry_points={"console_scripts": ["ggv2-provisioner = ggv2_provisioner:main"]},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
python_requires=">3.6",
keywords="greengrass ggv2 provision provisioner",
install_requires=requirements,
zip_safe=False,
)
|
python
|
'''
Load the CIOD module tables from DICOM Standard PS3.3, Annex A.
All CIOD tables are defined in chapter A of the DICOM Standard.
Output the tables in JSON format, one entry per CIOD.
'''
from typing import List, Tuple
import sys
import re
from bs4 import Tag
from dicom_standard import parse_lib as pl
from dicom_standard import parse_relations as pr
from dicom_standard.macro_utils import MetadataTableType
from dicom_standard.table_utils import (
StringifiedTableListType,
TableDictType,
get_chapter_tables,
tables_to_json,
get_short_standard_link,
get_table_description,
table_to_dict,
)
CHAPTER_IDS = ['chapter_A', 'chapter_F']
# Standard workaround: Include upper case "S" to catch typo in Table A.39.19-1
# http://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_A.35.19.3.html
TABLE_SUFFIX = re.compile(".*IOD Module[sS]$")
COLUMN_TITLES_WITH_IE = ['informationEntity', 'module', 'referenceFragment', 'usage']
COLUMN_TITLES_NO_IE = ['module', 'referenceFragment', 'usage', 'description']
def is_valid_ciod_table(table_div: Tag) -> bool:
return bool(TABLE_SUFFIX.match(pr.table_name(table_div)))
def ciod_table_to_dict(table: StringifiedTableListType) -> List[TableDictType]:
# Table F.3-1 (the only table in section F) has no "Information Entity" column, so we check for the href in the second column
# http://dicom.nema.org/dicom/2013/output/chtml/part03/sect_F.3.html#table_F.3-1
sect_f_table = 'href' in table[0][1]
column_titles = COLUMN_TITLES_NO_IE if sect_f_table else COLUMN_TITLES_WITH_IE
return table_to_dict(table, column_titles)
def get_table_with_metadata(table_with_tdiv: Tuple[List[TableDictType], Tag]) -> MetadataTableType:
table, tdiv = table_with_tdiv
clean_name = pl.clean_table_name(pr.table_name(tdiv))
table_description = get_table_description(tdiv)
return {
'name': clean_name,
'modules': table,
'id': pl.create_slug(clean_name),
'description': str(table_description),
'linkToStandard': get_short_standard_link(tdiv)
}
if __name__ == "__main__":
standard = pl.parse_html_file(sys.argv[1])
tables = []
tdivs = []
for chapter_id in CHAPTER_IDS:
chapter_tables, chapter_tdivs = get_chapter_tables(standard, chapter_id, is_valid_ciod_table)
tables += chapter_tables
tdivs += chapter_tdivs
parsed_table_data = tables_to_json(tables, tdivs, ciod_table_to_dict, get_table_with_metadata)
pl.write_pretty_json(parsed_table_data)
|
python
|
import requests
import json
import yaml
def checkDomains(domains):
url = 'https://www.virustotal.com/vtapi/v2/url/report'
scans = []
for dom in domains:
params = {'apikey':getApiKey('vt'), 'resource':dom}
try:
response = requests.get(url, params=params)
scans.append(response.json())
except Exception as e:
print("It was not possible to check the {} domain.\nMaybe we hit VT free limit? Try upgrading your API license".format(dom))
break
return scans
def checkAbuseIP(ips):
checkedIPs = {}
for ip in ips:
url = 'https://api.abuseipdb.com/api/v2/check'
querystring = {
'ipAddress': ip,
'maxAgeInDays': '90'
}
headers = {
'Accept': 'application/json',
'Key': getApiKey('abuseipdb')
}
try:
response = requests.request(method='GET', url=url, headers=headers, params=querystring)
whitelisted = json.loads(response.text)['data']['isWhitelisted']
checkedIPs[ip] = whitelisted
except Exception as e:
print(e)
return checkedIPs
def getApiKey(provider):
with open("/opt/netlyzer/config.yml", 'r') as ymlfile:
cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)
if provider == "vt":
return cfg['api']['vtApiKey']
if provider == "abuseipdb":
return cfg['api']['abuseIPDBKey']
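# Expected shape of /opt/netlyzer/config.yml, inferred from the keys read above
# (the actual file is site-specific):
#
#   api:
#     vtApiKey: "<VirusTotal API key>"
#     abuseIPDBKey: "<AbuseIPDB API key>"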
|
python
|
from torchvision import models
import torch.nn as nn
class model(nn.Module):
def __init__(self, input_dim, output_dim):
super(model, self).__init__()
self.restored = False
self.input_dim = input_dim
self.output_dim = output_dim
num = len(input_dim)
feature = []
for i in range(num):
feature.append(
nn.Sequential(
nn.Linear(self.input_dim[i],2*self.input_dim[i]),
nn.BatchNorm1d(2*self.input_dim[i]),
nn.LeakyReLU(0.1, True),
nn.Linear(2*self.input_dim[i],2*self.input_dim[i]),
nn.BatchNorm1d(2*self.input_dim[i]),
nn.LeakyReLU(0.1, True),
nn.Linear(2*self.input_dim[i],self.input_dim[i]),
nn.BatchNorm1d(self.input_dim[i]),
nn.LeakyReLU(0.1, True),
nn.Linear(self.input_dim[i],self.output_dim),
nn.BatchNorm1d(self.output_dim),
nn.LeakyReLU(0.1, True),
))
self.feature = nn.ModuleList(feature)
self.feature_show = nn.Sequential(
nn.Linear(self.output_dim,self.output_dim),
nn.BatchNorm1d(self.output_dim),
nn.LeakyReLU(0.1, True),
nn.Linear(self.output_dim,self.output_dim),
nn.BatchNorm1d(self.output_dim),
nn.LeakyReLU(0.1, True),
nn.Linear(self.output_dim,self.output_dim),
)
def forward(self, input_data, domain):
feature = self.feature[domain](input_data)
feature = self.feature_show(feature)
return feature
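# Minimal usage sketch (illustrative assumption: two domains with feature widths
# 50 and 30 mapped into a shared 16-dimensional space; the `domain` index selects
# the per-domain encoder defined in __init__):
if __name__ == "__main__":
    import torch
    net = model(input_dim=[50, 30], output_dim=16)
    net.eval()  # BatchNorm1d layers need eval() (or batch size > 1) for tiny batches
    x = torch.randn(4, 50)        # batch of 4 samples from domain 0
    feats = net(x, domain=0)      # -> tensor of shape (4, 16)
    print(feats.shape)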
|
python
|
INSTRUCTIONS = """
"""
from utils.decorators import time_this
@time_this
def solution(inputs):
"""
"""
test_case_inputs = [
]
|
python
|
class SilkObject:
__slots__ = []
def __ne__(self, other):
return not self.__eq__(other)
class SilkStringLike(SilkObject):
__slots__ = []
from . import primitives
|
python
|
from .transpose import transpose
|
python
|
from functools import partial
from flask import Blueprint, current_app, g
from api.client import SecurityTrailsClient, ST_OBSERVABLE_TYPES
from api.mappings import Mapping
from api.schemas import ObservableSchema
from api.utils import get_json, jsonify_data, get_key, jsonify_result
enrich_api = Blueprint('enrich', __name__)
get_observables = partial(get_json, schema=ObservableSchema(many=True))
@enrich_api.route('/deliberate/observables', methods=['POST'])
def deliberate_observables():
return jsonify_data({})
@enrich_api.route('/observe/observables', methods=['POST'])
def observe_observables():
key = get_key()
observables = get_observables()
client = SecurityTrailsClient(current_app.config['API_URL'],
key,
current_app.config['USER_AGENT'],
current_app.config['NUMBER_OF_PAGES'],
current_app.config['GET_ALL_PAGES'])
g.sightings = []
try:
for observable in observables:
mapping = Mapping.for_(observable)
if mapping:
client_data = client.get_data(observable)
for record in client_data:
refer_link = client.refer_link(
current_app.config['UI_URL'], observable
)
sighting = mapping.extract_sighting(record, refer_link)
if sighting:
g.sightings.append(sighting)
except KeyError:
g.errors = [{
'type': 'fatal',
'code': 'key error',
'message': 'The data structure of SecurityTrails '
'has changed. The module is broken.'
}]
return jsonify_result()
@enrich_api.route('/refer/observables', methods=['POST'])
def refer_observables():
observables = get_observables()
ui_url = current_app.config['UI_URL']
data = []
for observable in observables:
type_ = ST_OBSERVABLE_TYPES.get(observable['type'])
if type_:
data.append(
{
'id': (
'ref-securitytrails-search-{type}-{value}'.format(
**observable)
),
'title': f'Search for this {type_}',
'description': (
f'Lookup this {type_} on SecurityTrails'
),
'url': SecurityTrailsClient.refer_link(ui_url, observable),
'categories': ['Search', 'SecurityTrails'],
}
)
return jsonify_data(data)
|
python
|
from sqlite3 import dbapi2 as sqlite3
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash, _app_ctx_stack
import requests, os
from bs4 import BeautifulSoup
# configuration
try:
DATABASE = 'simply-billboard.db'
DEBUG = True
SECRET_KEY = 'development key'
USERNAME = 'admin'
PASSWORD = 'default'
except ImportError:
SECRET_KEY = os.environ.get('SECRET_KEY')
USERNAME = os.environ.get('USERNAME')
PASSWORD = os.environ.get('PASSWORD')
DEBUG = False
# create our little application :)
app = Flask(__name__)
app.config.from_object(__name__)
def init_db():
"""Creates the database tables."""
with app.app_context():
db = get_db()
with app.open_resource('schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
import billboard
chart = billboard.ChartData('hot-100', date=None, fetch=True, all=False)
for x in range(0, 100):
db.execute('INSERT INTO billboard100 (title, artist, peakPos, lastPos, weeks, rankChange) VALUES (?, ?, ?, ?, ?, ?)',
[chart[x].title, chart[x].artist, chart[x].peakPos, chart[x].lastPos, chart[x].weeks, chart[x].change])
db.commit()
def get_db():
"""Opens a new database connection if there is none yet for the
current application context.
"""
top = _app_ctx_stack.top
if not hasattr(top, 'sqlite_db'):
sqlite_db = sqlite3.connect(app.config['DATABASE'])
sqlite_db.row_factory = sqlite3.Row
top.sqlite_db = sqlite_db
return top.sqlite_db
@app.teardown_appcontext
def close_db_connection(exception):
"""Closes the database again at the end of the request."""
top = _app_ctx_stack.top
if hasattr(top, 'sqlite_db'):
top.sqlite_db.close()
@app.route('/')
def billboard():
db = get_db()
cur = db.execute('SELECT * FROM billboard100 ORDER BY rank')
entries = cur.fetchall()
return render_template('billboard.html', entries=entries)
if __name__ == '__main__':
init_db()
port = int(os.environ.get("PORT", 5000))
app.run(host='0.0.0.0', port=port)
|
python
|
#!/usr/bin/env python
# Copyright 2015 Dmitriy Robota.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
from xvfbwrapper import Xvfb
from robot.api import logger
from XvfbRobot.version import VERSION
__version__ = VERSION
class XvfbRobot(object):
"""
A robot library for creating virtual display on demand
"""
ROBOT_LIBRARY_SCOPE = "GLOBAL"
ROBOT_LIBRARY_VERSION = VERSION
_display = None
def start_virtual_display(self, width=1440, height=900,
colordepth=24, **kwargs):
"""Starts virtual display which will be
destroyed after test execution will be end
*Arguments:*
- width: a width to be set in pixels
- height: a height to be set in pixels
        - colordepth: a color depth to be used
- kwargs: extra parameters
*Example:*
| Start Virtual Display |
| Start Virtual Display | 1920 | 1080 |
| Start Virtual Display | ${1920} | ${1080} | ${16} |
"""
if self._display is None:
logger.info("Using virtual display: '{0}x{1}x{2}'".format(
width, height, colordepth))
self._display = Xvfb(int(width), int(height),
int(colordepth), **kwargs)
self._display.start()
atexit.register(self._display.stop)
|
python
|
from settings import *
class BonusBox:
def __init__(self, data, gui):
self.boxID = int(data["boxID"])
self.x = int(data["x"])
self.y = int(data["y"])
self.type = int(data["type"])
self.size = 2
self.gui = gui
if self.type == 1: # cargo
self.color = "orange"
elif self.type == 2:
self.color = "yellow"
elif self.type == 21:
self.color = "green"
self.gui.bonusBoxes.append(self)
self.guiObj = self.gui.canvas.create_rectangle(
(self.x/100 * self.gui.scale)-self.size,
(self.y/100 * self.gui.scale)-self.size,
(self.x/100 * self.gui.scale)+self.size,
(self.y/100 * self.gui.scale)+self.size,
fill=self.color
)
def hide(self):
# self.gui.setColor(self.guiObj, "black")
self.gui.canvas.delete(self.guiObj)
def show(self):
self.gui.setColor(self.guiObj, self.color)
def remove(self):
if self in self.gui.bonusBoxes:
self.gui.bonusBoxes.remove(self)
self.hide()
|
python
|