# -*- coding: utf-8 -*-
""" Helper for uploading file, takes care of chunking file, create the file schema. """
__author__ = 'Thomas Sileo ([email protected])'
import logging
import os
from concurrent import futures
import camlipy
from camlipy.rollsum import Rollsum
from camlipy.schema import Bytes, File
MAX_BLOB_SIZE = 1 << 20
FIRST_CHUNK_SIZE = 256 << 10
TOO_SMALL_THRESHOLD = 64 << 10
# Buffer to detect EOF in advance.
BUFFER_SIZE = 32 << 10
log = logging.getLogger(__name__)
class Span(object):
""" Chunk metadata, used to create the tree,
and compute chunk/bytesRef size. """
def __init__(self, _from=0, to=0, bits=None, children=None, chunk_cnt=0, br=None, size=None):
self._from = _from
self.to = to
self.bits = bits
self.br = br
self.children = children if children is not None else []
self.chunk_cnt = chunk_cnt
self._size = size
def __repr__(self):
return '<Span children:{0}, iter:{1}, {2}:{3} {4}bits>'.format(len(self.children),
self.chunk_cnt,
self._from, self.to,
self.bits)
def single_blob(self):
return not len(self.children)
def size(self):
if self._size:
return self._size
size = self.to - self._from
for cs in self.children:
size += cs.size()
return size
class FileWriter(object):
def __init__(self, con, path=None, fileobj=None):
self.con = con
self.path = path
if path:
self.reader = open(self.path, 'rb')
self.size = os.path.getsize(self.path)
else:
self.reader = fileobj
fileobj.seek(0, 2)
self.size = fileobj.tell()
fileobj.seek(0)
self.rs = Rollsum()
self.blob_size = 0
# Store Span the instance of the chunk
self.spans = []
# Total size
self.n = 0
# buffer to store the chunk
self.buf = ''
self.buf_spans = {}
# To generate the end report.
self.cnt = {'skipped': 0,
'skipped_size': 0,
'uploaded': 0,
'uploaded_size': 0}
def _upload_spans(self, force=False):
""" Actually upload/put the blobs. """
if len(self.buf_spans) == 10 or force:
if camlipy.DEBUG:
log.debug('Upload spans')
resp = self.con.put_blobs(self.buf_spans.values())
self.buf_spans = {}
for rec in resp['received']:
self.cnt['uploaded'] += 1
self.cnt['uploaded_size'] += rec['size']
for rec in resp['skipped']:
self.cnt['skipped'] += 1
self.cnt['skipped_size'] += rec['size']
def upload_last_span(self):
""" Empty the current blob buffer, prepare the blob,
and add it to the spans buffer (they are uploaded once they
are ten blobs in the buffer).
"""
if camlipy.DEBUG:
log.debug('Add span to buffer: {0}'.format(self.spans[-1]))
chunk = self.buf
self.buf = ''
blob_ref = camlipy.compute_hash(chunk)
self.spans[-1].br = blob_ref
self.buf_spans[blob_ref] = chunk
executor = futures.ThreadPoolExecutor(max_workers=2)
executor.submit(self._upload_spans)
executor.shutdown(wait=False)
def chunk(self):
""" Chunk the file with Rollsum to a tree of Spans. """
if self.size <= FIRST_CHUNK_SIZE:
if camlipy.DEBUG:
log.debug('Skip chunking, file size lower than first chunk: {0}'.format(self.size))
buf = self.reader.read(self.size)
br = self.con.put_blob(buf)
span = Span(br=br, size=self.size)
self.spans.append(span)
return 1
if camlipy.DEBUG:
log.debug('Start chunking, total size: {0}'.format(self.size))
chunk_cnt = 0
last = 0
eof = False
bits = 0
while 1:
c = self.reader.read(1)
if c:
self.buf += c
self.n += 1
self.blob_size += 1
self.rs.roll(ord(c))
on_split = self.rs.on_split()
bits = 0
if self.blob_size == MAX_BLOB_SIZE:
bits = 20
# check EOF
elif self.n > self.size - BUFFER_SIZE:
continue
elif (on_split and self.n > FIRST_CHUNK_SIZE and
self.blob_size > TOO_SMALL_THRESHOLD):
bits = self.rs.bits()
# First chunk => 262144 bytes
elif self.n == FIRST_CHUNK_SIZE:
bits = 18 # 1 << 18
else:
continue
self.blob_size = 0
# The tricky part: take spans from the end that have a
# smaller bits score, slice them out and make them children
# of the new node. That's how we end up with mixed blobRef/bytesRef,
# and it keeps them ordered by building a kind of depth-first graph.
children = []
children_from = len(self.spans)
while children_from > 0 and \
self.spans[children_from - 1].bits < bits:
children_from -= 1
n_copy = len(self.spans) - children_from
if n_copy:
children = self.spans[children_from:]
self.spans = self.spans[:children_from]
else:
eof = True
children = []
current_span = Span(last, self.n, bits, children, chunk_cnt)
if camlipy.DEBUG:
log.debug('Current span: {0}, last:{1}, n:{2}'.format(current_span, last, self.n))
self.spans.append(current_span)
last = self.n
self.upload_last_span()
chunk_cnt += 1
if eof:
log.debug('EOF')
break
# Upload left chunks
assert self.n == self.size
self._upload_spans(force=True)
return chunk_cnt
def bytes_writer(self, to_bytes=True):
""" Transform the span in a blobRef/bytesRef tree.
if `to_bytes' is True, returns a Bytes schema,
if False, it returns the list of parts (ready to
be injected in a File schema.)
"""
return self._bytes_writer(self.spans, to_bytes=to_bytes)
def _bytes_writer(self, spans, to_bytes=True):
""" Actually transform the span in a blobRef/bytesRef tree.
if `to_bytes' is True, returns a Bytes schema,
if False, it returns the list of parts (ready to
be injected in a File schema.)
"""
schema = Bytes(self.con)
if camlipy.DEBUG:
log.debug('Starting spans: {0}'.format(spans))
for span in spans:
if camlipy.DEBUG:
log.debug('Current span: {0}'.format(span))
# Don't create a bytesRef if there is only one child,
# make it a blobRef instead.
if len(span.children) == 1 and span.children[0].single_blob():
children_size = span.children[0].to - span.children[0]._from
schema.add_blob_ref(span.children[0].br, children_size)
span.children = []
if camlipy.DEBUG:
log.debug('Transform this span to blobRef, new span: {0}'.format(span))
# Create a new bytesRef if the span has children
elif len(span.children):
children_size = 0
for c in span.children:
children_size += c.size()
if camlipy.DEBUG:
log.debug('Embedding a bytesRef')
schema.add_bytes_ref(self._bytes_writer(span.children, True), children_size)
# Make a blobRef with the span data
schema.add_blob_ref(span.br, span.to - span._from)
log.info(schema.json())
if camlipy.DEBUG:
log.debug('Resulting Bytes schema: {0}'.format(schema.json()))
if to_bytes:
self.con.put_blobs([schema.json()])
return camlipy.compute_hash(schema.json())
return schema.data['parts']
def check_spans(self):
""" Debug methods. """
log.debug(self.spans)
return self._check_spans(self.spans)
def _check_spans(self, spans):
""" Debug methods. """
for span in spans:
if span.single_blob():
yield span.chunk_cnt
else:
for sp in self._check_spans(span.children):
yield sp
yield span.chunk_cnt
def put_file(con, path=None, fileobj=None, permanode=False):
""" Helper for uploading a file to a Camlistore server.
Specify either a path, or a fileobj.
Can also create a permanode.
"""
if path is not None:
fileobj = open(path, 'rb')
file_writer = FileWriter(con, fileobj=fileobj)
file_writer.chunk()
parts = file_writer.bytes_writer(to_bytes=False)
file_schema = File(con, path, file_name=fileobj.name)
blob_ref = file_schema.save(parts, permanode=permanode)
log.info('Uploaded {uploaded} blobs ({uploaded_size} bytes), skipped {skipped} blobs ({skipped_size} bytes).'.format(**file_writer.cnt))
return blob_ref
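# --- Hedged usage sketch (not part of the original module) ---
# A minimal example of how put_file() might be called. The connection
# object is assumed to be camlipy's client exposing put_blob/put_blobs
# as used above; the class name and server URL below are assumptions.
#
#     import camlipy
#
#     con = camlipy.Camlistore('http://localhost:3179')
#     blob_ref = put_file(con, path='/tmp/example.bin', permanode=True)
#     print(blob_ref)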
| python |
# Generated by Django 2.0 on 2017-12-30 16:08
from django.conf import settings
from django.db import migrations, models
import apps.web.validators
class Migration(migrations.Migration):
dependencies = [
('web', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='handler',
name='allowed',
),
migrations.RemoveField(
model_name='response',
name='redirect_to',
),
migrations.AddField(
model_name='chat',
name='links_preview',
field=models.BooleanField(default=False, verbose_name='Show links preview'),
preserve_default=False,
),
migrations.AddField(
model_name='chat',
name='notifications',
field=models.BooleanField(default=False, verbose_name='Show notifications'),
preserve_default=False,
),
migrations.AddField(
model_name='chat',
name='template_context',
field=models.TextField(blank=True, max_length=3000, null=True, validators=[apps.web.validators.json_field_validator], verbose_name='Template context'),
),
migrations.AddField(
model_name='handler',
name='redirects',
field=models.ManyToManyField(help_text='Users the message redirect to', to=settings.AUTH_USER_MODEL, verbose_name='Redirects'),
),
migrations.AlterField(
model_name='handler',
name='ids_expression',
field=models.CharField(blank=True, help_text='A set of math symbols to construct a particular rule,example: {} + {} > 1; example2: {cond_id} == 0', max_length=500, null=True, validators=[apps.web.validators.condition_validator], verbose_name='Mathematics expression'),
),
]
| python |
"""Generate masks from sum of flurophore channels"""
import os
import pandas as pd
import micro_dl.utils.aux_utils as aux_utils
from micro_dl.utils.mp_utils import mp_create_save_mask
from skimage.filters import threshold_otsu
class MaskProcessor:
"""Generate masks from channels"""
def __init__(self,
input_dir,
output_dir,
channel_ids,
flat_field_dir=None,
time_ids=-1,
slice_ids=-1,
pos_ids=-1,
int2str_len=3,
uniform_struct=True,
num_workers=4,
mask_type='otsu',
mask_channel=None,
mask_ext='.npy',
):
"""
:param str input_dir: Directory with image frames
:param str output_dir: Base output directory
:param int/list channel_ids: Channel indices to generate the mask from;
the sum of these (fluorophore) channels is thresholded
(typically just one channel)
:param str flat_field_dir: Directory with flatfield images if
flatfield correction is applied
:param list/int time_ids: timepoints to consider
:param int slice_ids: Index of which focal plane (z)
acquisition to use (default -1 includes all slices)
:param int pos_ids: Position (FOV) indices to use
:param int int2str_len: Length of str when converting ints
:param bool uniform_struct: bool indicator for same structure across
pos and time points
:param int num_workers: number of workers for multiprocessing
:param str mask_type: method to use for generating mask. Needed for
mapping to the masking function
:param int mask_channel: channel number assigned to the generated masks.
If resizing images on a subset of channels, frames_meta comes from the
resize dir, which could lead to the wrong mask channel being assigned.
:param str mask_ext: '.npy' or '.png'. Save the mask as uint8 PNG or
NPY files
"""
self.input_dir = input_dir
self.output_dir = output_dir
self.flat_field_dir = flat_field_dir
self.num_workers = num_workers
self.frames_metadata = aux_utils.read_meta(self.input_dir)
if 'dir_name' not in self.frames_metadata.keys():
self.frames_metadata['dir_name'] = self.input_dir
# Create a unique mask channel number so masks can be treated
# as a new channel
if mask_channel is None:
self.mask_channel = int(
self.frames_metadata['channel_idx'].max() + 1
)
else:
self.mask_channel = int(mask_channel)
metadata_ids, nested_id_dict = aux_utils.validate_metadata_indices(
frames_metadata=self.frames_metadata,
time_ids=time_ids,
channel_ids=channel_ids,
slice_ids=slice_ids,
pos_ids=pos_ids,
uniform_structure=uniform_struct,
)
self.frames_meta_sub = aux_utils.get_sub_meta(
frames_metadata=self.frames_metadata,
time_ids=metadata_ids['time_ids'],
channel_ids=metadata_ids['channel_ids'],
slice_ids=metadata_ids['slice_ids'],
pos_ids=metadata_ids['pos_ids'])
self.channel_ids = metadata_ids['channel_ids']
output_channels = '-'.join(map(str, self.channel_ids))
if mask_type == 'borders_weight_loss_map':
output_channels = str(mask_channel)
# Create mask_dir as a subdirectory of output_dir
self.mask_dir = os.path.join(
self.output_dir,
'mask_channels_' + output_channels,
)
os.makedirs(self.mask_dir, exist_ok=True)
self.int2str_len = int2str_len
self.uniform_struct = uniform_struct
self.nested_id_dict = nested_id_dict
assert mask_type in ['otsu', 'unimodal', 'dataset otsu', 'borders_weight_loss_map'], \
"Masking method invalid, 'otsu', 'unimodal', 'dataset otsu', 'borders_weight_loss_map'\
are currently supported"
self.mask_type = mask_type
self.ints_metadata = None
self.channel_thr_df = None
if mask_type == 'dataset otsu':
self.ints_metadata = aux_utils.read_meta(self.input_dir, 'intensity_meta.csv')
self.channel_thr_df = self.get_channel_thr_df()
# for channel_idx in channel_ids:
# row_idxs = self.ints_metadata['channel_idx'] == channel_idx
# pix_ints = self.ints_metadata.loc[row_idxs, 'intensity'].values
# self.channel_thr = threshold_otsu(pix_ints, nbins=32)
# # self.channel_thr = get_unimodal_threshold(pix_ints)
# self.channel_thr_df.append(0.3 * self.channel_thr)
# # self.channel_thr_df.append(1 * self.channel_thr)
self.mask_ext = mask_ext
def get_channel_thr_df(self):
ints_meta_sub = self.ints_metadata.loc[
self.ints_metadata['channel_idx'].isin(self.channel_ids),
['dir_name', 'channel_idx', 'intensity']
]
# channel_thr_df = ints_meta_sub.groupby(['dir_name', 'channel_idx']).agg(get_unimodal_threshold).reset_index()
channel_thr_df = ints_meta_sub.groupby(['dir_name', 'channel_idx']).agg(threshold_otsu).reset_index()
channel_thr_df['intensity'] = channel_thr_df['intensity']
return channel_thr_df
def get_mask_dir(self):
"""
Return mask directory
:return str mask_dir: Directory where masks are stored
"""
return self.mask_dir
def get_mask_channel(self):
"""
Return mask channel
:return int mask_channel: Assigned channel number for mask
"""
return self.mask_channel
def _get_args_read_image(self,
time_idx,
channel_ids,
slice_idx,
pos_idx,
correct_flat_field):
"""
Read image from t, c, p and s indices. All indices are singular
except channel which can be a list
:param int time_idx: Current time point to use for generating mask
:param list channel_ids: channel ids to use for generating mask
:param int slice_idx: Slice index
:param int pos_idx: Position index
:param bool correct_flat_field: bool indicator to correct for flat
field
:return np.array im: image corresponding to the given channel indices
and flatfield corrected
"""
input_fnames = []
for channel_idx in channel_ids:
frame_idx = aux_utils.get_meta_idx(self.frames_metadata,
time_idx,
channel_idx,
slice_idx,
pos_idx)
file_path = os.path.join(
self.input_dir,
self.frames_metadata.loc[frame_idx, 'file_name'],
)
input_fnames.append(file_path)
flat_field_fname = None
if correct_flat_field:
if isinstance(channel_idx, (int, float)):
flat_field_fname = os.path.join(
self.flat_field_dir,
'flat-field_channel-{}.npy'.format(channel_idx)
)
elif isinstance(channel_idx, (tuple, list)):
flat_field_fname = []
for ch_idx in channel_idx:
flat_field_fname.append(os.path.join(
self.flat_field_dir,
'flat-field_channel-{}.npy'.format(ch_idx)
))
return tuple(input_fnames), flat_field_fname
def generate_masks(self,
correct_flat_field=False,
str_elem_radius=5):
"""
Generate masks from flat-field corrected fluorophore images.
The sum of fluorophore channels is thresholded to generate a foreground
mask.
:param bool correct_flat_field: bool indicator to correct for flat
field or not
:param int str_elem_radius: Radius of structuring element for
morphological operations
"""
# Loop through all the indices and create masks
fn_args = []
id_df = self.frames_meta_sub[
['dir_name', 'time_idx', 'pos_idx', 'slice_idx']
].drop_duplicates()
channel_thrs = None
if self.uniform_struct:
for id_row in id_df.to_numpy():
dir_name, time_idx, pos_idx, slice_idx = id_row
input_fnames, ff_fname = self._get_args_read_image(
time_idx=time_idx,
channel_ids=self.channel_ids,
slice_idx=slice_idx,
pos_idx=pos_idx,
correct_flat_field=correct_flat_field,
)
if self.mask_type == 'dataset otsu':
channel_thrs = self.channel_thr_df.loc[
self.channel_thr_df['dir_name'] == dir_name, 'intensity'].to_numpy()
cur_args = (input_fnames,
ff_fname,
str_elem_radius,
self.mask_dir,
self.mask_channel,
time_idx,
pos_idx,
slice_idx,
self.int2str_len,
self.mask_type,
self.mask_ext,
channel_thrs)
fn_args.append(cur_args)
else:
for tp_idx, tp_dict in self.nested_id_dict.items():
mask_channel_dict = tp_dict[self.channel_ids[0]]
for pos_idx, sl_idx_list in mask_channel_dict.items():
for sl_idx in sl_idx_list:
input_fnames, ff_fname = self._get_args_read_image(
time_idx=tp_idx,
channel_ids=self.channel_ids,
slice_idx=sl_idx,
pos_idx=pos_idx,
correct_flat_field=correct_flat_field,
)
cur_args = (input_fnames,
ff_fname,
str_elem_radius,
self.mask_dir,
self.mask_channel,
tp_idx,
pos_idx,
sl_idx,
self.int2str_len,
self.mask_type,
self.mask_ext)
fn_args.append(cur_args)
mask_meta_list = mp_create_save_mask(fn_args, self.num_workers)
mask_meta_df = pd.DataFrame.from_dict(mask_meta_list)
mask_meta_df = mask_meta_df.sort_values(by=['file_name'])
mask_meta_df.to_csv(
os.path.join(self.mask_dir, 'frames_meta.csv'),
sep=',')
# update fg_frac field in image frame_meta.csv
cols_to_merge = self.frames_metadata.columns[self.frames_metadata.columns != 'fg_frac']
self.frames_metadata = \
pd.merge(self.frames_metadata[cols_to_merge],
mask_meta_df[['pos_idx', 'time_idx', 'slice_idx', 'fg_frac']],
how='left', on=['pos_idx', 'time_idx', 'slice_idx'])
self.frames_metadata.to_csv(os.path.join(self.input_dir, 'frames_meta.csv'),
sep=',')
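# --- Hedged usage sketch (not part of the original module) ---
# How MaskProcessor might be driven end to end, assuming input_dir
# contains the frames_meta.csv expected by aux_utils.read_meta.
# All paths below are hypothetical.
#
#     mask_proc = MaskProcessor(
#         input_dir='/data/images',
#         output_dir='/data/output',
#         channel_ids=[1],
#         mask_type='otsu',
#         mask_ext='.png',
#     )
#     mask_proc.generate_masks(correct_flat_field=False, str_elem_radius=5)
#     print('Masks written to', mask_proc.get_mask_dir())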
| python |
# Testing
from django.test import TestCase, Client
from django.test.utils import override_settings
# APP Models
from seshdash.models import Sesh_Alert, Alert_Rule, Sesh_Site,VRM_Account, BoM_Data_Point as Data_Point, Daily_Data_Point, Sesh_User
# django Time related
from django.utils import timezone
from time import sleep
from datetime import timedelta
import pytz
#Helper Functions
from django.forms.models import model_to_dict
from django.core import mail
from django.template.loader import get_template
#Security
from guardian.shortcuts import assign_perm
from geoposition import Geoposition
#Data generations
from data_generation import get_random_int, get_random_binary, get_random_interval, generate_date_array, get_random_float
# Debug
from django.forms.models import model_to_dict
# To Test
from seshdash.utils.time_utils import get_time_interval_array
from seshdash.data.db.kapacitor import Kapacitor
from seshdash.data.db.influx import Influx
from django.conf import settings
from seshdash.tasks import get_aggregate_daily_data
from seshdash.tests.data_generation import create_test_data
# This test case is written to test the alerting module.
# It aims to test whether the system sends an email and creates a Sesh_Alert object when an alert is triggered.
class KapacitorTestCase(TestCase):
def setUp(self):
# Need this to create a Site
self.VRM = VRM_Account.objects.create(vrm_user_id='[email protected]',vrm_password="asd")
# Setup Influx
self._influx_db_name = 'test_db'
self.i = Influx(database=self._influx_db_name)
try:
self.i.create_database(self._influx_db_name)
#Generate random data points for 24h
except:
self.i.delete_database(self._influx_db_name)
sleep(1)
self.i.create_database(self._influx_db_name)
pass
# Setup Kapacitor
self.kap = Kapacitor()
self.template_id = 'test_template'
self.task_id = 'test_task'
self.dj_template_name = 'alert_template'
self.dbrps = [{'db': self._influx_db_name, 'rp':'autogen' }]
self.location = Geoposition(52.5,24.3)
dt = timezone.make_aware(timezone.datetime(2015, 12, 11, 22, 0))
self.site = Sesh_Site.objects.create(site_name=u"Test_aggregate",
comission_date = dt,
location_city = u"kigali",
location_country=u"rwanda",
vrm_account = self.VRM,
installed_kw=123.0,
position=self.location,
system_voltage=12,
number_of_panels=12,
vrm_site_id=213,
battery_bank_capacity=12321,
has_genset=True,
has_grid=True)
#self.no_points = create_test_data(self.site,
# start = self.site.comission_date,
# end = dt + timedelta( hours = 48),
# interval = 30,
# random = False)
#create test user
self.test_user = Sesh_User.objects.create_user("john doe","[email protected]","asdasd12345")
#assign a user to the sites
assign_perm("view_Sesh_Site",self.test_user,self.site)
def tearDown(self):
self.i.delete_database(self._influx_db_name)
self.kap.delete_template(self.template_id)
self.kap.delete_task(self.task_id)
pass
@override_settings(INFLUX_DB='test_db')
def test_template_creation(self):
"""
Test creating template in kapacitor
"""
temp_script = """
// Which measurement to consume
var measurement string
// Optional where filter
var where_filter = lambda: TRUE
// Optional list of group by dimensions
var groups = [*]
// Which field to process
var field string
// Warning criteria, has access to 'mean' field
var warn lambda
// Critical criteria, has access to 'mean' field
var crit lambda
// How much data to window
var window = 5m
// The slack channel for alerts
var slack_channel = '#alerts'
stream
|from()
.measurement(measurement)
.where(where_filter)
.groupBy(groups)
|window()
.period(window)
.every(window)
|mean(field)
|alert()
.warn(warn)
.crit(crit)
.slack()
.channel(slack_channel)
"""
temp_id = self.template_id
temp_type = 'stream'
# Create template
temp = self.kap.create_template(temp_id, temp_type, temp_script)
self.assertIn('vars', temp)
# Verify template creation
temp_res = self.kap.get_template(temp_id)
self.assertIn('vars', temp_res)
# List template
temp_res = self.kap.list_templates()
self.assertIn('templates', temp_res)
# Update Template
temp_script = """
// Which measurement to consume
var measurement = 'cpu'
// Optional where filter
var where_filter = lambda: TRUE
// Optional list of group by dimensions
var groups = [*]
// Which field to process
var field string
// Warning criteria, has access to 'mean' field
var warn lambda
// Critical criteria, has access to 'mean' field
var crit lambda
// How much data to window
var window = 5m
// The slack channel for alerts
var slack_channel = '#alerts'
stream
|from()
.measurement(measurement)
.where(where_filter)
.groupBy(groups)
|window()
.period(window)
.every(window)
|mean(field)
|alert()
.warn(warn)
.crit(crit)
.slack()
.channel(slack_channel)
"""
temp_res = self.kap.update_template(temp_id, temp_script)
# Delete template
self.kap.delete_template(self.template_id)
def test_task_creation(self):
"""
Create a task and check if it actually causes an alert to trigger
"""
temp_script = """
stream
|from()
.measurement('cpu')
|alert()
.crit(lambda: "value" < 70)
.log('/tmp/alerts.log')
"""
temp_id = self.template_id
task_id = self.task_id
# Create task
temp = self.kap.create_task(task_id, dbrps=self.dbrps, script=temp_script, task_type='stream')
self.assertEqual(temp['status'],'enabled')
sleep(20)
for i in reversed(range(0,5)):
sleep(1)
dp_dict = {'cpu': i}
self.i.send_object_measurements(dp_dict, tags={"site_name":"test_site"}, database=self._influx_db_name)
temp = self.kap.get_task(task_id)
self.assertGreater(temp['stats']['node-stats']['alert2']['alerts_triggered'], 0)
def test_task_dj_template(self):
"""
test task creation with django templates
"""
template = get_template('seshdash/kapacitor_tasks/%s.tick'%self.dj_template_name)
alert_id = self.task_id
alert_info ={
'field': 'cpu',
'where_filter_lambda' : 'lambda: TRUE',
'error_lambda' : 'lambda: \"value\" < 30',
'time_window' : '5m',
'slack_channel' : '#alerts'
}
rendered_alert = template.render(alert_info)
result = self.kap.create_task(alert_id, dbrps= self.dbrps, script=rendered_alert)
self.assertEqual(result['status'], 'enabled')
| python |
# Created by Gorkem Polat at 10.02.2021
# contact: [email protected]
import os
import glob
import json
import shutil
import cv2
import numpy as np
import random
import matplotlib.pyplot as plt
from tqdm import tqdm
def show_image(image):
plt.imshow(image)
plt.show()
def show_image_opencv(image):
if len(image.shape) == 3:
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
else:
plt.imshow(image, cmap="gray")
plt.show()
def resize_by_keeping_ratio(image, new_height, fixed_width):
height, width, _ = image.shape
scale = new_height / height
new_width = int(scale * width)
resized_image = cv2.resize(image, (new_width, new_height), interpolation=cv2.INTER_LINEAR)
new_image = np.zeros((new_height, fixed_width, 3))
new_image[0:new_height, 0:new_width] = resized_image
return new_image, scale
image_path = "/home/gorkem/Desktop/data/EndoCV2021/original_files/trainData_EndoCV2021_5_Feb2021/data_C1/bbox_image"
image_paths = glob.glob(os.path.join(image_path, "*.jpg"))
image_path = random.choice(image_paths)
image = cv2.imread(image_path)
# image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
new_image, scale = resize_by_keeping_ratio(image, 512, 910)
print(image_path)
print("height: " + str(image.shape[0]) + " width: " + str(image.shape[1]))
new_image = new_image / 255
# show_image(new_image)
show_image_opencv(new_image.astype("float32"))
| python |
import importlib
import imp
import sys
class SettingsWrapper(object):
'''
Wrapper for loading settings files and merging them with overrides
'''
my_settings = {}
ignore = [
'__builtins__',
'__file__',
'__package__',
'__doc__',
'__name__',
]
def __init__(self):
pass
def load(self, local='localsettings.py', default='settings.py'):
'''
Load the settings dict
@param local: The local settings filename to use
@param default: The default settings module to read
@return: A dict of the loaded settings
'''
self._load_defaults(default)
self._load_custom(local)
return self.settings()
def load_from_string(self, settings_string='', module_name='customsettings'):
'''
Loads settings from a settings_string. Expects an escaped string like
the following:
"NAME=\'stuff\'\nTYPE=[\'item\']\n"
@param settings_string: The string with your settings
@return: A dict of loaded settings
'''
try:
mod = imp.new_module(module_name)
exec settings_string in mod.__dict__
except TypeError:
print "Could not import settings"
self.my_settings = {}
try:
self.my_settings = self._convert_to_dict(mod)
except ImportError:
print "Settings unable to be loaded"
return self.settings()
def settings(self):
'''
Returns the current settings dictionary
'''
return self.my_settings
def _load_defaults(self, default='settings.py'):
'''
Load the default settings
'''
if default[-3:] == '.py':
default = default[:-3]
self.my_settings = {}
try:
settings = importlib.import_module(default)
self.my_settings = self._convert_to_dict(settings)
except ImportError:
print "No default settings found"
def _load_custom(self, settings_name='localsettings.py'):
'''
Load the user defined settings, overriding the defaults
'''
if settings_name[-3:] == '.py':
settings_name = settings_name[:-3]
new_settings = {}
try:
settings = importlib.import_module(settings_name)
new_settings = self._convert_to_dict(settings)
except ImportError:
print "No override settings found"
for key in new_settings:
if key in self.my_settings:
item = new_settings[key]
if isinstance(item, dict) and \
isinstance(self.my_settings[key], dict):
for key2 in item:
self.my_settings[key][key2] = item[key2]
else:
self.my_settings[key] = item
else:
self.my_settings[key] = new_settings[key]
def _convert_to_dict(self, setting):
'''
Converts a settings file into a dictionary, ignoring python defaults
@param setting: A loaded setting module
'''
the_dict = {}
set = dir(setting)
for key in set:
if key in self.ignore:
continue
value = getattr(setting, key)
the_dict[key] = value
return the_dict
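# --- Hedged usage sketch (not part of the original module) ---
# Loading settings from an escaped string; the setting names are made up
# for illustration, and override merging works the same as load().
#
#     wrapper = SettingsWrapper()
#     settings = wrapper.load_from_string("NAME='stuff'\nTYPE=['item']\n")
#     print settings['NAME']  # -> stuff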
| python |
"""
Frame assertion setting.
"""
class Ac:
"""
Set assertion constants.
Const:
eq: Assert equal.
nq: Assert not equal.
al: Assert a is b.
at: Assert a is not b.
ai: Assert a in b.
ani: Assert a not in b.
ais: Assert isinstance(a, b).
anis: Assert not isinstance(a, b).
ln: Assert is None.
lnn: Assert is not None.
bt: Assert is True.
bf: Assert is False.
"""
eq = "self.assertEquals('{}','{}')"
nq = "self.assertNotEqual(str({}),'{}')"
al = "self.assertIs({}, {})"
at = "self.assertIsNot({},{})"
ai = "self.assertIn('{}','{}')"
ani = "self.assertNotIn('{}','{}')"
ais = "self.assertlsInstance({},{})"
anis = "self.assertNotIsInstance({},{})"
ln = "self.assertIsNone({})"
lnn = "self.assertIsNotNone({})"
bt = "self.assertTrue({})"
bf = "self.assertFalse({})"
| python |
from pwn import *
sh = ssh(user='ctf', host='node3.buuoj.cn', port=25102, password='guest', level='debug')
sh.interactive()
| python |
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from designate.api.v2.views import base as base_view
from designate.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class NameServerView(base_view.BaseView):
""" Model a NameServer API response as a python dictionary """
_resource_name = 'nameserver'
_collection_name = 'nameservers'
def _get_base_href(self, parents=None):
assert len(parents) == 1
href = "%s/v2/zones/%s/nameservers" % (self.base_uri, parents[0])
return href.rstrip('?')
def show_basic(self, context, request, nameserver):
""" Basic view of a nameserver """
return {
"id": nameserver["id"],
"name": nameserver["name"]
}
| python |
import unittest
from elasticsearch import ElasticsearchException
from elasticbatch.exceptions import ElasticBufferFlushError
class TestElasticBufferFlushError(unittest.TestCase):
def test_str(self):
class TestCase:
def __init__(self, msg, err, verbose, expected_str):
self.msg = msg
self.err = err
self.verbose = verbose
self.expected_str = expected_str
tests = {
'msg and err are None, verbose=False': TestCase(
msg=None,
err=None,
verbose=False,
expected_str=''
),
'msg and err are None, verbose=True': TestCase(
msg=None,
err=None,
verbose=True,
expected_str=''
),
'msg only, verbose=False': TestCase(
msg='error message',
err=None,
verbose=False,
expected_str='error message',
),
'msg only, verbose=True': TestCase(
msg='error message',
err=None,
verbose=True,
expected_str='error message',
),
'err is string, verbose=False': TestCase(
msg='error message',
err='we have a big problem',
verbose=False,
expected_str='error message',
),
'err is string, verbose=True': TestCase(
msg='error message',
err='we have a big problem',
verbose=True,
expected_str='error message: we have a big problem',
),
'err is list, verbose=False': TestCase(
msg='error message',
err=['error1', 'error2', 'error3'],
verbose=False,
expected_str='error message',
),
'err is list, verbose=True': TestCase(
msg='error message',
err=['error1', 'error2', 'error3'],
verbose=True,
expected_str='error message: [\'error1\', \'error2\', \'error3\']',
),
'err is ValueError, verbose=False': TestCase(
msg='error message',
err=ValueError('we have a big problem'),
verbose=False,
expected_str='error message',
),
'err is ValueError, verbose=True': TestCase(
msg='error message',
err=ValueError('we have a big problem'),
verbose=True,
expected_str='error message: ValueError: we have a big problem',
),
'err is ElasticsearchException, verbose=False': TestCase(
msg='error message',
err=ElasticsearchException('we have a big problem'),
verbose=False,
expected_str='error message',
),
'err is ElasticsearchException, verbose=True': TestCase(
msg='error message',
err=ElasticsearchException('we have a big problem'),
verbose=True,
expected_str='error message: elasticsearch.exceptions.ElasticsearchException: '
'we have a big problem',
),
}
for test_name, test in tests.items():
err = ElasticBufferFlushError(msg=test.msg, err=test.err, verbose=test.verbose)
self.assertEqual(str(err), test.expected_str, test_name)
| python |
from domain import Material
from collections import namedtuple
from random import randint
_State = namedtuple('_State', 'player_spawn, world_map, links, players')
def initial_state(player_spawn, world_map, links):
return _State(player_spawn, world_map, links, {})
def handle_command(state, command_name, input_data):
output_data = None
events = []
if command_name == 'activate':
player_name = input_data
assert player_name in state.players
player = state.players[player_name]
key_pos = player.position.to_grid()
door_pos = state.links.get(key_pos, None)
if door_pos:
new_material = Material.DOOR if state.world_map.material(door_pos) == Material.FLOOR else Material.FLOOR
new_world_map = state.world_map.replace_material(door_pos, new_material)
state = state._replace(world_map=new_world_map)
events.append(('world_map', new_world_map))
elif command_name == 'get_world_map':
output_data = state.world_map
elif command_name == 'join':
name = input_data
if name not in state.players:
player = state.player_spawn._replace(name=name)
else:
player = state.players[name]
state.players[name] = player
for player in state.players.values():
events.append(('player', player))
output_data = player
print("Player joined: " + name)
elif command_name == 'leave':
name = input_data
assert name in state.players
events.append(('player_left', name))
print("Player left: " + name)
elif command_name == 'move':
player = input_data
assert player.name in state.players
state.players[player.name] = player
events.append(('player', player))
else:
raise NotImplementedError()
return state, output_data, events
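# --- Hedged usage sketch (not part of the original module) ---
# handle_command() is a dispatcher: it takes the current state, a command
# name and its input, and returns (new_state, output_data, events). The
# spawn point, world map and links below are placeholders that would come
# from the `domain` module.
#
#     state = initial_state(player_spawn, world_map, links={})
#     state, player, events = handle_command(state, 'join', 'alice')
#     state, world_map_out, _ = handle_command(state, 'get_world_map', None)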
| python |
# EXPERIMENTAL: all may be removed soon
from gym.benchmarks import scoring
from gym.benchmarks.registration import benchmark_spec, register_benchmark, registry, register_benchmark_view # imports used elsewhere
register_benchmark(
id='Atari200M',
scorer=scoring.TotalReward(),
name='Atari200M',
view_group="Atari",
description='7 Atari games, with pixel observations',
tasks=[
{
'env_id': 'BeamRiderNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 363.9,
'reward_ceiling': 60000.0,
},
{
'env_id': 'BreakoutNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 1.7,
'reward_ceiling': 800.0,
},
{
'env_id': 'EnduroNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 0.0,
'reward_ceiling': 5000.0,
},
{
'env_id': 'PongNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': -20.7,
'reward_ceiling': 21.0,
},
{
'env_id': 'QbertNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 163.9,
'reward_ceiling': 40000.0,
},
{
'env_id': 'SeaquestNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 68.4,
'reward_ceiling': 100000.0,
},
{
'env_id': 'SpaceInvadersNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 148.0,
'reward_ceiling': 30000.0,
},
])
register_benchmark(
id='Atari40M',
scorer=scoring.TotalReward(),
name='Atari40M',
view_group="Atari",
description='7 Atari games, with pixel observations',
tasks=[
{
'env_id': 'BeamRiderNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 363.9,
'reward_ceiling': 60000.0,
},
{
'env_id': 'BreakoutNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 1.7,
'reward_ceiling': 800.0,
},
{
'env_id': 'EnduroNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 0.0,
'reward_ceiling': 5000.0,
},
{
'env_id': 'PongNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': -20.7,
'reward_ceiling': 21.0,
},
{
'env_id': 'QbertNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 163.9,
'reward_ceiling': 40000.0,
},
{
'env_id': 'SeaquestNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 68.4,
'reward_ceiling': 100000.0,
},
{
'env_id': 'SpaceInvadersNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 148.0,
'reward_ceiling': 30000.0,
}
])
register_benchmark(
id='AtariExploration40M',
scorer=scoring.TotalReward(),
name='AtariExploration40M',
view_group="Atari",
description='7 Atari games, with pixel observations',
tasks=[
{
'env_id': 'FreewayNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 0.1,
'reward_ceiling': 31.0,
},
{
'env_id': 'GravitarNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 245.5,
'reward_ceiling': 1000.0,
},
{
'env_id': 'MontezumaRevengeNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 25.0,
'reward_ceiling': 10000.0,
},
{
'env_id': 'PitfallNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': -348.8,
'reward_ceiling': 1000.0,
},
{
'env_id': 'PrivateEyeNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 662.8,
'reward_ceiling': 100.0,
},
{
'env_id': 'SolarisNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 2047.2,
'reward_ceiling': 5000.0,
},
{
'env_id': 'VentureNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 18.0,
'reward_ceiling': 100.0,
}
])
register_benchmark(
id='ClassicControl2-v0',
name='ClassicControl2',
view_group="Control",
description='Simple classic control benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'CartPole-v0',
'trials': 1,
'max_timesteps': 2000,
},
{'env_id': 'Pendulum-v0',
'trials': 1,
'max_timesteps': 1000,
},
])
register_benchmark(
id='ClassicControl-v0',
name='ClassicControl',
view_group="Control",
description='Simple classic control benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'CartPole-v1',
'trials': 3,
'max_timesteps': 100000,
'reward_floor': 0.0,
'reward_ceiling': 500.0,
},
{'env_id': 'Acrobot-v1',
'trials': 3,
'max_timesteps': 100000,
'reward_floor': -500.0,
'reward_ceiling': 0.0,
},
{'env_id': 'MountainCar-v0',
'trials': 3,
'max_timesteps': 100000,
'reward_floor': -200.0,
'reward_ceiling': -100.0,
},
{'env_id': 'Pendulum-v0',
'trials': 3,
'max_timesteps': 200000,
'reward_floor': -1400.0,
'reward_ceiling': 0.0,
},
])
### Autogenerated by tinkerbell.benchmark.convert_benchmark.py
register_benchmark(
id='Mujoco10M-v0',
name='Mujoco10M',
view_group="Control",
description='Mujoco benchmark with 10M steps',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'Ant-v1',
'trials': 1,
'max_timesteps': 1000000,
},
{'env_id': 'Hopper-v1',
'trials': 1,
'max_timesteps': 1000000,
},
{'env_id': 'Humanoid-v1',
'trials': 1,
'max_timesteps': 1000000,
},
{'env_id': 'HumanoidStandup-v1',
'trials': 1,
'max_timesteps': 1000000,
},
{'env_id': 'Walker2d-v1',
'trials': 1,
'max_timesteps': 1000000,
}
])
register_benchmark(
id='Mujoco1M-v0',
name='Mujoco1M',
view_group="Control",
description='Mujoco benchmark with 1M steps',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'HalfCheetah-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': -280.0,
'reward_ceiling': 4000.0,
},
{'env_id': 'Hopper-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': 16.0,
'reward_ceiling': 4000.0,
},
{'env_id': 'InvertedDoublePendulum-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': 53.0,
'reward_ceiling': 10000.0,
},
{'env_id': 'InvertedPendulum-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': 5.6,
'reward_ceiling': 1000.0,
},
{'env_id': 'Reacher-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': -43.0,
'reward_ceiling': -0.5,
},
{'env_id': 'Swimmer-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': 0.23,
'reward_ceiling': 500.0,
},
{'env_id': 'Walker2d-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': 1.6,
'reward_ceiling': 5500.0,
}
])
register_benchmark(
id='MinecraftEasy-v0',
name='MinecraftEasy',
view_group="Minecraft",
description='Minecraft easy benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'MinecraftBasic-v0',
'trials': 2,
'max_timesteps': 600000,
'reward_floor': -2200.0,
'reward_ceiling': 1000.0,
},
{'env_id': 'MinecraftDefaultFlat1-v0',
'trials': 2,
'max_timesteps': 2000000,
'reward_floor': -500.0,
'reward_ceiling': 0.0,
},
{'env_id': 'MinecraftTrickyArena1-v0',
'trials': 2,
'max_timesteps': 300000,
'reward_floor': -1000.0,
'reward_ceiling': 2800.0,
},
{'env_id': 'MinecraftEating1-v0',
'trials': 2,
'max_timesteps': 300000,
'reward_floor': -300.0,
'reward_ceiling': 300.0,
},
])
register_benchmark(
id='MinecraftMedium-v0',
name='MinecraftMedium',
view_group="Minecraft",
description='Minecraft medium benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'MinecraftCliffWalking1-v0',
'trials': 2,
'max_timesteps': 400000,
'reward_floor': -100.0,
'reward_ceiling': 100.0,
},
{'env_id': 'MinecraftVertical-v0',
'trials': 2,
'max_timesteps': 900000,
'reward_floor': -1000.0,
'reward_ceiling': 8040.0,
},
{'env_id': 'MinecraftMaze1-v0',
'trials': 2,
'max_timesteps': 600000,
'reward_floor': -1000.0,
'reward_ceiling': 1000.0,
},
{'env_id': 'MinecraftMaze2-v0',
'trials': 2,
'max_timesteps': 2000000,
'reward_floor': -1000.0,
'reward_ceiling': 1000.0,
},
])
register_benchmark(
id='MinecraftHard-v0',
name='MinecraftHard',
view_group="Minecraft",
description='Minecraft hard benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'MinecraftObstacles-v0',
'trials': 1,
'max_timesteps': 900000,
'reward_floor': -1000.0,
'reward_ceiling': 2080.0,
},
{'env_id': 'MinecraftSimpleRoomMaze-v0',
'trials': 1,
'max_timesteps': 900000,
'reward_floor': -1000.0,
'reward_ceiling': 4160.0,
},
{'env_id': 'MinecraftAttic-v0',
'trials': 1,
'max_timesteps': 600000,
'reward_floor': -1000.0,
'reward_ceiling': 1040.0,
},
{'env_id': 'MinecraftComplexityUsage-v0',
'trials': 1,
'max_timesteps': 600000,
'reward_floor': -1000.0,
'reward_ceiling': 1000.0,
},
])
register_benchmark(
id='MinecraftVeryHard-v0',
name='MinecraftVeryHard',
view_group="Minecraft",
description='Minecraft very hard benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'MinecraftMedium-v0',
'trials': 2,
'max_timesteps': 1800000,
'reward_floor': -10000.0,
'reward_ceiling': 16280.0,
},
{'env_id': 'MinecraftHard-v0',
'trials': 2,
'max_timesteps': 2400000,
'reward_floor': -10000.0,
'reward_ceiling': 32640.0,
},
])
register_benchmark(
id='MinecraftImpossible-v0',
name='MinecraftImpossible',
view_group="Minecraft",
description='Minecraft impossible benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'MinecraftDefaultWorld1-v0',
'trials': 2,
'max_timesteps': 6000000,
'reward_floor': -1000.0,
'reward_ceiling': 1000.0,
},
])
bandit_tasks = []
for n_arms in [5, 10, 50]:
for n_episodes in [10, 100, 500]:
bandit_tasks.append({
'env_id': 'BernoulliBandit-{k}.arms-{n}.episodes-v0'.format(k=n_arms, n=n_episodes),
'trials': 1,
'max_timesteps': 10 ** 9,
'reward_floor': 0,
'reward_ceiling': n_episodes,
})
register_benchmark(
id='BernoulliBandit-v0',
name='BernoulliBandit',
description='Multi-armed Bernoulli bandits',
scorer=scoring.ClipTo01ThenAverage(num_episodes=1000),
tasks=bandit_tasks
)
tabular_mdp_tasks = []
for n_states in [10]:
for n_actions in [5]:
for episode_length in [10]:
for n_episodes in [10, 25, 50, 75, 100]:
tabular_mdp_tasks.append({
'env_id': 'RandomTabularMDP-{s}.states-{a}.actions-{t}.timesteps-{n}.episodes-v0'.format(
s=n_states, a=n_actions, t=episode_length, n=n_episodes,
),
'trials': 1,
'max_timesteps': 10 ** 9,
'reward_floor': 0,
'reward_ceiling': episode_length * n_episodes * 2,
})
register_benchmark(
id='RandomTabularMDP-v0',
name='RandomTabularMDP',
description='Random tabular MDPs',
scorer=scoring.ClipTo01ThenAverage(num_episodes=1000),
tasks=tabular_mdp_tasks
)
| python |
#coding: utf-8
from __future__ import division, absolute_import, print_function, unicode_literals
from kasaya.core import exceptions
import datetime
from decimal import Decimal
import msgpack
#
# Warning, msgpack is broken and can't differentiate strings from binary data.
# Under python 3 message pack is unusable to transport data.
#
# More details and useless discussion here:
# https://github.com/msgpack/msgpack/issues/121
#
def encode_ext_types(obj):
"""
Convert unknown for messagepack protocol types to dicts
"""
encoders = {
# datetime
datetime.datetime: (
'datetime',
lambda obj:obj.strftime("%Y%m%dT%H:%M:%S.%f")
),
# date
datetime.date: (
'date',
lambda obj:obj.strftime("%Y%m%d")
),
# time
datetime.time: (
'time',
lambda obj:obj.strftime("%H:%M:%S.%f")
),
# timedelta
datetime.timedelta: (
'timedelta',
lambda obj: "%i:%i:%i" % (obj.days, obj.seconds, obj.microseconds)
),
Decimal: (
'decimal',
lambda obj: str(obj)
)
}
key = type(obj)#.__class__
if key in encoders:
n,f = encoders[obj.__class__]
return {'__customtype__':n, 'as_str':f(obj) }
raise Exception("Encoding of %s is not possible " % key)
return obj
def decode_obj_types(obj):
"""
Reverse operation for encode_ext_types
"""
decoders = {
'datetime':
lambda S : datetime.datetime.strptime( S, "%Y%m%dT%H:%M:%S.%f"),
'date':
lambda S : datetime.datetime.strptime( S, "%Y%m%d").date(),
'time':
lambda S : datetime.datetime.strptime( S, "%H:%M:%S.%f").time(),
'timedelta':
lambda S : datetime.timedelta( **dict( [ (n,int(v)) for n, v in zip(("days","seconds","microseconds"), S.split(":")) ]) ),
'decimal':
lambda S : Decimal(S),
}
try:
key = obj['__customtype__']
except:
return obj
try:
func = decoders[key]
except KeyError:
return obj
return func(obj['as_str'])
def data_2_bin(data):
return msgpack.packb(data, default=encode_ext_types)
def bin_2_data(bin):
try:
return msgpack.unpackb(bin, object_hook=decode_obj_types)
except msgpack.exceptions.UnpackException:
raise exceptions.MessageCorrupted()
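# --- Hedged usage sketch (not part of the original module) ---
# Round-tripping values msgpack does not handle natively (under Python 2,
# given the module's warning above about Python 3).
#
#     payload = {
#         'when': datetime.datetime(2014, 1, 2, 3, 4, 5, 6),
#         'price': Decimal('12.34'),
#     }
#     assert bin_2_data(data_2_bin(payload)) == payload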
| python |
from RPIO import PWM
from sys import stdin,stdout
pin=18
PWM.setup()
PWM.init_channel(13)
PWM.add_channel_pulse(13, pin ,0,0)
while True:
userinput = stdin.readline().rstrip('\n')
if userinput == 'quit':
break
else:
stdout.write("LightValue: " + userinput)
PWM.clear_channel_gpio(13, pin)
PWM.add_channel_pulse(13, pin ,999,int(userinput))
| python |
import flask
import pickle
import praw
import nltk
nltk.download("stopwords")
nltk.download("punkt")
from nltk.corpus import stopwords
import contractions
import inflect
import pandas as pd
import json
def clean(t):
en_stops = set(stopwords.words('english'))
t_old = str(t)
t_old = t_old.translate({ord(i): None for i in '{[(!@#$|%^.;:?><*=`~\-/_,&+)]}'})
t_old = t_old.replace('\n','')
t_old = t_old.replace('"','')
t_old = t_old.replace("'",'')
t_old = contractions.fix(t_old)
t_new = nltk.word_tokenize(t_old)
words_list=[]
for word in t_new:
word1=word.lower()
words_list.append(word1)
word_list=[]
for word in words_list:
if word not in en_stops:
word_list.append(word)
p = inflect.engine()
new_words = []
for word in word_list:
if word.isdigit():
new_word = p.number_to_words(word)
new_words.append(new_word)
else:
new_words.append(word)
if len(new_words) == 0:
return ''
else:
return new_words
def pos(data):
reddit = praw.Reddit(client_id='Qq1MxtQ9YVNXgA',client_secret='hg00d83IEYWEAAT0RdFzm50zm5E', user_agent='testing', username='mic_testing123',password='Cookies')
try:
post_data = reddit.submission(url = data)
except:
return ("No post with given URL",5000)
post = {}
post = {
"title":clean(post_data.title),
"url":str(post_data.url),}
post_data.comments.replace_more(limit=0)
comment = ''
count=0
for top_level_comment in post_data.comments:
comment = comment + ' ' + top_level_comment.body
count=count+1
if(count > 20):
break
post["comment"] = clean(comment)
s = str(post["title"])+","+str(post["url"])+","+str(post["comment"])
a = s.split(',')
a1=''
for item in a:
item1 = item.replace("[",'')
item1 = item1.replace("]",'')
item1 = item1.replace('"','')
item1 = item1.replace(' ','')
a1=a1+","+(item1)
return a1,0
model = pickle.load(open("model/model_final.pkl", 'rb'))
app = flask.Flask(__name__,template_folder = 'template')
@app.route('/', methods = ['GET', 'POST'])
def main():
if flask.request.method == 'GET':
return (flask.render_template('main.html'))
if flask.request.method == 'POST':
#print("yaya")
url = flask.request.form['url']
input_var,code = pos(str(url))
if code != 0:
return flask.render_template('main.html', original_input={'URL':url},result=input_var,)
else:
dic={}
dic["combined"] = input_var
val = pd.DataFrame([dic])
prediction1 = str(model.predict(val["combined"]))
prediction1 = prediction1[2:-2]
return flask.render_template('main.html', original_input={'URL':url},result=prediction1,)
@app.route('/automated_testing',methods = ['POST'])
def automated_testing():
if flask.request.method == 'POST':
#print(flask.request.files)
#print("I m here1")
#print(type(flask.request.files))
txt = flask.request.files["upload_file"]
#print("I m here2")
#print(txt)
urls = txt.read().decode('utf-8').split('\n')
dic1 = {}
for url in urls:
if url != '':
input_var,code = pos(str(url))
dic={}
dic["combined"] = input_var
val = pd.DataFrame([dic])
prediction1 = str(model.predict(val["combined"]))
dic1[url] = prediction1[2:-2]
#print(dic1[url])
return json.dumps(dic1)
if __name__ == "__main__":
app.run()
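# --- Hedged usage sketch (not part of the original app) ---
# Exercising the /automated_testing endpoint with a plain-text file of
# Reddit post URLs (one per line); host and port assume the default
# app.run() above, and urls.txt is a hypothetical file.
#
#     import requests
#
#     with open('urls.txt', 'rb') as fh:
#         resp = requests.post('http://127.0.0.1:5000/automated_testing',
#                              files={'upload_file': fh})
#     print(resp.json())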
| python |
"""
Implement a function int_func() that takes a word of lowercase Latin letters and returns the same word, but with the
first letter capitalized. For example, print(int_func('text')) -> Text.
Continue the task: the program receives a string of words separated by spaces. Each word consists
of lowercase Latin letters. Print the original string, but with every word starting with a capital letter.
The previously written int_func() must be used.
"""
def int_func(string):
return chr(ord(string[:1])-32)+string[1:] # f-string needed
# text_string = input("Введите строку: ")
text_string = 'the quick brown fox jumps over the lazy dog'
print(text_string)
print(*list(map(int_func, text_string.split())))
print(int_func('one'))
# ver - 2 using *args
def int_func_v2(*args):
return " ".join([chr(ord(el[:1])-32)+el[1:] for el in args])
text_string = 'the quick brown fox jumps over the lazy dog'
print(text_string)
print(int_func_v2(*text_string.split()))
print(int_func_v2('one'))
print(int_func_v2('one', 'two'))
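# A hedged aside (not part of the original exercise): the built-in str
# methods give the same result for this input, e.g.
# print(' '.join(word.capitalize() for word in text_string.split()))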
| python |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Distributed under terms of the MIT license.
import os
import datetime
import json
import numpy as np
from numpy.linalg import norm
import math
import argparse
from platt import *
from sklearn.metrics import f1_score
import time
import scipy.stats
from itertools import combinations
from tqdm import tqdm
from data_gen import *
class mix_curv_perceptron:
def __init__(self, mix_component, embed_data, multiclass, max_round, max_update):
self.X_train = embed_data['X_train']
self.X_test = embed_data['X_test']
self.y_train = embed_data['y_train']
self.y_test = embed_data['y_test']
self.max_norm = embed_data['max_norm']
self.curv_value = embed_data['curv_value']
self.multiclass = multiclass
self.round = max_round
self.max_update = max_update
self.class_labels = list(np.unique(self.y_train))
self.n_class = len(self.class_labels)
self.n_train_samples = self.y_train.size
self.n_test_samples = self.y_test.size
# store each component in order
prod_space_component = mix_component.split(',')
self.space_type = []
self.space_dim = []
for comp in prod_space_component:
self.space_type.append(comp[0])
if comp.startswith('e'):
self.space_dim.append(int(comp[1]))
else:
self.space_dim.append(int(comp[1]) + 1)
self.IpTrain = {}
def mix_classifier_train(self, idx, error_record, y_bin_train):
res = 0
for err_idx in error_record:
if (err_idx, idx) not in self.IpTrain:
cur_dis = 0
start_dim = 0
for comp_idx in range(len(self.space_type)):
if self.space_type[comp_idx] == 'e':
cur_dis += np.dot(self.X_train[err_idx, start_dim: start_dim + self.space_dim[comp_idx]],
self.X_train[idx, start_dim: start_dim + self.space_dim[comp_idx]]) + 1
elif self.space_type[comp_idx] == 'h':
dist_h = np.dot(self.X_train[err_idx, start_dim: start_dim + self.space_dim[comp_idx]],
self.X_train[idx, start_dim: start_dim + self.space_dim[comp_idx]]) / (self.max_norm[comp_idx] ** 2)
if abs(dist_h) > 1:
dist_h = np.sign(dist_h)
cur_dis += math.sqrt(self.curv_value[comp_idx]) * np.arcsin(dist_h)
elif self.space_type[comp_idx] == 's':
dist_s = np.dot(self.X_train[err_idx, start_dim: start_dim + self.space_dim[comp_idx]],
self.X_train[idx, start_dim: start_dim + self.space_dim[comp_idx]]) * self.curv_value[comp_idx]
if abs(dist_s) > 1:
dist_s = np.sign(dist_s)
cur_dis += math.sqrt(self.curv_value[comp_idx]) * np.arcsin(dist_s)
start_dim += self.space_dim[comp_idx]
# store the results
self.IpTrain[(err_idx, idx)] = y_bin_train[err_idx] * cur_dis
res += error_record[err_idx] * self.IpTrain[(err_idx, idx)]
return res
def mix_classifier_test(self, idx, error_record, y_bin_train):
res = 0
for err_idx in error_record:
cur_dis = 0
start_dim = 0
for comp_idx in range(len(self.space_type)):
if self.space_type[comp_idx] == 'e':
cur_dis += np.dot(self.X_train[err_idx, start_dim: start_dim + self.space_dim[comp_idx]],
self.X_test[idx, start_dim: start_dim + self.space_dim[comp_idx]]) + 1
elif self.space_type[comp_idx] == 'h':
dist_h = np.dot(self.X_train[err_idx, start_dim: start_dim + self.space_dim[comp_idx]],
self.X_test[idx, start_dim: start_dim + self.space_dim[comp_idx]]) / (self.max_norm[comp_idx] ** 2)
if abs(dist_h) > 1:
dist_h = np.sign(dist_h)
cur_dis += math.sqrt(self.curv_value[comp_idx]) * np.arcsin(dist_h)
elif self.space_type[comp_idx] == 's':
dist_s = np.dot(self.X_train[err_idx, start_dim: start_dim + self.space_dim[comp_idx]],
self.X_test[idx, start_dim: start_dim + self.space_dim[comp_idx]]) * self.curv_value[comp_idx]
if abs(dist_s) > 1:
dist_s = np.sign(dist_s)
cur_dis += math.sqrt(self.curv_value[comp_idx]) * np.arcsin(dist_s)
start_dim += self.space_dim[comp_idx]
res += error_record[err_idx] * y_bin_train[err_idx] * cur_dis
return res
def process_data(self):
if self.multiclass:
test_probability = np.zeros((self.n_test_samples, self.n_class), dtype=float)
for class_val in self.class_labels:
y_bin_train = np.array([1 if val == class_val else -1 for val in self.y_train])
# initialize the error count dictionary
tmp_error_record = {0: 1}
total_error_count = 1
break_flag = False
# training
for epoch in range(self.round):
for idx in range(self.n_train_samples):
yn = self.mix_classifier_train(idx, tmp_error_record, y_bin_train)
if y_bin_train[idx] * yn <= 0:
if idx in tmp_error_record:
tmp_error_record[idx] += 1
else:
tmp_error_record[idx] = 1
total_error_count += 1
print('\r', idx+1, 'samples finished.', total_error_count, end='')
if total_error_count == self.max_update:
break_flag = True
break
print('\n', epoch + 1, 'rounds finished.')
if break_flag:
break
# obtain the decision values for training samples
decision_vals = [0] * self.n_train_samples
for idx in range(self.n_train_samples):
decision_vals[idx] = self.mix_classifier_train(idx, tmp_error_record, y_bin_train)
tmp_ab = SigmoidTrain(deci=decision_vals, label=y_bin_train, prior1=None, prior0=None)
print('Platt probability computed')
# testing
for idx in range(self.n_test_samples):
yn = self.mix_classifier_test(idx, tmp_error_record, y_bin_train)
test_probability[idx, self.class_labels.index(class_val)] = SigmoidPredict(deci=yn, AB=tmp_ab)
y_pred_idx = np.argmax(test_probability, axis=1)
y_pred = np.array([self.class_labels[i] for i in y_pred_idx])
print('F1 score:', f1_score(self.y_test, y_pred, average='macro'), 'total number of testing samples:', self.y_test.size)
return f1_score(self.y_test, y_pred, average='macro')
else:
error_record = {0: 1}
total_error_count = 1
break_flag = False
# training
for epoch in range(self.round):
for idx in tqdm(range(self.y_train.size)):
yn = self.mix_classifier_train(idx, error_record, self.y_train)
if self.y_train[idx] * yn <= 0:
if idx in error_record:
error_record[idx] += 1
else:
error_record[idx] = 1
total_error_count += 1
# print('\r', f'{idx + 1}/{self.yTrain.size} samples finished.', total_error_count, end='')
if total_error_count == self.max_update:
break_flag = True
break
print('\n', epoch + 1, 'rounds finished,', total_error_count)
if break_flag:
break
# testing
y_pred = []
for idx in tqdm(range(self.y_test.size)):
yn = self.mix_classifier_test(idx, error_record, self.y_train)
if yn > 0:
y_pred.append(1)
else:
y_pred.append(-1)
y_pred = np.array(y_pred)
print('F1 score:', f1_score(self.y_test, y_pred, average='macro'), 'total number of testing samples:', self.y_test.size)
return f1_score(self.y_test, y_pred, average='macro')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Perceptron algorithm in product space form.")
parser.add_argument("--data_path1", type=str, default=None, help="Where data is located.")
parser.add_argument("--data_path2", type=str, default=None, help="Where data is located.")
parser.add_argument("--data_path3", type=str, default=None, help="Where data is located.")
parser.add_argument("--data_path4", type=str, default=None, help="Where data is located.")
parser.add_argument("--data_path_num", type=int, default=1, help="How many data path to include.")
parser.add_argument("--data_name", type=str, default="Lymphoma", help="Which dataset to test on.")
parser.add_argument("--prod_space", type=str, default="e2,h2,s2", help="Product space form.")
parser.add_argument("--test_size", type=float, default=0.4, help="Percent of test set size.")
parser.add_argument("--trails", type=int, default=10, help="Number of trails want to repeat.")
parser.add_argument("--save_path", type=str, default="results", help="Where to save results.")
parser.add_argument("--transform", type=bool, default=False, help="Where to perform inverse projection.")
args = parser.parse_args()
start = time.time()
cifar_flag = False
if args.data_name == "Lymphoma":
labels_chosen_lst = [[0, 1]]
elif args.data_name == "Blood_cell_landmark":
labels_chosen_lst = list(combinations([i for i in range(10)], 2))
# for debug only
# np.random.seed(0)
# rnd_idx = list(np.random.permutation(45)[0:10])
# tmp_labels_chosen_lst = [labels_chosen_lst[i] for i in rnd_idx]
# labels_chosen_lst = tmp_labels_chosen_lst.copy()
elif args.data_name == "cifar100":
cifar_flag = True
labels_chosen_lst = []
for i in range(30):
np.random.seed(i)
labels_chosen_lst.append(list(np.random.permutation(100)[0:2]))
else:
# used for debugging purpose
labels_chosen_lst = [[0, 1]]
label_trails = len(labels_chosen_lst)
acc = np.zeros((label_trails, args.trails))
# path to different files
data_path = [args.data_path1, args.data_path2, args.data_path3, args.data_path4]
data_path = data_path[0: args.data_path_num]
print(data_path)
# curvature of each file
prod_space = []
for file_name in data_path:
if cifar_flag:
prod_space.append(file_name.split('-')[2])
else:
prod_space.append(file_name.split('-')[3])
joint_prod_space = ','.join(prod_space)
assert args.prod_space == joint_prod_space
for i in range(label_trails):
for j in range(args.trails):
embed_data = mix_data_generation(data_path, prod_space, 2, list(labels_chosen_lst[i]), test_size=args.test_size, cifar_flag=cifar_flag, seed=None, transform=args.transform)
mix_perp = mix_curv_perceptron(args.prod_space, embed_data, False, 1, 10000)
print(f'=========={i},{j},{args.prod_space}==========')
acc[i, j] = mix_perp.process_data()
print(mean_confidence_interval(acc))
print('Time used:', time.time() - start)
if not os.path.exists(args.save_path):
os.makedirs(args.save_path)
cur_time = datetime.datetime.utcnow().isoformat()
np.save(f'{args.save_path}/{args.data_name}_{prod_space}_perceptron_f1_scores_{cur_time}.npy', acc)
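    # Hedged example invocation (the script name and embedding-file path are hypothetical
    # placeholders; the dash-separated file names must carry the curvature tag expected by
    # the prod_space parsing above):
    #   python <this_script>.py --data_path1 <embedding-file> --data_path_num 1 \
    #          --data_name Lymphoma --prod_space e2 --trails 10 --save_path results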
| python |
import pandas as pd
import time
import json
from collections import OrderedDict
class RunManager():
def __init__(self):
""" Class constructor """
self.epoch_count = 0
self.epoch_loss = 0
self.epoch_num_correct = 0
self.epoch_start_time = None
self.run_params = None
self.run_count = 0
self.run_data = []
self.run_start_time = None
self.network = None
self.loader = None
def begin_run(self, run, network, loader):
""" Function to initialize each individual run """
self.run_start_time = time.time() # start time of the current run
self.run_params = run # save the current run parameters
self.run_count += 1 # increment the current run by one
self.network = network # save our network
self.loader = loader # save our dataloader
def end_run(self):
""" Function to wrap up the current run """
self.epoch_count = 0 # restart the epoch count
print(f"Done with run {self.run_count}")
def begin_epoch(self):
""" Function to initialize each individual epoch of each run"""
self.epoch_start_time = time.time() # start time of the current epoch
self.epoch_count += 1 # increment current epoch by one
self.epoch_loss = 0 # zero current loss
self.epoch_num_correct = 0 # zero current number of correct predictions
def end_epoch(self):
""" Function to wrap up the current epoch"""
epoch_duration = time.time() - self.epoch_start_time
run_duration = time.time() - self.run_start_time
loss = self.epoch_loss / len(self.loader.dataset)
accuracy = self.epoch_num_correct / len(self.loader.dataset)
        # Track training loop performance #
results = OrderedDict()
results["run"] = self.run_count
results["epoch"] = self.epoch_count
results['loss'] = loss
results["accuracy"] = accuracy
results['epoch duration'] = epoch_duration
results['run duration'] = run_duration
for k, v in self.run_params._asdict().items():
results[k] = v
self.run_data.append(results)
def track_loss(self, loss, batch):
""" Function to track the loss of each batch of images """
self.epoch_loss += loss.item() * batch[0].shape[0]
def track_num_correct(self, preds, labels):
""" Function to track the number of correct predictions of each batch of images """
self.epoch_num_correct += self._get_num_correct(preds, labels)
def _get_num_correct(self, preds, labels):
""" Function to calculate the number of correct predictions of each batch of images """
return preds.argmax(dim=1).eq(labels).sum().item()
def save(self, fileName):
""" Function to save the results in JSON and .csv format for each training loop"""
pd.DataFrame.from_dict(
self.run_data, orient='columns'
).to_csv(f'{fileName}.csv')
with open(f'{fileName}.json', 'w', encoding='utf-8') as f:
json.dump(self.run_data, f, ensure_ascii=False, indent=4)
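
if __name__ == '__main__':
    # Minimal smoke test of RunManager (a hedged sketch, not part of the original training
    # code): it assumes PyTorch is installed and uses a throwaway linear model and random
    # tensors purely to exercise the tracking/saving API.
    from collections import namedtuple
    import torch
    import torch.nn.functional as F
    from torch.utils.data import DataLoader, TensorDataset

    Run = namedtuple('Run', ['lr', 'batch_size'])
    run = Run(lr=0.01, batch_size=4)
    data = torch.randn(8, 4)
    labels = torch.randint(0, 2, (8,))
    loader = DataLoader(TensorDataset(data, labels), batch_size=run.batch_size)
    network = torch.nn.Linear(4, 2)

    manager = RunManager()
    manager.begin_run(run, network, loader)
    manager.begin_epoch()
    for batch in loader:
        images, targets = batch
        preds = network(images)                 # forward pass only
        loss = F.cross_entropy(preds, targets)  # dummy loss, no optimizer step
        manager.track_loss(loss, batch)
        manager.track_num_correct(preds, targets)
    manager.end_epoch()
    manager.end_run()
    manager.save('run_results')  # writes run_results.csv and run_results.json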
| python |
__author__ = 'David Moser <[email protected]>'
from unittest import TestSuite
from .testcase_create_delete_live_stream import CreateLiveStreamTestCase
def get_test_suite():
test_suite = TestSuite()
test_suite.addTest(CreateLiveStreamTestCase())
return test_suite
| python |
#
# PySNMP MIB module Dlink-IMPB-MNG (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Dlink-IMPB-MNG
# Produced by pysmi-0.3.4 at Wed May 1 12:58:21 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsUnion, SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint")
rnd, = mibBuilder.importSymbols("DLINK-3100-MIB", "rnd")
rlImpbManagment, = mibBuilder.importSymbols("Dlink-IMPB-FEATURES", "rlImpbManagment")
ifIndex, InterfaceIndex = mibBuilder.importSymbols("IF-MIB", "ifIndex", "InterfaceIndex")
EnabledStatus, = mibBuilder.importSymbols("P-BRIDGE-MIB", "EnabledStatus")
PortList, = mibBuilder.importSymbols("Q-BRIDGE-MIB", "PortList")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
TimeTicks, ObjectIdentity, ModuleIdentity, Gauge32, IpAddress, iso, Unsigned32, Integer32, NotificationType, MibIdentifier, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "ObjectIdentity", "ModuleIdentity", "Gauge32", "IpAddress", "iso", "Unsigned32", "Integer32", "NotificationType", "MibIdentifier", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter32", "Counter64")
TextualConvention, DisplayString, MacAddress, TruthValue, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString", "MacAddress", "TruthValue", "RowStatus")
class IMPBPacketType(TextualConvention, Integer32):
    description = 'Specifies one of 3 Packet Types: 1- IP. 2- ARP. 3- IP+ARP'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
namedValues = NamedValues(("ip", 1), ("arp", 2), ("iparp", 3))
class IMPBLockMode(TextualConvention, Integer32):
description = 'Specifies lock/unlock for an entry: 1- Unlocked. 2- Locked.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("unlocked", 1), ("locked", 2))
class IMPBDeviceType(TextualConvention, Integer32):
    description = 'Specifies one of 4 Device Types: 1- HOST. 2- DHCP Server. 3- Router. 4- Router with DHCP.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
namedValues = NamedValues(("host", 1), ("dhcpSrv", 2), ("router", 3), ("routerDhcp", 4))
rlIMPBMngTable = MibTable((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 139, 1, 1), )
if mibBuilder.loadTexts: rlIMPBMngTable.setStatus('current')
if mibBuilder.loadTexts: rlIMPBMngTable.setDescription('The table specifies all IMPB station. The entry contains IP address and MAC of the station and also list of ports, packet type and Device type of this station.')
rlIMPBMngEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 139, 1, 1, 1), ).setIndexNames((0, "Dlink-IMPB-MNG", "rlIMPBMngIPAddress"))
if mibBuilder.loadTexts: rlIMPBMngEntry.setStatus('current')
if mibBuilder.loadTexts: rlIMPBMngEntry.setDescription('The row definition for this table.')
rlIMPBMngIPAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 139, 1, 1, 1, 1), IpAddress())
if mibBuilder.loadTexts: rlIMPBMngIPAddress.setStatus('current')
if mibBuilder.loadTexts: rlIMPBMngIPAddress.setDescription('IP address of station.')
rlIMPBMngPacketType = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 139, 1, 1, 1, 2), IMPBPacketType().clone('ip')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIMPBMngPacketType.setStatus('current')
if mibBuilder.loadTexts: rlIMPBMngPacketType.setDescription('Packet type of IMPB.')
rlIMPBMngPMACAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 139, 1, 1, 1, 3), MacAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIMPBMngPMACAddress.setStatus('current')
if mibBuilder.loadTexts: rlIMPBMngPMACAddress.setDescription('Station mac address')
rlIMPBMngDeviceType = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 139, 1, 1, 1, 4), IMPBDeviceType().clone('host')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIMPBMngDeviceType.setStatus('current')
if mibBuilder.loadTexts: rlIMPBMngDeviceType.setDescription('Station type')
rlIMPBMngPortlist = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 139, 1, 1, 1, 5), PortList()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIMPBMngPortlist.setStatus('current')
if mibBuilder.loadTexts: rlIMPBMngPortlist.setDescription('List of ports that the station is configured on.')
rlIMPBMngMode = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 139, 1, 1, 1, 6), IMPBLockMode().clone('locked')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIMPBMngMode.setStatus('current')
if mibBuilder.loadTexts: rlIMPBMngMode.setDescription('Is this station locked or not.')
rlIMPBMngRouterBandwidth = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 139, 1, 1, 1, 7), Unsigned32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(100, 1000000), ))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIMPBMngRouterBandwidth.setStatus('current')
if mibBuilder.loadTexts: rlIMPBMngRouterBandwidth.setDescription('Specifies, for a Router entry, the egress Bandwidth on the associated ports. Zero means disabled.')
rlIMPBMngRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 139, 1, 1, 1, 8), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIMPBMngRowStatus.setStatus('current')
if mibBuilder.loadTexts: rlIMPBMngRowStatus.setDescription('A status can be destroy, active or createAndGo')
rlIMPBMngAction = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 139, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("noAction", 1), ("lockAll", 2), ("unlockAll", 3), ("deleteUnlock", 4), ("deleteAll", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIMPBMngAction.setStatus('current')
if mibBuilder.loadTexts: rlIMPBMngAction.setDescription('An action scalar which specifies the global action to take on the management DB.')
rlIMPBMngPortBandwidthTable = MibTable((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 139, 1, 3), )
if mibBuilder.loadTexts: rlIMPBMngPortBandwidthTable.setStatus('current')
if mibBuilder.loadTexts: rlIMPBMngPortBandwidthTable.setDescription('The table specifies the Bandwidth value for each Port that at least one Router station is configured on it.')
rlIMPBMngPortBandwidthEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 139, 1, 3, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: rlIMPBMngPortBandwidthEntry.setStatus('current')
if mibBuilder.loadTexts: rlIMPBMngPortBandwidthEntry.setDescription('The row definition for this table.')
rlIMPBMngBandwidth = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 139, 1, 3, 1, 1), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlIMPBMngBandwidth.setStatus('current')
if mibBuilder.loadTexts: rlIMPBMngBandwidth.setDescription('This variable specifies what is the Bandwidth value on the specific ifIndex. Zero means disabled.')
rlIMPBMngRouterBandwidthTable = MibTable((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 139, 1, 4), )
if mibBuilder.loadTexts: rlIMPBMngRouterBandwidthTable.setStatus('current')
if mibBuilder.loadTexts: rlIMPBMngRouterBandwidthTable.setDescription('The table shows the Bandwidth information per configured router')
rlIMPBMngRouterBandwidthEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 139, 1, 4, 1), ).setIndexNames((0, "Dlink-IMPB-MNG", "rlIMPBRouterIPAddress"))
if mibBuilder.loadTexts: rlIMPBMngRouterBandwidthEntry.setStatus('current')
if mibBuilder.loadTexts: rlIMPBMngRouterBandwidthEntry.setDescription('The row definition for this table.')
rlIMPBRouterIPAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 139, 1, 4, 1, 1), IpAddress())
if mibBuilder.loadTexts: rlIMPBRouterIPAddress.setStatus('current')
if mibBuilder.loadTexts: rlIMPBRouterIPAddress.setDescription('IP address of station.')
rlIMPBRouterPortlist = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 139, 1, 4, 1, 2), PortList()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlIMPBRouterPortlist.setStatus('current')
if mibBuilder.loadTexts: rlIMPBRouterPortlist.setDescription('List of ports that the router is configured on.')
rlIMPBRouterBandwidth = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 139, 1, 4, 1, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlIMPBRouterBandwidth.setStatus('current')
if mibBuilder.loadTexts: rlIMPBRouterBandwidth.setDescription('This variable specifies the Bandwidth value for a specific router, Zero means disabled.')
rlIMPBMngDiscoveryLearningStatus = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 139, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("learning", 1), ("noLearning", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlIMPBMngDiscoveryLearningStatus.setStatus('current')
if mibBuilder.loadTexts: rlIMPBMngDiscoveryLearningStatus.setDescription('This variable specifies the discovery is NOW learning or stopped. Setting an entry in rlIMPBMngDiscoverytTable is conditioned with a value of this scalar to be set to noLearning.')
rlIMPBMngUncheckPorts = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 139, 1, 6), PortList()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIMPBMngUncheckPorts.setStatus('current')
if mibBuilder.loadTexts: rlIMPBMngUncheckPorts.setDescription('Action scalar that defines which ports should be deleted from all entries in the management MIB. If an entry remains with an empty portlist, the entry will be deleted as well.')
rlIMPBMngLockedStations = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 139, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlIMPBMngLockedStations.setStatus('current')
if mibBuilder.loadTexts: rlIMPBMngLockedStations.setDescription('Read-only scalar that counts how many locked stations there are in the system.')
rlIMPBMngGratARPPeriodTimeout = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 139, 1, 8), Unsigned32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(30, 300), ))).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIMPBMngGratARPPeriodTimeout.setStatus('current')
if mibBuilder.loadTexts: rlIMPBMngGratARPPeriodTimeout.setDescription("The interval at which ARP Requests are transmitted on behalf of configured stations. The default value for rlIMPBGratARPPeriodTimeout object is Zero, which means don't send ARP Request. The value of this object must be restored from non-volatile storage after a re-initialization of the management system.")
mibBuilder.exportSymbols("Dlink-IMPB-MNG", rlIMPBMngPortBandwidthTable=rlIMPBMngPortBandwidthTable, rlIMPBMngAction=rlIMPBMngAction, rlIMPBMngIPAddress=rlIMPBMngIPAddress, rlIMPBMngGratARPPeriodTimeout=rlIMPBMngGratARPPeriodTimeout, rlIMPBMngRouterBandwidthEntry=rlIMPBMngRouterBandwidthEntry, rlIMPBRouterBandwidth=rlIMPBRouterBandwidth, rlIMPBMngMode=rlIMPBMngMode, IMPBLockMode=IMPBLockMode, rlIMPBMngPortBandwidthEntry=rlIMPBMngPortBandwidthEntry, rlIMPBMngRowStatus=rlIMPBMngRowStatus, rlIMPBMngRouterBandwidth=rlIMPBMngRouterBandwidth, rlIMPBMngTable=rlIMPBMngTable, rlIMPBMngPMACAddress=rlIMPBMngPMACAddress, rlIMPBMngDeviceType=rlIMPBMngDeviceType, rlIMPBMngRouterBandwidthTable=rlIMPBMngRouterBandwidthTable, IMPBDeviceType=IMPBDeviceType, rlIMPBMngEntry=rlIMPBMngEntry, rlIMPBMngPacketType=rlIMPBMngPacketType, rlIMPBRouterIPAddress=rlIMPBRouterIPAddress, rlIMPBMngUncheckPorts=rlIMPBMngUncheckPorts, rlIMPBRouterPortlist=rlIMPBRouterPortlist, rlIMPBMngDiscoveryLearningStatus=rlIMPBMngDiscoveryLearningStatus, rlIMPBMngLockedStations=rlIMPBMngLockedStations, rlIMPBMngPortlist=rlIMPBMngPortlist, rlIMPBMngBandwidth=rlIMPBMngBandwidth, IMPBPacketType=IMPBPacketType)
| python |
#GAE modules
import webapp2
from google.appengine.ext.webapp import template
from google.appengine.ext import ndb
#Application specific Modules
from ExtraModules.gettemplate import gettemplate
from ExtraModules import phonenumbers
from model import Messages
def checkPhoneNumber(number, country_code):
try:
numobj = phonenumbers.parse(number, country_code)
if phonenumbers.is_valid_number(numobj):
return True
else:
return False
except:
return False
class getMsgValues:
def __init__(self, obj):
self.namevalue = obj.request.get('name')
self.emailvalue = obj.request.get('email')
self.phonevalue = obj.request.get('phone')
self.subjectvalue = obj.request.get('subject')
self.completemessage = obj.request.get('message')
self.countrycode = obj.request.get('countrycode')
class Contact(webapp2.RequestHandler):
def get(self):
template_values = {
'page':"Contact",
'msg_sent_status':False,
}
self.response.out.write(template.render(gettemplate('Contact'), template_values))
class SubmitMessage(webapp2.RequestHandler):
def post(self):
template_values = {
'page':"Contact",
'msg_sent_status':False,
}
msg = getMsgValues(self)
if not checkPhoneNumber(msg.phonevalue, msg.countrycode):
template_values['msg_sent_status'] = False
template_values['msg'] = "Invalid Phone number"
            self.response.out.write(template.render(gettemplate('Contact'), template_values))
            return  # stop here so an invalid message is not saved below
template_values['msg'] = None
template_values['msg_sent_status'] = True
msg = Messages(parent=ndb.Key("MSG", msg.emailvalue or "*notice*"),
name=msg.namevalue,
email=msg.emailvalue,
phone=msg.phonevalue,
subject=msg.subjectvalue,
message=msg.completemessage)
msg.put()
self.response.out.write(template.render(gettemplate('Contact'), template_values))
| python |
from mcpi.minecraft import Minecraft
mc = Minecraft.create()
mc.postToChat("Hello, Minecraft World") | python |
"""
You should not make an instance of the Client class yourself, rather you should listen for new connections with
:meth:`~websocket.server.WebSocketServer.connection`
>>> @socket.connection
>>> async def on_connection(client: Client):
... # Here you can use the client, register callbacks on it or send it messages
... await client.writer.ping()
"""
import asyncio
import logging
import time
from .enums import DataType, State
from .reasons import Reasons, Reason
from .stream.reader import WebSocketReader
from .stream.writer import WebSocketWriter
logger = logging.getLogger(__name__)
class NoCallbackException(Exception):
pass
class UnexpectedFrameException(Exception):
def __init__(self, client, recv, expect):
super().__init__(f"Received unexpected {recv.name.lower()} frame from client {client.addr, client.port}, "
f"expected {expect.name.lower()}.")
self.recieved = recv
self.expected = expect
self.client = client
class ConnectionClosed(Exception):
def __init__(self):
super().__init__("Closing connection in middle of message.")
class Client:
"""
:ivar addr: IPv4 or IPv6 address of the client.
:type addr: str
    :ivar port: The port the client opened its socket on.
:type port: int
:ivar writer: The writer used for writing frames to the client.
:type writer: WebSocketWriter
"""
def __init__(self, state, addr, port, writer, loop):
self.last_message = time.time()
self.state = state
self.addr = addr
self.port = port
self.data_type = DataType.NONE
self.writer = WebSocketWriter(writer, loop)
self._reader = None
self.read_task = None
self.continuation = DataType.NONE
self.server_has_initiated_close = False
self._loop = loop
@self.message
async def on_message(reader):
raise NoCallbackException("No message callback defined.")
@self.ping
async def on_ping(payload, length):
await self.writer.pong(length, payload)
@self.pong
async def on_pong(payload, length):
pass
@self.closed
async def on_closed(code, reason):
pass
def message(self, fn):
"""Decorator for registering the on_message callback.
:param fn: The callback to register.
The callback should be async and take one parameter, a :class:`~websocket.stream.reader.WebSocketReader`
        This callback is called when the server receives a valid data frame;
        if an exception occurs after the first valid frame, e.g. if a text frame
contains invalid utf-8, or if it's an invalid fragmented message, then we
send the exception to the reader with :meth:`~websocket.stream.buffer.Buffer.set_exception`.
>>> @client.message
>>> async def on_message(reader: WebSocketReader):
... print("Got message " + await reader.get())
"""
self.on_message = fn
def ping(self, fn):
"""Decorator for registering the on_ping callback.
:param fn: The callback to register.
If you set this callback you will override the default behaviour of sending pongs back to the client when
receiving pings. If you want to keep this behaviour call :meth:`~websocket.stream.writer.WebSocketWriter.pong`.
The callback should be async and take two parameters, :class:`bytes` payload, and :class:`int` length.
This callback is called when we receive a valid ping from the client.
>>> @client.ping
>>> async def on_ping(payload: bytes, length: int):
... print("Received ping from client")
... await self.writer.pong(length, payload)
"""
self.on_ping = fn
def pong(self, fn):
"""Decorator for registering the on_pong callback.
:param fn: The callback to register.
The callback should be async and take two parameters, :class:`bytes` payload, and :class:`int` length
This callback is called when we receive a valid pong from the client.
>>> @client.pong
>>> async def on_pong(payload: bytes, length: int):
... print("Received pong from client")
"""
self.on_pong = fn
def closed(self, fn):
"""Decorator for registering the on_closed callback.
:param fn: The callback to register.
The callback should be async and take two parameters, :class:`bytes` code of length 2, and :class:`str` reason.
        This callback is called when the connection to this client is closing.
>>> @client.closed
>>> async def on_closed(code: bytes, reason: str):
... print("Connection with client is closing for " + reason)
"""
self.on_closed = fn
async def close_with_read(self, reader, code, reason):
close = asyncio.ensure_future(self.close(code, reason), loop=self._loop)
buffer = WebSocketReader(DataType.BINARY, self, self._loop)
length = await buffer.feed(reader)
buffer.done()
logger.debug("1")
data = await buffer.read(length)
logger.debug("2")
await close
return data
async def close(self, code: bytes, reason: str):
if not self.server_has_initiated_close:
asyncio.ensure_future(self.on_closed(code, reason), loop=self._loop)
self.server_has_initiated_close = True
await self.writer.close(code, reason)
        # TODO: Kill in 5 secs if the client doesn't respond
async def _read_message(self, reader, fin):
await self._reader.feed(reader)
if fin:
self.continuation = DataType.NONE
self._reader.done()
else:
self.continuation = self._reader.data_type
@staticmethod
def handle_data(kind):
async def handler(self, reader, fin):
if self.continuation != DataType.NONE:
self._reader.set_exception(UnexpectedFrameException(self, kind, DataType.CONTINUATION))
self._reader.done()
await self.close_with_read(reader, Reasons.PROTOCOL_ERROR.value, "expected continuation frame")
return
logger.debug(f"Received {kind.name.lower()} data frame from client {self.addr, self.port}.")
self.type = kind
self._reader = WebSocketReader(kind, self, self._loop)
self._loop.create_task(self.on_message(self._reader))
return await self._read_message(reader, fin)
return handler
async def handle_continuation(self, reader, fin):
if self.continuation == DataType.NONE:
logger.debug("Received unexpected continuation data frame from client "
f"{self.addr, self.port}, expected {self.continuation.name.lower()}.")
await self.close_with_read(reader, Reasons.PROTOCOL_ERROR.value,
f"expected {self.continuation.name.lower()} frame")
return
logger.debug(f"Received continuation frame from client {self.addr, self.port}.")
await self._read_message(reader, fin)
def ensure_clean_close(self):
if self.continuation != DataType.NONE:
self._reader.set_exception(ConnectionClosed())
self._reader.done()
@staticmethod
def handle_ping_or_pong(kind):
async def handler(self, reader, fin):
buffer = WebSocketReader(DataType.BINARY, self, self._loop)
feed = asyncio.ensure_future(buffer.feed_once(reader), loop=self._loop)
if not fin or self.server_has_initiated_close:
if not fin:
logger.warning(f"Received fragmented {kind.name.lower()} from client {self.addr, self.port}.")
self.ensure_clean_close()
await self.close(Reasons.PROTOCOL_ERROR.value, "fragmented control frame")
else:
logger.warning(f"Received {kind.name.lower()} from client {self.addr, self.port} after server "
"initiated close.")
self.ensure_clean_close()
await self.close(Reasons.POLICY_VIOLATION.value, "control frame after close")
await feed
return
length = await feed
if length > 125:
logger.warning(f"{kind.name.lower()} payload too long({length} bytes).")
self.ensure_clean_close()
await self.close(Reasons.PROTOCOL_ERROR.value, "control frame too long")
return
logger.debug(f"Received {kind.name.lower()} from client {self.addr, self.port}.")
data = await buffer.read(length)
if kind is DataType.PING:
self._loop.create_task(self.on_ping(data, length))
elif kind is DataType.PONG:
self._loop.create_task(self.on_pong(data, length))
buffer.done()
return handler
async def handle_close(self, reader, fin):
logger.debug(f"Received close from client {self.addr, self.port}.")
buffer = WebSocketReader(DataType.BINARY, self, self._loop)
length = await buffer.feed_once(reader)
reason = await buffer.read(length)
if not self.server_has_initiated_close:
if length > WebSocketWriter.MAX_LEN_7:
code, reason = Reasons.PROTOCOL_ERROR.value, "control frame too long"
else:
code, reason = Reason.from_bytes(reason, length)
if code == Reasons.NO_STATUS.value:
code = Reasons.NORMAL.value
self.ensure_clean_close()
await self.close(code, reason)
self.state = State.CLOSING
if self.read_task is not None:
self.read_task.cancel()
async def handle_undefined(self, reader, fin):
logger.debug(f"Received invalid opcode from client {self.addr, self.port}.")
await self.close_with_read(reader, Reasons.PROTOCOL_ERROR.value, "invalid opcode")
def tick(self):
self.last_message = time.time()
HANDLERS = {opcode: Client.handle_undefined for opcode in range(0, 1 << 4)}
HANDLERS.update({
DataType.CONTINUATION.value: Client.handle_continuation,
DataType.TEXT.value: Client.handle_data(DataType.TEXT),
DataType.BINARY.value: Client.handle_data(DataType.BINARY),
DataType.CLOSE.value: Client.handle_close,
DataType.PING.value: Client.handle_ping_or_pong(DataType.PING),
DataType.PONG.value: Client.handle_ping_or_pong(DataType.PONG),
})
| python |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_log_viewer.ui'
#
# Created by: PyQt5 UI code generator 5.7
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(746, 628)
self.horizontalLayout = QtWidgets.QHBoxLayout(Dialog)
self.horizontalLayout.setObjectName("horizontalLayout")
self.scrollArea = QtWidgets.QScrollArea(Dialog)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName("scrollArea")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 726, 608))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.verticalLayout = QtWidgets.QVBoxLayout(self.scrollAreaWidgetContents)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.textBrowser = QtWidgets.QTextBrowser(self.scrollAreaWidgetContents)
self.textBrowser.setObjectName("textBrowser")
self.verticalLayout.addWidget(self.textBrowser)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.pushButtonClose = QtWidgets.QPushButton(self.scrollAreaWidgetContents)
self.pushButtonClose.setObjectName("pushButtonClose")
self.horizontalLayout_2.addWidget(self.pushButtonClose)
self.pushButtonCopyText = QtWidgets.QPushButton(self.scrollAreaWidgetContents)
self.pushButtonCopyText.setObjectName("pushButtonCopyText")
self.horizontalLayout_2.addWidget(self.pushButtonCopyText)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem)
self.pushButtonClearLog = QtWidgets.QPushButton(self.scrollAreaWidgetContents)
self.pushButtonClearLog.setObjectName("pushButtonClearLog")
self.horizontalLayout_2.addWidget(self.pushButtonClearLog)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.horizontalLayout.addWidget(self.scrollArea)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.pushButtonClose.setText(_translate("Dialog", "close"))
self.pushButtonCopyText.setText(_translate("Dialog", "copy text"))
self.pushButtonClearLog.setText(_translate("Dialog", "clear log"))
| python |
import requests
import json
import re
ig_url = 'https://instagram.com'
ig_username = 'thephotoadventure'
query_url = f'{ig_url}/graphql/query'
all_user_posts = []
r = requests.get(f'{ig_url}/{ig_username}/?__a=1')
all_data = r.json()
user_data = all_data['graphql']['user']
user_posts = user_data['edge_owner_to_timeline_media']
end_cursor = user_posts['page_info']['end_cursor']
has_next = user_posts['page_info']['has_next_page']
user_id = user_data['id']
all_user_posts.extend(user_posts['edges'])
if has_next is True:
r = requests.get(f'{ig_url}/{ig_username}')
js_file_posts = re.search(r'/static/bundles/(metro|es6)/ProfilePageContainer.js/\w+.js', r.text)
js_file_comments = re.search(r'/static/bundles/(metro|es6)/Consumer.js/\w+.js', r.text)
r = requests.get(f'{ig_url}{js_file_posts.group()}')
query_hash_posts = re.search(
r'profilePosts.byUserId.get\(n\)\)\|\|void 0===\w\?void 0:\w.pagination},queryId:\"(?P<queryId>\w+)\"',
r.text)
r = requests.get(f'{ig_url}{js_file_comments.group()}')
query_hash_comments = re.search(
r'actionHandler:.*Object.defineProperty\(e,\'__esModule\',{value:!0}\);(const|var) \w=\"(?P<queryId>\w+)\"',
r.text)
while end_cursor is not None or has_next is True:
# Get posts and pagination for loading more
r = requests.get(query_url, params={'query_hash': query_hash_posts.group('queryId'),
'id': user_id,
'first': 100,
'after': end_cursor
}
)
user_data = r.json()['data']['user']
user_posts = user_data['edge_owner_to_timeline_media']
end_cursor = user_posts['page_info']['end_cursor']
has_next = user_posts['page_info']['has_next_page']
all_user_posts.extend(user_posts['edges'])
# print(json.dumps(r.json(), indent=4))
# break
# # Get newest post and pull details with comments
# newest_post = user_posts['edges'][0]
# if newest_post:
# r = requests.get(query_url, params={'query_hash': query_hash_comments.group('queryId'),
# 'shortcode': newest_post['node']['shortcode'],
# 'child_comment_count': 3,
# 'fetch_comment_count': 40,
# }
# )
# print(json.dumps(r.json(), indent=4))
all_data['graphql']['user']['edge_owner_to_timeline_media']['edges'] = all_user_posts
with open(f'user_profile_data_{ig_username}.json', 'w') as f:
json.dump(all_data, f)
| python |
#!/usr/bin/env python3
import os
import sys
if __name__ == '__main__':
section, foil, cap = None, None, 9999999
if len(sys.argv) == 3:
section, foil = sys.argv[2], sys.argv[1]
elif len(sys.argv) == 4:
section, foil, cap = sys.argv[2], sys.argv[1], int(sys.argv[3])
    else:  # len(sys.argv) not in (3, 4)
        print('Please enter valid arguments: <tex file> <section> [word cap].')
sys.exit()
with open(foil, 'r') as file:
wordCount = 0
active = False
for line in file.readlines():
line = line.strip()
if active:
real = not line.startswith('%') and line != '' and line != ' '
if line == '}' or line == '} \\label{abstract}':
active = False
elif real:
tempWords = []
for word in line.split(' '):
if not word.startswith('\\'):
tempWords += [word]
wordCount += len(tempWords)
#print(str(line) + ': ' + str(len(tempWords)))
if not active and line.startswith('\\' + str(section) + '{'):
active = True
print('Word Count for: ' + str(section) + ':' +
str(os.path.basename(foil)) + ' = ' + str(wordCount) + ' words')
if wordCount > cap:
print('Word Count Exceeded max length of :' + str(cap) + ' by ' +
str(wordCount - cap) + ' words')
sys.exit(23)
| python |
import sys
if len(sys.argv) != 3:
sys.exit("Wrong argument. getSeq.py <.fasta> <seqID>")
targetid = str(sys.argv[2])
# Flag
seq2print = False
with open(sys.argv[1], "r") as f:
for line in f:
if not seq2print:
if line.startswith(">"):
#print(line.lstrip(">"))
if line.rstrip().lstrip(">") == targetid:
print(line.rstrip())
seq2print = True
continue
else:
continue
else:
continue
        else: # seq2print == True
if not line.startswith(">"):
print(line.rstrip())
else:
break
| python |
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: type_Params.py
from types import *
import mcl.object.MclTime
PARAMS_QUERY_TYPE_ALL = 0
PARAMS_QUERY_TYPE_IP_ONLY = 1
PARAMS_QUERY_TYPE_TCP_ONLY = 2
PARAMS_QUERY_TYPE_UDP_ONLY = 3
PARAMS_QUERY_TYPE_PIPES_ONLY = 4
class Params:
def __init__(self):
self.__dict__['monitor'] = False
self.__dict__['delay'] = mcl.object.MclTime.MclTime()
self.__dict__['queryType'] = PARAMS_QUERY_TYPE_IP_ONLY
self.__dict__['maximum'] = 1000
def __getattr__(self, name):
if name == 'monitor':
return self.__dict__['monitor']
if name == 'delay':
return self.__dict__['delay']
if name == 'queryType':
return self.__dict__['queryType']
if name == 'maximum':
return self.__dict__['maximum']
raise AttributeError("Attribute '%s' not found" % name)
def __setattr__(self, name, value):
if name == 'monitor':
self.__dict__['monitor'] = value
elif name == 'delay':
self.__dict__['delay'] = value
elif name == 'queryType':
self.__dict__['queryType'] = value
elif name == 'maximum':
self.__dict__['maximum'] = value
else:
raise AttributeError("Attribute '%s' not found" % name)
def Marshal(self, mmsg):
from mcl.object.Message import MarshalMessage
submsg = MarshalMessage()
submsg.AddBool(MSG_KEY_PARAMS_MONITOR, self.__dict__['monitor'])
submsg.AddTime(MSG_KEY_PARAMS_DELAY, self.__dict__['delay'])
submsg.AddU8(MSG_KEY_PARAMS_QUERY_TYPE, self.__dict__['queryType'])
submsg.AddU32(MSG_KEY_PARAMS_MAXIMUM, self.__dict__['maximum'])
mmsg.AddMessage(MSG_KEY_PARAMS, submsg)
def Demarshal(self, dmsg, instance=-1):
import mcl.object.Message
msgData = dmsg.FindData(MSG_KEY_PARAMS, mcl.object.Message.MSG_TYPE_MSG, instance)
submsg = mcl.object.Message.DemarshalMessage(msgData)
try:
self.__dict__['monitor'] = submsg.FindBool(MSG_KEY_PARAMS_MONITOR)
except:
pass
try:
self.__dict__['delay'] = submsg.FindTime(MSG_KEY_PARAMS_DELAY)
except:
pass
try:
self.__dict__['queryType'] = submsg.FindU8(MSG_KEY_PARAMS_QUERY_TYPE)
except:
pass
try:
self.__dict__['maximum'] = submsg.FindU32(MSG_KEY_PARAMS_MAXIMUM)
except:
pass | python |
from typing import Any, Dict
from . import State
from app import app
from models import User
class AssetState(State[User]):
def __init__(self) -> None:
super().__init__()
self.pending_file_upload_cache: Dict[str, Any] = {}
def get_user(self, sid: str) -> User:
return self._sid_map[sid]
asset_state = AssetState()
app["state"]["asset"] = asset_state
| python |
from .dijkstras_algorithm import DijkstraNode, DijkstraEdge, DijkstraGraph
from .a_star import AStarNode, AStarEdge, AStarGraph
from .custom_dijkstras_algorithm import CDijkstraNode, CDijkstraEdge, CDijkstraGraph | python |
from .test_case import TestCase
from infi.unittest.parameters import iterate
class IsolatedPythonVersion(TestCase):
def test(self):
with self.temporary_directory_context():
self.projector("repository init a.b.c none short long")
self.projector("isolated-python python-version get")
self.projector("isolated-python python-version set v2.7.5.5 --commit-changes")
| python |
import smtplib
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
import xml
from xml.dom.minidom import parse, parseString
def send_email(to, server, subj, body, attachments):
"""Send an email with the given information.
Args:
to: a String, the email address to send the email to
server: a String, the mail server to send from
subj: a String, the subject line of the message
body: a String, the body of the message
        attachments: a list of paths to files, the attachments to include
"""
msg = MIMEMultipart()
msg['Subject'] = subj
# me == the sender's email address
# family = the list of all recipients' email addresses
msg['From'] = 'AutopsyTest'
msg['To'] = to
msg.preamble = 'This is a test'
container = MIMEText(body, 'plain')
msg.attach(container)
Build_email(msg, attachments)
s = smtplib.SMTP(server)
try:
print('Sending Email')
s.sendmail(msg['From'], msg['To'], msg.as_string())
except Exception as e:
print(str(e))
s.quit()
def Build_email(msg, attachments):
for file in attachments:
part = MIMEBase('application', "octet-stream")
atach = open(file, "rb")
attch = atach.read()
noml = file.split("\\")
nom = noml[len(noml)-1]
part.set_payload(attch)
encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="' + nom + '"')
msg.attach(part)
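
if __name__ == '__main__':
    # Hedged usage sketch (not from the original test harness): the recipient,
    # SMTP server and attachment path below are placeholders that only illustrate
    # the expected argument types of send_email().
    send_email(to='[email protected]',
               server='smtp.example.com',
               subj='Autopsy regression results',
               body='Results of the latest regression run are attached.',
               attachments=[r'C:\path\to\AutopsyErrors.txt'])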
| python |
#!/usr/bin/python
import time,serial,math,sys,numpy as np,matplotlib.pyplot as plt
print '*** Graf periode pulzarja - 11.05.2017 ***'
povpstolp=20 #number of samples averaged per column (integer)
perioda=7145.117 #pulsar period in number of samples (float)
odmik=1000 #offset of the start (integer) 0<odmik=<perioda
zacetek=8000 #zacetek<konec, start of the averaging periods (integer)
konec=15000 #end of the averaging periods (integer)
print 'Zacetek racunanja ',time.ctime() #record the start of the computation
k=0.0 #pointer into the array (float)
v=0 #sample pointer (integer)
p=0 #column pointer (integer)
m=0 #counter of averaged periods (integer)
z=0 #number of characters read from the source file (integer), informational only
dolzina=konec-zacetek #number of periods in the averaging window
sirina=int(perioda/povpstolp) #width of the plot as a number of columns
A=np.zeros([sirina],dtype=float) #create an empty array for the folded period
datoteka=str(sys.argv[1]) #read the program argument: program.py <variable>
f1=open(datoteka, 'r') #open the source file
string=f1.read(odmik) #read and discard <odmik> bytes
z=z+odmik
si=int(perioda) #read and discard <zacetek> averaged periods
i=zacetek
while i>0: #discard the integer part of each period
string=f1.read(si)
z=z+si
i=i-1
i=int(zacetek*(perioda-si)) #and discard the fractional remainder of the periods as well
string=f1.read(i)
z=z+i
print 'Izvor ',datoteka
print 'Stevilo povprecenj stolpcev ',povpstolp
print 'Perioda pulzarja ',perioda,' vzorcev'
print 'Odmik zacetka ',odmik,' vzorcev'
print 'Povprecenje ',zacetek,' ... ',konec,' period'
print 'Sirina grafa ',sirina,' tock'
while string!="": #zanko ponavljam, dokler ne pridem do praznega znaka
string=f1.read(1)
z=z+1
if string!="": #konec izvornega zapisa?
if p<sirina: #odstranim zadnji neuporaben stolpec?
A[p]=A[p]+float(ord(string)) #dodam vhodno vrednost v povprecje
v=v+1
if v>=povpstolp:
v=0
p=p+1
k=k+1
if k>=perioda: #ena cela perioda pulzarja?
v=0
p=0
k=k-perioda
print m,' period ',z/1024, ' kByte',chr(13),
m=m+1
if m>=dolzina: #konec povprecenja?
string=""
f1.close() #close the source file
A=(A-(np.sum(A)/float(sirina)))/float(dolzina) #normalize the result
print chr(10),'Konec racunanja ',time.ctime() #end of processing the file
spik=np.amax(A) #compute the pulse width
mspik=np.argmax(A)
meja=spik/2.0 #chosen threshold for the width
w=0.0
varna=sirina-1 #safe limit for the computation !!!
if mspik>1 and mspik<varna-1:
p=mspik #dodaj sirino pred max
while p>1 and A[p-1]>meja:
w=w+1.0
p=p-1
if p>0:
w=w+(A[p]-meja)/(A[p]-A[p-1])
p=mspik #dodaj sirino za max
while p<varna-1 and A[p+1]>meja:
w=w+1.0
p=p+1
if p<varna:
w=w+(A[p]-meja)/(A[p]-A[p+1])
w=w*float(povpstolp) #preracunaj v stevilo vzorcev
print 'Sirina impulza ',w,' vzorcev'
fig=plt.figure() #draw into an image file
plt.plot([0,sirina],[meja,meja],'y-') #draw the half-height of the spike
plt.plot(A,'b-') #draw the pulse
plt.title('Izvor: '+datoteka+'\nOdmik: '+str(odmik)+' vzorcev @ Perioda: '+str(perioda)+' vzorcev')
plt.xlabel('Povprecenje: '+str(povpstolp)+' vzorcev/stolpec Sirina impulza: '+str(w)+' vzorcev')
plt.ylabel('Povprecenje: '+str(zacetek)+'...'+str(konec)+' period')
fig.savefig(datoteka+'-pulz.png') #save the figure to a file
#end of the program
| python |
from django.contrib import admin
from .models import *
from django import forms
from ckeditor_uploader.widgets import CKEditorUploadingWidget
class ServiceAdmin(admin.ModelAdmin):
list_display = ['title','status']
class CategoryAdmin(admin.ModelAdmin):
list_display = ['title','parent','slug']
class BrandAdmin(admin.ModelAdmin):
list_display = ['name','status']
class GalleryAdmin(admin.ModelAdmin):
list_display = ['name','category','gallerytype','status']
class ContactAdmin(admin.ModelAdmin):
list_display = ['name','phone','email','subject','status']
class CmsAdmin(admin.ModelAdmin):
list_display = ['title','slug','type','short_desc','status']
admin.site.register(Banner)
admin.site.register(Service, ServiceAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Team)
admin.site.register(Gallery, GalleryAdmin)
admin.site.register(Contact, ContactAdmin)
admin.site.register(ContactUs)
admin.site.register(FAQ)
admin.site.register(PrivacyAndPolicy)
admin.site.register(Brand, BrandAdmin)
admin.site.register(Cms, CmsAdmin) | python |
"""
FIFO
Queue = []
Queue = [1,2,3,4] push
[2,3,4] pop
[3,4] pop
[4] pop
[] pop
empty queue
"""
class Queue(object):
def __init__(self):
self.queue = []
self.length = 0
def enque(self, data):
self.queue.append(data)
self.length += 1
def deque(self):
if self.length < 1:
return None
data = self.queue[0]
self.queue = self.queue[1:self.length + 1]
self.length -= 1
return data
def main():
new_queue = Queue()
new_queue.enque(1)
new_queue.enque(2)
new_queue.enque(3)
new_queue.enque(4)
print(new_queue.deque()) # 1
print(new_queue.deque()) # 2
print(new_queue.deque()) # 3
print(new_queue.deque()) # 4
print(new_queue.deque()) # None
print(new_queue.deque()) # None
if __name__ == '__main__':
main() | python |
def test_canary():
assert True
| python |
import numpy as np
import random
import sys
from scipy.stats import f
from scipy.stats import norm
param= int(sys.argv[1])
np.random.seed(param)
n=500 # measurements taken
p=100 # measured variables
mu=0.0
sigma=1.0
X=np.random.normal(mu,sigma,size=(n,p))
Y=np.random.normal(mu,sigma,size=(n,1))
XT=X.T
YT=Y.T
Inv=np.linalg.inv(np.matmul(XT,X))
beta1=np.matmul(Inv,XT)
beta=np.matmul(beta1,Y)
Hhat=np.matmul(X,beta1)
Yideal=np.matmul(X,beta)
SST1=np.matmul(np.identity(n)-(1.0/n)*np.ones((n,n)),Y)
SST=np.matmul(YT,SST1)
SSR1=np.matmul(Hhat-(1.0/n)*np.ones((n,n)),Y)
SSR=np.matmul(YT,SSR1)
SSE1=np.matmul(np.identity(n)-Hhat,Y)
SSE=np.matmul(YT,SSE1)
Rsq=SSR[0,0]/SST[0,0]
sigma2=SSE[0,0]/(n-1.)
sigmamatrix=sigma2*Inv
sigma_i=np.zeros(p)
for i in range(p):
sigma_i[i]=sigmamatrix[i,i]
sigma_i=np.sqrt(sigma_i)
MSE=SSE[0,0]/(n-p-1)
# Compute the MSR
MSR=SSR[0,0]/p
# Compute the MST
MST=SST[0,0]/(n-1)
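# The F statistic computed next is the usual overall-regression ANOVA ratio,
#   F = MSR/MSE = (SSR/p) / (SSE/(n-p-1)),
# rewritten in terms of R^2 = SSR/SST (so 1-R^2 = SSE/SST):
#   F = (R^2 * (n-p-1)) / ((1-R^2) * p)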
F=(Rsq*(n-p-1))/((1-Rsq)*p)
Rango=0.9 # define a range, i.e. what percentage of the curve is wanted
Ftest=f.ppf(Rango,p,n-(p+1))
P_i=np.zeros(p)
if F > Ftest:
tzeros=beta[:,0]/sigma_i
P_value=2*(1-norm.cdf(tzeros)) # se integran las colas
for i in range(p):
if P_value[i]<0.5:
P_i[i]=1
else:
P_i[i]=0
else:
quit()
p_prime=np.sum(P_i)
X_new=np.zeros((n,int(p_prime)))
aux=0
for i in range(p):
if P_i[i]==1:
X_new[:,aux]=X[:,i]
aux+=1
p=X_new.shape[1]
X=X_new
XT=X.T
YT=Y.T
Inv=np.linalg.inv(np.matmul(XT,X))
beta1=np.matmul(Inv,XT)
beta=np.matmul(beta1,Y)
Hhat=np.matmul(X,beta1)
Yideal=np.matmul(X,beta)
SST1=np.matmul(np.identity(n)-(1.0/n)*np.ones((n,n)),Y)
SST=np.matmul(YT,SST1)
SSR1=np.matmul(Hhat-(1.0/n)*np.ones((n,n)),Y)
SSR=np.matmul(YT,SSR1)
SSE1=np.matmul(np.identity(n)-Hhat,Y)
SSE=np.matmul(YT,SSE1)
Rnuevo= SSR[0,0]/SST[0,0]
Fnuevo= (Rnuevo*(n-p-1))/((1-Rnuevo)*p)
print(str(Rsq), str(F), str(Rnuevo), str(Fnuevo))
| python |
# Permission mixins to override default django-guardian behaviour
from guardian.mixins import PermissionRequiredMixin
class SetChildPermissionObjectMixin:
"""
Sets child object as the focus of the permission check in the view.
"""
def get_permission_object(self):
return self.child
class PermissionRequired403Mixin(PermissionRequiredMixin):
"""
Basic PermissionRequired mixin to use in views.
Forces 403 http error on failed permission check.
"""
return_403 = True
class PermissionRequired403GlobalMixin(PermissionRequiredMixin):
"""
Basic Global PermissionRequired mixin to use in views.
Forces 403 http error on failed permission check. Disables permission object (only global check is made for User
instance)
"""
return_403 = True
accept_global_perms = True
permission_object = None
class PermissionRequiredSetChild403Mixin(SetChildPermissionObjectMixin, PermissionRequired403Mixin):
"""
    PermissionRequired mixin to be used in views when we have to provide the child object as the one for which we want
    to check the permission (e.g. AddSmiley / EditChild, where the view object is a Smiley / User but the check has to
    be made for the Child).
"""
pass
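# Hedged usage sketch (not part of the original module); the view, model and
# permission codename below are hypothetical placeholders:
#
#   from django.views.generic import DetailView
#
#   class ChildDetailView(PermissionRequired403Mixin, DetailView):
#       model = Child
#       permission_required = 'children.view_child'
#
# PermissionRequiredSetChild403Mixin is meant for views that expose `self.child`,
# so the object-level check runs against the related Child rather than the
# view's own object.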
| python |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Get score by given metric."""
from .ppl_score import ngram_ppl
from .rouge_score import rouge
def get_ppl_score(result):
"""
Calculate Perplexity(PPL) score.
Args:
List[Dict], prediction, each example has 4 keys, "source",
"target", "log_prob" and "length".
Returns:
Float, ppl score.
"""
log_probs = []
total_length = 0
for sample in result:
log_prob = sample['log_prob']
length = sample['length']
log_probs.extend(log_prob)
total_length += length
print(f" | log_prob:{log_prob}")
print(f" | length:{length}")
ppl = ngram_ppl(log_probs, total_length, log_softmax=True)
print(f" | final PPL={ppl}.")
return ppl
def get_rouge_score(result, vocab):
"""
Calculate ROUGE score.
Args:
List[Dict], prediction, each example has 4 keys, "source",
"target", "prediction" and "prediction_prob".
Dictionary, dict instance.
return:
Str, rouge score.
"""
predictions = []
targets = []
for sample in result:
predictions.append(' '.join([vocab[t] for t in sample['prediction']]))
targets.append(' '.join([vocab[t] for t in sample['target']]))
print(f" | source: {' '.join([vocab[t] for t in sample['source']])}")
print(f" | target: {targets[-1]}")
return rouge(predictions, targets)
def get_score(result, vocab=None, metric='rouge'):
"""
Get eval score.
Args:
        result (List[Dict]): predictions.
        vocab (Dictionary): dict instance mapping token ids to tokens.
        metric (Str): metric function to use, default is rouge.
Return:
Str, Score.
"""
score = None
if metric == 'rouge':
score = get_rouge_score(result, vocab)
elif metric == 'ppl':
score = get_ppl_score(result)
else:
print(f" |metric not in (rouge, ppl)")
return score
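# Hedged example of the expected input structure (illustrative values only; this
# module uses relative imports, so it is not meant to be run directly):
#
#   vocab = {0: 'hello', 1: 'world', 2: '.'}            # token id -> token
#   result = [{'source': [0, 1],
#              'target': [0, 1, 2],
#              'prediction': [0, 1, 2],
#              'prediction_prob': [0.9, 0.8, 0.7]}]
#   rouge_str = get_score(result, vocab=vocab, metric='rouge')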
| python |
import pandas as pd
import numpy as np
from ttk.corpus.CategorizedDatedCorpusReader import CategorizedDatedCorpusReader
class CategorizedDatedCorpusReporter(object):
""" Reporting utility for CategorizedDatedCorpusReporter corpora. """
def __init__(self):
self._output_formats = ['list', 'str', 'dataframe']
def summary(self, corpus, categories=None, dates=None, output='str', verbose=False):
if not self._is_valid_output(output, verbose=verbose):
return None
# get summary data
num_categories = len(corpus.categories(categories=categories, dates=dates))
num_dates = len(corpus.dates(categories=categories, dates=dates))
num_uniq_words = len(set(corpus.words(categories=categories, dates=dates)))
num_sents = len(corpus.sents(categories=categories, dates=dates))
num_words = len(corpus.words(categories=categories, dates=dates))
num_files = len(corpus.fileids(categories=categories, dates=dates))
# store in dict for consistency
summary = {
'categories':num_categories,
'dates':num_dates,
'sentences':num_sents,
'words':num_words,
'uniq_words':num_uniq_words,
'files':num_files,
}
# convert to output
if output == 'str' or output == 'list':
summary = self._get_summary_formatted_list(summary)
if output == 'str':
summary = '\n'.join(summary)
elif output == 'dataframe':
summary = pd.DataFrame([summary])
else:
print ('Output mode %s is not supported by %s, use one of the following:\n%s'
% (output, 'summary', self._output_formats))
return None
return summary
def date_summary(self, corpus, categories=None, dates=None, output='str', display_zeros=True, verbose=False):
if not self._is_valid_output(output, verbose=verbose):
return None
# generate a list of summary dictionaries
summaries = (s for s in self._iter_date_summaries(
corpus, dates=dates, categories=categories, display_zeros=display_zeros, verbose=verbose))
# convert to output type
if output == 'str':
summaries = self._get_formatted_date_summary_string(summaries)
elif output == 'dataframe':
summaries = pd.DataFrame(summaries)
elif output == 'list':
summaries = list(summaries)
else:
print ('Output mode %s is not supported by %s, use one of the following:\n%s'
% (output, 'date_summary', self._output_formats))
return None
return summaries
def category_summary(self, corpus, categories=None, dates=None, output='str', display_zeros=True, verbose=False):
if not self._is_valid_output(output, verbose=verbose):
return None
# generate category summaries
summaries = (s for s in self._iter_category_summaries(corpus,
categories=categories,
dates=dates,
display_zeros=display_zeros,
verbose=verbose))
# convert to output type
if output == 'str':
summaries = self._get_formatted_category_summary_string(summaries)
elif output == 'dataframe':
summaries = pd.DataFrame(summaries)
elif output == 'list':
summaries = list(summaries)
else:
print ('Output mode %s is not supported by %s, use one of the following:\n%s'
% (output, 'category_summary', self._output_formats))
return None
return summaries
def sample(self, corpus, categories=None, dates=None):
pass
def to_data_frame(self, corpus, categories=None, dates=None, content_scope='sents', verbose=False):
return corpus.to_data_frame(categories=categories, dates=dates, content_scope=content_scope, verbose=verbose)
"""
Iterators
"""
def _iter_date_summaries(self, corpus, dates=None, categories=None, display_zeros=True, verbose=False):
# don't filter categories to display dates with 0 records
if display_zeros:
cat_filter = None
else:
cat_filter = categories
for date in corpus.dates(dates=dates, categories=cat_filter):
# get date summary data
words = corpus.words(categories=categories, dates=[date])
num_words = len(words)
num_uniq_words = len(set(words))
num_categories = len(corpus.categories(categories=categories, dates=[date]))
num_sents = len(corpus.sents(categories=categories, dates=[date]))
num_files = len(corpus.fileids(categories=categories, dates=[date]))
# yield dictionary of summary data
summary = {'date':date,
'categories':num_categories,
'sentences':num_sents,
'words':num_words,
'uniq_words':num_uniq_words,
'files':num_files,
}
yield summary
def _iter_category_summaries(self, corpus, categories=None, dates=None, display_zeros=True, verbose=False):
# don't filter dates to display categories with 0 records
if display_zeros:
date_filter = None
else:
date_filter = dates
for cat in corpus.categories(categories=categories, dates=date_filter):
# get category summary data
words = corpus.words(categories=[cat], dates=dates)
num_words = len(words)
num_uniq_words = len(set(words))
num_date = len(corpus.dates(categories=[cat], dates=dates))
num_sents = len(corpus.sents(categories=[cat], dates=dates))
num_files = len(corpus.fileids(categories=[cat], dates=dates))
# yield dictionary of summary data
summary = {'category':cat,
'dates':num_date,
'sentences':num_sents,
'words':num_words,
'uniq_words':num_uniq_words,
'files':num_files,
}
yield summary
"""
Formatting
"""
def _get_summary_formatted_list(self, summary):
formatted = []
formatted.append('Summary for %i categories and %i dates'
% (summary['categories'], summary['dates']))
formatted.append('{:8} sentences'.format(summary['sentences']))
formatted.append('{:8} total words'.format(summary['words']))
formatted.append('{:8} unique words'.format(summary['uniq_words']))
formatted.append('{:8} files'.format(summary['files']))
return formatted
def _get_formatted_date_summary_string(self, summaries):
formatted = []
for s in summaries:
date_summary = str(
'{}: {:2} categories {:4} sentences {:5} words {:5} unique words {:3} files'
.format(s['date'], s['categories'], s['sentences'], s['words'], s['uniq_words'], s['files']))
formatted.append(date_summary)
summaries = '\n'.join(formatted)
return summaries
def _get_formatted_category_summary_string(self, summaries):
formatted = []
for s in summaries:
category_summary = str(
"{:20} {:3} dates {:6} sentences {:7} words {:6} unique words {:3} files"
.format(s['category'], s['dates'], s['sentences'], s['words'], s['uniq_words'], s['files']))
formatted.append(category_summary)
return '\n'.join(formatted)
"""
Private helpers
"""
def _is_valid_output(self, output, verbose=False):
if output in self._output_formats:
return True
else:
print ('Output mode %s is not supported, use one of the following:\n%s'
% (output, self._output_formats))
return False | python |
import os
import math
import sys
import datetime
import re
import numpy as np
import traceback
import pprint
import json
from rafiki.model import BaseModel, InvalidModelParamsException, test_model_class
from rafiki.constants import TaskType
# Min numeric value
MIN_VALUE = -9999999999
class BigramHmm(BaseModel):
'''
Implements Bigram Hidden Markov Model (HMM) for POS tagging
'''
def get_knob_config(self):
return {
'knobs': {}
}
def init(self, knobs):
pass
def train(self, dataset_uri):
dataset = self.utils.load_dataset_of_corpus(dataset_uri)
(sents_tokens, sents_tags) = zip(*[zip(*sent) for sent in dataset])
self._num_tags = dataset.tag_num_classes[0]
(self._trans_probs, self._emiss_probs) = self._compute_probs(self._num_tags, sents_tokens, sents_tags)
self.utils.log('No. of tags: {}'.format(self._num_tags))
def evaluate(self, dataset_uri):
dataset = self.utils.load_dataset_of_corpus(dataset_uri)
(sents_tokens, sents_tags) = zip(*[zip(*sent) for sent in dataset])
(sents_pred_tags) = self._tag_sents(self._num_tags, sents_tokens, self._trans_probs, self._emiss_probs)
acc = self._compute_accuracy(sents_tags, sents_pred_tags)
return acc
def predict(self, queries):
sents_tokens = queries
(sents_tags) = self._tag_sents(self._num_tags, sents_tokens, self._trans_probs, self._emiss_probs)
return sents_tags
def destroy(self):
pass
def dump_parameters(self):
params = {}
params['emiss_probs'] = self._emiss_probs
params['trans_probs'] = self._trans_probs
params['num_tags'] = self._num_tags
return params
def load_parameters(self, params):
self._emiss_probs = params['emiss_probs']
self._trans_probs = params['trans_probs']
self._num_tags = params['num_tags']
def _compute_accuracy(self, sents_tags, sents_pred_tags):
total = 0
correct = 0
for (tags, pred_tags) in zip(sents_tags, sents_pred_tags):
for (tag, pred_tag) in zip(tags, pred_tags):
total += 1
if tag == pred_tag: correct += 1
return correct / total
def _compute_probs(self, num_tags, sents_tokens, sents_tags):
        # Total number of HMM states: the tags plus START and END
T = num_tags + 2 # Last 2 for START & END tags
START = num_tags # <s>
END = num_tags + 1 # </s>
# Unigram (tag i) counts
uni_counts = [0 for i in range(T)]
# Bigram (tag i, tag j) counts
bi_counts = [[0 for j in range(T)] for i in range(T)]
# Counts for (tag i, word w) as [{ w -> count }]
word_counts = [{} for i in range(T)]
# For each sentence
for (tokens, tags) in zip(sents_tokens, sents_tags):
uni_counts[START] += 1
# Run through sentence and update counts
prev_tag = None
for (word, tag) in zip(tokens, tags):
if prev_tag is None:
bi_counts[START][tag] += 1
else:
bi_counts[prev_tag][tag] += 1
uni_counts[tag] += 1
word_counts[tag][word] = word_counts[tag].get(word, 0) + 1
prev_tag = tag
uni_counts[END] += 1
# Account for last bigram with </s>
if len(tokens) > 0:
last_tag = tags[-1]
bi_counts[last_tag][END] += 1
# Transition function (tag i, tag j) -> <log prob of transition from state i to j>
trans_probs = [[0 for j in range(T)] for i in range(T)]
for i in range(T):
for j in range(T):
if bi_counts[i][j] == 0:
trans_probs[i][j] = MIN_VALUE
else:
trans_probs[i][j] = math.log(bi_counts[i][j] / uni_counts[i])
# Emission function as (tag i, word w) -> <log prob of emitting word w at state i>
emiss_probs = [{} for i in range(T)]
for i in range(T):
for w in word_counts[i]:
emiss_probs[i][w] = math.log(word_counts[i][w] / uni_counts[i])
return (trans_probs, emiss_probs)
def _tag_sents(self, num_tags, sents_tokens, trans_probs, emiss_probs):
sents_tags = []
T = num_tags + 2 # Last 2 for START & END tags
START = num_tags # <s>
END = num_tags + 1 # </s>
for tokens in sents_tokens:
if len(tokens) == 0:
continue
# Maximum log probabilities for sentence up to word w, where the last word's tag is i
log_probs = [[None for i in range(T)] for w in range(len(tokens))]
# Backpointers to previous best tags for log probabilities
backpointers = [[None for i in log_probs[0]] for w in log_probs]
# Process 1st word that is conditioned on <s>
for i in range(T):
trans = trans_probs[START][i]
emiss = emiss_probs[i].get(tokens[0], MIN_VALUE)
log_probs[0][i] = trans + emiss
# For each word w after the 1st word
for w in range(1, len(tokens)):
# For each tag i
for i in range(T):
# For each prev tag j
for j in range(T):
# Compute probability for (tag j, tag i) for sentence up to word w
trans = trans_probs[j][i]
emiss = emiss_probs[i].get(tokens[w], MIN_VALUE)
prob = log_probs[w - 1][j] + trans + emiss
if log_probs[w][i] is None or prob > log_probs[w][i]:
log_probs[w][i] = prob
backpointers[w][i] = j
# Compare probabilities with </s> across all tags of last word
backpointer = None
best_prob = None
for i in range(T):
trans = trans_probs[i][END]
prob = log_probs[-1][i] + trans
if best_prob is None or prob > best_prob:
best_prob = prob
backpointer = i
# Traverse backpointers to get most probable tags
cur = backpointer
w = len(tokens) - 1
sent_tags = []
while cur is not None:
sent_tags.append(cur)
cur = backpointers[w][cur]
w -= 1
sent_tags.reverse()
sents_tags.append(sent_tags)
return sents_tags
if __name__ == '__main__':
test_model_class(
model_file_path=__file__,
model_class='BigramHmm',
task=TaskType.POS_TAGGING,
dependencies={},
train_dataset_uri='data/ptb_for_pos_tagging_train.zip',
test_dataset_uri='data/ptb_for_pos_tagging_test.zip',
queries=[
['Ms.', 'Haag', 'plays', 'Elianti', '18', '.'],
['The', 'luxury', 'auto', 'maker', 'last', 'year', 'sold', '1,214', 'cars', 'in', 'the', 'U.S.']
]
)
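# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original model). `_tag_sents` only needs
# the probability tables, not any rafiki state, so the Viterbi decoder can be
# exercised on a toy 2-tag HMM. The tags, words and probabilities below are
# invented purely for illustration:
#
#   num_tags = 2                          # tags 0 and 1; START=2, END=3
#   T = num_tags + 2
#   trans = [[MIN_VALUE] * T for _ in range(T)]
#   trans[2][0] = 0.0                     # log(1): <s>   -> tag 0
#   trans[0][1] = 0.0                     # log(1): tag 0 -> tag 1
#   trans[1][3] = 0.0                     # log(1): tag 1 -> </s>
#   emiss = [{'the': 0.0}, {'dog': 0.0}, {}, {}]
#   BigramHmm._tag_sents(None, num_tags, [['the', 'dog']], trans, emiss)
#   # expected result: [[0, 1]]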
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 26 09:03:17 2022
@author: apauron
"""
import os
import get_files_cluster
import pandas as pd
from numpy import genfromtxt
### Get the parent folder of the working directory. Change it if you modify the name of the folders
path_parent = os.path.dirname(os.getcwd())
path_SB1 = os.path.join(path_parent,"Results_SB1_intra") #location of SB1 intrachromosomal results to convert
folder_results = "Results_Intra"
path_SB3 = os.path.join(path_parent,folder_results) #location of SB3 intrachromosomal results to convert
list_chr = os.listdir(os.path.join(path_parent,folder_results,"HUVEC","25kb_resolution_intrachromosomal")) ## All the chromosomes
###################################################Convert SB1 results to SB3 results##########################################
def SB1toSB3(path):
"""
A pipeline to convert SB1 generated compartments files into SB3 format.
Keyword arguments :
path -- the path containing the folder in which there are the files containing SB1 results
Returns :
all the converted SB1 results in SB3 format in the "SB1_converted_SB3" folder
"""
filestoconvert = get_files_cluster.getfiles(path,"") #get all the files in the path
for file in filestoconvert :
cell_type = file.split("/")[-1].split("_")[0]
for resolution in ["25kb","100kb"] :
if resolution in file :
df_file = pd.read_csv(file,sep = ' ',header = None) #get the SB1 file
df_file["chrname"] = df_file[0] + df_file[1].astype(str) #transform chr x to chrx
df_file["comp"] = df_file[4] #get the comp number
df_file = df_file[["chrname","comp"]] #because SB3 type file is only chrname and comp
chr_values = pd.unique(df_file.chrname) #get the chr values
grouped = df_file.groupby(df_file.chrname) #to split according to chr name
for chr in chr_values :
split_df = grouped.get_group(chr)
split_df.comp = split_df.comp.replace([-1.0,0.0],[0.0,-1.0]) ## Change the format of filtered and B compartment bins
if not os.path.exists(os.path.join(path_parent,"SB1_converted_SB3",cell_type,resolution)): #Create folder if not exists
os.makedirs(os.path.join(path_parent,"SB1_converted_SB3",cell_type,resolution))
filename = os.path.join(path_parent,"SB1_converted_SB3",cell_type,resolution,chr + "_" + resolution + "_comp.txt")
split_df.comp.to_csv(filename,header = False, index = False) #create the files corresponding to our metric
###################################################Convert SB3 results to SB1 results##########################################
def SB3toSB1(path):
"""
A pipeline to convert SB3 generated compartments files into SB1 format.
Keyword arguments :
path -- the path containing the folder in which there are the files containing SB1 results
Returns :
all the converted SB3 results in SB1 format in the "SB3_converted_SB1" folder
"""
files_results = get_files_cluster.getfiles(path,"comp") #get files inside the path given
for resolution in ["25kb","100kb"] : ## Because those are intrachromosomal results
for cell_type in os.listdir(os.path.join(path_parent,folder_results)): ## adapt if not all cell types are present
if os.path.isdir(os.path.join(path_parent,folder_results,cell_type)):
list_df = []
for chr in list_chr : ## List all the chromosomes
for file_results in files_results :
# find the good corresponding file to chr,cell_type and results
if chr in file_results and cell_type in file_results and resolution in file_results :
file_df = pd.DataFrame()
# Transformation into a SB1 type file : chr x start end comp
lresults = genfromtxt(file_results, delimiter='\n')
file_df["comp"] = lresults
file_df["chromosome"] = ["chr" for i in range(len(lresults))]
file_df["chrnum"] = [chr.replace("chr","") for i in range(len(lresults))]
#According to resolution, create the start and end bins
if resolution == "100kb" :
file_df["start"] = [100000.0*x for x in file_df.index.tolist()]
else :
file_df["start"] = [25000.0*x for x in file_df.index.tolist()]
if resolution == "100kb" :
file_df["end"] = [100000.0*(x+1) for x in file_df.index.tolist()]
else :
file_df["end"] = [25000.0*(x+1) for x in file_df.index.tolist()]
#Append to a list the dataframe corresponding to the chromosome
file_df_copy = file_df.copy()
file_df_copy = file_df_copy[["chromosome","chrnum","start","end","comp"]]
file_df_copy.comp[file_df.comp == 0.0] = -1.0
file_df_copy.comp[file_df.comp == -1.0] = 0.0
list_df.append(file_df_copy)
#Concatenate all the dataframes with chromosomes of the same cell type
res_df = pd.concat(list_df)
res_df = res_df.sort_values(by = ["chrnum","start"])
filename = os.path.join(path_parent,"SB3_converted_SB1",cell_type + "_" + resolution + "_COMPARTMENT" )
res_df.to_csv(filename,header = False, index = False, sep = " ")
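# Hedged usage sketch (not part of the original script): each converter takes
# the folder holding the results to convert, e.g.
#   SB1toSB3(path_SB1)   # SB1 results -> "SB1_converted_SB3" folder
#   SB3toSB1(path_SB3)   # SB3 results -> "SB3_converted_SB1" folder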
# Exercise 3.3: Prime numbers
## Question 1
def divise(n : int, p : int) -> bool:
"""Précondition : n > 0 et p >= 0
Renvoie True si et seulement si n divise p.
"""
return p % n == 0
# Test cases
assert divise(1, 4) == True
assert divise(2, 4) == True
assert divise(3, 4) == False
assert divise(4, 4) == True
assert divise(4, 2) == False
assert divise(17, 123) == False
assert divise(17, 357) == True
assert divise(21, 357) == True
## Question 2
## Answer
### Without early exit:
def est_premier(n : int) -> bool:
"""Précondition: n >= 0
renvoie True si et seulement si n est premier.
"""
if n < 2:
return False
else:
        # no divisor found so far?
b : bool = True
        # next candidate divisor
i : int = 2
while b and (i < n):
if divise(i, n):
b = False
else:
i = i + 1
return b
# Test cases
assert est_premier(0) == False
assert est_premier(1) == False
assert est_premier(2) == True
assert est_premier(17) == True
assert est_premier(357) == False
### With early exit:
def est_premier2(n : int) -> bool:
""" ... cf. ci-dessus ...
"""
if n < 2:
return False
else:
        # next candidate divisor
i : int = 2
while i < n:
if divise(i, n):
return False
else:
i = i + 1
return True
# Test cases
assert est_premier2(0) == False
assert est_premier2(1) == False
assert est_premier2(2) == True
assert est_premier2(17) == True
assert est_premier2(357) == False
import os
import re
import subprocess
import time
import urllib
import glanceclient
import keystoneauth1
import keystoneauth1.identity.v2 as keystoneauth1_v2
import keystoneauth1.session as keystoneauth1_session
import keystoneclient.v2_0.client as keystoneclient_v2
import keystoneclient.v3.client as keystoneclient_v3
import keystoneclient.auth.identity.v3 as keystone_id_v3
import keystoneclient.session as session
import neutronclient.v2_0.client as neutronclient
import novaclient.client as novaclient_client
import charms_openstack.charm as charm
import charms_openstack.adapters as adapters
import charmhelpers.core.hookenv as hookenv
import charmhelpers.core.host as host
import charmhelpers.fetch as fetch
def install():
"""Use the singleton from the TempestCharm to install the packages on the
unit
"""
TempestCharm.singleton.install()
def render_configs(interfaces_list):
"""Using a list of interfaces, render the configs and, if they have
changes, restart the services on the unit.
"""
if not os.path.isdir(TempestCharm.TEMPEST_LOGDIR):
os.makedirs(TempestCharm.TEMPEST_LOGDIR)
TempestCharm.singleton.render_with_interfaces(interfaces_list)
TempestCharm.singleton.assess_status()
def run_test(tox_target):
"""Use the singleton from the TempestCharm to install the packages on the
unit
"""
TempestCharm.singleton.run_test(tox_target)
def assess_status():
"""Use the singleton from the TempestCharm to install the packages on the
unit
"""
TempestCharm.singleton.assess_status()
class TempestAdminAdapter(adapters.OpenStackRelationAdapter):
"""Inspect relations and provide properties that can be used when
rendering templates"""
interface_type = "identity-admin"
def __init__(self, relation):
"""Initialise a keystone client and collect user defined config"""
self.kc = None
self.keystone_session = None
self.api_version = '2'
super(TempestAdminAdapter, self).__init__(relation)
self.init_keystone_client()
self.uconfig = hookenv.config()
@property
def keystone_info(self):
"""Collection keystone information from keystone relation"""
ks_info = self.relation.credentials()
ks_info['default_credentials_domain_name'] = 'default'
if ks_info.get('api_version'):
ks_info['api_version'] = ks_info.get('api_version')
else:
ks_info['api_version'] = self.api_version
if not ks_info.get('service_user_domain_name'):
ks_info['service_user_domain_name'] = 'admin_domain'
return ks_info
@property
def ks_client(self):
if not self.kc:
self.init_keystone_client()
return self.kc
def keystone_auth_url(self, api_version=None):
if not api_version:
api_version = self.keystone_info.get('api_version', '2')
ep_suffix = {
'2': 'v2.0',
'3': 'v3'}[api_version]
return '{}://{}:{}/{}'.format(
'http',
self.keystone_info['service_hostname'],
self.keystone_info['service_port'],
ep_suffix,
)
def resolve_endpoint(self, service_type, interface):
if self.api_version == '2':
ep = self.ks_client.service_catalog.url_for(
service_type=service_type,
endpoint_type='{}URL'.format(interface)
)
else:
svc_id = self.ks_client.services.find(type=service_type).id
ep = self.ks_client.endpoints.find(
service_id=svc_id,
interface=interface).url
return ep
def set_keystone_v2_client(self):
self.keystone_session = None
self.kc = keystoneclient_v2.Client(**self.admin_creds_v2)
def set_keystone_v3_client(self):
auth = keystone_id_v3.Password(**self.admin_creds_v3)
self.keystone_session = session.Session(auth=auth)
self.kc = keystoneclient_v3.Client(session=self.keystone_session)
def init_keystone_client(self):
"""Initialise keystone client"""
if self.kc:
return
if self.keystone_info.get('api_version', '2') > '2':
self.set_keystone_v3_client()
self.api_version = '3'
else:
# XXX Temporarily catching the Unauthorized exception to deal with
# the case (pre-17.02) where the keystone charm maybe in v3 mode
# without telling charms via the identity-admin relation
try:
self.set_keystone_v2_client()
self.api_version = '2'
except keystoneauth1.exceptions.http.Unauthorized:
self.set_keystone_v3_client()
self.api_version = '3'
self.kc.services.list()
def admin_creds_base(self, api_version):
return {
'username': self.keystone_info['service_username'],
'password': self.keystone_info['service_password'],
'auth_url': self.keystone_auth_url(api_version=api_version)}
@property
def admin_creds_v2(self):
creds = self.admin_creds_base(api_version='2')
creds['tenant_name'] = self.keystone_info['service_tenant_name']
creds['region_name'] = self.keystone_info['service_region']
return creds
@property
def admin_creds_v3(self):
creds = self.admin_creds_base(api_version='3')
creds['project_name'] = self.keystone_info.get(
'service_project_name',
'admin')
creds['user_domain_name'] = self.keystone_info.get(
'service_user_domain_name',
'admin_domain')
creds['project_domain_name'] = self.keystone_info.get(
'service_project_domain_name',
'Default')
return creds
@property
def ec2_creds(self):
"""Generate EC2 style tokens or return existing EC2 tokens
        @returns {'access_token': token1, 'secret_token': token2}
"""
_ec2creds = {}
if self.api_version == '2':
current_creds = self.ks_client.ec2.list(self.ks_client.user_id)
if current_creds:
_ec2creds = current_creds[0]
else:
creds = self.ks_client.ec2.create(
self.ks_client.user_id,
self.ks_client.tenant_id)
_ec2creds = {
'access_token': creds.access,
'secret_token': creds.secret}
return _ec2creds
@property
def image_info(self):
"""Return image ids for the user-defined image names
        @returns {'image_id': id1, 'image_alt_id': id2}
"""
image_info = {}
if self.service_present('glance'):
if self.keystone_session:
glance_client = glanceclient.Client(
'2', session=self.keystone_session)
else:
glance_ep = self.resolve_endpoint('image', 'public')
glance_client = glanceclient.Client(
'2', glance_ep, token=self.ks_client.auth_token)
for image in glance_client.images.list():
if self.uconfig.get('glance-image-name') == image.name:
image_info['image_id'] = image.id
if self.uconfig.get('image-ssh-user'):
image_info['image_ssh_user'] = \
self.uconfig.get('image-ssh-user')
if self.uconfig.get('glance-alt-image-name') == image.name:
image_info['image_alt_id'] = image.id
if self.uconfig.get('image-alt-ssh-user'):
image_info['image_alt_ssh_user'] = \
self.uconfig.get('image-alt-ssh-user')
return image_info
@property
def network_info(self):
"""Return public network and router ids for user-defined router and
network names
        @returns {'public_network_id': id1, 'router_id': id2}
"""
network_info = {}
if self.service_present('neutron'):
if self.keystone_session:
neutron_client = neutronclient.Client(
session=self.keystone_session)
else:
neutron_ep = self.ks_client.service_catalog.url_for(
service_type='network',
endpoint_type='publicURL')
neutron_client = neutronclient.Client(
endpoint_url=neutron_ep,
token=self.ks_client.auth_token)
routers = neutron_client.list_routers(
name=self.uconfig['router-name'])
if len(routers['routers']) == 0:
hookenv.log("Router not found")
else:
router = routers['routers'][0]
network_info['router_id'] = router['id']
networks = neutron_client.list_networks(
name=self.uconfig['network-name'])
if len(networks['networks']) == 0:
hookenv.log("network not found")
else:
network = networks['networks'][0]
network_info['public_network_id'] = network['id']
networks = neutron_client.list_networks(
name=self.uconfig['floating-network-name'])
if len(networks['networks']) == 0:
hookenv.log("Floating network name not found")
else:
network_info['floating_network_name'] = \
self.uconfig['floating-network-name']
return network_info
def service_present(self, service):
"""Check if a given service type is registered in the catalogue
:params service: string Service type
@returns Boolean: True if service is registered
"""
return service in self.get_present_services()
def get_nova_client(self):
if not self.keystone_session:
auth = keystoneauth1_v2.Password(
auth_url=self.keystone_auth_url(),
username=self.keystone_info['service_username'],
password=self.keystone_info['service_password'],
tenant_name=self.keystone_info['service_tenant_name'])
self.keystone_session = keystoneauth1_session.Session(auth=auth)
return novaclient_client.Client(
2, session=self.keystone_session)
@property
def compute_info(self):
"""Return flavor ids for user-defined flavors
        @returns {'flavor_id': id1, 'flavor_alt_id': id2}
"""
compute_info = {}
if self.service_present('nova'):
nova_client = self.get_nova_client()
nova_ep = self.resolve_endpoint('compute', 'public')
url = urllib.parse.urlparse(nova_ep)
compute_info['nova_base'] = '{}://{}'.format(
url.scheme,
url.netloc.split(':')[0])
for flavor in nova_client.flavors.list():
if self.uconfig['flavor-name'] == flavor.name:
compute_info['flavor_id'] = flavor.id
if self.uconfig['flavor-alt-name'] == flavor.name:
compute_info['flavor_alt_id'] = flavor.id
return compute_info
def get_present_services(self):
"""Query keystone catalogue for a list for registered services
@returns [svc1, svc2, ...]: List of registered services
"""
services = [svc.name
for svc in self.ks_client.services.list()
if svc.enabled]
return services
@property
def service_info(self):
"""Assemble a list of services tempest should tests
Compare the list of keystone registered services with the services the
user has requested be tested. If in 'auto' mode test all services
registered in keystone.
@returns [svc1, svc2, ...]: List of services to test
"""
service_info = {}
tempest_candidates = ['ceilometer', 'cinder', 'glance', 'heat',
'horizon', 'ironic', 'neutron', 'nova',
                              'sahara', 'swift', 'trove', 'zaqar']
present_svcs = self.get_present_services()
        # If not running in an action context assume auto mode
try:
action_args = hookenv.action_get()
except Exception:
action_args = {'service-whitelist': 'auto'}
if action_args['service-whitelist'] == 'auto':
white_list = []
for svc in present_svcs:
if svc in tempest_candidates:
white_list.append(svc)
else:
white_list = action_args['service-whitelist']
for svc in tempest_candidates:
if svc in white_list:
service_info[svc] = 'true'
else:
service_info[svc] = 'false'
return service_info
class TempestAdapters(adapters.OpenStackRelationAdapters):
"""
Adapters class for the Tempest charm.
"""
relation_adapters = {
'identity_admin': TempestAdminAdapter,
}
def __init__(self, relations):
super(TempestAdapters, self).__init__(
relations,
options=TempestConfigurationAdapter)
class TempestConfigurationAdapter(adapters.ConfigurationAdapter):
"""
Manipulate user supplied config as needed
"""
def __init__(self):
super(TempestConfigurationAdapter, self).__init__()
class TempestCharm(charm.OpenStackCharm):
release = 'liberty'
name = 'tempest'
required_relations = ['identity-admin']
"""Directories and files used for running tempest"""
TEMPEST_ROOT = '/var/lib/tempest'
TEMPEST_LOGDIR = TEMPEST_ROOT + '/logs'
TEMPEST_CONF = TEMPEST_ROOT + '/tempest.conf'
"""pip.conf for proxy settings etc"""
PIP_CONF = '/root/.pip/pip.conf'
"""List of packages charm should install
XXX The install hook is currently installing most packages ahead of
this because modules like keystoneclient are needed at load time
"""
packages = [
'git', 'testrepository', 'subunit', 'python-nose', 'python-lxml',
'python-boto', 'python-junitxml', 'python-subunit',
'python-testresources', 'python-oslotest', 'python-stevedore',
'python-cinderclient', 'python-glanceclient', 'python-heatclient',
'python-keystoneclient', 'python-neutronclient', 'python-novaclient',
'python-swiftclient', 'python-ceilometerclient', 'openvswitch-test',
'python3-cinderclient', 'python3-glanceclient', 'python3-heatclient',
'python3-keystoneclient', 'python3-neutronclient',
'python3-novaclient', 'python3-swiftclient',
'python3-ceilometerclient', 'openvswitch-common', 'libffi-dev',
'libssl-dev', 'python-dev', 'python-cffi'
]
"""Use the Tempest specific adapters"""
adapters_class = TempestAdapters
"""Tempest has no running services so no services need restarting on
config file change
"""
restart_map = {
TEMPEST_CONF: [],
PIP_CONF: [],
}
@property
def all_packages(self):
_packages = self.packages[:]
if host.lsb_release()['DISTRIB_RELEASE'] > '14.04':
_packages.append('tox')
else:
_packages.append('python-tox')
return _packages
def setup_directories(self):
for tempest_dir in [self.TEMPEST_ROOT, self.TEMPEST_LOGDIR]:
if not os.path.exists(tempest_dir):
os.mkdir(tempest_dir)
def setup_git(self, branch, git_dir):
"""Clone tempest and symlink in rendered tempest.conf"""
conf = hookenv.config()
if not os.path.exists(git_dir):
git_url = conf['tempest-source']
fetch.install_remote(str(git_url), dest=str(git_dir),
branch=str(branch), depth=str(1))
conf_symlink = git_dir + '/tempest/etc/tempest.conf'
if not os.path.exists(conf_symlink):
os.symlink(self.TEMPEST_CONF, conf_symlink)
def execute_tox(self, run_dir, logfile, tox_target):
"""Trigger tempest run through tox setting proxies if needed"""
env = os.environ.copy()
conf = hookenv.config()
if conf.get('http-proxy'):
env['http_proxy'] = conf['http-proxy']
if conf.get('https-proxy'):
env['https_proxy'] = conf['https-proxy']
cmd = ['tox', '-e', tox_target]
f = open(logfile, "w")
subprocess.call(cmd, cwd=run_dir, stdout=f, stderr=f, env=env)
def get_tempest_files(self, branch_name):
"""Prepare tempest files and directories
@return git_dir, logfile, run_dir
"""
log_time_str = time.strftime("%Y%m%d%H%M%S", time.gmtime())
git_dir = '{}/tempest-{}'.format(self.TEMPEST_ROOT, branch_name)
logfile = '{}/run_{}.log'.format(self.TEMPEST_LOGDIR, log_time_str)
run_dir = '{}/tempest'.format(git_dir)
return git_dir, logfile, run_dir
def parse_tempest_log(self, logfile):
"""Read tempest logfile and return summary as dict
        @return dict: Dictionary of summary data
"""
summary = {}
with open(logfile, 'r') as tempest_log:
summary_line = False
for line in tempest_log:
if line.strip() == "Totals":
summary_line = True
if line.strip() == "Worker Balance":
summary_line = False
if summary_line:
# Match lines like: ' - Unexpected Success: 0'
matchObj = re.match(
r'(.*)- (.*?):\s+(.*)', line, re.M | re.I)
if matchObj:
key = matchObj.group(2)
key = key.replace(' ', '-').replace(':', '').lower()
summary[key] = matchObj.group(3)
return summary
def run_test(self, tox_target):
"""Run smoke tests"""
action_args = hookenv.action_get()
branch_name = action_args['branch']
git_dir, logfile, run_dir = self.get_tempest_files(branch_name)
self.setup_directories()
self.setup_git(branch_name, git_dir)
self.execute_tox(run_dir, logfile, tox_target)
action_info = self.parse_tempest_log(logfile)
action_info['tempest-logfile'] = logfile
hookenv.action_set(action_info)
class TempestCharmRocky(TempestCharm):
release = 'rocky'
packages = [
'git', 'testrepository', 'subunit', 'python3-nose', 'python3-lxml',
'python3-boto', 'python3-junitxml', 'python3-subunit',
'python3-testresources', 'python3-oslotest', 'python3-stevedore',
'python3-cinderclient', 'python3-glanceclient', 'python3-heatclient',
'python3-keystoneclient', 'python3-neutronclient',
'python3-novaclient', 'python3-swiftclient',
'python3-ceilometerclient', 'openvswitch-test', 'openvswitch-common',
'libffi-dev', 'libssl-dev', 'python3-dev', 'python3-cffi'
]
purge_packages = [
'python-nose', 'python-lxml', 'python-boto', 'python-junitxml',
'python-subunit', 'python-testresources', 'python-oslotest',
'python-stevedore', 'python-cinderclient', 'python-glanceclient',
'python-heatclient', 'python-keystoneclient', 'python-neutronclient',
'python-novaclient', 'python-swiftclient', 'python-ceilometerclient',
'python-dev', 'python-cffi'
]
python_version = 3
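# ---------------------------------------------------------------------------
# Hedged illustration (not part of the charm): parse_tempest_log() extracts
# key/value pairs from the "Totals" block of a tempest run with the regex used
# above. For example, the line ' - Unexpected Success: 0' matches as
#   re.match(r'(.*)- (.*?):\s+(.*)', ' - Unexpected Success: 0', re.M | re.I)
#   groups -> (' ', 'Unexpected Success', '0')
# and is stored in the summary dict as {'unexpected-success': '0'}.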
# -*- coding: utf-8 -*-
from flask import Flask, abort
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from config import basedir, UPLOAD_FOLDER
#from flask.ext.mail import Mail
theapp = Flask(__name__)
theapp.config.from_object('config')
#mail = Mail(theapp)
bootstrap = Bootstrap(theapp)
db = SQLAlchemy(theapp)
from app import views, models
import Sofa
import SofaPython.Tools
import SofaTest
def createScene(node):
node.createObject('PythonScriptController', filename=__file__, classname='VerifController')
class VerifController(SofaTest.Controller):
def initGraph(self, node):
Sofa.msg_info("initGraph ENTER")
child = node.createChild("temporary_node")
        # FROM HERE, 'child' used to be added to the nodes ScriptEnvironment would init, but it no longer is
node.removeChild( child )
        # 'child' is no longer in the scene graph; it used to linger in ScriptEnvironment, but it no longer does
Sofa.msg_info("initGraph EXIT")
        # Coming back to SofaPython:
        # nothing points to 'child' any longer, so it will be deleted (smart pointer).
        # ScriptEnvironment used to call 'init' on an invalid pointer, or at least
        # on a node detached from the scene graph, but it no longer does.
        # That could have caused all sorts of trouble (including crashes).
def onEndAnimationStep(self, dt):
Sofa.msg_info("onEndAnimationStep")
self.sendSuccess()
"""
This application demonstrates how to create a Tag Template in Data Catalog,
loading its information from Google Sheets.
"""
import argparse
import logging
import re
import stringcase
import unicodedata
from google.api_core import exceptions
from google.cloud import datacatalog
from googleapiclient import discovery
from googleapiclient import errors
from oauth2client import service_account
_CLOUD_PLATFORM_REGION = 'us-central1'
_CUSTOM_MULTIVALUED_TYPE = 'MULTI'
_DATA_CATALOG_BOOL_TYPE = 'BOOL'
_DATA_CATALOG_ENUM_TYPE = 'ENUM'
_DATA_CATALOG_NATIVE_TYPES = ['BOOL', 'DOUBLE', 'ENUM', 'STRING', 'TIMESTAMP']
_LOOKING_FOR_SHEET_LOG_FORMAT = 'Looking for {} sheet {} | {}...'
class TemplateMaker:
def __init__(self):
self.__sheets_reader = GoogleSheetsReader()
self.__datacatalog_facade = DataCatalogFacade()
def run(self, spreadsheet_id, project_id, template_id, display_name, delete_existing=False):
master_template_fields = self.__sheets_reader.read_master(
spreadsheet_id, stringcase.spinalcase(template_id))
self.__process_native_fields(spreadsheet_id, project_id, template_id, display_name,
master_template_fields, delete_existing)
self.__process_custom_multivalued_fields(spreadsheet_id, project_id, template_id,
display_name, master_template_fields,
delete_existing)
def __process_native_fields(self, spreadsheet_id, project_id, template_id, display_name,
master_template_fields, delete_existing_template):
native_fields = self.__filter_fields_by_types(master_template_fields,
_DATA_CATALOG_NATIVE_TYPES)
StringFormatter.format_elements_to_snakecase(native_fields, 0)
enums_names = {}
for field in native_fields:
if not field[2] == _DATA_CATALOG_ENUM_TYPE:
continue
names_from_sheet = self.__sheets_reader.read_helper(spreadsheet_id,
stringcase.spinalcase(field[0]))
enums_names[field[0]] = [name[0] for name in names_from_sheet]
template_name = datacatalog.DataCatalogClient.tag_template_path(
project_id, _CLOUD_PLATFORM_REGION, template_id)
if delete_existing_template:
self.__datacatalog_facade.delete_tag_template(template_name)
if not self.__datacatalog_facade.tag_template_exists(template_name):
self.__datacatalog_facade.create_tag_template(project_id, template_id, display_name,
native_fields, enums_names)
def __process_custom_multivalued_fields(self, spreadsheet_id, project_id, template_id,
display_name, master_template_fields,
delete_existing_template):
multivalued_fields = self.__filter_fields_by_types(master_template_fields,
[_CUSTOM_MULTIVALUED_TYPE])
StringFormatter.format_elements_to_snakecase(multivalued_fields, 0)
for field in multivalued_fields:
try:
values_from_sheet = self.__sheets_reader.read_helper(
spreadsheet_id, stringcase.spinalcase(field[0]))
fields = [(StringFormatter.format_to_snakecase(value[0]), value[0],
_DATA_CATALOG_BOOL_TYPE) for value in values_from_sheet]
except errors.HttpError as err:
if err.resp.status in [400]:
logging.info('NOT FOUND. Ignoring...')
continue # Ignore creating a new template representing the multivalued field
else:
raise
custom_template_id = f'{template_id}_{field[0]}'
custom_display_name = f'{display_name} - {field[1]}'
template_name = datacatalog.DataCatalogClient.tag_template_path(
project_id, _CLOUD_PLATFORM_REGION, custom_template_id)
if delete_existing_template:
self.__datacatalog_facade.delete_tag_template(template_name)
if not self.__datacatalog_facade.tag_template_exists(template_name):
self.__datacatalog_facade.create_tag_template(project_id, custom_template_id,
custom_display_name, fields)
@classmethod
def __filter_fields_by_types(cls, fields, valid_types):
return [field for field in fields if field[2] in valid_types]
"""
Input reader
========================================
"""
class GoogleSheetsReader:
def __init__(self):
self.__sheets_facade = GoogleSheetsFacade()
def read_master(self, spreadsheet_id, sheet_name, values_per_line=3):
return self.__read(spreadsheet_id, sheet_name, 'master', values_per_line)
def read_helper(self, spreadsheet_id, sheet_name, values_per_line=1):
return self.__read(spreadsheet_id, sheet_name, 'helper', values_per_line)
def __read(self, spreadsheet_id, sheet_name, sheet_type, values_per_line):
"""
Read the requested values from each line and store them into a list.
:param spreadsheet_id: Spreadsheet ID.
:param sheet_name: Sheet name.
:param sheet_type: Sheet type {'master', 'helper'}.
:param values_per_line: Number of consecutive values to be read from each line.
"""
logging.info(_LOOKING_FOR_SHEET_LOG_FORMAT.format(sheet_type, spreadsheet_id, sheet_name))
sheet_data = self.__sheets_facade.read_sheet(spreadsheet_id, sheet_name, values_per_line)
data = []
logging.info(f'Reading spreadsheet {spreadsheet_id} | {sheet_name}...')
for row in sheet_data.get('valueRanges')[0].get('values'):
row_data = []
for counter in range(values_per_line):
row_data.append(row[counter].strip())
data.append(row_data)
# The first line is usually used for headers, so it's discarded.
del (data[0])
logging.info('DONE')
return data
"""
API communication classes
========================================
"""
class DataCatalogFacade:
"""
Manage Templates by communicating to Data Catalog's API.
"""
def __init__(self):
# Initialize the API client.
self.__datacatalog = datacatalog.DataCatalogClient()
def create_tag_template(self,
project_id,
template_id,
display_name,
fields_descriptors,
enums_names=None):
"""Create a Tag Template."""
location = datacatalog.DataCatalogClient.common_location_path(
project_id, _CLOUD_PLATFORM_REGION)
tag_template = datacatalog.TagTemplate()
tag_template.display_name = display_name
for descriptor in fields_descriptors:
field = datacatalog.TagTemplateField()
field.display_name = descriptor[1]
field_id = descriptor[0]
field_type = descriptor[2]
if not field_type == _DATA_CATALOG_ENUM_TYPE:
field.type_.primitive_type = datacatalog.FieldType.PrimitiveType[field_type]
else:
for enum_name in enums_names[field_id]:
enum_value = datacatalog.FieldType.EnumType.EnumValue()
enum_value.display_name = enum_name
field.type_.enum_type.allowed_values.append(enum_value)
tag_template.fields[field_id] = field
created_tag_template = self.__datacatalog.create_tag_template(parent=location,
tag_template_id=template_id,
tag_template=tag_template)
logging.info(f'===> Template created: {created_tag_template.name}')
def delete_tag_template(self, name):
"""Delete a Tag Template."""
try:
self.__datacatalog.delete_tag_template(name=name, force=True)
logging.info(f'===> Template deleted: {name}')
except exceptions.PermissionDenied:
pass
def tag_template_exists(self, name):
"""Check if a Tag Template with the provided name already exists."""
try:
self.__datacatalog.get_tag_template(name=name)
return True
except exceptions.PermissionDenied:
return False
class GoogleSheetsFacade:
"""
Access spreadsheets data by communicating to the Google Sheets API.
"""
def __init__(self):
# Initialize the API client.
self.__service = discovery.build(
serviceName='sheets',
version='v4',
credentials=service_account.ServiceAccountCredentials.get_application_default(),
cache_discovery=False)
def read_sheet(self, spreadsheet_id, sheet_name, values_per_line):
return self.__service.spreadsheets().values().batchGet(
spreadsheetId=spreadsheet_id,
ranges=f'{sheet_name}!A:{chr(ord("@") + values_per_line)}').execute()
"""
Tools & utilities
========================================
"""
class StringFormatter:
@classmethod
def format_elements_to_snakecase(cls, a_list, internal_index=None):
if internal_index is None:
for counter in range(len(a_list)):
a_list[counter] = cls.format_to_snakecase(a_list[counter])
else:
for element in a_list:
element[internal_index] = cls.format_to_snakecase(element[internal_index])
@classmethod
def format_to_snakecase(cls, string):
normalized_str = unicodedata.normalize('NFKD', string).encode('ASCII', 'ignore').decode()
normalized_str = re.sub(r'[^a-zA-Z0-9]+', ' ', normalized_str)
normalized_str = normalized_str.strip()
normalized_str = normalized_str.lower() \
if (' ' in normalized_str) or (normalized_str.isupper()) \
else stringcase.camelcase(normalized_str) # FooBarBaz => fooBarBaz
return stringcase.snakecase(normalized_str) # foo-bar-baz => foo_bar_baz
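# Hedged illustration (not part of the original module) of the two branches in
# format_to_snakecase():
#   StringFormatter.format_to_snakecase('Foo Bar-Baz')  # -> 'foo_bar_baz'
#   StringFormatter.format_to_snakecase('FooBarBaz')    # -> 'foo_bar_baz'
# Strings containing spaces (or all-uppercase strings) are lowercased first,
# while camel/Pascal case strings go through stringcase.camelcase() before the
# final stringcase.snakecase() call.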
"""
Main program entry point
========================================
"""
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
logging.getLogger('googleapiclient.discovery').setLevel(logging.ERROR)
logging.getLogger('oauth2client.client').setLevel(logging.ERROR)
logging.getLogger('oauth2client.transport').setLevel(logging.ERROR)
parser = argparse.ArgumentParser(description='Load Tag Template from CSV')
parser.add_argument('--template-id', help='the template ID', required=True)
parser.add_argument('--display-name', help='template\'s Display Name', required=True)
parser.add_argument('--project-id',
help='GCP Project in which the Template will be created',
required=True)
parser.add_argument('--spreadsheet-id', help='Google Spreadsheet ID', required=True)
parser.add_argument(
'--delete-existing',
action='store_true',
help='delete existing Templates and recreate them with the provided metadata')
args = parser.parse_args()
TemplateMaker().run(args.spreadsheet_id, args.project_id, args.template_id, args.display_name,
args.delete_existing)
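# Hedged invocation sketch (argument values below are placeholders, not from
# the original):
#   python <this_script>.py \
#     --template-id my_template --display-name 'My Template' \
#     --project-id my-gcp-project --spreadsheet-id <spreadsheet-id> \
#     --delete-existing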
import cloudpassage
import sys
import os
import pytest
import datetime
import time
import platform
sys.path.append(os.path.join(os.path.dirname(__file__), '../../', ''))
import lib.validate as validate
class TestUnitValidate:
def test_validate_valid_time(self):
accepted = True
try:
validate.validate_time("2016-08-20")
except ValueError as e:
accepted = False
assert accepted
def test_validate_invalid_time(self):
accepted = False
try:
validate.validate_time("foobar")
except ValueError as e:
accepted = True
assert accepted
def test_validate_valid_time_range(self):
accepted = True
today = datetime.datetime.now().strftime("%Y-%m-%d")
try:
validate.validate_time_range(today)
except ValueError as e:
accepted = False
assert accepted
def test_validate_invalid_time_range(self):
accepted = False
today = datetime.datetime.now()
date = (today - datetime.timedelta(days=90)).strftime("%Y-%m-%d")
try:
validate.validate_time_range(date)
except ValueError as e:
accepted = True
assert accepted
def test_validate_valid_batchsize(self):
accepted = True
size = 10
try:
validate.batchsize(size)
except ValueError as e:
accepted = False
assert accepted
def test_validate_invalid_batchsize(self):
accepted = False
size = 100
try:
validate.batchsize(size)
except ValueError as e:
accepted = True
assert accepted
def test_validate_valid_thread(self):
accepted = True
thread = 1
try:
validate.thread(thread)
except ValueError as e:
accepted = False
assert accepted
def test_validate_invalid_str_thread(self):
accepted = False
thread = 'foobar'
try:
validate.thread(thread)
except ValueError as e:
accepted = True
assert accepted
def test_validate_invalid_count_thread(self):
accepted = False
thread = 10
try:
validate.thread(thread)
except ValueError as e:
accepted = True
assert accepted
def test_validate_operating_system(self):
current_platform = platform.system()
        if current_platform != 'Windows':
current_platform = 'linux'
actual = validate.operating_system()
        assert current_platform == actual
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Evolve life in a landscape.
Life evolves alongside landscapes by biotic and abiotic processes under complex
dynamics at Earth's surface. Researchers who wish to explore these dynamics can
use this component as a tool for them to build landscape-life evolution models.
Landlab components, including SpeciesEvolver are designed to work with a shared
model grid. Researchers can build novel models using plug-and-play surface
process components to evolve the grid's landscape alongside the life tracked by
SpeciesEvolver. The simulated life evolves following customizable processes.
Component written by Nathan Lyons beginning August 2017.
"""
from collections import OrderedDict
import numpy as np
from pandas import DataFrame
from landlab import Component
from .record import Record
class SpeciesEvolver(Component):
"""Evolve life in a landscape.
This component tracks ``Taxon`` objects as they evolve in a landscape. The
component calls the evolutionary process methods of tracked ``Taxon``
objects. ``Taxon`` are intended to be subclassed for unique behavior,
attributes, and model approaches, including different implementations of
evolutionary processes.
The general workflow to use this component in a model is
1. Instantiate the component.
2. Instantiate taxa.
3. Introduce taxa to SpeciesEvolver using the ``track_taxon`` method.
4. Advance the component instance in time using ``run_one_step`` method.
Taxa can be introduced at model onset and later time steps. Multiple types
can be tracked by the same SpeciesEvolver instance.
The taxon type, ``ZoneTaxon`` is distributed with SpeciesEvolver. The
spatial aspect of ``ZoneTaxon`` macroevolutionary processes is determined
using ``Zone`` objects. A ``ZoneController`` is used to create and manage
zones as well as efficiently create multiple ZoneTaxon objects. See the
documentation of ``ZoneController`` and ``ZoneTaxon`` for more information.
SpeciesEvolver knows nothing about zones and their controller, meaning the
concept of zones are not required for other taxon types.
Model time and other variables can be viewed with the class attribute,
``record_data_frame``. Time is recorded to track the history of taxa
lineages. The unit of time is not considered within the component other
than the record, and can be thought of as in years or whatever unit is
needed. Time is advanced with the ``dt`` parameter of the ``run_one_step``
method.
The geographic ranges of the taxa at the current model time are evaluated
during the ``run_one_step`` method. Each taxon object determines if it
persists or becomes extinct, and if it creates child ``Taxon`` objects.
Metadata of all taxa introduced to the component can be viewed with the
attribute, ``taxa_data_frame``.
Taxa are automatically assigned unique taxon identifiers, ``tid``.
Identifiers are used to reference and retrieve taxon objects. Identifiers
are assigned in the order taxa are introduced to SpeciesEvolver.
Examples
--------
The evolution of a lowland taxa lineage in response to mountain range
formation is simulated using ZoneTaxon managed by ZoneController. Mountain
range formation is forced without processes for simplicity in this example.
Import modules used in the following examples.
>>> from landlab import RasterModelGrid
>>> from landlab.components import SpeciesEvolver
>>> from landlab.components.species_evolution import ZoneController
Create a model grid with mountain scale resolution. The elevation is
equally low throughout the grid at model onset.
>>> mg = RasterModelGrid((3, 7), 1000)
>>> z = mg.add_ones('topographic__elevation', at='node')
>>> z.reshape(mg.shape)
array([[ 1., 1., 1., 1., 1., 1., 1.],
[ 1., 1., 1., 1., 1., 1., 1.],
[ 1., 1., 1., 1., 1., 1., 1.]])
Instantiate the component with the grid as the first parameter.
>>> se = SpeciesEvolver(mg)
ZoneController requires a function that returns a mask of the total extent
of taxa habitat. The mask is a boolean array where `True` values represent
nodes that satisfy habitat conditions. Zone objects are not created here.
The mask only maps the extent where taxa can exist. This function returns
`True` where elevation is below 100, which is where the simulated lowland
taxa of this model can inhabit.
>>> def zone_func(grid):
... return grid.at_node['topographic__elevation'] < 100
Instantiate ZoneController with the grid and zone function. The initial
zones are created at controller instantiation. In this example, one zone is
created because all nodes of the zone mask are adjacent to each other.
>>> zc = ZoneController(mg, zone_func)
>>> len(zc.zones) == 1
True
Additional examples of controller usage are provided in ``ZoneController``
documentation.
The ``mask`` of the zone is True where the conditions of the zone function
are met. All nodes of the grid are included because the elevation of each
node is below 100. The ``zones`` attribute of ``ZoneController`` returns a
list of the zones that currently exist in the model. Below we return the
mask of the single zone by indexing this list.
>>> zc.zones[0].mask
array([ True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True], dtype=bool)
Populate a taxon to the zone.
>>> taxon = zc.populate_zones_uniformly(1)
>>> se.track_taxa(taxon)
The attribute, ``taxa_data_frame`` indicates only the one taxon exists
because we populated each zone with one taxon, and only the one zone
exists.
>>> se.taxa_data_frame # doctest: +NORMALIZE_WHITESPACE
pid type t_first t_final
tid
0 <NA> ZoneTaxon 0 <NA>
The identifier of the taxon, ``tid`` is 0. The identifier of the taxon's
parent, ``pid``, is '<NA>' because it does not have a parent taxon given
that it was manually introduced using the ``track_taxa`` method. The taxon
was introduced at time, ``t_first`` and time, ``t_final`` is '<NA>'
because the taxon remains extant. See the documentation of this attribute
for further explanation of data frame columns.
Force a change in the zone mask to demonstrate component functionality.
Here we begin a new time step where topography is uplifted by 200 that
forms a ridge trending north-south in the center of the grid.
>>> z[[3, 10, 17]] = 200
>>> z.reshape(mg.shape)
array([[ 1., 1., 1., 200., 1., 1., 1.],
[ 1., 1., 1., 200., 1., 1., 1.],
[ 1., 1., 1., 200., 1., 1., 1.]])
The current elevation, the elevation following uplift, is represented here.
::
- - - ^ - - - elevation: - 1
- - - ^ - - - ^ 200
- - - ^ - - -
The updated zone mask is below.
::
. . . x . . . key: . node in zone mask
. . . x . . . x node outside of zone mask
. . . x . . .
Run a step of both the ZoneController and SpeciesEvolver. Both are run to
    keep time in sync between the ``ZoneController`` and ``SpeciesEvolver``
instances.
>>> delta_time = 1000
>>> zc.run_one_step(delta_time)
>>> se.run_one_step(delta_time)
Two zones exist following this time step.
>>> len(zc.zones) == 2
True
An additional zone was created because the zone mask was not continuous.
::
. . . ^ * * * key: . a zone
. . . ^ * * * * another zone
. . . ^ * * * ^ mountain range
The split of the initial zone triggered speciation of taxon 1 by taxon 0.
>>> se.taxa_data_frame # doctest: +NORMALIZE_WHITESPACE
pid type t_first t_final
tid
0 <NA> ZoneTaxon 0 <NA>
1 0 ZoneTaxon 1000 <NA>
The phylogenetic tree of the simulated taxa is represented below. The
number at the line tips are the taxa identifiers.
::
0 ──────┬── 0
│
└── 1
_________
0 1000
time
The split of the initial zone into two zones at time 1000 triggered taxon 0
to speciate. Taxon 0 occupies a zone on one side of the mountain range, and
the child, taxon 1 occupies a zone on the other side. This outcome is the
result of the evolutionary processes programmed within ``ZoneTaxon`` as
well as the parameters used in this example (default values were used
as optional parameters were not set). Different behavior can be achieved by
subclassing ``ZoneTaxon`` or ``Taxon``.
References
----------
**Required Software Citation(s) Specific to this Component**
Lyons, N.J., Albert, J.S., Gasparini, N.M. (2020). SpeciesEvolver: A
Landlab component to evolve life in simulated landscapes. Journal of Open
Source Software 5(46), 2066, https://doi.org/10.21105/joss.02066
**Additional References**
Albert, J.S., Schoolmaster Jr, D.R., Tagliacollo, V., Duke-Sylvester, S.M.
(2016). Barrier displacement on a neutral landscape: Toward a theory of
continental biogeography. Systematic Biology 66(2), 167–182.
Lyons, N.J., Val, P., Albert, J.S., Willenbring, J.K., Gasparini, N.M., in
review. Topographic controls on divide migration, stream capture, and
diversification in riverine life. Earth Surface Dynamics.
"""
_name = "SpeciesEvolver"
_unit_agnostic = True
_info = {
"taxa__richness": {
"dtype": int,
"intent": "out",
"optional": False,
"units": "-",
"mapping": "node",
"doc": "The number of taxa at each node",
}
}
_cite_as = """@article{lyons2020species,
author = {Lyons, N.J. and Albert, J.S. and Gasparini, N.M.},
title = {SpeciesEvolver: A Landlab component to evolve life in simulated landscapes},
year = {2020},
journal = {Journal of Open Source Software},
volume = {5},
number = {46},
doi = {10.21105/joss.02066},
url = {https://doi.org/10.21105/joss.02066}
}"""
def __init__(self, grid, initial_time=0):
"""Instantiate SpeciesEvolver.
Parameters
----------
grid : ModelGrid
A Landlab ModelGrid.
initial_time : float, int, optional
The initial time. The unit of time is not considered within the
component, with the exception that time is logged in the record.
The default value of this parameter is 0.
"""
super().__init__(grid)
# Create data structures.
self._record = Record(initial_time)
self._record.set_value("taxa", 0)
self._taxa_data = OrderedDict(
[("tid", []), ("pid", []), ("type", []), ("t_first", []), ("t_final", [])]
)
self._taxon_objs = []
# Create a taxa richness field.
_ = grid.add_zeros("taxa__richness", at="node", dtype=int, clobber=True)
@property
def record_data_frame(self):
"""A Pandas DataFrame of SpeciesEvolver variables over time.
Each row is data of a model time step. The time of the step is recorded
in the `time` column. `taxa` is the count of taxa extant at a time.
Additional columns can be added and updated by SpeciesEvolver objects
        during the component ``run_one_step`` method. See documentation of Taxon
objects for an explanation of these columns.
The DataFrame is created from a dictionary associated with a
SpeciesEvolver ``Record`` object. nan values in Pandas DataFrame force
the column to become float values even when data are integers. The
original value type is retained in the ``Record`` object.
"""
return self._record.data_frame
@property
def taxa_data_frame(self):
"""A Pandas DataFrame of taxa metadata.
Each row is the metadata of a taxon. The column, ``tid`` is the taxon
identifier assigned when SpeciesEvolver begins tracking the taxon. The
column, ``pid`` is the tid of the parent of the taxon. A pid of `<NA>`
indicates no parent taxon. ``type`` is the type of ``Taxon`` object.
``t_first`` is the initial model time the taxon was added to
SpeciesEvolver. ``t_final`` is the model time the taxon was recognized
as extinct. A t_final of `<NA>` indicates the taxon is extant.
Additional columns may be added by some taxon types. See the
documentation of these taxa for column description.
The DataFrame is created from a data structure within the component.
"""
data = self._taxa_data
cols = list(data.keys())
cols.remove("tid")
df = DataFrame(data, columns=cols, index=data["tid"])
df.index.name = "tid"
# Change column number type because pandas makes a column float if it
# includes nan values.
df["pid"] = df["pid"].astype("Int64")
if all(isinstance(item, int) for item in data["t_final"] if not np.isnan(item)):
df["t_final"] = df["t_final"].astype("Int64")
return df
def run_one_step(self, dt):
"""Update the taxa for a single time step.
This method advances the model time in the component record, calls the
evolve method of taxa extant at the current time, and updates the
variables in the record and taxa dataframes.
Parameters
----------
dt : float
The model time step duration. Time in the record is advanced by the
value of this parameter.
"""
record = self._record
record.advance_time(dt)
# Create a dictionary of the taxa to update at the current model time.
# Keys are objects of extant taxa. Values are booleans indicating if
# stages remain for respective taxa.
time_dict = OrderedDict.fromkeys(self._taxon_objs, True)
# Iteratively call taxa ``_evolve`` method until all stages of all taxa
# have run.
stage = 0
while any(time_dict.values()):
# Run evolution stage.
stage_dict = OrderedDict([])
evolving_taxa = filter(time_dict.get, time_dict)
for taxon in evolving_taxa:
# Run evolution stage of taxon with remaining stages.
stages_remain, taxon_children = taxon._evolve(dt, stage, record)
if taxon_children:
stage_dict.update(
OrderedDict.fromkeys(taxon_children, stages_remain)
)
stage_dict[taxon] = stages_remain and taxon.extant
time_dict.update(stage_dict)
stage += 1
self._update_taxa_data(time_dict.keys())
def track_taxa(self, taxa):
"""Add taxa to be tracked over time by SpeciesEvolver.
The taxon/taxa are introduced at the latest time in the record and
also tracked during following model times. Each taxon is assigned an
identifier and then can be viewed in ``taxa_data_frame``.
Parameters
----------
taxa : Taxon or list of Taxon
The taxa to introduce.
Examples
--------
ZoneTaxon are used to demonstrate this method.
Import modules used in the following examples.
>>> from landlab import RasterModelGrid
>>> from landlab.components import SpeciesEvolver
>>> from landlab.components.species_evolution import ZoneController
Create a model grid with flat topography.
>>> mg = RasterModelGrid((3, 7), 1000)
>>> z = mg.add_ones('topographic__elevation', at='node')
Instantiate SpeciesEvolver and a ZoneController. Instantiate the
latter with a function that masks the low elevation zone extent. Only
one zone is created.
>>> se = SpeciesEvolver(mg)
>>> def zone_func(grid):
... return grid.at_node['topographic__elevation'] < 100
>>> zc = ZoneController(mg, zone_func)
>>> len(zc.zones) == 1
True
Track the taxon of the one zone.
>>> taxon = zc.populate_zones_uniformly(1)
>>> se.track_taxa(taxon)
The one taxon is now tracked by SpeciesEvolver as indicated by the taxa
DataFrame.
>>> se.taxa_data_frame # doctest: +NORMALIZE_WHITESPACE
pid type t_first t_final
tid
0 <NA> ZoneTaxon 0 <NA>
"""
if not isinstance(taxa, list):
taxa = [taxa]
self._update_taxa_data(taxa)
def _update_taxa_data(self, taxa_at_time):
"""Update the taxa data structure, set identifiers, and taxa statistics.
This method sets identifiers and metadata for the newly introduced
        taxa. For previously introduced taxa that have gone extinct, it updates
        the ``t_final`` value of the taxa metadata.
Parameters
----------
taxa_at_time : list of Taxon
The taxa at the current model time.
"""
time = self._record.latest_time
data = self._taxa_data
objs = self._taxon_objs
t_recorded = self._taxon_objs
t_introduced = [taxon for taxon in taxa_at_time if taxon in t_recorded]
t_new = [taxon for taxon in taxa_at_time if taxon not in t_recorded]
# Update previously introduced taxa.
for taxon in t_introduced:
if not taxon.extant:
idx = data["tid"].index(taxon.tid)
data["t_final"][idx] = time
objs.remove(taxon)
# Set the data of new taxa.
for taxon in t_new:
# Set identifier.
if data["tid"]:
taxon._tid = max(data["tid"]) + 1
else:
taxon._tid = 0
# Append taxon data.
data["tid"].append(taxon.tid)
if taxon.parent is not None:
data["pid"].append(taxon.parent.tid)
else:
data["pid"].append(np.nan)
data["type"].append(type(taxon).__name__)
data["t_first"].append(time)
if taxon.extant:
data["t_final"].append(np.nan)
objs.append(taxon)
else:
data["t_final"].append(time)
# Update taxa stats.
self._record.set_value("taxa", len(objs))
self._grid.at_node["taxa__richness"] = self._get_taxa_richness_map()
def get_extant_taxon_objects(self, tids=np.nan, ancestor=np.nan, time=np.nan):
"""Get extant taxon objects filtered by parameters.
This method returns all taxon objects tracked by the component when no
optional parameters are included. The objects returned can be limited
using one or more parameters.
Parameters
----------
tids : list of int, optional
The taxa with these identifiers will be returned. A list is
returned even if only one object is contained within the list. By
default, when `tids` is not specified, extant taxa with any
identifier can be returned.
ancestor : int, optional
Limit the taxa returned to those descending from the taxon
designated as the ancestor. The ancestor is designated using its
``tid``. By default, taxa with any or no ancestors are returned.
time : float, int, optional
Limit the taxa returned to those that were extant at the time
designated by this parameter as well as extant at the current model
time. By default, extant taxa at all of the times listed in the
component record can be returned.
Returns
-------
taxa : a list of Taxon
The Taxon objects that pass through the filter. The list is sorted
by ``tid``. An empty list is returned if no taxa pass through the
filter.
Examples
--------
ZoneTaxon are used to demonstrate this method.
Import modules used in the following examples.
>>> from landlab import RasterModelGrid
>>> from landlab.components import SpeciesEvolver
>>> from landlab.components.species_evolution import ZoneController
Create a model grid.
>>> mg = RasterModelGrid((3, 7), 1000)
>>> z = mg.add_ones('topographic__elevation', at='node')
Instantiate SpeciesEvolver and a ZoneController. Instantiate the latter
with a function that masks the low elevation zone extent. Only one zone
is created.
>>> se = SpeciesEvolver(mg)
>>> def zone_func(grid):
... return grid.at_node['topographic__elevation'] < 100
>>> zc = ZoneController(mg, zone_func)
>>> len(zc.zones) == 1
True
Introduce two taxa to the zone.
>>> taxa = zc.populate_zones_uniformly(2)
>>> se.track_taxa(taxa)
Force north-south mountain ranges over two time steps that drives taxa
evolution.
>>> z[mg.x_of_node == 2000] = 200
>>> zc.run_one_step(1000)
>>> se.run_one_step(1000)
>>> z[mg.x_of_node == 4000] = 200
>>> zc.run_one_step(1000)
>>> se.run_one_step(1000)
Display taxa metadata.
>>> se.taxa_data_frame # doctest: +NORMALIZE_WHITESPACE
pid type t_first t_final
tid
0 <NA> ZoneTaxon 0 <NA>
1 <NA> ZoneTaxon 0 <NA>
2 0 ZoneTaxon 1000 <NA>
3 1 ZoneTaxon 1000 <NA>
4 0 ZoneTaxon 2000 <NA>
5 1 ZoneTaxon 2000 <NA>
Objects of all extant taxon are returned when no parameters are
inputted.
>>> se.get_extant_taxon_objects() # doctest: +NORMALIZE_WHITESPACE
[<ZoneTaxon, tid=0>,
<ZoneTaxon, tid=1>,
<ZoneTaxon, tid=2>,
<ZoneTaxon, tid=3>,
<ZoneTaxon, tid=4>,
<ZoneTaxon, tid=5>]
The returned objects of extant species can be limited using parameters.
Here, get the taxon objects with identifiers, 4 and 5.
>>> se.get_extant_taxon_objects(tids=[4, 5])
[<ZoneTaxon, tid=4>, <ZoneTaxon, tid=5>]
Extant taxon objects descending from a taxon can be obtained using the
        ``ancestor`` parameter. Here, get the taxa that descended from taxon 0.
>>> se.get_extant_taxon_objects(ancestor=0)
[<ZoneTaxon, tid=2>, <ZoneTaxon, tid=4>]
        Taxa can be limited to those that were extant at ``time``.
>>> se.get_extant_taxon_objects(time=1000) # doctest: +NORMALIZE_WHITESPACE
[<ZoneTaxon, tid=0>,
<ZoneTaxon, tid=1>,
<ZoneTaxon, tid=2>,
<ZoneTaxon, tid=3>]
The returned taxa can be further limited by including multiple
        parameters.
>>> se.get_extant_taxon_objects(ancestor=0, time=1000)
[<ZoneTaxon, tid=2>]
An empty list is returned when no extant taxa match parameter criteria.
>>> se.get_extant_taxon_objects(tids=[11])
[]
"""
# Create `results` that contains tids of the taxa matching parameter
# criteria.
extant_tids = [taxon.tid for taxon in self._taxon_objs]
results = set(extant_tids)
data = self._taxa_data
# Query by identifiers.
if isinstance(tids, list):
results = results.intersection(tids)
# Query by ancestor.
if not np.isnan(ancestor) and ancestor in data["tid"]:
df = self.taxa_data_frame
df["pid"] = df["pid"].fillna(-1)
taxon = ancestor
descendants = []
            # Walk the lineage breadth-first: visit a taxon, queue its children,
            # and continue until every descendant of `ancestor` has been seen.
            stack = [taxon]
while stack:
children = df.index[df["pid"] == taxon].tolist()
if children:
descendants.extend(children)
stack.extend(children)
stack.remove(taxon)
if stack:
taxon = stack[0]
results = results.intersection(descendants)
elif not np.isnan(ancestor):
            # The requested ancestor is not in the record, so nothing can match.
            results = set()
# Query by time.
if not np.isnan(time):
t_first = np.array(data["t_first"])
t_latest = np.nan_to_num(data["t_final"], nan=self._record.latest_time)
mask = np.all([time >= t_first, time <= t_latest], 0)
results = results.intersection(np.array(data["tid"])[mask].tolist())
# Get the Taxon objects that match all parameter query results.
taxa = [taxon for taxon in self._taxon_objs if taxon.tid in results]
taxa.sort(key=lambda taxon: taxon.tid)
return taxa
def _get_taxa_richness_map(self):
"""Get a map of the number of taxa."""
objs = self._taxon_objs
if objs:
masks = np.stack([taxon.range_mask for taxon in objs])
richness_mask = masks.sum(axis=0).astype(int)
else:
richness_mask = np.zeros(self._grid.number_of_nodes, dtype=int)
return richness_mask
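    # Tiny illustration of the stacking step above (node count and masks are
    # made up, not taken from a real grid): two boolean range masks over five
    # nodes sum to per-node taxa counts.
    #
    #   np.stack([[True, True, False, False, False],
    #             [False, True, True, False, False]]).sum(axis=0)
    #   -> array([1, 2, 1, 0, 0])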
| python |
import math
import timeit
import random
import sympy
import warnings
from random import randint, seed
import sys
from ecpy.curves import Curve,Point
from Crypto.Hash import SHA3_256, SHA256, HMAC
import requests
from Crypto.Cipher import AES
from Crypto import Random
from Crypto.Util.Padding import pad
from Crypto.Util.Padding import unpad
import hashlib, hmac, binascii
import json
API_URL = 'http://cryptlygos.pythonanywhere.com'
stuID = 24775
stuID_B = 18007
def key_generation(n,P):
sA = random.randrange(0,n-1)
QA = sA*P
return sA,QA
def signature_generation(n,m,P,sA):
k = random.randrange(1, n-2)
R = k*P
r = R.x % n
temp = m + r.to_bytes((r.bit_length() + 7) // 8,byteorder= 'big')
h = SHA3_256.new(temp)
h = int.from_bytes(h.digest(), byteorder='big') % n
s = (sA*h + k) % n
return(h,s)
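# Sketch of the matching verification step. This is an assumption drawn from the
# scheme above (s = sA*h + k mod n), not a documented part of the course server:
# V = s*P - h*QA collapses back to k*P for a valid signature, so rehashing V.x
# must reproduce h. E.g. signature_verification(n, m, P, QA_l, h, s) should be
# True for the (m, h, s) produced further below.
def signature_verification(n, m, P, QA, h, s):
    V = s*P + (n - h)*QA  # same as s*P - h*QA, avoiding Point subtraction
    v = V.x % n
    temp = m + v.to_bytes((v.bit_length() + 7) // 8, byteorder='big')
    h_check = SHA3_256.new(temp)
    h_check = int.from_bytes(h_check.digest(), byteorder='big') % n
    return h_check == h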
#testarray for id 18007
test=["The world is full of lonely people afraid to make the first move.",
"I don’t like sand. It’s all coarse, and rough, and irritating. And it gets everywhere.",
"Hate is baggage. Life’s too short to be pissed off all the time. It’s just not worth it.",
"Well, sir, it’s this rug I have, it really tied the room together.",
"Love is like taking a dump, Butters. Sometimes it works itself out. But sometimes, you need to give it a nice hard slimy push."]
#create a long term key
curve = Curve.get_curve('secp256k1')
n = curve.order
P = curve.generator
#sA_l,QA_l=key_generation(n, P);
sA_l = 47739507727097583103574014533029612368096643715089728534014772436197620809295 #long term key
QA_l = sA_l*P
lkey=QA_l
lpkey=sA_l
print('sA_l:',sA_l)
print('QA_l:',QA_l)
m = str(stuID)
m = str.encode(m)
h,s = signature_generation(n, m, P, sA_l)
####Register Long Term Key
#s, h = SignGen(str(stuID).encode(), curve, sCli_long)
mes = {'ID':stuID, 'H': h, 'S': s, 'LKEY.X': lkey.x, 'LKEY.Y': lkey.y}
response = requests.put('{}/{}'.format(API_URL, "RegLongRqst"), json = mes)
print(response.json())
print("Please enter your code:")
#code is 466773
code = int(input())
mes = {'ID':stuID, 'CODE': code}
response = requests.put('{}/{}'.format(API_URL, "RegLong"), json = mes)
print(response.json())
#Check Status
mes = {'ID_A':stuID, 'H': h, 'S': s}
response = requests.get('{}/{}'.format(API_URL, "Status"), json = mes)
print("Status ", response.json())
arraysA = [112184962276357808309568989833684271625049885675934630372866963801085964072493, 33584358211224784840202436168184815276628420769928064070743091943999268712786, 40726025470150288236659679056057720728221590797096143441172221355007043466450, 101381661083810846279577444932520014627629962066304212964928490092267766004985, 100594443061284668939798088235910436479618491421149817836807710501808402577492, 103568589245276105481949302052504652358633223871875756153798369465269147623829, 100051855146607783942326414928800209257532033065159727699014006828344258666423, 105040970101702829213395228783284792201809442061444673860747455870055614779455, 90156357612886126425473480757697158114559706965764952860166983492293539640483, 635398702918226938233284394615151078835074431754073593651417155565356312859]
arrayQAx = [82130022647859882453134084051369598210823951790545515364285068773611035505062, 51140706821905322921805595227209017018799214209971934540801379729473715539128, 49432472692951339492988178726505370500642699982361951313863393202596519914764, 36018325104317080292711623581486586963933141534504250517355266336334872881775, 76692236566180327558062509272400907882990103538569245665502423108051513335006, 69244633031946208542865994268283620303066389860002324026838412654858935857089, 60912054363237728725479112436389557995283036613828053875989391141033721671154, 9777050861158665235931399469284756599748691198285459487242387650264524106086, 71550389124668400681353157799625559428935445146334133779133788925648770731366, 95236147036073182418341514866602126427742987229922482216352098259662503571995]
arrayQAy = [99978483075519520341321215838600373635619019340293769668813125239291817052190, 109176970583477354468613775592241268156447296020122447619846616252849971527226, 41332704556124677749576587514370939479369122228554359024467723589101993498497, 111937169526343050247843961739629074374183481131752761679441414840787470387010, 31521753310428267762560716570334134560699001095409851645874368613812130826067, 83285583670825079302361649195684356772932386767124262353380806840970324007896, 66326982281265332508620837991901241925785044086964866582111351960359855191393, 5717418184376653044842346172847011511487124169152806246338268537374033277405, 34439977629883966899844059601494016249411403363018406998878545235430372004112, 45115106056023629667663131952612957462385127590246861803653084571856409210418]
for i in range(0,10):
#sA,QA = key_generation(n, P)
QA = arraysA[i]*P
mes = (str(QA.x)+str(QA.y)).encode()
# arraysA.append(sA)
# arrayQAx.append(QA.x)
# arrayQAy.append(QA.y)
hx, sx = signature_generation(n,mes,P,sA_l)
#Send Ephemeral keys
mes = {'ID': stuID, 'KEYID': i , 'QAI.X': QA.x, 'QAI.Y': QA.y, 'Si': sx, 'Hi': hx}
response = requests.put('{}/{}'.format(API_URL, "SendKey"), json = mes)
print(response.json())
### Get key of the Student B
m = str(stuID_B)
m = str.encode(m)
h1,s1 = signature_generation(n, m, P, sA_l)
mes = {'ID_A': stuID, 'ID_B':stuID_B, 'S': s1, 'H': h1}
response = requests.get('{}/{}'.format(API_URL, "ReqKey"), json = mes)
res = response.json()
print(res)
i = int(res['i'])
j = res['j']
QBj = Point(res['QBJ.x'] , res['QBJ.y'], curve)
#mesg to send
#mesg = "You can dance, you can jive"
#print("This is my message:", mesg)
for i in range(len(test)):
mesg = test[i]
print("This is my message:", mesg)
#calculations from pdf
T = arraysA[i]*QBj
U = str(T.x)+str(T.y)+"NoNeedToRunAndHide"
U = str.encode(U)
K_ENC = SHA3_256.new(U)
K_ENC = K_ENC.digest()
K_MAC = SHA3_256.new(K_ENC)
K_MAC = K_MAC.digest()
# Encyption
cipher = AES.new(K_ENC, AES.MODE_CTR)
ctext=str.encode(mesg)
ctext=cipher.encrypt(ctext)
#hmac calculation
hmac=HMAC.new(K_MAC,digestmod=SHA256)
hmac=hmac.update(ctext)
hmac=hmac.digest()
msg = cipher.nonce + ctext + hmac
msg = int.from_bytes(msg, byteorder="big")
### Send message to student B
mes = {'ID_A': stuID, 'ID_B':stuID_B, 'I': i, 'J':j, 'MSG': msg}
response = requests.put('{}/{}'.format(API_URL, "SendMsg"), json = mes)
print(response.json())
'''
## Get your message
mes = {'ID_A': stuID, 'S': s, 'H': h}
response = requests.get('{}/{}'.format(API_URL, "ReqMsg_PH3"), json = mes)
print(response.json())
if(response.ok): ## Decrypt message
res = response.json()
mes = res['MSG']
i = res['KEYID']
print("KEYID:",i)
QBj = Point(res['QBJ.X'] , res['QBJ.Y'], curve)
sa_m = arraysA[i]
print("sA for this message:",sa_m)
mes = mes.to_bytes((mes.bit_length()+7)//8, byteorder='big')
print("msg:", mes)
T = sa_m * QBj
print("T:",T)
U = str(T.x)+str(T.y)+"NoNeedToRunAndHide"
print("U:",U)
U = str.encode(U)
print("U_encode:",U)
K_ENC = SHA3_256.new(U)
K_ENC = K_ENC.digest()
print("kenc:",K_ENC)
K_MAC = SHA3_256.new(K_ENC)
K_MAC = K_MAC.digest()
print("k_mac:",K_MAC)
#decrypted msg
print("message:",mes)
cipher = AES.new(K_ENC, AES.MODE_CTR, nonce=mes[0:8])
dtext = cipher.decrypt(mes[8:-32]).decode()
#dtext = str(dtext)
print("ciphertext:", dtext)
#hmac calculation
temp = mes[8:len(mes)-32]
hmac2=HMAC.new(K_MAC,digestmod=SHA256)
hmac2=hmac2.update(temp)
hmac2=hmac2.digest()
print("hmac:",hmac2)
'''
#####Reset Ephemeral Keys
'''
#s, h = SignGen("18007".encode(), curve, sCli_long)
mes = {'ID': stuID, 'S': s, 'H': h}
print(mes)
response = requests.get('{}/{}'.format(API_URL, "RstEKey"), json = mes)
print(response.json())
'''
'''
#####Reset Long Term Key
mes = {'ID': stuID}
response = requests.get('{}/{}'.format(API_URL, "RstLongRqst"), json = mes)
print(response.json())
code = int(input())
mes = {'ID': stuID ,'CODE': code}
response = requests.get('{}/{}'.format(API_URL, "RstLong"), json = mes)
print(response.json())
''' | python |
from singly_linked_lists.remove_nth_node_from_list import remove_nth_from_end
from data_structures.singly_linked_list_node import SinglyLinkedListNode
def test_remove_nth_from_end():
head = SinglyLinkedListNode(1)
assert remove_nth_from_end(head, 1) is None
head = SinglyLinkedListNode(1)
head.next = SinglyLinkedListNode(2)
assert remove_nth_from_end(head, 2).data == 2
assert remove_nth_from_end(head, 1).data == 1
head = SinglyLinkedListNode(1)
head.next = SinglyLinkedListNode(2)
head.next.next = SinglyLinkedListNode(3)
head.next.next.next = SinglyLinkedListNode(4)
assert remove_nth_from_end(head, 2).next.next.data == 4
| python |
# Copyright (c) 2009 Alexandre Quessy, Arjan Scherpenisse
# See LICENSE for details.
"""
Tests for txosc/osc.py
Maintainer: Arjan Scherpenisse
"""
from twisted.trial import unittest
from twisted.internet import reactor, defer, task
from txosc import osc
from txosc import async
from txosc import dispatch
class TestGetAddressParts(unittest.TestCase):
"""
Test the getAddressParts function.
"""
def testGetAddressParts(self):
addresses = {
"/foo": ["foo"],
"/foo/bar": ["foo", "bar"],
"/foo/bar/ham": ["foo", "bar", "ham"],
"/egg/[1-2]": ["egg", "[1-2]"],
"/egg/*": ["egg", "*"],
"/egg/?": ["egg", "?"],
}
for k, v in addresses.iteritems():
self.failUnlessEqual(osc.getAddressParts(k), v)
class TestArgumentCreation(unittest.TestCase):
"""
Test the L{osc.CreateArgument} function.
"""
def testCreateFromValue(self):
self.assertEquals(type(osc.createArgument(True)), osc.BooleanArgument)
self.assertEquals(type(osc.createArgument(False)), osc.BooleanArgument)
self.assertEquals(type(osc.createArgument(None)), osc.NullArgument)
self.assertEquals(type(osc.createArgument(123)), osc.IntArgument)
self.assertEquals(type(osc.createArgument(3.14156)), osc.FloatArgument)
# Unicode is not supported.
self.assertRaises(osc.OscError, osc.createArgument, u'test')
def testCreateFromTypeTag(self):
self.assertEquals(type(osc.createArgument(123, "T")), osc.BooleanArgument)
self.assertEquals(type(osc.createArgument(123, "F")), osc.BooleanArgument)
self.assertEquals(type(osc.createArgument(123, "N")), osc.NullArgument)
self.assertEquals(type(osc.createArgument(123, "I")), osc.ImpulseArgument)
self.assertEquals(type(osc.createArgument(123, "i")), osc.IntArgument)
self.assertEquals(type(osc.createArgument(123, "f")), osc.FloatArgument)
self.assertRaises(osc.OscError, osc.createArgument, 123, "?")
class TestArgument(unittest.TestCase):
"""
Encoding and decoding of a string argument.
"""
def testAbstractArgument(self):
a = osc.Argument(None)
self.assertRaises(NotImplementedError, a.toBinary)
self.assertRaises(NotImplementedError, a.fromBinary, "")
class TestBlobArgument(unittest.TestCase):
"""
Encoding and decoding of a string argument.
"""
def testToBinary(self):
self.assertEquals(osc.BlobArgument("").toBinary(), "\0\0\0\0\0\0\0\0")
self.assertEquals(osc.BlobArgument("a").toBinary(), "\0\0\0\1a\0\0\0")
self.assertEquals(osc.BlobArgument("hi").toBinary(), "\0\0\0\2hi\0\0")
self.assertEquals(osc.BlobArgument("hello").toBinary(), "\0\0\0\5hello\0\0\0")
def testFromBinary(self):
data = "\0\0\0\2hi\0\0\0\0\0\5hello\0\0\0"
first, leftover = osc.BlobArgument.fromBinary(data)
self.assertEquals(first.value, "hi")
self.assertEquals(leftover, "\0\0\0\5hello\0\0\0")
second, leftover = osc.BlobArgument.fromBinary(leftover)
self.assertEquals(second.value, "hello")
self.assertEquals(leftover, "")
# invalid formatted
self.assertRaises(osc.OscError, osc.BlobArgument.fromBinary, "\0\0\0") # invalid length packet
self.assertRaises(osc.OscError, osc.BlobArgument.fromBinary, "\0\0\0\99")
class TestStringArgument(unittest.TestCase):
"""
Encoding and decoding of a string argument.
"""
def testToBinary(self):
self.assertEquals(osc.StringArgument("").toBinary(), "\0\0\0\0")
self.assertEquals(osc.StringArgument("OSC").toBinary(), "OSC\0")
self.assertEquals(osc.StringArgument("Hello").toBinary(), "Hello\0\0\0")
def testFromBinary(self):
data = "aaa\0bb\0\0c\0\0\0dddd"
first, leftover = osc.StringArgument.fromBinary(data)
#padding with 0 to make strings length multiples of 4 chars
self.assertEquals(first.value, "aaa")
self.assertEquals(leftover, "bb\0\0c\0\0\0dddd")
second, leftover = osc.StringArgument.fromBinary(leftover)
self.assertEquals(second.value, "bb")
self.assertEquals(leftover, "c\0\0\0dddd")
third, leftover = osc.StringArgument.fromBinary(leftover)
self.assertEquals(third.value, "c")
self.assertEquals(leftover, "dddd")
class TestFloatArgument(unittest.TestCase):
def testToAndFromBinary(self):
binary = osc.FloatArgument(3.14159).toBinary()
float_arg = osc.FloatArgument.fromBinary(binary)[0]
#FIXME: how should we compare floats? use decimal?
if float_arg.value < 3.1415:
self.fail("value is too small")
if float_arg.value > 3.1416:
self.fail("value is too big")
self.assertRaises(osc.OscError, osc.FloatArgument.fromBinary, "\0\0\0") # invalid value
def testCasting(self):
# we should be able to cast the argument to float to get its float value
value = 3.14159
float_arg = osc.FloatArgument(value)
if float(float_arg) < 3.1415:
self.fail("value is too small")
if float(float_arg) > 3.1416:
self.fail("value is too big")
class TestIntArgument(unittest.TestCase):
def testToAndFromBinary(self):
def test(value):
int_arg = osc.IntArgument.fromBinary(osc.IntArgument(value).toBinary())[0]
self.assertEquals(int_arg.value, value)
test(0)
test(1)
test(-1)
        test((1 << 31) - 1)
test(-1<<31)
self.assertRaises(osc.OscError, osc.IntArgument.fromBinary, "\0\0\0") # invalid value
def testIntOverflow(self):
self.assertRaises(OverflowError, osc.IntArgument(1<<31).toBinary)
self.assertRaises(OverflowError, osc.IntArgument((-1<<31) - 1).toBinary)
class TestColorArgument(unittest.TestCase):
def testToAndFromBinary(self):
def _test(value):
color_arg = osc.ColorArgument.fromBinary(osc.ColorArgument(value).toBinary())[0]
self.assertEquals(color_arg.value, value)
_test((255, 255, 255, 255))
_test((0, 0, 0, 0))
self.assertRaises(osc.OscError, osc.ColorArgument.fromBinary, "\0\0\0") # invalid value
self.assertRaises(TypeError, osc.ColorArgument.toBinary, (-244, 0, 0, 0)) # invalid value
self.assertRaises(TypeError, osc.ColorArgument.toBinary, ()) # invalid value
class TestMidiArgument(unittest.TestCase):
def testToAndFromBinary(self):
def _test(value):
midi_arg = osc.MidiArgument.fromBinary(osc.MidiArgument(value).toBinary())[0]
self.assertEquals(midi_arg.value, value)
_test((255, 255, 255, 255))
_test((0, 0, 0, 0))
self.assertRaises(osc.OscError, osc.MidiArgument.fromBinary, "\0\0\0") # invalid value
self.assertRaises(TypeError, osc.MidiArgument.toBinary, (-244, 0, 0, 0)) # invalid value
self.assertRaises(TypeError, osc.MidiArgument.toBinary, ()) # invalid value
class TestTimeTagArgument(unittest.TestCase):
def testToBinary(self):
# 1 second since Jan 1, 1900
arg = osc.TimeTagArgument(1)
binary = arg.toBinary()
self.assertEquals(binary, "\0\0\0\1\0\0\0\0")
def testFromBinary(self):
# 1 second since Jan 1, 1900
self.assertEquals(1.0, osc.TimeTagArgument.fromBinary("\0\0\0\1\0\0\0\0")[0].value)
# immediately
self.assertEquals(True, osc.TimeTagArgument.fromBinary("\0\0\0\0\0\0\0\1")[0].value)
# error
self.assertRaises(osc.OscError, osc.TimeTagArgument.fromBinary, "\0\0\0\0\0\0")
def testToAndFromBinary(self):
# 1 second since Jan 1, 1900
def test(value):
timetag_arg, leftover = osc.TimeTagArgument.fromBinary(osc.TimeTagArgument(value).toBinary())
self.assertEquals(leftover, "")
self.assertTrue(abs(timetag_arg.value - value) < 1e-6)
test(1.0)
test(1.1331)
class TestMessage(unittest.TestCase):
def testComparisons(self):
osc.Message('/foo') == None
def testMessageStringRepresentation(self):
self.assertEquals("/hello", str(osc.Message("/hello")))
self.assertEquals("/hello ,i i:1 ", str(osc.Message("/hello", 1)))
self.assertEquals("/hello ,T T:True ", str(osc.Message("/hello", True)))
def testAddMessageArguments(self):
"""
Test adding arguments to a message
"""
m = osc.Message("/example", osc.IntArgument(33), osc.BooleanArgument(True))
self.assertEquals(m.arguments[0].value, 33)
self.assertEquals(m.arguments[1].value, True)
m = osc.Message("/example", 33, True)
self.assertEquals(m.arguments[0].value, 33)
self.assertEquals(m.arguments[1].value, True)
m = osc.Message("/example")
m.add(33)
self.assertEquals(m.arguments[0].value, 33)
self.assertEquals(m.arguments[0].typeTag, "i")
m.add(True)
self.assertEquals(m.arguments[1].typeTag, "T")
def testEquality(self):
self.assertEquals(osc.Message("/example"),
osc.Message("/example"))
self.assertNotEqual(osc.Message("/example"),
osc.Message("/example2"))
self.assertEquals(osc.Message("/example", 33),
osc.Message("/example", 33))
self.assertNotEqual(osc.Message("/example", 33),
osc.Message("/example", 34))
self.assertNotEqual(osc.Message("/example", 33),
osc.Message("/example", 33.0))
self.assertNotEqual(osc.Message("/example", 33),
osc.Message("/example", 33, True))
self.assertEquals(osc.Message("/example", 33, True),
osc.Message("/example", 33, True))
def testGetTypeTag(self):
m = osc.Message("/example")
self.assertEquals(m.getTypeTags(), "")
m.arguments.append(osc.StringArgument("egg"))
self.assertEquals(m.getTypeTags(), "s")
m.arguments.append(osc.StringArgument("spam"))
self.assertEquals(m.getTypeTags(), "ss")
def testToAndFromBinary(self):
self.assertRaises(osc.OscError, osc.Message.fromBinary, "invalidbinarydata..")
self.assertRaises(osc.OscError, osc.Message.fromBinary, "/example,invalidbinarydata..")
self.assertRaises(osc.OscError, osc.Message.fromBinary, "/hello\0\0,xxx\0")
def test(m):
binary = m.toBinary()
m2, leftover = osc.Message.fromBinary(binary)
self.assertEquals(leftover, "")
self.assertEquals(m, m2)
test(osc.Message("/example"))
test(osc.Message("/example", osc.StringArgument("hello")))
test(osc.Message("/example", osc.IntArgument(1), osc.IntArgument(2), osc.IntArgument(-1)))
test(osc.Message("/example", osc.BooleanArgument(True)))
test(osc.Message("/example", osc.BooleanArgument(False), osc.NullArgument(), osc.StringArgument("hello")))
test(osc.Message("/example", osc.ImpulseArgument()))
def testGetValues(self):
# tests calling txosc.osc.Message.getValues()
message = osc.Message("/foo", 2, True, 3.14159)
values = message.getValues()
self.failUnlessEqual(values[0], 2)
self.failUnlessEqual(values[1], True)
self.failUnlessEqual(values[2], 3.14159)
class TestBundle(unittest.TestCase):
def testEquality(self):
self.assertEquals(osc.Bundle(), osc.Bundle())
self.assertNotEqual(osc.Bundle(), None)
self.assertNotEqual(osc.Bundle([osc.Message("/hello")]),
osc.Bundle())
self.assertEquals(osc.Bundle([osc.Message("/hello")]),
osc.Bundle([osc.Message("/hello")]))
self.assertNotEqual(osc.Bundle([osc.Message("/hello")]),
osc.Bundle([osc.Message("/hello2")]))
def testToAndFromBinary(self):
self.assertRaises(osc.OscError, osc.Bundle.fromBinary, "invalidbinarydata..")
self.assertRaises(osc.OscError, osc.Bundle.fromBinary, "#bundle|invalidbinarydata..")
self.assertRaises(osc.OscError, osc.Bundle.fromBinary, "#bundle\0\0\0\0\1\0\0\0\0hello")
self.assertRaises(osc.OscError, osc.Bundle.fromBinary, "#bundle\0\0\0\0\1\0\0\0\0\0\0\0\5hellofdsfds")
def test(b):
binary = b.toBinary()
b2, leftover = osc.Bundle.fromBinary(binary)
self.assertEquals(leftover, "")
self.assertEquals(b, b2)
test(osc.Bundle())
test(osc.Bundle([osc.Message("/foo")]))
test(osc.Bundle([osc.Message("/foo"), osc.Message("/bar")]))
test(osc.Bundle([osc.Message("/foo"), osc.Message("/bar", osc.StringArgument("hello"))]))
nested = osc.Bundle([osc.Message("/hello")])
test(osc.Bundle([nested, osc.Message("/foo")]))
def testGetMessages(self):
m1 = osc.Message("/foo")
m2 = osc.Message("/bar")
m3 = osc.Message("/foo/baz")
b = osc.Bundle()
b.add(m1)
self.assertEquals(b.getMessages(), set([m1]))
b = osc.Bundle()
b.add(m1)
b.add(m2)
self.assertEquals(b.getMessages(), set([m1, m2]))
b = osc.Bundle()
b.add(m1)
b.add(osc.Bundle([m2]))
b.add(osc.Bundle([m3]))
self.assertEquals(b.getMessages(), set([m1, m2, m3]))
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 [email protected]
# Licensed under the MIT license (http://opensource.org/licenses/MIT)
from __future__ import absolute_import, division, print_function, unicode_literals
import yaml
import struct
import hexdump
import math
import re
import time
import os
import layout.mapped_keycodes as mapped_keycodes
from layout.common import *
from layout.scan_mode import *
from layout.rf_settings import *
from layout.device import *
from layout.ekc_data import EKCDataMain
RF_INFO_SIZE = 64
class Layout:
def __init__(self, layout, layout_id, layout_name):
self.id = layout_id
self.layers = try_get(layout, "layers", layout_name, val_type=list)
self.sub_matrix_sizes = []
self.name = layout_name
# optional default layer
if "default_layer" in layout:
self.default_layer = try_get(layout, "default_layer", layout_name, val_type=int)
else:
self.default_layer = 0
# first determine the layer structure for the keyboard
try:
self.layer_count = len(self.layers)
except:
raise ParseError("Expected at least one layer in {}".format(layout_name))
# check for at least one keyboard
try:
self.keyboard_count = len(self.layers[0])
except:
raise ParseError("Expected at least one keyboard device in 'layers' field of {}".format(layout_name))
# number of keys in keyboards
try:
self.sub_matrix_sizes = [len(kb) for kb in self.layers[0]]
except:
raise ParseError("Couldn't get keyboard sizes {}".format(layout_name))
# check that all the layers have the same dimensions
for layer_i in range(self.layer_count):
device_count_i = len(self.layers[layer_i])
if device_count_i != self.keyboard_count:
raise ParseError("Unbalanced layer structure in layout '{}'. "
" The first layer has '{}' devices, but the {} layer has '{}' devices."
                    .format(layout_name, self.keyboard_count,
num_to_ordinal_str(layer_i+1), device_count_i)
)
for device_i in range(self.keyboard_count):
expected_size = self.sub_matrix_sizes[device_i]
actual_size = len(self.layers[layer_i][device_i])
if actual_size != expected_size:
raise ParseError("Mismatching devices in layout '{}'. "
"The {} device has '{}' keycodes in the first layer, but "
"in the {} layer the same device has '{}' keycodes."
.format(
layout_name,
num_to_ordinal_str(device_i+1), expected_size,
num_to_ordinal_str(layer_i+1), actual_size
)
)
# total matrix size of layout
self.matrix_size = self.calc_total_matrix_size()
# which matrix_maps are used (if any) for the layout
if "matrix_maps" in layout:
self.matrix_maps = try_get(layout, "matrix_maps", layout_name, val_type=list)
if len(self.matrix_maps) != self.keyboard_count:
raise ParseError("In layout '{}', found '{}' maps in 'matrix_maps', "
"but found {} devices in its 'layers' list".found(
self.name, len(self.matrix_maps), self.keyboard_count))
else:
self.matrix_maps = None
def calc_total_matrix_size(self):
# total size need for the key matrices in the layout
matrix_size = 0
for size in self.sub_matrix_sizes:
matrix_size += int(math.ceil(size / 8))
if matrix_size > MAX_MATRIX_SIZE:
raise ParseError("Too many keys in layout '{}'".format(layout_name))
return matrix_size
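    # Rough worked example of the packing above (sizes are illustrative, not
    # from a real layout file): two sub-matrices of 61 keys each need
    # ceil(61/8) = 8 bytes apiece, so calc_total_matrix_size() returns 16.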
def check_layers(self, layout, debug_hint, layout_id):
# check all layers have the same number of keyboards and all
# keyboards have the same number of keys
for (l_i, layer) in enumerate(self.layers):
            if len(layer) != self.keyboard_count:
raise ParseError("'{}' has a mismatched number of keyboards "
"in its layers starting at layer '{}'".format(debug_hint, l_i))
for (kb_i, kb) in enumerate(layer):
if len(kb) != self.sub_matrix_sizes[kb_i]:
raise ParseError("'{}' has a mismatched number of keys "
"starting at keyboard '{}' of layer '{}'".format(debug_hint, kb_i, l_i))
class SettingsGenerator:
def __init__(self, layout_data, rf_settings):
self.layout = layout_data
self.rf = rf_settings
self.ekc_data = EKCDataMain()
self.build_device_data()
def gen_single_layout(self, layout):
result = bytearray(0)
for layer in layout.layers:
for (kb_i, kb) in enumerate(layer):
kc_map = None
size = 0
# # Check for 'matrix_maps'. It is a list of device names with
# # one for each sub-matrix in the layout. The matrix_map is
# # used to map the keys from how they are "visually arranged" to
# # to how they are physically wired.
# # The matrix_maps is optional. If it is not given, then the
# # list of keys in the matrix will match how they are physically
# # wired.
# if layout.matrix_maps != None:
# map_name = layout.matrix_maps[kb_i]
# try:
# map_device = self.get_device_by_name(map_name)
# kc_map = map_device.scan_mode.matrix_map
# sm = map_device.scan_mode
# size = sm.rows * sm.cols
# size = int(math.ceil(len(kb)/8))*8 # pad to multiple of 8
# except:
# raise ParseError("Couldn't find matrix_map for '{}' in"
# " layout '{}'".format(map_name, layout.name))
# if len(kc_map) != len(kb):
# raise ParseError("The matrix_map for '{}' has '{}' "
# "keys, but the corresponding matrix in the layout "
# "'{}' has '{}' keys".format(
# map_name, len(kc_map),
# layout.name, len(kb)))
# else:
# # no map given, so generate a list that is a 1-to-1 mapping
# kc_map = list(range(len(kb)))
# size = int(math.ceil(len(kb)/8))*8 # pad to multiple of 8
size = int(math.ceil(len(kb)/8))*8 # pad to multiple of 8
keycodes = [0] * size
for (kc_i, kc_str) in enumerate(kb):
kc = mapped_keycodes.interpret_keycode(kc_str)
keycodes[kc_i] = kc
# pack all the keycodes as uint16_t
for kc in keycodes:
result += struct.pack('<H', kc)
return result
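    # Worked example of the keycode packing above (values are illustrative): a
    # 3-key keyboard pads to 8 slots, so a layer of [0x04, 0x05, 0x06] becomes
    # b'\x04\x00\x05\x00\x06\x00' followed by five zeroed keycodes (b'\x00\x00').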
def gen_layout_section(self, dev_id):
# Layout section has the format
# matrix_keynumber_map for this specific device[rows * cols]
# layouts for all devices
self.build_device_data()
result = bytearray(0)
dev_data = self.get_device_by_id(dev_id)
if dev_data.scan_mode.mode != ScanMode.NO_MATRIX:
# Add matrix map to the layout section
for key_num in dev_data.scan_mode.inverse_map:
result += struct.pack('<B', key_num)
# Add ekc data to the layout section
result += self.ekc_data.to_bytes()
for layout_id in range(self.number_layouts):
layout = self.get_layout_by_id(layout_id)
result += self.gen_single_layout(layout)
return result
def gen_settings_section(self, device_id):
result = bytearray(0);
result += self.gen_global_settings(device_id)
result += self.gen_layout_settings()
result += self.gen_rf_settings()
return result
def gen_global_settings(self, device_id):
# uint8_t device_id;
# char device_name[32];
# uint8_t timestamp[8]; // utc time stamp of last update
# uint8_t default_report_mode;
# uint8_t scan_mode;
# uint8_t row_count;
# uint8_t col_count;
# uint8_t _reserved[51]; // total size == 96
result = bytearray(0)
device = self.get_device_by_id(device_id)
# device_id
result += struct.pack('<B', device.id)
        # device_name
result += struct.pack('<32s', device.name.encode('utf-8'))
# build timestamp, 64 bit UTC
result += struct.pack('<q', int(time.time()) )
# default_report_mode
result += struct.pack('<B', self.get_report_mode())
# scan mode information
result += self.gen_scan_mode_info(device_id)
result += bytearray(51)
return result
def parse_layouts(self):
self.layout_data = {}
layout_id = 0
for (layout_name, layout) in try_get(self.layout, 'layouts').items():
self.layout_data[layout_name] = Layout(layout, layout_id, layout_name)
layout_id += 1
self.number_layouts = layout_id
def get_layout_by_id(self, layout_id):
for (_, layout) in self.layout_data.items():
if layout.id == layout_id:
return layout
raise ParseError("Couldn't find layout with id: {}".format(layout_id))
def get_layout_by_name(self, layout_name):
if layout_name in self.layout_data:
return self.layout_data[layout_name]
raise ParseError("Couldn't find layout with name: {}".format(layout_name))
def get_device_by_name(self, device_name):
if device_name in self.device_name_map:
dev_id = self.device_name_map[device_name]
return self.device_data[dev_id]
else:
raise ParseError("Couldn't find device named: {}".format(device_name))
def get_device_by_id(self, dev_id):
if dev_id in self.device_data:
return self.device_data[dev_id]
else:
raise ParseError("Couldn't find device with id: {}".format(dev_id))
def parse_devices(self):
self.device_data = {}
self.device_name_map = {}
self.largest_device_id = 0
for (device_name, device_data) in try_get(self.layout, 'devices').items():
dev = Device.from_json_obj(device_data, device_name)
self.assert_validate_device(dev, device_name)
self.device_data[dev.id] = dev
self.device_name_map[device_name] = dev.id
self.largest_device_id = max(self.largest_device_id, dev.id)
def assert_validate_device(self, dev, device_name):
if dev.scan_mode.mode == ScanMode.NO_MATRIX:
return
if not dev.id < MAX_DEVICE_ID:
raise ParseError("Device id '{}' too large. Max allowed value is {}"
.format(dev.id, MAX_DEVICE_ID))
# if not dev.id in self.device_data:
# raise ParseError("Tried to build layout for device id '{}', but no"
# " matching device was found in the layout file."
# .format(dev.id))
# check layout identifier
if not dev.layout_name in self.layout_data:
raise ParseError("Couldn't find layout with name '{}' for "
"keyboard '{}'".format(dev.layout_name, device_name))
if (dev.id in self.device_data):
raise ParseError("Duplicate device id '{}' used in both "
"'{}' and '{}'".format(dev.id, device_name, self.device_data[dev.id].name))
# check layout offset
offset_max = self.layout_data[dev.layout_name].keyboard_count
if not dev.layout_offset < offset_max:
raise ParseError("'layout_offset' too large. Got '{}' but "
"'{}' only has {} device in its layout".format(dev.layout_offset, dev.layout_name, offset_max))
def build_device_data(self):
self.parse_layouts()
self.parse_devices()
def gen_layout_settings(self):
# uint8_t number_layouts;
# uint8_t number_devices;
# uint8_t _reserved[30]; // 32
# keyboard_info_t layouts[64];
# device_info_t devices[64];
result = bytearray(0)
result += struct.pack('<B', self.number_layouts)
result += struct.pack('<B', self.largest_device_id)
result += bytearray(30)
# layout_info_t {
# uint8_t matrix_size;
# uint8_t layer_count;
# }[64]
for layout_id in range(MAX_LAYOUT_ID):
if layout_id >= self.number_layouts:
result += bytearray(2)
continue
layout = self.get_layout_by_id(layout_id)
layout_name = layout.name
# calculate how many bytes are needed for the matrix.
# each keyboard in the layout needs ceil(kb_size/8)
result += struct.pack('<B', layout.matrix_size)
result += struct.pack('<B', layout.layer_count)
# typedef struct device_info_t {
# uint8_t keyboard_id; // the keyboard layout that this device maps to
# uint8_t matrix_offset; // the component byte offset into the given keyboard
# uint8_t matrix_size; // the size of this component == ceil(rows*cols/8)
# } [64]
for device_id in range(MAX_DEVICE_ID):
if not device_id in self.device_data or \
self.device_data[device_id].scan_mode.mode == ScanMode.NO_MATRIX:
result += bytearray(3)
continue
device = self.device_data[device_id]
layout = self.layout_data[device.layout_name]
layout_id = layout.id
# TODO: validate this value
matrix_size = device.scan_mode.calc_matrix_size()
keyboard_offset = device.layout_offset
matrix_offset = 0
for (i, size) in enumerate(layout.sub_matrix_sizes):
if not i < keyboard_offset:
break;
matrix_offset += int(math.ceil(size / 8))
if matrix_offset + matrix_size > layout.matrix_size:
raise ParseError("The matrix for device '{}' doesn't fit in "
"layout '{}'".format(device.name, layout.name))
result += struct.pack('<B', layout_id)
result += struct.pack('<B', matrix_offset)
result += struct.pack('<B', matrix_size)
return result
def gen_rf_settings(self):
if self.rf == None:
return bytearray([0xff] * RF_INFO_SIZE)
else:
rf_settings = RFSettings.from_json_obj(self.rf)
return rf_settings.to_bytes()
def get_report_mode(self):
mode = try_get(self.layout, 'report_mode')
# KEYBOARD_REPORT_MODE_AUTO = 0, // 6kro -> nkro if more than 6 keys pressed
# KEYBOARD_REPORT_MODE_NKRO = 1, // nkro
# KEYBOARD_REPORT_MODE_6KRO = 2, // 6kro
if mode == "auto_nkro":
return KEYBOARD_REPORT_MODE_AUTO
elif mode == "6kro":
return KEYBOARD_REPORT_MODE_6KRO
elif mode == "nkro":
return KEYBOARD_REPORT_MODE_NKRO
else:
raise ParseError("Unknown report mode {}".format(mode))
def get_scan_mode(self, device_id):
for (kb_name, kb) in try_get(self.layout, 'devices').items():
if (try_get(kb, 'id', kb_name) == device_id):
return try_get(kb, 'scan_mode', kb_name)
raise ParseError("No device defined for id={}".format(device_id))
def gen_scan_mode_info(self, device_id):
scan_mode = self.get_scan_mode(device_id)
mode = try_get(scan_mode, 'mode', 'scan_mode')
if mode == 'none' or mode == 'no_matrix':
return struct.pack('<BBB', MATRIX_SCANNER_MODE_NONE, 0, 0)
elif mode == 'col_row':
rows = try_get(scan_mode, 'rows', 'scan_mode')
cols = try_get(scan_mode, 'cols', 'scan_mode')
return struct.pack('<BBB', MATRIX_SCANNER_MODE_COL_ROW, rows, cols)
elif mode == 'pins':
# count = scan_mode['pin_count']
# return struct.pack('<BBB', MATRIX_SCANNER_MODE_PINS, count, 0)
raise ParseError("TODO: 'pins' scan mode not implemented yet")
else:
raise ParseError("Unsupported scan mode {}".format(mode))
if __name__ == "__main__":
layout = None
rf = None
with open("test_layout.yaml") as file_name:
layout = yaml.safe_load(file_name.read())
with open("test_rf_config.yaml") as file_name:
rf = yaml.safe_load(file_name.read())
settings = SettingsGenerator(layout_data=layout, rf_settings=rf)
target_layout_id = 0x30
print("settings:")
try:
hexdump.hexdump(bytes(settings.gen_settings_section(target_layout_id)))
except ParseError as e:
print(e)
# print(e.with_traceback())
exit(1)
print("layout:")
try:
hexdump.hexdump(bytes(settings.gen_layout_section(target_layout_id)))
except ParseError as e:
print(e)
exit(1)
print()
print()
settings = RFSettings.from_rand()
print(settings.to_json_obj())
print(settings.to_yaml())
hexdump.hexdump(settings.to_bytes())
| python |
# This file adds code completion to the auto-generated pressuresense_pb2 file.
from .pressuresense_pb2 import PressureQuanta, PressureLog
from .common_proto import _TimeStamp
from typing import List, Callable, Union
class _PressureProfile( object ):
mpa = 0
class _PressureQuanta( object ):
profiles = _PressureProfile() # type: _PressureProfile
time = _TimeStamp() # type: _TimeStamp
PressureQuanta = PressureQuanta # type: Callable[[],_PressureQuanta]
class _PressureLog( object ):
class QuantasList(list):
def add(self): # type: (...)->_PressureQuanta
return self[0]
quantas = QuantasList() # type: Union[List[_PressureQuanta],QuantasList]
def ParseFromString(self, string):
return self
def SerializeToString(self):
return ""
PressureLog = PressureLog # type: Callable[[],_PressureLog] | python |
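# Hypothetical usage sketch showing what the annotations above buy you in an
# editor (`raw_bytes` is a placeholder, not something this module provides):
#
#   log = PressureLog()                 # editor treats this as _PressureLog
#   log.ParseFromString(raw_bytes)
#   for quanta in log.quantas:          # completion on quantas/time/profiles
#       print(quanta.time, quanta.profiles.mpa)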
import sys
import logging
import argparse
from pprint import pprint
from . import *
def dumpSubject(cert):
info = getSubjectFromCertFile(cert)
pprint(info, indent=2)
def main():
services = ",".join(LOGIN_SERVICE.keys())
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-l', '--log_level',
action='count',
default=0,
help='Set logging level, multiples for more detailed.')
parser.add_argument('-C','--cert',
default=None,
help='Show information for existing certificate')
parser.add_argument('-s', '--service',
default='production',
help='Service to login, one of ({})'.format(services))
parser.add_argument('-j', '--jnlp',
default=None,
help='Process specified JNLP file')
parser.add_argument('-t', '--ttl',
default=None,
help='Certificate lifetime in seconds, use JNLP default if not set')
args = parser.parse_args()
# Setup logging verbosity
levels = [logging.WARNING, logging.INFO, logging.DEBUG]
level = levels[min(len(levels) - 1, args.log_level)]
logging.basicConfig(level=level,
format="%(asctime)s %(levelname)s %(message)s")
if args.cert is not None:
cert_file = args.cert
if cert_file == "default":
            cert_file = getDefaultCertificatePath()
dumpSubject(cert_file)
sys.exit(0)
if args.service not in LOGIN_SERVICE.keys():
logging.error("Uknown service: %s", args.service)
sys.exit(1)
cert_file = None
if args.jnlp is not None:
cert_file = grid_shib.retrieveCertificate(args.jnlp,
getDefaultCertificatePath(),
lifetime_seconds=args.ttl)
else:
cert_file = login(overwrite=True,
service=LOGIN_SERVICE[args.service],
lifetime_seconds=args.ttl )
print("Certificate downloaded to: {}\n".format(cert_file))
print("Certificate info:")
dumpSubject(cert_file)
if __name__ == "__main__":
main() | python |
'''
knowyourmeme.com image crawler:
-------------------------------------------
Script designed specifically to crawl meme templates for use in ML (and for self-enjoyment).
url: https://knowyourmeme.com/photos/templates/page/<page_number>
So, as you can see, we are lucky enough that knowyourmeme has pagination here
IMPORTANT: check robots.txt
* http://www.useragentstring.com/pages/useragentstring.php
* https://knowyourmeme.com/robots.txt
Also, check that the folder where you are going to save the images already exists...
too lazy to write something that creates the folder
'''
from bs4 import BeautifulSoup as bs
import requests
import shutil
import json
import time
import sys
import os
url = 'https://knowyourmeme.com'
img_save_path = 'templates/'
json_save_path = 'data.json'
paging_path = '/photos/templates/page/'
headers = {'User-Agent':'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)'}
pages = 47 #remember to check number of pages beforehand
ids = 1
structure = {}
#crawls template and tags
def img_crawls(template_path, headers):
site_url = url + template_path
t0 = time.time()
r = requests.get(site_url, headers = headers)
response_delay = time.time()-t0
data = r.text
soup = bs(data, 'lxml')
section = soup.body.find(id='content')
left = section.find(id='maru')
right = section.find(class_='right').select('.sidebar_box')[0]
template_url = left.select('div#photo_wrapper a')[0]['href']
taglist = right.select('p#tag_list a')
tags = [str(tag.string) for tag in taglist]
time.sleep(10*response_delay)
return {'site_url': site_url,
'template_url': template_url,
'tags': tags}
for i in range(1,pages):
page_url = url + paging_path + str(i)
r = requests.get(page_url, headers = headers)
data = r.text
soup = bs(data,'lxml')
section = soup.body.find(id='content').find(id='maru').find(id="infinite-scroll-wrapper")
urls = section.select("div.item a")
for template in urls:
template_path = template['href']
info = img_crawls(template_path, headers)
print(info['site_url']) #### DEBUG
# store
structure[ids]=info
img_type = '.' + info['template_url'].split('.')[-1]
if not img_type in ['.jpg','.png','.jpeg'] :
img_type='.jpeg'
img_get = requests.get(info['template_url'], stream = True)
with open(img_save_path + str(ids) + img_type, 'wb') as out_file:
shutil.copyfileobj(img_get.raw, out_file)
print('Image '+str(ids)+' crawled...') #### DEBUG
del img_get
ids+=1
time.sleep(5)
with open(json_save_path,'w') as out_file:
json.dump(structure,out_file)
| python |
# Generated by Django 2.2.1 on 2019-06-03 04:58
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('Profesor', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Estudiante',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('apellido', models.CharField(max_length=100)),
('edad', models.IntegerField()),
('sexo', models.CharField(max_length=100)),
('direccion', models.CharField(max_length=250)),
('matricula', models.IntegerField()),
('numeroTelefonico', models.IntegerField()),
('fechaNacimiento', models.DateField(default=django.utils.timezone.now)),
('materia', models.CharField(max_length=100)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('profesor', models.ForeignKey(on_delete=models.SET(-1), to='Profesor.Profesor')),
],
options={
'db_table': 'Estudiante',
},
),
]
| python |
import sys
sys.path.append(".")
import numpy as np
from DDPG import *
from main import *
import os.path
import argparse
from Environment import Environment
from shield import Shield
def carplatoon(learning_method, number_of_rollouts, simulation_steps, learning_episodes, actor_structure, critic_structure, train_dir, \
    nn_test=False, retrain_shield=False, shield_test=False, test_episodes=100, retrain_nn=False):
A = np.matrix([
[0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0],
[0, 0,1, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0],
[0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0],
[0, 0,0, 0,1, 0,0, 0,0, 0,0, 0,0, 0,0],
[0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0],
[0, 0,0, 0,0, 0,1, 0,0, 0,0, 0,0, 0,0],
[0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0],
[0, 0,0, 0,0, 0,0, 0,1, 0,0, 0,0, 0,0],
[0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0],
[0, 0,0, 0,0, 0,0, 0,0, 0,1, 0,0, 0,0],
[0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0],
[0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,1, 0,0],
[0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0],
[0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,1],
[0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0]
])
B = np.matrix([
[1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[1, -1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, -1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, -1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, -1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, -1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, -1, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, -1],
])
  #initial state space
s_min = np.array([[ 19.9],[ 0.9], [-0.1], [ 0.9],[-0.1], [ 0.9], [-0.1], [ 0.9], [-0.1], [ 0.9],[-0.1], [ 0.9], [-0.1], [ 0.9], [-0.1]])
s_max = np.array([[ 20.1],[ 1.1], [ 0.1], [ 1.1],[ 0.1], [ 1.1], [ 0.1], [ 1.1], [ 0.1], [ 1.1],[ 0.1], [ 1.1], [ 0.1], [ 1.1], [ 0.1]])
x_min = np.array([[18],[0.1],[-1],[0.5],[-1],[0.5],[-1],[0.5],[-1],[0.5],[-1],[0.5],[-1],[0.5],[-1]])
x_max = np.array([[22],[1.5], [1],[1.5],[ 1],[1.5],[ 1],[1.5], [1],[1.5],[ 1],[1.5],[ 1],[1.5],[ 1]])
u_min = np.array([[-10.], [-10.], [-10.], [-10.], [-10.], [-10.], [-10.], [-10.]])
u_max = np.array([[ 10.], [ 10.], [ 10.], [ 10.], [ 10.], [ 10.], [ 10.], [ 10.]])
target = np.array([[20],[1], [0], [1], [0], [1], [0], [1], [0], [1], [0], [1], [0], [1], [0]])
s_min -= target
s_max -= target
x_min -= target
x_max -= target
Q = np.zeros((15, 15), float)
np.fill_diagonal(Q, 1)
R = np.zeros((8,8), float)
np.fill_diagonal(R, 1)
env = Environment(A, B, u_min, u_max, s_min, s_max, x_min, x_max, Q, R, continuous=True, bad_reward=-1000)
if retrain_nn:
args = { 'actor_lr': 0.000001,
'critic_lr': 0.00001,
'actor_structure': actor_structure,
'critic_structure': critic_structure,
'buffer_size': 1000000,
'gamma': 0.999,
'max_episode_len': 400,
'max_episodes': 1000,
'minibatch_size': 64,
'random_seed': 122,
'tau': 0.005,
'model_path': train_dir+"retrained_model.chkp",
'enable_test': nn_test,
'test_episodes': test_episodes,
'test_episodes_len': 1200}
else:
args = { 'actor_lr': 0.000001,
'critic_lr': 0.00001,
'actor_structure': actor_structure,
'critic_structure': critic_structure,
'buffer_size': 1000000,
'gamma': 0.999,
'max_episode_len': 400,
             'max_episodes': learning_episodes,
'minibatch_size': 64,
'random_seed': 122,
'tau': 0.005,
'model_path': train_dir+"model.chkp",
'enable_test': nn_test,
'test_episodes': test_episodes,
'test_episodes_len': 1200}
actor = DDPG(env, args)
#################### Shield #################
model_path = os.path.split(args['model_path'])[0]+'/'
linear_func_model_name = 'K.model'
model_path = model_path+linear_func_model_name+'.npy'
def rewardf(x, Q, u, R):
return env.reward(x, u)
names = {0:"x0", 1:"x1", 2:"x2", 3:"x3", 4:"x4", 5:"x5", 6:"x6", 7:"x7", 8:"x8", 9:"x9", 10:"x10", 11:"x11", 12:"x12", 13:"x13", 14:"x14"}
shield = Shield(env, actor, model_path, force_learning=retrain_shield)
shield.train_shield(learning_method, number_of_rollouts, simulation_steps, rewardf=rewardf, names=names, explore_mag = 0.1, step_size = 0.1, enable_jit=True)
if shield_test:
shield.test_shield(test_episodes, 1200)
actor.sess.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Running Options')
parser.add_argument('--nn_test', action="store_true", dest="nn_test")
parser.add_argument('--retrain_shield', action="store_true", dest="retrain_shield")
parser.add_argument('--shield_test', action="store_true", dest="shield_test")
parser.add_argument('--test_episodes', action="store", dest="test_episodes", type=int)
parser.add_argument('--retrain_nn', action="store_true", dest="retrain_nn")
parser_res = parser.parse_args()
nn_test = parser_res.nn_test
retrain_shield = parser_res.retrain_shield
shield_test = parser_res.shield_test
test_episodes = parser_res.test_episodes if parser_res.test_episodes is not None else 100
retrain_nn = parser_res.retrain_nn
carplatoon("random_search", 500, 2000, 0, [400, 300, 200], [500, 400, 300, 200], "ddpg_chkp/car-platoon/continuous/8/400300200500400300200/",
nn_test=nn_test, retrain_shield=retrain_shield, shield_test=shield_test, test_episodes=test_episodes, retrain_nn=retrain_nn) | python |
#!/usr/bin/python3
#
# Scratchpad for working with raw U2F messages, useful for creating raw messages as test data.
# Example keys from section 8.2 of
# https://fidoalliance.org/specs/fido-u2f-v1.0-nfc-bt-amendment-20150514/fido-u2f-raw-message-formats.html#authentication-response-message-success
from binascii import hexlify, unhexlify
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
sig_alg = ec.ECDSA(hashes.SHA256())
private_key_hex = 'ffa1e110dde5a2f8d93c4df71e2d4337b7bf5ddb60c75dc2b6b81433b54dd3c0'
public_key_hex = '04d368f1b665bade3c33a20f1e429c7750d5033660c019119d29aa4ba7abc04aa7c80a46bbe11ca8cb5674d74f31f8a903f6bad105fb6ab74aefef4db8b0025e1d'
example_payload_hex = '4b0be934baebb5d12d26011b69227fa5e86df94e7d94aa2949a89f2d493992ca0100000001ccd6ee2e47baef244d49a222db496bad0ef5b6f93aa7cc4d30c4821b3b9dbc57'
example_signature_hex = '304402204b5f0cd17534cedd8c34ee09570ef542a353df4436030ce43d406de870b847780220267bb998fac9b7266eb60e7cb0b5eabdfd5ba9614f53c7b22272ec10047a923f'
s = int(private_key_hex, 16)
x = int(public_key_hex[2:66], 16)
y = int(public_key_hex[66:], 16)
keynums = ec.EllipticCurvePrivateNumbers(s, ec.EllipticCurvePublicNumbers(x, y, ec.SECP256R1()))
private_key = keynums.private_key(default_backend())
public_key = private_key.public_key()
# Just ensure that we can successfully verify the example signature against the example key
public_key.verify(unhexlify(example_signature_hex), unhexlify(example_payload_hex), sig_alg)
# Successful authentication message, but with invalid user presence byte
payload_hex = '4b0be934baebb5d12d26011b69227fa5e86df94e7d94aa2949a89f2d493992ca0000000001ccd6ee2e47baef244d49a222db496bad0ef5b6f93aa7cc4d30c4821b3b9dbc57'
payload_signature = private_key.sign(unhexlify(payload_hex), sig_alg)
print("Private key:", private_key_hex)
print("Public key:", public_key_hex)
print("Signing payload:", payload_hex)
print("Signature:", hexlify(payload_signature))
| python |
# encoding: utf-8
from workflow import web, Workflow, PasswordNotFound
def get_saved_searches(api_key, url):
"""
    Fetch all pages of saved search objects from the Kibana saved objects API
:return: list
"""
return get_saved_searches_page(api_key, url, 1, [])
def get_dashboards(api_key, url):
"""
Parse all pages of projects
:return: list
"""
return get_dashboard_page(api_key, url, 1, [])
def get_saved_searches_page(api_key, url, page, list):
log.info("Calling searches API page {page}".format(page=page))
params = dict(type='search', per_page=100, page=page, search_fields='title')
headers = {'accept-encoding':'gzip'}
r = web.get(url + '/api/saved_objects/', params, headers)
# throw an error if request failed
# Workflow will catch this and show it to the user
r.raise_for_status()
# Parse the JSON returned by Kibana and extract the saved objects
result = list + r.json()['saved_objects']
nextpage = r.headers.get('X-Next-Page')
if nextpage:
result = get_saved_searches_page(api_key, url, nextpage, result)
return result
def get_dashboard_page(api_key, url, page, list):
log.info("Calling dashboards API page {page}".format(page=page))
params = dict(type='dashboard', per_page=100, page=page, search_fields='title')
headers = {'accept-encoding':'gzip'}
r = web.get(url + '/api/saved_objects/', params, headers)
# throw an error if request failed
# Workflow will catch this and show it to the user
r.raise_for_status()
# Parse the JSON returned by Kibana and extract the saved objects
result = list + r.json()['saved_objects']
nextpage = r.headers.get('X-Next-Page')
if nextpage:
result = get_dashboard_page(api_key, url, nextpage, result)
return result
def main(wf):
try:
api_url = wf.settings.get('api_url')
# A wrapper function for the cached call below
def search_wrapper():
return get_saved_searches('', api_url)
def dashboard_wrapper():
return get_dashboards('', api_url)
saved_searches = wf.cached_data('saved_searches', search_wrapper, max_age=3600)
dashboards = wf.cached_data('dashboards', dashboard_wrapper, max_age=3600)
# Record our progress in the log file
log.debug('{} kibana searches cached'.format(len(saved_searches)))
log.debug('{} kibana dashboards cached'.format(len(dashboards)))
except PasswordNotFound: # API key has not yet been set
# Nothing we can do about this, so just log it
wf.logger.error('No API key saved')
if __name__ == u"__main__":
wf = Workflow()
log = wf.logger
wf.run(main) | python |
##############################################################################
# Written by: Cachen Chen <[email protected]>
# Date: 08/05/2008
# Description: hscrollbar.py wrapper script
# Used by the hscrollbar-*.py tests
##############################################################################
import sys
import os
import actions
import states
from strongwind import *
from hscrollbar import *
# class to represent the main window.
class HScrollBarFrame(accessibles.Frame):
# constants
# the available widgets on the window
LABEL = "Value:"
MAXVAL = 100
MINVAL = 0
MININCREMENT = 10
def __init__(self, accessible):
super(HScrollBarFrame, self).__init__(accessible)
self.label = self.findLabel(self.LABEL)
self.hscrollbar = self.findScrollBar(None)
self.maximumValue = \
self.hscrollbar._accessible.queryValue().maximumValue
self.minimumValue = \
self.hscrollbar._accessible.queryValue().minimumValue
self.minimumIncrement = \
self.hscrollbar._accessible.queryValue().minimumIncrement
# BUG499883 - Accessible maximum value of a scroll bar is 119
#assert self.maximumValue == self.MAXVAL, \
# "maximum value was %s, expected %s" % \
# (self.maximumValue, self.MAXVAL)
assert self.minimumValue == self.MINVAL, \
"minimum value was %s, expected %s" % \
(self.minimumValue, self.MINVAL)
assert self.minimumIncrement == self.MININCREMENT, \
"minimum increment value was %s, expected %s" % \
(self.minimumIncrement, self.MININCREMENT)
# change hscrollbar's value
def assignScrollBar(self, new_value):
procedurelogger.action('set scrollbar value to "%s"' % new_value)
self.hscrollbar.value = new_value
def assertLabel(self, value):
procedurelogger.expectedResult('label\'s value changed to "%s"' % value)
expected_label = "Value: %s" % value
assert self.label.text == expected_label, \
'Label reads "%s", expected "%s"' % (self.label, expected_label)
def assertMaximumValue(self):
procedurelogger.action("Ensure that %s's maximum value is what we expect" % self.hscrollbar)
procedurelogger.expectedResult("%s's maximum value is %s" % \
(self.hscrollbar, self.MAXVAL))
self.maximumValue = \
self.hscrollbar._accessible.queryValue().maximumValue
assert self.maximumValue == self.MAXVAL, \
"Maximum value is %s, expected %s" % \
(self.maximumValue, self.MAXVAL)
def assertMinimumValue(self):
procedurelogger.action("Ensure that %s's minimum value is what we expect" % self.hscrollbar)
procedurelogger.expectedResult("%s's minimum value is %s" % \
(self.hscrollbar, self.MINVAL))
self.minimumValue = \
self.hscrollbar._accessible.queryValue().minimumValue
assert self.minimumValue == self.MINVAL, \
"Minimum value is %s, expected %s" % \
(self.minimumValue, self.MINVAL)
def assertMinimumIncrement(self):
procedurelogger.action("Ensure that %s's minimum increment is what we expect" % self.hscrollbar)
procedurelogger.expectedResult("%s's minimum increment is %s" % \
(self.hscrollbar, self.MINVAL))
self.minimumIncrement = \
self.hscrollbar._accessible.queryValue().minimumIncrement
assert self.minimumIncrement == self.MININCREMENT, \
"minimum increment value was %s, expected %s" % \
(self.minimumIncrement, self.MININCREMENT)
def assertScrollBar(self, expected_value):
procedurelogger.expectedResult('the scrollbar\'s current value is %s' % expected_value)
assert self.hscrollbar.value == expected_value, \
"scrollbar's current value is %s, expected %s" % \
(self.hscrollbar.value, expected_value)
# close application window
def quit(self):
self.altF4()
| python |
from dotenv import load_dotenv
import os
load_dotenv(verbose=True)
DISCORD_BOT_TOKEN = os.getenv('DISCORD_BOT_TOKEN') | python |
# Time: O(log n)
# Space: O(log n) call stack (recursion depth of the binary search)
from typing import List
class Solution:
def searchRange(self, nums, target):
first = self.binarySearch(nums, 0, len(nums) - 1, target, True)
last = self.binarySearch(nums, 0, len(nums) - 1, target, False)
return [first, last]
def binarySearch(self, nums, low, high, target, findFirst):
if high < low:
return -1
mid = low + (high - low) // 2
if findFirst:
if ((mid == 0 or target > nums[mid - 1]) and nums[mid] == target):
return mid
elif (target > nums[mid]):
return self.binarySearch(nums, (mid + 1), high, target, findFirst)
else:
return self.binarySearch(nums, low, (mid - 1), target, findFirst)
else:
if ((mid == len(nums) - 1 or target < nums[mid + 1]) and nums[mid] == target):
return mid
elif (target < nums[mid]):
return self.binarySearch(nums, low, (mid - 1), target, findFirst)
else:
return self.binarySearch(nums, (mid + 1), high, target, findFirst)
# Time: O(log n)
# Space: O(1)
class SolutionIterative:
def searchRange(self, nums: List[int], target: int) -> List[int]:
first = self.binarySearch(nums, 0, len(nums) - 1, target, True)
last = self.binarySearch(nums, 0, len(nums) - 1, target, False)
return [first, last]
def binarySearch(self, nums, low, high, target, findFirst):
while low <= high:
mid = low + (high - low) // 2
if findFirst:
if ((mid == 0 or target > nums[mid - 1]) and nums[mid] == target):
return mid
elif (target > nums[mid]):
low = mid + 1
else:
high = mid - 1
else:
if ((mid == len(nums) - 1 or target < nums[mid + 1]) and nums[mid] == target):
return mid
elif (target < nums[mid]):
high = mid - 1
else:
low = mid + 1
return -1
arr = [1, 3, 3, 5, 7, 9, 9, 10, 12]
x = 9
solution = Solution()
print(solution.searchRange(arr, x))
solution_iterative = SolutionIterative()
print(solution_iterative.searchRange(arr, x)) | python |
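# Quick absent-target check (expected output is an assumption based on the -1
# sentinel both helpers return): values not present map to [-1, -1].
print(solution.searchRange(arr, 4))            # [-1, -1]
print(solution_iterative.searchRange(arr, 4))  # [-1, -1]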
# -*- coding: utf-8 -*-
# import model interface
from . import models
# import constraints
from . import constraints
# import tasks
from . import tasks
# import solvers
from . import solvers
| python |
import csv
from django.db import models
import reversion
from django.core.exceptions import ObjectDoesNotExist
@reversion.register()
class FileTemplate(models.Model):
FILE_FOR_CHOICES = (
('input', 'Input'),
('equip', 'Equipment'),
('output', 'Output'),
)
name = models.CharField(max_length=200, db_index=True, unique=True)
file_for = models.CharField(max_length=6, choices=FILE_FOR_CHOICES)
# Task specific options
# Output each input item (excluding labware) by line rather than product
use_inputs = models.BooleanField(default=False)
# Collate inputs, only provide total amounts from task
# By default each input is broken down per product
total_inputs_only = models.BooleanField(default=False)
class Meta:
ordering = ['-id']
def field_name(self):
return self.name.lower().replace(' ', '_')
def _get_field_key(self, field):
if field.map_to:
return field.map_to
return field.name
def _validate_headers(self, header_list):
if header_list is None:
return False
for field in self.fields.all():
if field.required and field.name not in header_list:
return False
return True
def read(self, input_file, as_list=False):
csv_file = csv.DictReader(input_file)
try:
identifier_fields = self.fields.filter(is_identifier=True)
except ObjectDoesNotExist:
return False
else:
if as_list:
indexed = []
else:
indexed = {}
if self._validate_headers(csv_file.fieldnames):
for line in csv_file:
line = dict([(k, v) for k, v in line.items() if v.strip()])
if any(line):
# Get the identifier fields from the file
identifier = frozenset(line[n.name] for n in identifier_fields)
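                        # When not returning a list, this frozenset of identifier
                        # values becomes the key of the returned dict (see below).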
# Get a list of identifiers and remove from line
ifn = [i.name for i in identifier_fields]
                        # We don't want to use identifiers if it's a list as they'll be
# discarded.
if as_list and len(ifn) > 0:
return False
generated_line = {}
# TODO: Currently we discard extra fields in CSV that are not in
# filetemplate. Change this?
for field in self.fields.all():
# Don't add identifier fields
if field.name not in ifn and field.name in line:
field_value = line[field.name]
# May map to different DB field
field_key = self._get_field_key(field)
if field.is_property:
if 'properties' not in generated_line:
generated_line['properties'] = []
prop = {
'name': field_key,
'value': field_value
}
generated_line['properties'].append(prop)
else:
generated_line[field_key] = field_value
if as_list:
indexed.append(generated_line)
else:
indexed[identifier] = generated_line
return indexed
return False
def write(self, output_file, data, column_order='name'):
fieldnames = [item.name for item in self.fields.all().order_by(column_order)]
csv_output = csv.DictWriter(output_file, fieldnames=fieldnames,
extrasaction='ignore', lineterminator='\n')
csv_output.writeheader()
csv_output.writerows(data)
return output_file
def __str__(self):
return self.name
@reversion.register()
class FileTemplateField(models.Model):
# Name of the field in the file
name = models.CharField(max_length=50)
# Name of the field in the DB (if different to file header)
map_to = models.CharField(max_length=50, null=True, blank=True)
required = models.BooleanField(default=False)
is_identifier = models.BooleanField(default=False)
# Is to be used as/read from a property not a field
# Ignore on anything that does not support reading/writing
# properties on objects.
is_property = models.BooleanField(default=False)
template = models.ForeignKey(FileTemplate, related_name='fields')
def get_key(self):
if self.map_to:
return self.map_to
return self.name
def key_to_path(self):
key = self.get_key()
return key.split('.')
def __str__(self):
return self.name
| python |
import daisy
import unittest
class TestMetaCollection(unittest.TestCase):
def get_mongo_graph_provider(self, mode, directed, total_roi):
return daisy.persistence.MongoDbGraphProvider(
'test_daisy_graph',
directed=directed,
total_roi=total_roi,
mode=mode)
def test_graph_read_meta_values(self):
roi = daisy.Roi((0, 0, 0),
(10, 10, 10))
self.get_mongo_graph_provider(
'w', True, roi)
graph_provider = self.get_mongo_graph_provider(
'r', None, None)
self.assertEqual(True, graph_provider.directed)
self.assertEqual(roi, graph_provider.total_roi)
def test_graph_default_meta_values(self):
provider = self.get_mongo_graph_provider(
'w', None, None)
self.assertEqual(False, provider.directed)
self.assertIsNone(provider.total_roi)
graph_provider = self.get_mongo_graph_provider(
'r', None, None)
self.assertEqual(False, graph_provider.directed)
self.assertIsNone(graph_provider.total_roi)
def test_graph_nonmatching_meta_values(self):
roi = daisy.Roi((0, 0, 0),
(10, 10, 10))
roi2 = daisy.Roi((1, 0, 0),
(10, 10, 10))
self.get_mongo_graph_provider(
'w', True, None)
with self.assertRaises(ValueError):
self.get_mongo_graph_provider(
'r', False, None)
self.get_mongo_graph_provider(
'w', None, roi)
with self.assertRaises(ValueError):
self.get_mongo_graph_provider(
'r', None, roi2)
| python |
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 29 08:40:49 2018
@author: user
"""
import numpy as np
np.random.seed(1)
from matplotlib import pyplot as plt
import skimage.data
from skimage.color import rgb2gray
from skimage.filters import threshold_mean
from skimage.transform import resize
import network
# Utils
def get_corrupted_input(input, corruption_level):
corrupted = np.copy(input)
inv = np.random.binomial(n=1, p=corruption_level, size=len(input))
for i, v in enumerate(input):
if inv[i]:
corrupted[i] = -1 * v
return corrupted
def reshape(data):
dim = int(np.sqrt(len(data)))
data = np.reshape(data, (dim, dim))
return data
def plot(data, test, predicted, figsize=(5, 6)):
data = [reshape(d) for d in data]
test = [reshape(d) for d in test]
predicted = [reshape(d) for d in predicted]
fig, axarr = plt.subplots(len(data), 3, figsize=figsize)
for i in range(len(data)):
if i==0:
axarr[i, 0].set_title('Train data')
axarr[i, 1].set_title("Input data")
axarr[i, 2].set_title('Output data')
axarr[i, 0].imshow(data[i])
axarr[i, 0].axis('off')
axarr[i, 1].imshow(test[i])
axarr[i, 1].axis('off')
axarr[i, 2].imshow(predicted[i])
axarr[i, 2].axis('off')
plt.tight_layout()
plt.savefig("result.png")
plt.show()
def preprocessing(img, w=128, h=128):
# Resize image
img = resize(img, (w,h), mode='reflect')
# Thresholding
thresh = threshold_mean(img)
binary = img > thresh
    shift = 2*(binary*1)-1 # Boolean to bipolar int (-1/+1)
# Reshape
flatten = np.reshape(shift, (w*h))
return flatten
def main():
# Load data
camera = skimage.data.camera()
astronaut = rgb2gray(skimage.data.astronaut())
horse = skimage.data.horse()
coffee = rgb2gray(skimage.data.coffee())
    # Merge data
data = [camera, astronaut, horse, coffee]
# Preprocessing
print("Start to data preprocessing...")
data = [preprocessing(d) for d in data]
# Create Hopfield Network Model
model = network.HopfieldNetwork()
model.train_weights(data)
# Generate testset
test = [get_corrupted_input(d, 0.3) for d in data]
predicted = model.predict(test, threshold=0, asyn=False)
print("Show prediction results...")
plot(data, test, predicted)
print("Show network weights matrix...")
#model.plot_weights()
if __name__ == '__main__':
main()
| python |
def main() -> None:
N, K = map(int, input().split())
assert 1 <= K <= N <= 100
for _ in range(N):
P_i = tuple(map(int, input().split()))
assert len(P_i) == 3
assert all(0 <= P_ij <= 300 for P_ij in P_i)
if __name__ == '__main__':
main()
| python |
# -*- coding: UTF-8 -*-
import sys,io,os
from mitie import *
from collections import defaultdict
reload(sys)
sys.setdefaultencoding('utf-8')
# This code is based on: https://nlu.rasa.com/python.html
# This script is for testing: it calls the rasa nlu intent and entity recognition API directly through the python interface
sys.path.append('../MITIE/mitielib')
from rasa_nlu.model import Metadata, Interpreter
def print_beatuiful(obj):
if isinstance(obj,dict):
for k,v in obj.items():
print "\t",
print str(k).decode("unicode-escape"),
print " = " ,
print str(v).decode("unicode-escape")
# where `model_directory` points to the folder the model is persisted in
interpreter = Interpreter.load("../model/default/latest/")
sentence = u"我 的 手机号 是 xxxxxxx"
result = interpreter.parse(sentence)
print sentence
print "预测结果为:"
import json
print type(result)
print json.dumps(result, indent=4, sort_keys=True).decode("unicode-escape")
# print print_beatuiful(result)
| python |
from tir import Webapp
import unittest
class GTPA107(unittest.TestCase):
@classmethod
def setUpClass(inst):
inst.oHelper = Webapp()
inst.oHelper.Setup("SIGAGTP", "20/04/2020", "T1", "D MG 01 ")
inst.oHelper.Program('GTPA107')
def test_GTPA107_CT001(self):
self.oHelper.SearchBrowse("D MG 000033", "Filial+lote Remessa")
self.oHelper.SetButton("Visualizar")
self.oHelper.SetButton("Fechar")
self.oHelper.AssertTrue()
def test_GTPA107_CT002(self):
self.oHelper.SearchBrowse("D MG 000033", "Filial+lote Remessa")
self.oHelper.SetButton("Outras Ações", "Cancelar Remessa")
self.oHelper.SetButton("OK")
self.oHelper.SetValue('GQG_NUMINI', '000010')
self.oHelper.SetValue('GQG_NUMFIM', '000010')
self.oHelper.SetButton("Confirmar")
self.oHelper.SetButton("Fechar")
self.oHelper.AssertTrue()
def test_GTPA107_CT003(self):
self.oHelper.SearchBrowse("D MG 000034", "Filial+lote Remessa")
self.oHelper.SetButton("Outras Ações", "Transferir Remessa")
self.oHelper.SetButton("OK")
self.oHelper.SetValue('GQG_NUMINI', '000010')
self.oHelper.SetValue('GQG_NUMFIM', '000010')
self.oHelper.SetValue('GQG_AGENCI', 'AGREM5')
self.oHelper.SetButton("Confirmar")
self.oHelper.SetButton("Fechar")
self.oHelper.AssertTrue()
def test_GTPA107_CT004(self):
self.oHelper.SearchBrowse("D MG 000035", "Filial+lote Remessa")
self.oHelper.SetButton("Outras Ações", "Devolução de Remessa")
self.oHelper.SetButton("OK")
self.oHelper.SetValue('GQG_NUMINI', '000010')
self.oHelper.SetValue('GQG_NUMFIM', '000010')
self.oHelper.SetButton("Confirmar")
self.oHelper.SetButton("Fechar")
self.oHelper.AssertTrue()
def test_GTPA107_CT006(self):
self.oHelper.SearchBrowse("D MG 000042", "Filial+lote Remessa")
self.oHelper.SetButton("Outras Ações", "Baixa Protocolo")
self.oHelper.SetButton("Sim")
self.oHelper.SetButton("Fechar")
self.oHelper.AssertTrue()
def test_GTPA107_CT007(self):
self.oHelper.SearchBrowse("D MG 000043", "Filial+lote Remessa")
self.oHelper.SetButton("Outras Ações", "Estorno Baixa Protocolo")
self.oHelper.SetButton("Sim")
self.oHelper.SetButton("Fechar")
self.oHelper.AssertTrue()
@classmethod
def tearDownClass(inst):
inst.oHelper.TearDown()
if __name__ == '__main__':
unittest.main()
| python |
#!/usr/bin/env python
import unittest
import os
import time
from bones.utils import *
class TestUtils(unittest.TestCase):
def test_temp_filename_collision(self):
fn1 = temp_filename()
fn2 = temp_filename()
self.assertNotEqual(fn1, fn2)
def test_temp_filename_kwargs(self):
fn = temp_filename(prefix="temp_")
self.assertTrue(fn.startswith("temp_"))
fn = temp_filename(postfix="_temp")
self.assertTrue(fn.endswith("_temp"))
fn = temp_filename(ext="dat")
self.assertTrue(fn.endswith(".dat"))
fn = temp_filename(prefix="/usr/local/", postfix="_temp", ext="dat")
self.assertTrue(fn.startswith("/usr/local/"))
self.assertTrue(fn.endswith("_temp.dat"))
def test_is_stale(self):
younger_fn = temp_filename(prefix="/tmp/")
older_fn = temp_filename(prefix="/tmp/")
ts = time.time()
touch(older_fn, mtime=ts)
touch(younger_fn, mtime=ts - 100)
try:
self.assertFalse(is_stale(younger_fn, older_fn))
self.assertTrue(is_stale(older_fn, younger_fn))
finally:
os.unlink(younger_fn)
os.unlink(older_fn)
def test_common_filename(self):
fn1 = "/this/is/common/filename_elephant"
fn2 = "/this/is/common/filename_rhino"
fn3 = "/this/is/common/filename_cat"
cfn = common_filename(fn1, fn2, fn3)
self.assertEquals(cfn, "/this/is/common/filename_")
# nothing similar
fn4 = "not like the others"
cfn = common_filename(fn1, fn2, fn3, fn4)
self.assertEquals(cfn, "")
# short match
fn5 = "/this/is/common/filename_"
cfn = common_filename(fn1, fn2, fn3, fn5)
self.assertEquals(cfn, "/this/is/common/filename_")
if __name__ == '__main__':
unittest.main()
| python |
from django.http import HttpRequest
from django.test import Client
from django.test import TestCase
from django.urls import reverse
from project_core.tests import database_population
class CallListTest(TestCase):
def setUp(self):
self._user = database_population.create_management_user()
self._funding_instrument = database_population.create_funding_instrument()
self._client_management = database_population.create_management_logged_client()
def test_load_funding_instrument_add(self):
login = self._client_management.login(username='unittest_management', password='12345', request=HttpRequest())
self.assertTrue(login)
response = self._client_management.get(reverse('logged-funding-instrument-add'))
self.assertEqual(response.status_code, 200)
def test_load_funding_instruments_list(self):
response = self._client_management.get(reverse('logged-funding-instrument-list'))
self.assertEqual(response.status_code, 200)
def test_load_funding_instrument_update_get(self):
response = self._client_management.get(reverse('logged-funding-instrument-update', kwargs={'pk': self._funding_instrument.id}))
self.assertEqual(response.status_code, 200)
self.assertContains(response, self._funding_instrument.long_name)
def test_load_funding_instrument_detail(self):
response = self._client_management.get(reverse('logged-funding-instrument-detail', kwargs={'pk': self._funding_instrument.id}))
self.assertEqual(response.status_code, 200)
self.assertContains(response, self._funding_instrument.long_name)
| python |
from .base import Base
class Ls(Base):
"""Show List"""
def run(self):
if self.options['<ctgr>'] == "done":
self.show(None, 1)
elif self.options['<ctgr>'] == "all":
self.show(None, None)
else:
self.show(self.options['<ctgr>'],1 if self.options['<done>'] == "done" else 0)
| python |
import io
import os
import sys
from setuptools import setup
if sys.version_info < (3, 6):
sys.exit('Sorry, Python < 3.6.0 is not supported')
DESCRIPTION = 'Images Generator for bouncing objects movie'
here = os.path.abspath(os.path.dirname(__file__))
try:
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
# load __version__
exec(open(os.path.join(here, 'bouncing_objects_generator', '_version.py')).read())
setup(
name='bouncing_objects_generator',
version=__version__,
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author='Kazuhiro Serizawa',
author_email='[email protected]',
url='https://github.com/serihiro/bouncing_objects_generator',
license='MIT',
packages=['bouncing_objects_generator'],
install_requires=['numpy>=1.15', 'pillow>=5.0'],
entry_points={
'console_scripts': ['bouncing_objects_generator=bouncing_objects_generator.cli:main']
}
)
| python |
import os
from tqdm import tqdm
from PIL import Image, UnidentifiedImageError
if __name__ == '__main__':
jpg_path = '../shufa_pic/shufa'
broken_jpg_path = '../shufa_pic/broken_img'
for jpg_file in tqdm(os.listdir(jpg_path)):
src = os.path.join(jpg_path, jpg_file)
try:
image = Image.open(src)
except UnidentifiedImageError:
trg = os.path.join(broken_jpg_path, jpg_file)
os.rename(src, trg)
continue
| python |
#!/usr/bin/env python
# this just calculates the roots, it doesn't generate the heat map
# see https://thoughtstreams.io/jtauber/littlewood-fractals/
import itertools
import sys
import time
import numpy
DEGREE = 16
INNER_ONLY = False
print "generating roots for degree={}".format(DEGREE,)
start = time.time()
count = 0
click = 2 ** DEGREE / 10
next = click
if INNER_ONLY:
filename = "roots_{}b.txt".format(DEGREE)
else:
filename = "roots_{}.txt".format(DEGREE)
with open(filename, "wb") as f:
for poly in itertools.product(*([[-1, 1]] * DEGREE)):
count += 1
if count == next:
print >> sys.stderr, count
next += click
for root in numpy.roots((1,) + poly):
if root.real >= 0 and root.imag >= 0:
if not INNER_ONLY or abs(root) <= 1:
print >> f, root.real, root.imag
print >> sys.stderr, "wrote out {} in {} seconds".format(filename, time.time() - start)
| python |
from aiohttp.test_utils import TestClient
from server.serializer import JSendSchema, JSendStatus
from server.serializer.fields import Many
from server.serializer.models import RentalSchema
class TestRentalsView:
async def test_get_rentals(self, client: TestClient, random_admin, random_bike):
"""Assert that you can get a list of all rentals."""
await client.app["rental_manager"].create(random_admin, random_bike)
response = await client.get('/api/v1/rentals', headers={"Authorization": f"Bearer {random_admin.firebase_id}"})
response_schema = JSendSchema.of(rentals=Many(RentalSchema()))
response_data = response_schema.load(await response.json())
assert response_data["status"] == JSendStatus.SUCCESS
assert len(response_data["data"]["rentals"]) == 1
rental = response_data["data"]["rentals"][0]
assert rental["bike_identifier"] == random_bike.identifier
assert (await client.get(rental["bike_url"])).status != 404
class TestRentalView:
async def test_get_rental(self, client: TestClient, random_admin, random_bike):
"""Assert that you get gets a single rental from the system."""
rental, location = await client.app["rental_manager"].create(random_admin, random_bike)
response = await client.get(f'/api/v1/rentals/{rental.id}',
headers={"Authorization": f"Bearer {random_admin.firebase_id}"})
response_schema = JSendSchema.of(rental=RentalSchema())
response_data = response_schema.load(await response.json())
assert response_data["status"] == JSendStatus.SUCCESS
assert response_data["data"]["rental"]["id"] == rental.id
assert response_data["data"]["rental"]["bike_identifier"] == random_bike.identifier
assert (await client.get(response_data["data"]["rental"]["bike_url"])).status != 404
| python |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Timer."""
import time
class Timer(object):
"""A simple timer (adapted from Detectron)."""
def __init__(self):
self.total_time = None
self.calls = None
self.start_time = None
self.diff = None
self.average_time = None
self.reset()
def tic(self):
        # using time.time as time.clock does not normalize for multithreading
self.start_time = time.time()
def toc(self):
self.diff = time.time() - self.start_time
self.total_time += self.diff
self.calls += 1
self.average_time = self.total_time / self.calls
def reset(self):
self.total_time = 0.0
self.calls = 0
self.start_time = 0.0
self.diff = 0.0
self.average_time = 0.0
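# Minimal usage sketch (an addition, not part of the original module): time a
# repeated stand-in workload with tic()/toc() and read the running average.
if __name__ == "__main__":
    timer = Timer()
    for _ in range(3):
        timer.tic()
        sum(range(100000))  # stand-in workload
        timer.toc()
    print("average seconds per call: {:.6f}".format(timer.average_time))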
| python |
#!/usr/bin/env python
import optparse
import os,sys
#from optparse import OptionParser
import glob
import subprocess
import linecache
import struct
import shutil
def setupParserOptions():
parser = optparse.OptionParser()
parser.set_usage("%prog -f <stack> -p <parameter> -c <ctf> -s")
parser.add_option("-f",dest="stack",type="string",metavar="FILE",
help="raw, IMAGIC particle stack (black particles) - if not specified, only parameter files will be created, no new stack")
parser.add_option("-p",dest="param",type="string",metavar="FILE",
help="EMAN2 output parameter file")
parser.add_option("-c",dest="ctf",type="string",metavar="FILE",
help="per-particle CTF information file from APPION (optional)")
parser.add_option("--mag",dest="mag",type="float", metavar="FLOAT", default=10000,
help="actual magnification of images (default=10000)")
parser.add_option("--norm", action="store_true",dest="norm",default=False,
help="Normalize particles")
parser.add_option("-m",dest="onlymodel",type="int",metavar="#",
help="only convert this model (optional, starts with 0)")
parser.add_option("-d", action="store_true",dest="debug",default=False,
help="debug")
options,args = parser.parse_args()
if len(args) > 0:
parser.error("Unknown commandline options: " +str(args))
if len(sys.argv) < 2:
parser.print_help()
sys.exit()
params={}
for i in parser.option_list:
if isinstance(i.dest,str):
params[i.dest] = getattr(options,i.dest)
return params
#=========================
def checkConflicts(params):
if not params['stack']:
print "\nWarning: no stack specified\n"
elif not os.path.exists(params['stack']):
print "\nError: stack file '%s' does not exist\n" % params['stack']
sys.exit()
if not params['param']:
print "\nError: no EMAN2 parameter file specified"
sys.exit()
if not os.path.isfile(params['param']):
print "\nError: EMAN2 parameter file '%s' does not exist\n" % params['param']
sys.exit()
if not params['ctf']:
print "\nError: no CTF parameter file specified"
sys.exit()
elif not os.path.isfile(params['ctf']):
print "\nError: Appion CTF parameter file '%s' does not exist\n" % params['ctf']
sys.exit()
#=========================
def getEMANPath():
### get the imagicroot directory
emanpath = subprocess.Popen("env | grep EMAN2DIR", shell=True, stdout=subprocess.PIPE).stdout.read().strip()
if emanpath:
emanpath = emanpath.replace("EMAN2DIR=","")
if os.path.exists(emanpath):
return emanpath
print "EMAN2 was not found, make sure it is in your path"
sys.exit()
#=========================
def getNumModels(params):
## find number of models included in reconstruction
f=open(params['param'])
mods = []
for line in f:
l = line.split()
model=float(l[-1])
if 889 > model > 99:
continue
if model not in mods:
mods.append(model)
f.close()
return len(mods)
#=========================
def Eman2Freali(az,alt,phi):
t1 = Transform({"type":"eman","az":az,"alt":alt,"phi":phi,"mirror":False})
#t_conv = Transform({"type":"eman","alt":31.717474411458415,"az":90,"phi":-90,"mirror":False})
#t2 = t1*t_conv.inverse()
d = t1.get_params("eman")
psi = d["phi"]+90
if psi >360:
psi = psi-360
theta= d["alt"]
phi = d["az"]-90
return psi,theta,phi
#=========================
def createFiles(params):
parm=params['param']
numMods = params['num']
mag = params['mag']
stack = params['stack']
debug = params['debug']
# open EMAN2 param file
f=open(parm,'r')
# for each model, create an output file
mout=[]
mtxt=[]
count=[]
for m in range(numMods):
mout.append(open("%s_%02i_frealign"%(parm,m),'w'))
mtxt.append(open("%s_%02i.txt"%(parm,m),'w'))
count.append(1)
print "Calculating euler angle conversion..."
pcount=1
for line in f:
l = line.split()
parmPSI = float(l[0])
parmTHETA = float(l[1])
parmPHI = float(l[2])
sx =(float(l[3]))
sy =(float(l[4]))
model = int(float(l[5]))
psi,theta,phi = Eman2Freali(parmPSI,parmTHETA,parmPHI)
if model < 99 or model > 889:
if debug is True:
print 'Particle %s is included' %(pcount-1)
if model > 889:
model = 0
mtxt[model].write("%s\n" %(pcount-1))
ctf = linecache.getline(params['ctf'],pcount)
if debug is True:
print 'Reading line %s in ctf file' %(pcount)
print ctf
c = ctf.split()
micro = float(c[7])
df1 = float(c[8])
df2 = float(c[9])
astig = float(c[10])
mout[model].write("%7d%8.3f%8.3f%8.3f%8.3f%8.3f%8.1f%6d%9.1f%9.1f%8.2f%7.2f%6.2f\n" %(count[model],psi,theta,phi,sx,sy,mag,micro,df1,df2,astig,0,0))
count[model] += 1
pcount+=1
# close files
f.close()
for m in range(numMods):
mout[m].close()
mtxt[m].close()
# exit if not converting stack
if stack is None:
return
# get box size
im=EMData.read_images(stack,[0])
nx = im[0].get_xsize()
del im
# from EMAN2PAR import EMTaskCustomer
# if params['nproc'] > 1:
# etc = EMTaskCustomer("thread:%i"%params['nproc'])
# else:
# etc = EMTaskCustomer("thread:1")
for m in range(numMods):
if params['onlymodel'] is not None:
if m!=params['onlymodel']: continue
text='%s_%02i.txt' %(parm,m)
parts = open(text).readlines()
nimg = len(parts)
imstack = "%s_model%02i"%(os.path.splitext(stack)[0],m)
print "\nAllocating space for Model %i stack..."%m
img = EMData(nx,nx,nimg)
img.write_image(imstack+'.mrc')
print "Generating %i particle stack for Model %i..."%(nimg,m)
for i in xrange(nimg):
p = int(float(parts[i]))
d = EMData()
d.read_image(stack, p)
if params['norm'] is True:
d.process_inplace("normalize")
region = Region(0, 0, i, nx, nx, 1)
d.write_image(imstack+".mrc",0,EMUtil.get_image_ext_type("mrc"), False, region, EMUtil.EMDataType.EM_FLOAT, True)
progress = int(float(i)/nimg*100)
if progress%2==0:
print "%3i%% complete\t\r"%progress,
print "100% complete\t"
os.remove(text)
#=========================
#=========================
if __name__ == "__main__":
params=setupParserOptions()
getEMANPath()
from EMAN2 import *
from sparx import *
checkConflicts(params)
params['num']=getNumModels(params)
print "EMAN2 parameter file contains %s models"%params['num']
createFiles(params)
| python |
from itertools import cycle
from django.conf import settings
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.cache import cache
from django.http import Http404
from django.shortcuts import render
from django.views.generic import TemplateView
from django.views.generic.base import View
import requests
from requests.exceptions import ConnectionError
from .google_analytics import get_access_token
from .uptime_robot import UptimeRobot
from .models import Service
from .models import GoogleAnalyticsSite
class HomeView(LoginRequiredMixin, TemplateView):
template_name = 'home.html'
class SpotligthView(LoginRequiredMixin, View):
SPOTLIGTH_CYCLE = cycle('AB')
def get(self, request, *args, **kwargs):
case = next(self.SPOTLIGTH_CYCLE)
if case == 'A':
obj = Service.objects.all().order_by('?').first()
if not obj:
raise Http404('Create a Service first')
return render(request, 'service_detail.html', {
'obj': obj,
})
elif case == 'B':
obj = GoogleAnalyticsSite.objects.all().order_by('?').first()
if not obj:
raise Http404('Create a GoogleAnalyticsSite first')
return render(request, 'googleanalyticssite_detail.html', {
'ACCESS_TOKEN': get_access_token(),
'obj': obj,
})
class TickerView(LoginRequiredMixin, View):
def get(self, request, *args, **kwargs):
response_list = []
# Zendesk
zendesk_data = cache.get('zendesk_data')
if not zendesk_data:
try:
req = requests.get(
settings.ZENDESK_URL,
auth=(settings.ZENDESK_EMAIL, settings.ZENDESK_API),
)
if req.ok:
zendesk_data = {
'title': 'Tickets',
'label': 'Zendesk',
'value': req.json()['view_count']['value'],
}
cache.set('zendesk_data', zendesk_data, 120)
except ConnectionError:
zendesk_data = None
if zendesk_data:
response_list.append(zendesk_data)
# Sentry
sentry_data = cache.get('sentry_data')
if not sentry_data:
try:
req = requests.get(
settings.SENTRY_URL,
auth=(settings.SENTRY_KEY, ''),
)
if req.ok:
sentry_data = {
'title': 'Events',
'label': 'Sentry',
'value': sum([x[1] for x in req.json()]),
}
cache.set('sentry_data', sentry_data, 60)
except ConnectionError:
sentry_data = None
if sentry_data:
response_list.append(sentry_data)
# Uptime Robot
monitor_list = cache.get('monitor_list')
if not monitor_list:
uptime_robot = UptimeRobot()
success, response = uptime_robot.get_monitors()
if success:
monitor_list = []
for monitor in response.get('monitors').get('monitor'):
monitor_list.append({
'title': monitor.get('friendlyname'),
'label': 'Uptime',
'value': '{0}%'.format(
monitor.get('customuptimeratio')
),
})
cache.set('monitor_list', monitor_list, 90)
if monitor_list:
response_list.extend(monitor_list)
return render(request, 'ticker_detail.html', {
'response_list': response_list,
})
| python |
node = S(input, "application/json")
object = {
"name": "test",
"comment": "42!"
}
node.prop("comment", object)
propertyNode = node.prop("comment")
value = propertyNode.prop("comment").stringValue() | python |
# -*- coding: utf-8 -*-
"""Example Google style docstrings.
This module demonstrates documentation as specified by the `Google Python
Style Guide`_. Docstrings may extend over multiple lines. Sections are created
with a section header and a colon followed by a block of indented text.
Example:
Examples can be given using either the ``Example`` or ``Examples``
sections. Sections support any reStructuredText formatting, including
literal blocks::
$ python example_google.py
Section breaks are created by resuming unindented text. Section breaks
are also implicitly created anytime a new section starts.
Attributes:
    module_level_variable1 (int): Module level variables may be documented in
either the ``Attributes`` section of the module docstring, or in an
inline docstring immediately following the variable.
Either form is acceptable, but the two should not be mixed. Choose
one convention to document module level variables and be consistent
with it.
Todo:
* For module TODOs
* You have to also use ``sphinx.ext.todo`` extension
.. _Google Python Style Guide:
http://google.github.io/styleguide/pyguide.html
"""
import logging
import {{cookiecutter.project_slug}}.send_notification # pylint: disable=import-error
import {{cookiecutter.project_slug}}.settings_accessor # pylint: disable=import-error
_SETTINGS = {{cookiecutter.project_slug}}.settings_accessor.SettingsAccessor()
_LOGGER = logging.getLogger(__name__)
_HANDLER = {{cookiecutter.project_slug}}.send_notification.EmailHandler()
_LOGGER.addHandler(_HANDLER)
_LOGGER.setLevel(logging.WARNING)
def main():
"""Main function."""
_LOGGER.warning('here')
if __name__ == '__main__':
main()
| python |
__author__ = "Nathan Ward"
import logging
from datetime import date, datetime
from pytz import timezone, utc
_LOGGER = logging.getLogger()
_LOGGER.setLevel(logging.INFO)
def get_market_open_close() -> dict:
"""
Grab the market open and close settings. Convert timezone.
Lambdas run in UTC. Settings are set to US/Eastern (NYSE).
"""
today = date.today()
date_format = '%H:%M:%S'
current_time = datetime.now(utc)
#Regular hours
market_normal_open = '09:30:00'
market_normal_close = '16:00:00'
#Extended hours for stock
market_extended_open = '09:00:00'
market_extended_close = '18:00:00'
#New york timezone for nasdaq/nyse, same timezone as us-east-1
market_tz = timezone('US/Eastern')
market_open_datetime = datetime.strptime(market_normal_open, date_format).time()
market_extended_open_datetime = datetime.strptime(market_extended_open, date_format).time()
market_close_datetime = datetime.strptime(market_normal_close, date_format).time()
market_extended_close_datetime = datetime.strptime(market_extended_close, date_format).time()
naive_open_datetime = datetime.combine(today, market_open_datetime)
naive_extended_open_datetime = datetime.combine(today, market_extended_open_datetime)
naive_close_datetime = datetime.combine(today, market_close_datetime)
naive_extended_close_datetime = datetime.combine(today, market_extended_close_datetime)
open_local_datetime = market_tz.localize(naive_open_datetime, is_dst=None)
open_extended_local_datetime = market_tz.localize(naive_extended_open_datetime, is_dst=None)
close_local_datetime = market_tz.localize(naive_close_datetime, is_dst=None)
close_extended_local_datetime = market_tz.localize(naive_extended_close_datetime, is_dst=None)
open_utc_converted_datetime = open_local_datetime.astimezone(utc)
open_extended_utc_converted_datetime = open_extended_local_datetime.astimezone(utc)
close_utc_converted_datetime = close_local_datetime.astimezone(utc)
close_extended_utc_converted_datetime = close_extended_local_datetime.astimezone(utc)
time_to_close = current_time - close_utc_converted_datetime
extended_time_to_close = current_time - close_extended_utc_converted_datetime
time_to_open = open_utc_converted_datetime - current_time
extended_time_to_open = open_extended_utc_converted_datetime - current_time
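    # Sign convention: time_to_close/extended_time_to_close are negative before the
    # close and positive after it, while time_to_open/extended_time_to_open are
    # positive before the open and negative after it.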
return {
'market_open': open_utc_converted_datetime,
'market_close': close_utc_converted_datetime,
'time_to_close': time_to_close.total_seconds(),
'time_to_open': time_to_open.total_seconds(),
'extended_market_open': open_extended_utc_converted_datetime,
'extended_market_close': close_extended_utc_converted_datetime,
'extended_time_to_close': extended_time_to_close.total_seconds(),
'extended_time_to_open': extended_time_to_open.total_seconds(),
'time_now': current_time
} | python |
# -*- coding: utf-8 -*-
#---------------------------------------
# Import Libraries
#---------------------------------------
import sys
import io
import json
from os.path import isfile
import clr
clr.AddReference("IronPython.SQLite.dll")
clr.AddReference("IronPython.Modules.dll")
from datetime import datetime
#---------------------------------------
# [Required] Script Information
#---------------------------------------
ScriptName = "OwRank"
Website = "https://github.com/lucarin91/overwatch-streamlabs"
Description = "Return the hoster rank on Overwatch."
Creator = "lucarin91"
Version = "2.0.0"
#---------------------------------------
# Set Variables
#---------------------------------------
_command_permission = "everyone"
_command_info = ""
_last_update = None
_responce = None
_battletag = []
_region = 'eu'
_message = "Rank:"
_command = "!owrank"
_cooldown = 10
#---------------------------------------
# [Required] Initialize Data (Only called on Load)
#---------------------------------------
def Init():
global _last_update, _responce
settings = 'Services/Scripts/{}/settings.json'.format(ScriptName)
if isfile(settings):
with io.open(settings, mode='r', encoding='utf-8-sig') as f:
string = f.read()
Parent.Log(ScriptName, 'Load json: {}'.format(string))
conf = json.loads(string)
parse_conf(conf)
_responce = build_message()
_last_update = datetime.today()
#---------------------------------------
# [Required] Execute Data / Process Messages
#---------------------------------------
def Execute(data):
if data.IsChatMessage():
if data.GetParam(0).lower() == _command\
and not Parent.IsOnCooldown(ScriptName, _command)\
and Parent.HasPermission(data.User, _command_permission, _command_info):
Parent.SendTwitchMessage(_responce)
#---------------------------------------
# [Required] Tick Function
#---------------------------------------
def Tick():
global _responce, _last_update
if (datetime.today() - _last_update).seconds > 30:
_responce = build_message()
_last_update = datetime.today()
Parent.Log(ScriptName, 'update rank! ({})'.format(_responce))
def Unload():
pass
def ReloadSettings(jsonData):
parse_conf(json.loads(jsonData))
#---------------------------------------
# My functions
#---------------------------------------
def get_rank(username, region='eu'):
"""Return the rank of the username given in input."""
url = 'https://owapi.net/api/v3/u/{}/stats'.format(username)
res_raw = Parent.GetRequest(url, {"User-Agent":"Linux/generic"})
res = json.loads(res_raw)
status, data = res['status'], json.loads(res['response'])
if status != 200:
Parent.Log(ScriptName, 'Request status {}'.format(status))
return "not placed"
if not data\
or not region in data\
or not 'stats' in data[region]\
or not 'competitive' in data[region]['stats']\
or not 'overall_stats' in data[region]['stats']['competitive']\
or not 'comprank' in data[region]['stats']['competitive']['overall_stats']:
Parent.Log(ScriptName, 'Remote service error.')
return "not placed"
rank = data[region]['stats']['competitive']['overall_stats']['comprank']
return rank if rank is not None else "not placed"
def parse_conf(conf):
"""Set the configuration variable."""
global _battletag, _region, _message, _command, _cooldown
_battletag = [b.strip() for b in conf['battletag'].split(',')]
_region = conf['region']
_message = conf['message']
_command = conf['command']
_cooldown = conf['cooldown']
Parent.Log(ScriptName, 'Load conf: {}'.format((_battletag, _region, _message, _command, _cooldown)))
def build_message():
"""Build the message with the rank to sent to the chat."""
ranks = [(user.split('-')[0], get_rank(user, _region)) for user in _battletag]
responce = "{} {}".format(_message, ', '.join(['{}->{}'.format(u, r) for u, r in ranks]))
return responce
def ShowRank():
"""Send the rank to the chat."""
Parent.Log(ScriptName, 'Send rank to chat!')
responce = build_message()
Parent.SendTwitchMessage(responce)
| python |
import unittest
import sys
module = sys.argv[-1].split(".py")[0]
class PublicTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
global top_3
undertest = __import__(module)
top_3 = getattr(undertest, 'top_3', None)
def test_exemplo(self):
l = [1,2,3,4,8,22,-3,5]
top_3(l)
assert l[0] == 22 and l[1] == 8 and l[2] == 5
assert len(l) == 8
if __name__ == '__main__':
loader = unittest.TestLoader()
runner = unittest.TextTestRunner()
runner.run(loader.loadTestsFromModule(sys.modules[__name__]))
| python |
import datetime
from .wordpress import WordPress
class CclawTranslations(WordPress):
base_urls = [
"https://cclawtranslations.home.blog/",
]
last_updated = datetime.date(2021, 11, 3)
def init(self):
self.blacklist_patterns += ["CONTENIDO | SIGUIENTE"]
def parse_content(self, element) -> str:
self.clean_contents(element)
for div in element.find_all("div", recursive=False):
div.extract()
return str(element)
| python |
from discord.ext import commands
class Echo(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def echo(self, ctx):
await ctx.send(ctx.message.content[6:])
def setup(bot):
bot.add_cog(Echo(bot))
| python |
import json
import logging
from . import BASE
from .oauth import Tokens
logger = logging.getLogger(__name__)
def store(client_id: str, tokens: Tokens) -> None:
cache = BASE / f"{client_id}_cache.json"
# Store tokens
cache.touch(0o600, exist_ok=True)
with cache.open("w") as fh:
temp = {k: v for k, v in tokens._asdict().items() if v is not None}
json.dump(temp, fh)
def exists(client_id: str) -> bool:
tokens = BASE / f"{client_id}_cache.json"
return tokens.exists()
def retrieve(client_id: str) -> Tokens:
cache = BASE / f"{client_id}_cache.json"
with cache.open() as fh:
data = json.load(fh)
return Tokens(**data)
| python |
from .model import TreeNode
"""
BFS Solution
Space : O(n)
Time : O(n)
"""
class Solution:
def pseudoPalindromicPaths(self, root: TreeNode) -> int:
if not root:
return 0
stack = [(root, [])]
res = []
ans = 0
while stack:
node, mem = stack.pop()
if not node.left and not node.right:
res.append(mem + [node.val])
continue
if node.left:
stack.append((node.left, mem + [node.val]))
if node.right:
stack.append((node.right, mem + [node.val]))
for item in res:
d = {}
for i in item:
if i in d:
d[i] += 1
else:
d[i] = 1
odds = 0
for _, v in d.items():
if v % 2 == 1:
odds += 1
if odds <= 1:
ans += 1
return ans
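# Minimal usage sketch (an addition): assumes TreeNode(val) yields a node with
# assignable .left/.right attributes; the real .model import may differ.
if __name__ == "__main__":
    root = TreeNode(2)
    root.left, root.right = TreeNode(3), TreeNode(1)
    root.left.left, root.left.right = TreeNode(3), TreeNode(1)
    root.right.right = TreeNode(1)
    print(Solution().pseudoPalindromicPaths(root))  # 2 pseudo-palindromic paths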
| python |
import sys, os
from MySQLdb import Error as Error
from connect_db import read_connection
class ReaderBase(object):
def __init__(self):
self._password_file = "/n/home00/cadams/mysqldb"
def connect(self):
return read_connection(self._password_file) | python |
"""Geometric Brownian motion."""
import numpy as np
from stochastic.processes.base import BaseTimeProcess
from stochastic.processes.continuous.brownian_motion import BrownianMotion
from stochastic.utils import generate_times
from stochastic.utils.validation import check_numeric
from stochastic.utils.validation import check_positive_integer
from stochastic.utils.validation import check_positive_number
class GeometricBrownianMotion(BaseTimeProcess):
r"""Geometric Brownian motion process.
.. image:: _static/geometric_brownian_motion.png
:scale: 50%
A geometric Brownian motion :math:`S_t` is the analytic solution to the
stochastic differential equation with Wiener process :math:`W_t`:
.. math::
dS_t = \mu S_t dt + \sigma S_t dW_t
and can be represented with initial value :math:`S_0` in the form:
.. math::
S_t = S_0 \exp \left( \left( \mu - \frac{\sigma^2}{2} \right) t +
\sigma W_t \right)
:param float drift: the parameter :math:`\mu`
:param float volatility: the parameter :math:`\sigma`
:param float t: the right hand endpoint of the time interval :math:`[0,t]`
for the process
:param numpy.random.Generator rng: a custom random number generator
"""
def __init__(self, drift=0, volatility=1, t=1, rng=None):
super().__init__(t=t, rng=rng)
self._brownian_motion = BrownianMotion(t=t)
self.drift = drift
self.volatility = volatility
self._n = None
def __str__(self):
return "Geometric Brownian motion with drift {d} and volatility {v} on [0, {t}].".format(
t=str(self.t), d=str(self.drift), v=str(self.volatility)
)
def __repr__(self):
return "GeometricBrownianMotion(drift={d}, volatility={v}, t={t})".format(
t=str(self.t), d=str(self.drift), v=str(self.volatility)
)
@property
def drift(self):
"""Geometric Brownian motion drift parameter."""
return self._drift
@drift.setter
def drift(self, value):
check_numeric(value, "Drift")
self._drift = value
@property
def volatility(self):
"""Geometric Brownian motion volatility parameter."""
return self._volatility
@volatility.setter
def volatility(self, value):
check_positive_number(value, "Volatility")
self._volatility = value
def _sample_geometric_brownian_motion(self, n, initial=1.0):
"""Generate a realization of geometric Brownian motion."""
check_positive_integer(n)
check_positive_number(initial, "Initial")
# Opt for repeated use
if self._n != n:
self._n = n
self._line = generate_times(self.drift - self.volatility ** 2 / 2.0, n)
noise = self.volatility * self._brownian_motion.sample(n)
return initial * np.exp(self._line + noise)
def _sample_geometric_brownian_motion_at(self, times, initial=1.0):
"""Generate a realization of geometric Brownian motion."""
line = [(self.drift - self.volatility ** 2 / 2.0) * t for t in times]
noise = self.volatility * self._brownian_motion.sample_at(times)
return initial * np.exp(line + noise)
def sample(self, n, initial=1):
"""Generate a realization.
:param int n: the number of increments to generate.
:param float initial: the initial value of the process :math:`S_0`.
"""
return self._sample_geometric_brownian_motion(n, initial)
def sample_at(self, times, initial=1):
"""Generate a realization using specified times.
:param times: a vector of increasing time values at which to generate
the realization
:param float initial: the initial value of the process :math:`S_0`.
"""
return self._sample_geometric_brownian_motion_at(times, initial)
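# Minimal usage sketch (an addition, not part of the stochastic package): draw one
# discretized realization of S_t with assumed drift 0.05 and volatility 0.2.
if __name__ == "__main__":
    gbm = GeometricBrownianMotion(drift=0.05, volatility=0.2, t=1)
    path = gbm.sample(n=100, initial=10.0)
    print(len(path), path[:3])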
| python |
#-----------------------------------------------------
# Make plots from matplotlib using data exported by
# DNSS.jl
# Soham M 05/2022
#-----------------------------------------------------
import numpy as np
import glob
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import matplotlib
mpl.rcParams.update({
"font.size": 34.0,
"axes.titlesize": 34.0,
"axes.labelsize": 34.0,
"xtick.labelsize": 34.0,
"ytick.labelsize": 34.0,
"legend.fontsize": 34.0,
"figure.figsize": (25, 10),
"figure.dpi": 300,
"savefig.dpi": 300,
"text.usetex": True
})
def plot_solution():
fguv = glob.glob("../data/minkowski/constraints/minkowski_guv*")
fgrr = glob.glob("../data/minkowski/constraints/minkowski_grr*")
guvmax = np.amax(list(map(lambda x: np.amax(np.load(x)["w"]), fguv)))
guvmin = np.amin(list(map(lambda x: np.amin(np.load(x)["w"]), fguv)))
grrmax = np.amax(list(map(lambda x: np.amax(np.load(x)["w"]), fgrr)))
grrmin = np.amin(list(map(lambda x: np.amin(np.load(x)["w"]), fgrr)))
guvlevels = np.linspace(guvmin, guvmax, 40)
grrlevels = np.linspace(grrmin, grrmax, 40)
fig, (ax1, ax2) = plt.subplots(1, 2, sharey=False, sharex=True)
for (_guv, _grr) in zip(fguv, fgrr):
guv = np.load(_guv)
grr = np.load(_grr)
A1 = ax1.contourf(guv["v"], guv["u"], guv["w"], vmax=np.amax(guvlevels), vmin=np.amin(guvlevels), levels=guvlevels)
A2 = ax2.contourf(grr["v"], grr["u"], grr["w"], vmax=np.amax(grrlevels), vmin=np.amin(grrlevels), levels=grrlevels)
ax1.tick_params(axis='both', which='major', size=10)
ax1.set_xlabel(r"$v$")
ax1.set_ylabel(r"$u$")
fig.colorbar(A1, ax=ax1)
fig.colorbar(A2, ax=ax2)
plt.tight_layout()
fig.savefig("minkowski_constraints.pdf")
return 0
plot_solution()
| python |
import os
import torch
from torchinfo import summary
from torch.utils.data import DataLoader
import source.utils as utils
import source.arguments as arguments
from source.model import FusionNet, UNet
from source.dataset.dataset import NucleiCellDataset
def main(m_args):
# For reproducibility
torch.manual_seed(123)
# Get model name
model_name = utils.get_model_name(m_args)
# Device
device = torch.device("cuda:" + m_args.gpu_ids) \
if torch.cuda.is_available() else "cpu"
# Model
if m_args.model == "fusion":
model = FusionNet(m_args, 1)
else:
model = UNet(m_args.num_kernel, m_args.kernel_size, 1, 2)
print(list(model.parameters())[0].shape)
summary(model)
model = model.to(device)
# Optimizer
parameters = model.parameters()
if m_args.optimizer == "adam":
optimizer = torch.optim.Adam(parameters, m_args.lr)
else:
optimizer = torch.optim.SGD(parameters, m_args.lr)
# Load model
if m_args.device == "cpu":
utils.load_checkpoint(
torch.load(os.path.join("output/", m_args.experiment_name,
model_name + ".pth.tar"),
map_location=torch.device("cpu")), model, optimizer)
else:
utils.load_checkpoint(
torch.load(os.path.join("output/", m_args.experiment_name,
model_name + ".pth.tar")),
model, optimizer)
# Load data
test_dataset = NucleiCellDataset(m_args.test_data,
phase="test",
transform=m_args.transform,
image_size=m_args.image_size)
validation_dataset = NucleiCellDataset(m_args.train_data,
phase="validation",
transform=m_args.transform,
image_size=m_args.image_size)
validation_dataloader = DataLoader(validation_dataset,
batch_size=m_args.batch_size,
shuffle=False,
num_workers=m_args.num_workers,
pin_memory=True)
test_dataloader = DataLoader(test_dataset,
batch_size=m_args.batch_size,
shuffle=False,
num_workers=m_args.num_workers,
pin_memory=True)
print("Total number of test examples", str(len(test_dataset)))
print("Total number of validation examples", str(len(validation_dataset)))
# Calculate dice and ious
print("---- Validation metrics ----")
dice_val = calculate_metrics(m_args, device, model, validation_dataloader)
print("---- Test metrics ----")
dice_test = calculate_metrics(m_args, device, model, test_dataloader)
print("Total number of parameters")
params = sum(dict((p.data_ptr(), p.numel())
for p in model.parameters()).values())
print(params)
with open(os.path.join("output/results.csv"), "a") as file:
file.write("{},{},{},{},{},{},{},{},{}\n"
.format(model_name,
str(m_args.target_type),
str(m_args.num_kernel),
str(m_args.image_size),
str(m_args.batch_size),
str(m_args.lr),
str(dice_val),
str(dice_test),
str(params)))
def calculate_metrics(f_args, device, model, loader):
intersections, totals = 0, 0
model.eval()
with torch.no_grad():
for i_val, (x_val, y_nuclei_val, y_cell_val) in enumerate(loader):
if f_args.target_type == "nuclei":
y_train = y_nuclei_val
else:
y_train = y_cell_val
# Send data and label to device
x = x_val.to(device)
# Input should be between 0 and 1
x = torch.div(x, 255)
y = y_train.to(device)
# Predict segmentation
pred = model(x).squeeze(1)
# Get the class with the highest probability
_, pred = torch.max(pred, dim=1)
inputs = pred.view(-1)
targets = y.view(-1)
intersection = (inputs * targets).sum()
total = inputs.sum() + targets.sum()
# intersection is equivalent to True Positive count
intersections += intersection
# union is the mutually inclusive area of all labels & predictions
totals += total
dice = (2. * intersections) / totals
print("dice: ", dice.item())
return dice.item()
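# Worked micro-example of the Dice formula used above (illustration only):
# pred = [1, 1, 0, 0], target = [1, 0, 0, 0] -> intersection = 1, total = 3,
# dice = 2 * 1 / 3 ~= 0.667.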
if __name__ == "__main__":
args = arguments.get_arguments()
main(args)
| python |
"""Objects representing regions in space."""
import math
import random
import itertools
import numpy
import scipy.spatial
import shapely.geometry
import shapely.ops
from scenic.core.distributions import Samplable, RejectionException, needsSampling
from scenic.core.lazy_eval import valueInContext
from scenic.core.vectors import Vector, OrientedVector, VectorDistribution
from scenic.core.geometry import RotatedRectangle
from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors
from scenic.core.geometry import headingOfSegment, triangulatePolygon, plotPolygon, polygonUnion
from scenic.core.type_support import toVector
from scenic.core.utils import cached, areEquivalent
def toPolygon(thing):
if needsSampling(thing):
return None
if hasattr(thing, 'polygon'):
return thing.polygon
if hasattr(thing, 'polygons'):
return thing.polygons
if hasattr(thing, 'lineString'):
return thing.lineString
return None
def regionFromShapelyObject(obj, orientation=None):
"""Build a 'Region' from Shapely geometry."""
assert obj.is_valid, obj
if obj.is_empty:
return nowhere
elif isinstance(obj, (shapely.geometry.Polygon, shapely.geometry.MultiPolygon)):
return PolygonalRegion(polygon=obj, orientation=orientation)
elif isinstance(obj, (shapely.geometry.LineString, shapely.geometry.MultiLineString)):
return PolylineRegion(polyline=obj, orientation=orientation)
else:
raise RuntimeError(f'unhandled type of Shapely geometry: {obj}')
class PointInRegionDistribution(VectorDistribution):
"""Uniform distribution over points in a Region"""
def __init__(self, region):
super().__init__(region)
self.region = region
def sampleGiven(self, value):
return value[self.region].uniformPointInner()
def __str__(self):
return f'PointIn({self.region})'
class Region(Samplable):
"""Abstract class for regions."""
def __init__(self, name, *dependencies, orientation=None):
super().__init__(dependencies)
self.name = name
self.orientation = orientation
def sampleGiven(self, value):
return self
def intersect(self, other, triedReversed=False):
"""Get a `Region` representing the intersection of this one with another."""
if triedReversed:
return IntersectionRegion(self, other)
else:
return other.intersect(self, triedReversed=True)
@staticmethod
def uniformPointIn(region):
"""Get a uniform `Distribution` over points in a `Region`."""
return PointInRegionDistribution(region)
def uniformPoint(self):
"""Sample a uniformly-random point in this `Region`.
Can only be called on fixed Regions with no random parameters.
"""
assert not needsSampling(self)
return self.uniformPointInner()
def uniformPointInner(self):
"""Do the actual random sampling. Implemented by subclasses."""
raise NotImplementedError()
def containsPoint(self, point):
"""Check if the `Region` contains a point. Implemented by subclasses."""
raise NotImplementedError()
def containsObject(self, obj):
"""Check if the `Region` contains an :obj:`~scenic.core.object_types.Object`.
The default implementation assumes the `Region` is convex; subclasses must
override the method if this is not the case.
"""
for corner in obj.corners:
if not self.containsPoint(corner):
return False
return True
def __contains__(self, thing):
"""Check if this `Region` contains an object or vector."""
from scenic.core.object_types import Object
if isinstance(thing, Object):
return self.containsObject(thing)
vec = toVector(thing, '"X in Y" with X not an Object or a vector')
return self.containsPoint(vec)
def getAABB(self):
"""Axis-aligned bounding box for this `Region`. Implemented by some subclasses."""
raise NotImplementedError()
def orient(self, vec):
"""Orient the given vector along the region's orientation, if any."""
if self.orientation is None:
return vec
else:
return OrientedVector(vec.x, vec.y, self.orientation[vec])
def __str__(self):
return f'<Region {self.name}>'
class AllRegion(Region):
"""Region consisting of all space."""
def intersect(self, other, triedReversed=False):
return other
def containsPoint(self, point):
return True
def containsObject(self, obj):
return True
def __eq__(self, other):
return type(other) is AllRegion
def __hash__(self):
return hash(AllRegion)
class EmptyRegion(Region):
"""Region containing no points."""
def intersect(self, other, triedReversed=False):
return self
def uniformPointInner(self):
raise RejectionException(f'sampling empty Region')
def containsPoint(self, point):
return False
def containsObject(self, obj):
return False
def show(self, plt, style=None):
pass
def __eq__(self, other):
return type(other) is EmptyRegion
def __hash__(self):
return hash(EmptyRegion)
everywhere = AllRegion('everywhere')
nowhere = EmptyRegion('nowhere')
class CircularRegion(Region):
def __init__(self, center, radius, resolution=32):
super().__init__('Circle', center, radius)
self.center = center.toVector()
self.radius = radius
self.circumcircle = (self.center, self.radius)
if not (needsSampling(self.center) or needsSampling(self.radius)):
ctr = shapely.geometry.Point(self.center)
self.polygon = ctr.buffer(self.radius, resolution=resolution)
def sampleGiven(self, value):
return CircularRegion(value[self.center], value[self.radius])
def evaluateInner(self, context):
center = valueInContext(self.center, context)
radius = valueInContext(self.radius, context)
return CircularRegion(center, radius)
def containsPoint(self, point):
point = point.toVector()
return point.distanceTo(self.center) <= self.radius
def uniformPointInner(self):
x, y = self.center
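        # triangular(0, R, R) samples r with density proportional to r, which makes
        # the resulting points uniformly distributed over the disc's area.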
r = random.triangular(0, self.radius, self.radius)
t = random.uniform(-math.pi, math.pi)
pt = Vector(x + (r * cos(t)), y + (r * sin(t)))
return self.orient(pt)
def getAABB(self):
x, y = self.center
r = self.radius
return ((x - r, y - r), (x + r, y + r))
def isEquivalentTo(self, other):
if type(other) is not CircularRegion:
return False
return (areEquivalent(other.center, self.center)
and areEquivalent(other.radius, self.radius))
def __str__(self):
return f'CircularRegion({self.center}, {self.radius})'
class SectorRegion(Region):
def __init__(self, center, radius, heading, angle, resolution=32):
super().__init__('Sector', center, radius, heading, angle)
self.center = center.toVector()
self.radius = radius
self.heading = heading
self.angle = angle
r = (radius / 2) * cos(angle / 2)
self.circumcircle = (self.center.offsetRadially(r, heading), r)
if not any(needsSampling(x) for x in (self.center, radius, heading, angle)):
ctr = shapely.geometry.Point(self.center)
circle = ctr.buffer(self.radius, resolution=resolution)
if angle >= math.tau - 0.001:
self.polygon = circle
else:
mask = shapely.geometry.Polygon([
self.center,
self.center.offsetRadially(radius, heading + angle/2),
self.center.offsetRadially(2*radius, heading),
self.center.offsetRadially(radius, heading - angle/2)
])
self.polygon = circle & mask
def sampleGiven(self, value):
return SectorRegion(value[self.center], value[self.radius],
value[self.heading], value[self.angle])
def evaluateInner(self, context):
center = valueInContext(self.center, context)
radius = valueInContext(self.radius, context)
heading = valueInContext(self.heading, context)
angle = valueInContext(self.angle, context)
return SectorRegion(center, radius, heading, angle)
def containsPoint(self, point):
point = point.toVector()
if not pointIsInCone(tuple(point), tuple(self.center), self.heading, self.angle):
return False
return point.distanceTo(self.center) <= self.radius
def uniformPointInner(self):
x, y = self.center
heading, angle, maxDist = self.heading, self.angle, self.radius
r = random.triangular(0, maxDist, maxDist)
ha = angle / 2.0
t = random.uniform(-ha, ha) + (heading + (math.pi / 2))
pt = Vector(x + (r * cos(t)), y + (r * sin(t)))
return self.orient(pt)
def isEquivalentTo(self, other):
if type(other) is not SectorRegion:
return False
return (areEquivalent(other.center, self.center)
and areEquivalent(other.radius, self.radius)
and areEquivalent(other.heading, self.heading)
and areEquivalent(other.angle, self.angle))
def __str__(self):
return f'SectorRegion({self.center},{self.radius},{self.heading},{self.angle})'
class RectangularRegion(RotatedRectangle, Region):
def __init__(self, position, heading, width, height):
super().__init__('Rectangle', position, heading, width, height)
self.position = position.toVector()
self.heading = heading
self.width = width
self.height = height
self.hw = hw = width / 2
self.hh = hh = height / 2
self.radius = hypot(hw, hh) # circumcircle; for collision detection
self.corners = tuple(position.offsetRotated(heading, Vector(*offset))
for offset in ((hw, hh), (-hw, hh), (-hw, -hh), (hw, -hh)))
self.circumcircle = (self.position, self.radius)
def sampleGiven(self, value):
return RectangularRegion(value[self.position], value[self.heading],
value[self.width], value[self.height])
def evaluateInner(self, context):
position = valueInContext(self.position, context)
heading = valueInContext(self.heading, context)
width = valueInContext(self.width, context)
height = valueInContext(self.height, context)
return RectangularRegion(position, heading, width, height)
def uniformPointInner(self):
hw, hh = self.hw, self.hh
rx = random.uniform(-hw, hw)
ry = random.uniform(-hh, hh)
pt = self.position.offsetRotated(self.heading, Vector(rx, ry))
return self.orient(pt)
def getAABB(self):
x, y = zip(*self.corners)
minx, maxx = findMinMax(x)
miny, maxy = findMinMax(y)
return ((minx, miny), (maxx, maxy))
def isEquivalentTo(self, other):
if type(other) is not RectangularRegion:
return False
return (areEquivalent(other.position, self.position)
and areEquivalent(other.heading, self.heading)
and areEquivalent(other.width, self.width)
and areEquivalent(other.height, self.height))
def __str__(self):
return f'RectangularRegion({self.position},{self.heading},{self.width},{self.height})'
class PolylineRegion(Region):
"""Region given by one or more polylines (chain of line segments)"""
def __init__(self, points=None, polyline=None, orientation=True):
super().__init__('Polyline', orientation=orientation)
if points is not None:
points = tuple(points)
if len(points) < 2:
raise RuntimeError('tried to create PolylineRegion with < 2 points')
self.points = points
self.lineString = shapely.geometry.LineString(points)
elif polyline is not None:
if isinstance(polyline, shapely.geometry.LineString):
if len(polyline.coords) < 2:
raise RuntimeError('tried to create PolylineRegion with <2-point LineString')
elif isinstance(polyline, shapely.geometry.MultiLineString):
if len(polyline) == 0:
raise RuntimeError('tried to create PolylineRegion from empty MultiLineString')
for line in polyline:
assert len(line.coords) >= 2
else:
raise RuntimeError('tried to create PolylineRegion from non-LineString')
self.lineString = polyline
else:
raise RuntimeError('must specify points or polyline for PolylineRegion')
if not self.lineString.is_valid:
raise RuntimeError('tried to create PolylineRegion with '
f'invalid LineString {self.lineString}')
self.segments = self.segmentsOf(self.lineString)
cumulativeLengths = []
total = 0
for p, q in self.segments:
dx, dy = p[0] - q[0], p[1] - q[1]
total += math.hypot(dx, dy)
cumulativeLengths.append(total)
self.cumulativeLengths = cumulativeLengths
@classmethod
def segmentsOf(cls, lineString):
if isinstance(lineString, shapely.geometry.LineString):
segments = []
points = list(lineString.coords)
if len(points) < 2:
raise RuntimeError('LineString has fewer than 2 points')
last = points[0]
for point in points[1:]:
segments.append((last, point))
last = point
return segments
elif isinstance(lineString, shapely.geometry.MultiLineString):
allSegments = []
for line in lineString:
allSegments.extend(cls.segmentsOf(line))
return allSegments
else:
raise RuntimeError('called segmentsOf on non-linestring')
def uniformPointInner(self):
pointA, pointB = random.choices(self.segments,
cum_weights=self.cumulativeLengths)[0]
interpolation = random.random()
x, y = averageVectors(pointA, pointB, weight=interpolation)
if self.orientation is True:
return OrientedVector(x, y, headingOfSegment(pointA, pointB))
else:
return self.orient(Vector(x, y))
def intersect(self, other, triedReversed=False):
poly = toPolygon(other)
if poly is not None:
intersection = self.lineString & poly
if (intersection.is_empty or
not isinstance(intersection, (shapely.geometry.LineString,
shapely.geometry.MultiLineString))):
# TODO handle points!
return nowhere
return PolylineRegion(polyline=intersection)
return super().intersect(other, triedReversed)
def containsPoint(self, point):
return self.lineString.intersects(shapely.geometry.Point(point))
def containsObject(self, obj):
return False
def getAABB(self):
xmin, ymin, xmax, ymax = self.lineString.bounds
return ((xmin, ymin), (xmax, ymax))
def show(self, plt, style='r-'):
for pointA, pointB in self.segments:
plt.plot([pointA[0], pointB[0]], [pointA[1], pointB[1]], style)
def __str__(self):
return f'PolylineRegion({self.lineString})'
def __eq__(self, other):
if type(other) is not PolylineRegion:
return NotImplemented
return (other.lineString == self.lineString)
@cached
def __hash__(self):
return hash(str(self.lineString))
class PolygonalRegion(Region):
"""Region given by one or more polygons (possibly with holes)"""
def __init__(self, points=None, polygon=None, orientation=None):
super().__init__('Polygon', orientation=orientation)
if polygon is None and points is None:
raise RuntimeError('must specify points or polygon for PolygonalRegion')
if polygon is None:
points = tuple(points)
if len(points) == 0:
raise RuntimeError('tried to create PolygonalRegion from empty point list!')
for point in points:
if needsSampling(point):
raise RuntimeError('only fixed PolygonalRegions are supported')
self.points = points
polygon = shapely.geometry.Polygon(points)
if isinstance(polygon, shapely.geometry.Polygon):
self.polygons = shapely.geometry.MultiPolygon([polygon])
elif isinstance(polygon, shapely.geometry.MultiPolygon):
self.polygons = polygon
else:
raise RuntimeError(f'tried to create PolygonalRegion from non-polygon {polygon}')
if not self.polygons.is_valid:
raise RuntimeError('tried to create PolygonalRegion with '
f'invalid polygon {self.polygons}')
if points is None and len(self.polygons) == 1 and len(self.polygons[0].interiors) == 0:
self.points = tuple(self.polygons[0].exterior.coords[:-1])
if self.polygons.is_empty:
raise RuntimeError('tried to create empty PolygonalRegion')
triangles = []
for polygon in self.polygons:
triangles.extend(triangulatePolygon(polygon))
assert len(triangles) > 0, self.polygons
self.trianglesAndBounds = tuple((tri, tri.bounds) for tri in triangles)
areas = (triangle.area for triangle in triangles)
self.cumulativeTriangleAreas = tuple(itertools.accumulate(areas))
def uniformPointInner(self):
triangle, bounds = random.choices(
self.trianglesAndBounds,
cum_weights=self.cumulativeTriangleAreas)[0]
minx, miny, maxx, maxy = bounds
# TODO improve?
while True:
x, y = random.uniform(minx, maxx), random.uniform(miny, maxy)
if triangle.intersects(shapely.geometry.Point(x, y)):
return self.orient(Vector(x, y))
def intersect(self, other, triedReversed=False):
poly = toPolygon(other)
orientation = other.orientation if self.orientation is None else self.orientation
if poly is not None:
intersection = self.polygons & poly
if intersection.is_empty:
return nowhere
elif isinstance(intersection, (shapely.geometry.Polygon,
shapely.geometry.MultiPolygon)):
return PolygonalRegion(polygon=intersection, orientation=orientation)
elif isinstance(intersection, shapely.geometry.GeometryCollection):
polys = []
for geom in intersection:
if isinstance(geom, shapely.geometry.Polygon):
polys.append(geom)
if len(polys) == 0:
# TODO handle points, lines
raise RuntimeError('unhandled type of polygon intersection')
intersection = shapely.geometry.MultiPolygon(polys)
return PolygonalRegion(polygon=intersection, orientation=orientation)
else:
# TODO handle points, lines
raise RuntimeError('unhandled type of polygon intersection')
return super().intersect(other, triedReversed)
def union(self, other):
poly = toPolygon(other)
if not poly:
raise RuntimeError(f'cannot take union of PolygonalRegion with {other}')
union = polygonUnion((self.polygons, poly))
return PolygonalRegion(polygon=union)
def containsPoint(self, point):
return self.polygons.intersects(shapely.geometry.Point(point))
def containsObject(self, obj):
objPoly = obj.polygon
if objPoly is None:
raise RuntimeError('tried to test containment of symbolic Object!')
# TODO improve boundary handling?
return self.polygons.contains(objPoly)
def getAABB(self):
        xmin, ymin, xmax, ymax = self.polygons.bounds
return ((xmin, ymin), (xmax, ymax))
def show(self, plt, style='r-'):
plotPolygon(self.polygons, plt, style=style)
def __str__(self):
return '<PolygonalRegion>'
def __eq__(self, other):
if type(other) is not PolygonalRegion:
return NotImplemented
return (other.polygons == self.polygons
and other.orientation == self.orientation)
@cached
def __hash__(self):
# TODO better way to hash mutable Shapely geometries? (also for PolylineRegion)
return hash((str(self.polygons), self.orientation))
class PointSetRegion(Region):
"""Region consisting of a set of discrete points.
No :obj:`~scenic.core.object_types.Object` can be contained in a `PointSetRegion`,
since the latter is discrete. (This may not be true for subclasses, e.g.
`GridRegion`.)
Args:
name (str): name for debugging
points (iterable): set of points comprising the region
        kdTree (:obj:`scipy.spatial.KDTree`, optional): k-D tree for the points (one will
be computed if none is provided)
orientation (:obj:`~scenic.core.vectors.VectorField`, optional): orientation for
the region
tolerance (float, optional): distance tolerance for checking whether a point lies
in the region
"""
def __init__(self, name, points, kdTree=None, orientation=None, tolerance=1e-6):
super().__init__(name, orientation=orientation)
self.points = tuple(points)
for point in self.points:
if needsSampling(point):
raise RuntimeError('only fixed PointSetRegions are supported')
self.kdTree = scipy.spatial.cKDTree(self.points) if kdTree is None else kdTree
self.orientation = orientation
self.tolerance = tolerance
def uniformPointInner(self):
return self.orient(Vector(*random.choice(self.points)))
def intersect(self, other, triedReversed=False):
def sampler(intRegion):
o = intRegion.regions[1]
center, radius = o.circumcircle
possibles = (Vector(*self.kdTree.data[i])
for i in self.kdTree.query_ball_point(center, radius))
intersection = [p for p in possibles if o.containsPoint(p)]
if len(intersection) == 0:
raise RejectionException(f'empty intersection of Regions {self} and {o}')
return self.orient(random.choice(intersection))
return IntersectionRegion(self, other, sampler=sampler, orientation=self.orientation)
def containsPoint(self, point):
distance, location = self.kdTree.query(point)
return (distance <= self.tolerance)
def containsObject(self, obj):
raise NotImplementedError()
def __eq__(self, other):
if type(other) is not PointSetRegion:
return NotImplemented
return (other.name == self.name
and other.points == self.points
and other.orientation == self.orientation)
def __hash__(self):
return hash((self.name, self.points, self.orientation))
class GridRegion(PointSetRegion):
"""A Region given by an obstacle grid.
A point is considered to be in a `GridRegion` if the nearest grid point is
not an obstacle.
Args:
name (str): name for debugging
grid: 2D list, tuple, or NumPy array of 0s and 1s, where 1 indicates an obstacle
and 0 indicates free space
Ax (float): spacing between grid points along X axis
Ay (float): spacing between grid points along Y axis
Bx (float): X coordinate of leftmost grid column
By (float): Y coordinate of lowest grid row
orientation (:obj:`~scenic.core.vectors.VectorField`, optional): orientation of region
"""
def __init__(self, name, grid, Ax, Ay, Bx, By, orientation=None):
self.grid = numpy.array(grid)
self.sizeY, self.sizeX = self.grid.shape
self.Ax, self.Ay = Ax, Ay
self.Bx, self.By = Bx, By
y, x = numpy.where(self.grid == 0)
points = [self.gridToPoint(point) for point in zip(x, y)]
super().__init__(name, points, orientation=orientation)
def gridToPoint(self, gp):
x, y = gp
return ((self.Ax * x) + self.Bx, (self.Ay * y) + self.By)
def pointToGrid(self, point):
x, y = point
x = (x - self.Bx) / self.Ax
y = (y - self.By) / self.Ay
nx = int(round(x))
if nx < 0 or nx >= self.sizeX:
return None
ny = int(round(y))
if ny < 0 or ny >= self.sizeY:
return None
return (nx, ny)
def containsPoint(self, point):
gp = self.pointToGrid(point)
if gp is None:
return False
x, y = gp
return (self.grid[y, x] == 0)
def containsObject(self, obj):
# TODO improve this procedure!
# Fast check
for c in obj.corners:
if not self.containsPoint(c):
return False
# Slow check
gps = [self.pointToGrid(corner) for corner in obj.corners]
x, y = zip(*gps)
minx, maxx = findMinMax(x)
miny, maxy = findMinMax(y)
for x in range(minx, maxx+1):
for y in range(miny, maxy+1):
p = self.gridToPoint((x, y))
if self.grid[y, x] == 1 and obj.containsPoint(p):
return False
return True
class IntersectionRegion(Region):
    """Region formed by intersecting two or more other regions."""
    def __init__(self, *regions, orientation=None, sampler=None):
self.regions = tuple(regions)
if len(self.regions) < 2:
raise RuntimeError('tried to take intersection of fewer than 2 regions')
super().__init__('Intersection', *self.regions, orientation=orientation)
if sampler is None:
sampler = self.genericSampler
self.sampler = sampler
def sampleGiven(self, value):
regs = [value[reg] for reg in self.regions]
# Now that regions have been sampled, attempt intersection again in the hopes
# there is a specialized sampler to handle it (unless we already have one)
if self.sampler is self.genericSampler:
failed = False
intersection = regs[0]
for region in regs[1:]:
intersection = intersection.intersect(region)
if isinstance(intersection, IntersectionRegion):
failed = True
break
if not failed:
intersection.orientation = value[self.orientation]
return intersection
return IntersectionRegion(*regs, orientation=value[self.orientation],
sampler=self.sampler)
def evaluateInner(self, context):
regs = (valueInContext(reg, context) for reg in self.regions)
orientation = valueInContext(self.orientation, context)
return IntersectionRegion(*regs, orientation=orientation, sampler=self.sampler)
def containsPoint(self, point):
return all(region.containsPoint(point) for region in self.regions)
def uniformPointInner(self):
return self.orient(self.sampler(self))
@staticmethod
def genericSampler(intersection):
regs = intersection.regions
point = regs[0].uniformPointInner()
for region in regs[1:]:
if not region.containsPoint(point):
raise RejectionException(
f'sampling intersection of Regions {regs[0]} and {region}')
return point
def isEquivalentTo(self, other):
if type(other) is not IntersectionRegion:
return False
return (areEquivalent(set(other.regions), set(self.regions))
and other.orientation == self.orientation)
def __str__(self):
return f'IntersectionRegion({self.regions})'
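# --- Hypothetical usage sketch (not part of the original module) ----------------
# It relies on the module's existing imports (shapely, numpy, scipy) and uses
# made-up demo values purely to illustrate the containment semantics of a couple
# of the Region classes defined above.
if __name__ == '__main__':
    # PolylineRegion: containment means lying on the chain of segments.
    line = PolylineRegion(points=[(0, 0), (1, 0), (2, 0)])
    assert line.containsPoint((1, 0))
    assert not line.containsPoint((1, 1))

    # GridRegion: a point is contained iff its nearest grid cell is free (0).
    demo_grid = [[0, 0, 1],
                 [0, 1, 0],
                 [0, 0, 0]]
    grid_region = GridRegion('demo', demo_grid, Ax=1.0, Ay=1.0, Bx=0.0, By=0.0)
    assert grid_region.containsPoint((0.1, 0.2))      # nearest cell (0, 0) is free
    assert not grid_region.containsPoint((2.0, 0.0))  # nearest cell (2, 0) is an obstacle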
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for C++ module twiss.
"""
import os
import IBSLib as ibslib
import numpy as np
import pandas as pd
import pytest
constants = [
(ibslib.clight, 299792458.0),
(ibslib.hbarGeV, 6.582119569e-25),
(ibslib.electron_mass, 0.51099895000e-3),
(ibslib.proton_mass, 0.93827208816),
(ibslib.neutron_mass, 0.93956542052),
(ibslib.mu_mass, 0.1056583755),
(ibslib.atomic_mass_unit, 0.93149410242),
(ibslib.pi, 3.141592653589793),
(ibslib.electric_charge, 1.602176634e-19),
(ibslib.euler, 0.577215664901533),
(ibslib.electron_radius, 2.8179403262e-15),
(ibslib.proton_radius, 1.5346982671888944e-18),
]
@pytest.mark.parametrize("name, value", constants)
def test_constants(name, value):
assert name == value
def test_cpp_sigefromsigs():
assert (ibslib.sige_from_sigs(ibslib.pi * 2 * 1.25e6, 0.005, 5e-4, 3326.0, 37.0)) < 1e-2
def test_cpp_sigsfromsige():
val = ibslib.sigs_from_sige(8.96628617341675e-05, 3326.0, 37.0, 5e-4 * ibslib.pi * 2 * 1.25e6)
assert (val < 0.0051) & (val > 0.004999)
def test_cpp_eta():
assert ibslib.eta(3600.0, 37.0) - 0.00073046018996082 < 1e-9
def test_cpp_fmohl():
a = 5.709563671168914e-04
b = 2.329156389696222e-01
q = 2.272866910079534e00
npp = 1000
actual = ibslib.fmohl(a, b, q, npp)
expected = 6824.655537384558
    assert abs(expected - actual) < 1e-9
def test_cpp_particle_radius():
charge = -1
aatom = 1
actual = ibslib.particle_radius(charge, aatom)
expected = 1.5346982671888944e-18
assert actual == expected
def test_cpp_BetaRelativisticFromGamma():
gamma = 1
expected = 0
actual = ibslib.beta_relativistic_from_gamma(gamma)
assert expected == actual
def test_cpp_rds():
x, y, z = 1, 2, 3
actual = ibslib.rds(x, y, z)
expected = 0.29046028102188937
assert actual == expected
hvphi = [
([1.0], [1.0], 90, -1.0),
([1.0, 17.6, 20.0], [400.0, 1200.0, 1400.0], 90, 30.742135),
]
@pytest.mark.parametrize("voltages, harmonics, phi, expected", hvphi)
def test_cpp_rfvoltages(voltages, harmonics, phi, expected):
actual = ibslib.rf_voltage_in_ev(phi, -1.0, harmonics, voltages)
    assert abs(actual - expected) < 1.0e-6
print(actual)
hvphip = [
([1.0], [1.0], 90, -6.123233995e-17),
([1.0, 17.6, 20.0], [400.0, 1200.0, 1400.0], 90, 30.742135),
]
@pytest.mark.parametrize("voltages, harmonics, phi, expected", hvphip)
def test_cpp_rfvoltages_prime(voltages, harmonics, phi, expected):
actual = ibslib.rf_voltage_in_ev_prime(phi, -1.0, harmonics, voltages)
    assert abs(actual - expected) < 1.0e-6
print(actual)
def test_cpp_rf_voltage_in_ev_with_rad_losses():
actual = ibslib.rf_voltage_in_ev_with_rad_losses(180.0, 179e3, -1.0, [1.0], [1.0])
print(actual)
expected = 1.0000000000219211
    assert abs(actual - expected) < 1.0e-9
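# Added note: pytest.approx expresses the tolerance checks above more directly.
# The hypothetical variant below mirrors the previous test using that idiom.
def test_cpp_rf_voltage_in_ev_with_rad_losses_approx():
    actual = ibslib.rf_voltage_in_ev_with_rad_losses(180.0, 179e3, -1.0, [1.0], [1.0])
    assert actual == pytest.approx(1.0000000000219211, abs=1.0e-9)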
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
my_twiss_file = os.path.join(THIS_DIR, "b2_design_lattice_1996.twiss")
def test_cpp_updateTwiss():
twiss = ibslib.GetTwissTable(my_twiss_file)
tw = ibslib.updateTwiss(twiss)
print(twiss.keys())
print(tw.keys())
assert sorted(list(twiss.keys())) == sorted(
["ALFX", "ALFY", "ANGLE", "BETX", "BETY", "DPX", "DPY", "DX", "DY", "K1L", "K1SL", "L"]
)
assert sorted(list(tw.keys())) == sorted(
[
"ALFX",
"ALFY",
"ANGLE",
"BETX",
"BETY",
"DPX",
"DPY",
"DX",
"DY",
"I1",
"I2",
"I3",
"I4x",
"I4y",
"I5x",
"I5y",
"K1L",
"K1SL",
"L",
"gammax",
"gammay",
"hx",
"hy",
"k",
"rho",
]
)
| python |
import attr
from typing import Any, List, Optional
from tokopedia import TokopediaResponse
@attr.dataclass(slots=True)
class ActiveProductsShop:
id: int
name: str
uri: str
location: str
@attr.dataclass(slots=True)
class ActiveProductShop:
id: int
name: str
url: str
is_gold: bool
location: str
city: str
reputation: str
clover: str
@attr.dataclass(slots=True)
class ActiveProductBadge:
title: str
image_url: str
@attr.dataclass(slots=True)
class ActiveProduct:
id: int
name: str
childs: Optional[Any]
url: str
image_url: str
image_url_700: str
price: str
shop: ActiveProductShop
wholesale_price: List
courier_count: int
condition: int
category_id: int
category_name: str
category_breadcrumb: str
department_id: int
labels: List
badges: List[ActiveProductBadge]
is_featured: int
rating: int
count_review: int
original_price: str
discount_expired: str
discount_percentage: int
sku: str
stock: int
@attr.dataclass(slots=True)
class ActiveProducts:
total_data: int
shop: ActiveProductsShop
products: List[ActiveProduct]
@attr.dataclass(slots=True)
class ResponseActiveProducts(TokopediaResponse):
data: Optional[ActiveProducts] = None
| python |
"""Tests for the models of the ``media_library`` app."""
from django.test import TestCase
from user_media.models import UserMediaImage
from user_media.tests.factories import UserMediaImageFactory
from . import factories
class MediaLibraryTestCase(TestCase):
"""Tests for the ``MediaLibrary`` model class."""
longMessage = True
def setUp(self):
self.library = factories.MediaLibraryFactory()
def test_instantiation(self):
self.assertTrue(self.library.pk)
def test_media_images(self):
image = factories.MediaItemFactory(
image=UserMediaImageFactory(),
library=self.library,
video=None,
)
factories.MediaItemFactory(library=self.library)
self.assertEqual(
self.library.media_images().count(), 1, msg=(
'``media_images`` should return only one item.'
)
)
self.assertEqual(
self.library.media_images()[0], image, msg=(
'``media_images`` should return the created image.'
)
)
def test_media_videos(self):
factories.MediaItemFactory(
image=UserMediaImageFactory(),
library=self.library,
video=None,
)
video = factories.MediaItemFactory(library=self.library)
self.assertEqual(
self.library.media_videos().count(), 1, msg=(
'``media_videos`` should return only one item.'
)
)
self.assertEqual(
self.library.media_videos()[0], video, msg=(
'``media_videos`` should return the created video.'
)
)
class MediaItemTestCase(TestCase):
"""Tests for the ``MediaItem`` model class."""
longMessage = True
def assertNotRaises(self, func, *args, **kwargs):
try:
func(*args, **kwargs)
except Exception as ex:
self.fail(msg=(
'"{0}" should not have raised an exception, but raised'
' "{1}"'.format(repr(func), str(ex))
))
def setUp(self):
self.library = factories.MediaLibraryFactory()
self.mediaitem = factories.MediaItemFactory(
showreel=self.library,
video='https://youtube.com/watch?v=123456',
)
self.umedia_image = UserMediaImageFactory()
self.mediaitemimage = factories.MediaItemFactory(
video=None, image=self.umedia_image,
)
def test_delete(self):
self.mediaitemimage.delete()
self.assertEqual(UserMediaImage.objects.count(), 0, msg=(
'The user media images should have been deleted as well.'
))
def test_instantiation(self):
self.assertTrue(self.mediaitem.pk)
def test_video_id(self):
self.assertEqual(self.mediaitem.video_id, '123456', msg=(
'The property should have returned the correct video id.'
))
def test_clean(self):
linklist = [
'http://www.youtube.com/watch?v=-JyZLS2IhkQ',
'https://www.youtube.com/watch?v=-JyZLS2IhkQ',
'http://www.youtube.de/watch?v=-JyZLS2IhkQ',
'https://youtube.com/watch?v=-JyZLS2IhkQ',
('https://www.youtube.com/watch?v=PguLNvCcOHQ'
'&list=RDPguLNvCcOHQ#t=0'),
'http://youtu.be/PguLNvCcOHQ?list=RDPguLNvCcOHQ ',
'http://vimeo.com/channels/staffpicks/110140870',
'http://vimeo.com/59777392',
'http://vimeo.com/video/59777392',
('http://vimeo.com/groups/thedirectorofphotography/'
'videos/110016243'),
]
for link in linklist:
self.mediaitem.video = link
self.assertNotRaises(self.mediaitem.clean)
| python |
from slack import WebClient
class SlackApiWrapper(WebClient):
    """Thin convenience wrapper around slack.WebClient for posting and updating messages."""

    def __init__(self, api_token):
        super().__init__(api_token)
def post_message(self, channel, message):
response = self.chat_postMessage(
channel=channel,
text=message)
assert response["ok"]
def post_attachment_message(self, channel, blocks, attachments):
response = self.api_call(
'chat.postMessage',
json=dict(
channel=channel,
blocks=blocks,
attachments=attachments
)
)
assert response["ok"]
def update_message(self, channel, ts, blocks, attachments):
response = self.api_call(
'chat.update',
json=dict(
channel=channel,
ts=ts,
blocks=blocks,
attachments=attachments
)
)
assert response["ok"]
| python |
# Run-off vote counting: read ranked ballots, repeatedly eliminate the candidates with
# the fewest first-choice votes among those still in the race, and stop once a remaining
# candidate holds at least half of the votes (or all surviving candidates are tied).
from sys import stdin, stdout
num_cases = int(stdin.readline())
stdin.readline()  # discard the separator line after the case count
for case in range(num_cases):
n = int(stdin.readline().strip()) # num_candidates
candidates = []
for i in range(n):
candidates.append(stdin.readline().strip())
votes = []
line = stdin.readline().strip()
while line != "":
votes.append(list(map(lambda x: int(x) - 1, line.split())))
line = stdin.readline().strip()
    # Tracks candidates that have been eliminated in previous counting rounds
eliminated = set([])
v = len(votes)
pointers = [0] * v
is_decided = False
if case > 0:
stdout.write("\n")
while not is_decided:
# Re-count the votes
total_votes = [0]*n
for i in range(v):
# advance pointers[i] till it points to a candidate still in the race
p = pointers[i]
while votes[i][p] in eliminated:
p += 1
pointers[i] = p
            # pointers[i] now points to a vote for a candidate still in the race
total_votes[votes[i][p]] += 1
# Find max vote (no need to check if not eliminated)
max_vote = max(total_votes)
        # Check whether the leader holds at least half of the votes
        if max_vote * 2 >= v:
is_decided = True
for iv in range(len(total_votes)):
if total_votes[iv] == max_vote:
stdout.write(candidates[iv] + "\n")
else:
min_vote = -1
for vi, vv in enumerate(total_votes):
if not vi in eliminated:
if min_vote == -1:
min_vote = vv
else:
min_vote = min(vv, min_vote)
            # If every remaining candidate is tied, no one can be eliminated
            if min_vote == max_vote:
# No one left to eliminate
is_decided = True
for i, guy in enumerate(candidates):
if not i in eliminated:
stdout.write(guy + "\n")
else:
# find everyone with min_vote and eliminate them
for k in range(n):
if total_votes[k] == min_vote:
eliminated.add(k)
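# Hypothetical example input (not part of the original program), illustrating the
# expected format: one test case with two candidates and three ballots; "Jane Smith"
# wins immediately with 2 of 3 first-choice votes.
#
#   1
#
#   2
#   John Doe
#   Jane Smith
#   1 2
#   2 1
#   2 1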
| python |
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CppExtension
setup(
name='syncbn_cpu',
ext_modules=[
CppExtension('syncbn_cpu', [
'operator.cpp',
'syncbn_cpu.cpp',
]),
],
cmdclass={
'build_ext': BuildExtension
})
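# Typical build commands (assuming a working C++ toolchain and PyTorch install):
#   python setup.py build_ext --inplace    # build the extension next to the sources
#   pip install .                          # or install the package into the environment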
| python |