import csv
import requests
import io
import json
import uuid
from collections import OrderedDict, defaultdict, Counter
import urllib.parse
from functools import lru_cache
# for LRU cache
CACHE_MAX_SIZE = 65536
__all__ = ['RProperty', 'RQuery', 'PeriodoReconciler',
'CsvReconciler', 'non_none_values', 'grouper', 'CACHE_MAX_SIZE']
# a wrapper for
# https://github.com/periodo/periodo-reconciler/blob/master/API.md
# http://stackoverflow.com/questions/2348317/how-to-write-a-pager-for-python-iterators/2350904#2350904
def grouper(iterator, page_size):
"""
yield pages of results from the input iterable
Parameters
----------
iterator : Python iterator
the iterator to be converted into pages
page_size : int
page size
Returns
-------
iterator
an iterator of pages
"""
page = []
for item in iterator:
page.append(item)
if len(page) == page_size:
yield page
page = []
if len(page) > 0:
yield page
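# A minimal usage sketch (values are illustrative): grouper yields full pages
# of page_size items, followed by a final partial page if one remains.
#
#   >>> list(grouper(iter(range(5)), 2))
#   [[0, 1], [2, 3], [4]]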
def non_none_values(dict_):
return dict([
(k, v) for (k, v) in dict_.items() if v is not None
])
class RProperty(object):
def __init__(self, p, v):
self.p = p
self.v = v
def to_dict(self):
return {'p': self.p, 'v': self.v}
def __repr__(self):
return ("""RProperty({}, {})"""
.format(json.dumps(self.p), json.dumps(self.v)))
class RQuery(object):
def __init__(self, query, label=None, limit=None, properties=None):
self.query = query
if label is None:
self.label = str(uuid.uuid4())
else:
self.label = label
self.limit = limit
self.properties = properties
def to_key_value(self):
v = {'query': self.query}
if self.limit is not None:
v['limit'] = self.limit
if (self.properties is not None and len(self.properties)):
v['properties'] = [p.to_dict() for p in self.properties]
return (self.label, v)
def __repr__(self):
if (self.properties is not None) and (len(self.properties)):
properties_repr = (""", properties=[{}]"""
.format(",\n".join([repr(p)
for p in self.properties])))
else:
properties_repr = ""
if self.limit is not None:
limit_repr = ", limit={}".format(json.dumps(self.limit))
else:
limit_repr = ""
return ("""RQuery({}, label={}{}{})"""
.format(json.dumps(self.query),
json.dumps(
self.label),
limit_repr,
properties_repr))
class PeriodoReconciler(object):
def __init__(self, host='localhost:8142', protocol='http'):
self.host = host
self.protocol = protocol
self.base_url = '{}://{}/'.format(protocol, host)
def __repr__(self):
return ("""PeriodoReconciler(host={}, protocol={})"""
.format(json.dumps(self.host),
json.dumps(self.protocol)))
def describe(self):
r = requests.get(self.base_url)
return r.json()
@lru_cache(maxsize=CACHE_MAX_SIZE)
def _call_reconciler(self, query_dict_json, method='GET'):
if method.upper() == 'GET':
r = requests.get(self.base_url, params={
'queries': query_dict_json})
elif method.upper() == 'POST':
r = requests.post(self.base_url, data={
'queries': query_dict_json})
if r.status_code == 200:
return r.json()
else:
r.raise_for_status()
def _reconcile_query_by_query(self, queries, method='GET'):
queries_dict = OrderedDict([q.to_key_value() for q in queries])
results_dict = dict()
for (k, v) in queries_dict.items():
# don't let the label for the query mess up the caching
query_dict = {'_': v}
query_dict_json = json.dumps(query_dict, sort_keys=True)
result = self._call_reconciler(query_dict_json, method)
results_dict[k] = result['_']
return results_dict
def reconcile(self, queries, method='GET', query_by_query=False):
if query_by_query:
return self._reconcile_query_by_query(queries, method)
queries_dict = OrderedDict([q.to_key_value() for q in queries])
if method.upper() == 'GET':
r = requests.get(self.base_url, params={
'queries': json.dumps(queries_dict)})
elif method.upper() == 'POST':
r = requests.post(self.base_url, data={
'queries': json.dumps(queries_dict)})
if r.status_code == 200:
return r.json()
else:
r.raise_for_status()
def suggest_properties(self):
r = requests.get(urllib.parse.urljoin(
self.base_url, '/suggest/properties'))
if r.status_code == 200:
return r.json()['result']
def suggest_entities(self, prefix):
r = requests.get(urllib.parse.urljoin(
self.base_url, '/suggest/entities'), params={
'prefix': prefix
})
if r.status_code == 200:
return r.json()['result']
def preview_period(self, period_id, flyout=False):
params = {'id': period_id}
if flyout:
params['flyout'] = True
url = urllib.parse.urljoin(self.base_url, '/preview')
r = requests.get(urllib.parse.urljoin(
self.base_url, '/preview'), params=params)
if r.status_code == 200:
return r.content
else:
r.raise_for_status()
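# A hedged usage sketch (the host, query text, and property value below are
# illustrative assumptions, not taken from the wrapper itself):
#
#   recon = PeriodoReconciler(host='localhost:8142')
#   q = RQuery('Bronze Age', label='q0',
#              properties=[RProperty('location', 'Cyprus')])
#   results = recon.reconcile([q], method='GET')
#   # results['q0']['result'] is a list of candidate periods, each carrying
#   # 'id', 'name', and a boolean 'match' flag.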
class CsvReconciler(object):
match_column_fields = (
'match_num', 'match_name', 'match_id',
'candidates_count',
'match_fallback_id', 'match_fallback_name')
def __init__(self, csvfile, p_recon, query,
location=None, start=None, stop=None,
ignored_queries='',
transpose_query=False,
page_size=1000,
query_by_query=True,
match_column_prefix="",
match_top_candidate=True):
"""
"""
self.csvfile = csvfile
self.p_recon = p_recon
self.query = query
self.location = location
self.start = start
self.stop = stop
self.ignored_queries = ignored_queries
self.transpose_query = transpose_query
self.page_size = page_size
self.query_by_query = query_by_query
self.match_column_prefix = match_column_prefix
self.match_top_candidate = match_top_candidate
# if the query matches any entry in ignored_queries,
# throw out the match
# using csv.reader to parse ignored_queries because the parameter is
# a comma-delimited list
c_reader = csv.reader(io.StringIO(self.ignored_queries))
try:
self.ignored_queries_set = set(next(c_reader))
except StopIteration as e:
self.ignored_queries_set = set()
self.reader = csv.DictReader(csvfile)
# check that query, location, start, stop are in fieldnames
# TO DO: I may want to move away from using assert
for f in [query, location, start, stop]:
if f is not None:
assert f in self.reader.fieldnames
# which properties are included?
self.included_properties = non_none_values({
'location': location,
'start': start,
'stop': stop
})
# compute the columns names for the match results, which
# have an optional prefix (match_column_prefix)
self.match_column_names = OrderedDict(
[(name, f"{self.match_column_prefix}{name}")
for name in CsvReconciler.match_column_fields])
# initialize a summary count of the matches
self.match_summary = Counter()
def _transpose_query(self, q):
"""
transpose only if there is a single ","
"""
if not self.transpose_query:
return q
terms = [term.strip() for term in q.split(",")]
if (len(terms) == 2):
return terms[1] + " " + terms[0]
else:
return q
def results_with_rows(self):
# bin the input rows into pages and then feed the pages
# to the reconciler
# from the reconciler, yield each result
for (i, page) in enumerate(grouper(self.reader, self.page_size)):
queries = []
# TO DO: I might be unnecessarily reproducing the page in memory
page_dict = OrderedDict()
for (j, row) in enumerate(page):
label = str(j)
page_dict[label] = row
queries.append(RQuery(
self._transpose_query(row[self.query]),
label=label,
properties=[
RProperty(p, row[v]) for (p, v)
in self.included_properties.items()
]
))
responses = self.p_recon.reconcile(
queries,
method='post',
query_by_query=self.query_by_query)
for (label, row) in page_dict.items():
# print ('\r results_with_rows', i, label, end="")
yield(row, responses[label])
def _matches(self, results_with_rows=None):
"""
this method processes the results and returns only the matches
"""
# assume that the new match_* names are not already field names
assert len(set(self.reader.fieldnames) &
set(self.match_column_names.values())) == 0
# process the entire CSV if no subset of
# results_with_rows was passed in
if results_with_rows is None:
results_with_rows = self.results_with_rows()
# compute a counter on the matches in the loop
# mapping query to match_id, match_name
self.matches_for_query = defaultdict(Counter)
for (row, response) in results_with_rows:
results = response['result']
matching_results = [
result for result in results if result['match']]
match_num = len(matching_results)
# the number of matches should be 0 or 1;
# anything else indicates a bug in the reconciler
assert match_num < 2
if (match_num == 1) or (self.match_top_candidate and len(results)):
match_name = results[0]['name']
match_id = results[0]['id']
# keep track of how many times a given query
# maps to a (match_id, match_name) tuple
(self.matches_for_query[row[self.query]]
.update([(match_id, match_name)]))
else:
match_name = ''
match_id = ''
row[self.match_column_names['candidates_count']] = len(results)
row[self.match_column_names["match_num"]] = match_num
row[self.match_column_names["match_name"]] = match_name
row[self.match_column_names["match_id"]] = match_id
row[self.match_column_names["match_fallback_id"]] = ''
row[self.match_column_names["match_fallback_name"]] = ''
# eliminate results in which the query is in ignored_queries
if row[self.query] in self.ignored_queries_set:
row[self.match_column_names["match_num"]] = 0
row[self.match_column_names["match_name"]] = ''
row[self.match_column_names["match_id"]] = ''
yield (row)
def matches(self, results_with_rows=None):
"""
run _matches as a first pass, then fill in fallback matches and build the match summary
"""
rows = list(self._matches(results_with_rows))
self.match_summary = Counter()
# let's now calculate fallback for rows
# without matches
for row in rows:
if not row[self.match_column_names["match_id"]]:
# set as fallback as the most common match
# for the same query term
query = row[self.query]
c = self.matches_for_query[query].most_common(1)
if len(c):
((match_id, match_name), count) = c[0]
row[(self
.match_column_names["match_fallback_id"])] = match_id
row[(self
.match_column_names
["match_fallback_name"])] = match_name
self.match_summary.update([(
row[self.query],
row[self.location] if self.location is not None else '',
row[self.start] if self.start is not None else '',
row[self.stop] if self.stop is not None else '',
row[self.match_column_names["match_num"]],
row[self.match_column_names["match_name"]],
row[self.match_column_names["match_id"]],
row[self.match_column_names["candidates_count"]],
row[self.match_column_names["match_fallback_id"]],
row[self.match_column_names["match_fallback_name"]]
)])
yield row
def to_csv(self, csvfile, rows, fieldnames=None):
if fieldnames is None:
fieldnames = (
self.reader.fieldnames +
list(self.match_column_names.values())
)
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for row in rows:
writer.writerow(row)
def match_summary_to_csv(self, output):
"""
write self.match_summary to output as CSV
"""
headers = (['query', 'location', 'start', 'stop'] +
list(CsvReconciler.match_column_fields) + ['row_count'])
writer = csv.DictWriter(output, fieldnames=headers)
writer.writeheader()
for (v, c) in self.match_summary.most_common():
row = OrderedDict(zip(headers, list(v) + [c]))
writer.writerow(row)
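# A minimal end-to-end sketch (file names and the 'period' column are
# hypothetical): reconcile a CSV against a running service and write the
# matched rows plus a summary of the match counts.
#
#   with open('input.csv') as infile, open('matched.csv', 'w') as outfile:
#       c_recon = CsvReconciler(infile, PeriodoReconciler(), query='period')
#       c_recon.to_csv(outfile, c_recon.matches())
#   with open('match_summary.csv', 'w') as summary:
#       c_recon.match_summary_to_csv(summary)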
| python |
import os
from .. import FileBuilder
from .file_builder_test import FileBuilderTest
class BuildDirsTest(FileBuilderTest):
"""Tests correct determination of whether build directories are present.
Tests correct determination of whether the parent directories of
output files are present.
"""
def _build_dirs_build_file1(self, builder, filename):
"""The build file function for the first build function."""
self._write(filename, 'text')
def _build_dirs_build1(self, builder):
"""The first build function."""
builder.build_file(
os.path.join(self._temp_dir, 'Dir1', 'Subdir', 'Output.txt'),
'build_file1', self._build_dirs_build_file1)
builder.build_file(
os.path.join(self._temp_dir, 'Dir2', 'Subdir', 'Output.txt'),
'build_file1', self._build_dirs_build_file1)
def _build_dirs_build_file2(self, builder, filename):
"""The first build file function for the second build function."""
self.assertTrue(builder.exists(os.path.join(self._temp_dir, 'Dir1')))
raise RuntimeError()
def _build_dirs_build_file3(self, builder, filename):
"""The second build file function for the second build function."""
self.assertTrue(builder.is_dir(os.path.join(self._temp_dir, 'Dir2')))
self._write(filename, 'text')
def _build_dirs_build_file4(self, builder, filename):
"""The third build file function for the second build function."""
self._write(filename, 'text')
def _build_dirs_build_file5(self, builder, filename):
"""The fourth build file function for the second build function."""
raise RuntimeError()
def _build_dirs_build2(self, builder):
"""The second build function."""
self.assertFalse(builder.exists(os.path.join(self._temp_dir, 'Dir1')))
with self.assertRaises(RuntimeError):
builder.build_file(
os.path.join(self._temp_dir, 'Dir1', 'Subdir', 'Output.txt'),
'build_file2', self._build_dirs_build_file2)
self.assertFalse(builder.exists(os.path.join(self._temp_dir, 'Dir1')))
self.assertFalse(
builder.exists(os.path.join(self._temp_dir, 'Dir1', 'Subdir')))
self.assertFalse(
builder.exists(
os.path.join(self._temp_dir, 'Dir1', 'Subdir', 'Output.txt')))
with self.assertRaises(RuntimeError):
builder.build_file(
os.path.join(self._temp_dir, 'Dir1', 'Subdir', 'Output2.txt'),
'build_file2', self._build_dirs_build_file2)
self.assertFalse(builder.exists(os.path.join(self._temp_dir, 'Dir1')))
self.assertFalse(
builder.exists(os.path.join(self._temp_dir, 'Dir1', 'Subdir')))
self.assertFalse(
builder.exists(
os.path.join(self._temp_dir, 'Dir1', 'Subdir', 'Output.txt')))
builder.build_file(
os.path.join(self._temp_dir, 'Dir3', 'Subdir', 'Output.txt'),
'build_file4', self._build_dirs_build_file4)
with self.assertRaises(RuntimeError):
builder.build_file(
os.path.join(self._temp_dir, 'Dir3', 'Subdir', 'Output2.txt'),
'build_file5', self._build_dirs_build_file5)
self.assertTrue(builder.is_dir(os.path.join(self._temp_dir, 'Dir3')))
self.assertTrue(
builder.is_dir(os.path.join(self._temp_dir, 'Dir3', 'Subdir')))
self.assertFalse(builder.exists(os.path.join(self._temp_dir, 'Dir2')))
builder.build_file(
os.path.join(self._temp_dir, 'Dir2', 'Subdir', 'Output.txt'),
'build_file3', self._build_dirs_build_file3)
self.assertTrue(builder.is_dir(os.path.join(self._temp_dir, 'Dir2')))
self.assertTrue(builder.is_dir(os.path.join(self._temp_dir, 'Dir3')))
self.assertTrue(
builder.is_dir(os.path.join(self._temp_dir, 'Dir3', 'Subdir')))
def _build_dirs_build3(self, builder):
"""The third build function."""
self.assertFalse(
builder.exists(
os.path.join(self._temp_dir, 'Dir1', 'Subdir', 'Output2.txt')))
self.assertTrue(builder.exists(os.path.join(self._temp_dir, 'Dir2')))
self.assertTrue(
builder.exists(os.path.join(self._temp_dir, 'Dir2', 'Subdir')))
self.assertTrue(builder.exists(os.path.join(self._temp_dir, 'Dir3')))
self.assertTrue(
builder.exists(os.path.join(self._temp_dir, 'Dir3', 'Subdir')))
builder.declare_read(
os.path.join(self._temp_dir, 'Dir3', 'Subdir', 'Output2.txt'))
self._check_contents(
os.path.join(self._temp_dir, 'Dir3', 'Subdir', 'Output2.txt'),
'text')
def test_build_dirs(self):
"""Test correct determination of whether build directories are present.
"""
FileBuilder.build(
self._cache_filename, 'build_dirs_test', self._build_dirs_build1)
FileBuilder.build(
self._cache_filename, 'build_dirs_test', self._build_dirs_build2)
self._check_contents(
os.path.join(self._temp_dir, 'Dir2', 'Subdir', 'Output.txt'),
'text')
self.assertFalse(os.path.exists(os.path.join(self._temp_dir, 'Dir1')))
self._write(
os.path.join(self._temp_dir, 'Dir2', 'Subdir', 'Output2.txt'),
'text')
self._write(
os.path.join(self._temp_dir, 'Dir3', 'Subdir', 'Output2.txt'),
'text')
FileBuilder.build(
self._cache_filename, 'build_dirs_test', self._build_dirs_build3)
self.assertFalse(os.path.exists(os.path.join(self._temp_dir, 'Dir1')))
self._check_contents(
os.path.join(self._temp_dir, 'Dir2', 'Subdir', 'Output2.txt'),
'text')
self._check_contents(
os.path.join(self._temp_dir, 'Dir3', 'Subdir', 'Output2.txt'),
'text')
| python |
from sawtooth_signing import create_context
from sawtooth_signing import CryptoFactory
from hashlib import sha512
from sawtooth_sdk.protobuf.transaction_pb2 import TransactionHeader
import cbor
from sawtooth_sdk.protobuf.transaction_pb2 import Transaction
from sawtooth_sdk.protobuf.batch_pb2 import BatchHeader
from sawtooth_sdk.protobuf.batch_pb2 import Batch
from sawtooth_sdk.protobuf.batch_pb2 import BatchList
import urllib.request
from urllib.error import HTTPError
import hashlib
def _sha512(data):
return hashlib.sha512(data).hexdigest()
def _get_prefix():
return _sha512("soce".encode('utf-8'))[0:6]
def _get_address(name):
soce_prefix = _get_prefix()
name_address = _sha512(name.encode('utf-8'))[0:64]
return soce_prefix + name_address
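# Address layout implied by the two helpers above: the first 6 hex characters
# of sha512('soce') followed by the first 64 hex characters of sha512(name),
# giving a 70-character state address. For example (value illustrative):
#
#   _get_address('voting1')  # -> '<6-char prefix>' + '<64-char name hash>'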
context = create_context('secp256k1')
private_key = context.new_random_private_key()
signer = CryptoFactory(context).new_signer(private_key)
action = 'create-voting'
name_id = 'voting1'
configurations_preferences_id = ['a', 'b']
sc_method = 'borda-voting'
payload = {
'action': action,
'name_id': name_id,
'configurations_preferences_id': configurations_preferences_id,
'sc_method': sc_method
}
address = _get_address(str(name_id))
address2 = _get_address(str(configurations_preferences_id))
#payload_bytes = cbor.dumps(payload)
payload_bytes = ";".join([str(action), str(name_id),
str(configurations_preferences_id),
str(None)]).encode()
txn_header_bytes = TransactionHeader(
family_name='soce',
family_version='1.0',
inputs=[address, address2],
outputs=[address, address2],
signer_public_key = signer.get_public_key().as_hex(),
# In this example, we're signing the batch with the same private key,
# but the batch can be signed by another party, in which case, the
# public key will need to be associated with that key.
batcher_public_key = signer.get_public_key().as_hex(),
# In this example, there are no dependencies. This list should include
# any previous transaction header signatures that must be applied for
# this transaction to successfully commit.
# For example,
# dependencies=['540a6803971d1880ec73a96cb97815a95d374cbad5d865925e5aa0432fcf1931539afe10310c122c5eaae15df61236079abbf4f258889359c4d175516934484a'],
dependencies=[],
payload_sha512=sha512(payload_bytes).hexdigest()
).SerializeToString()
signature = signer.sign(txn_header_bytes)
txn = Transaction(
header=txn_header_bytes,
header_signature=signature,
payload=payload_bytes
)
txns = [txn]
batch_header_bytes = BatchHeader(
signer_public_key=signer.get_public_key().as_hex(),
transaction_ids=[txn.header_signature for txn in txns],
).SerializeToString()
signature = signer.sign(batch_header_bytes)
batch = Batch(
header=batch_header_bytes,
header_signature=signature,
transactions=txns
)
batch_list_bytes = BatchList(batches=[batch]).SerializeToString()
try:
request = urllib.request.Request(
'http://localhost:8008/batches',
batch_list_bytes,
method='POST',
headers={'Content-Type': 'application/octet-stream'})
response = urllib.request.urlopen(request)
except HTTPError as e:
response = e.file
| python |
"""
Written by Muhammad on 09/02/2018
"""
import datetime as dt
import logging
import numpy as np
import pandas as pd
import ast
def csv_to_dict(fname, stime=None, etime=None, sep="|", orient="list"):
"""Reads data from a csv file and returns a dictionary.
Parameters
----------
fname : str
Full path of a csv file.
stime : Optional[datetime.datetime]
The start time of interest
etime : Optional[datetime.datetime]
The end time of interest.
If set to None, reads data to the end of a day
sep : str
Delimiter to use
orient : str
Orientation of the returned dictionary; currently only "list" is used
(see the NOTE in the function body)
Returns
-------
data_dict : dict
A dictionary object that holds the data
"""
# Load to a pandas dataframe
print("Loading csv file to pandas dataframe")
date_parser = lambda x: dt.datetime.strptime(x, "%Y-%m-%d %H:%M:%S")
df = pd.read_csv(fname, sep=sep, na_values="None",
parse_dates=['time'],
date_parser=date_parser)
if stime is not None:
df = df.loc[df.time >= stime, :]
if etime is not None:
df = df.loc[df.time <= etime, :]
# Convert to a dict
print("Converting pandas dataframe to dict")
# NOTE We'll use list orientation even though
# we need records orientation, because some of
# the columns from the DF are lists which
# get interpreted as strings by pandas
# and it becomes messy. This is a simple
# method Muhammad devised and I'm building on it.
data_dict = df.to_dict(orient="list")
print(df["ptab"].dtypes)
# Convert a string representation of list to a list
prm_keys = ["ptab", "ltab"]
fit_keys = ["elv", "gflg", "nlag", "p_l", "p_l_e", "p_s",
"p_s_e", "phi0", "phi0_e", "pwr0", "qflg", "slist", "v",
"v_e", "w_l", "w_l_e", "w_s", "w_s_e"]
keys_list = prm_keys + fit_keys
print("Converting string representation of lists to normal lists")
for ky in keys_list:
data_dict[ky] = [ast.literal_eval(x) for x in data_dict[ky]]
#for x in data_dict[ky]:
# try:
# ast.literal_eval(x)
# except:
# import pdb
# pdb.set_trace()
# # if we need a list of dicts, convert the dict of lists to that format
# if orient == "records":
# listDict = [dict(zip(data_dict,t)) for t in zip(*data_dict.values())]
# return listDict
return data_dict
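# A hedged usage sketch (the path and time window are illustrative; the column
# names come from fit_keys above):
#
#   d = csv_to_dict("./data/tmp/20130110.180000.20130111.180000.bks.fitacf.csv",
#                   stime=dt.datetime(2013, 1, 10, 18),
#                   etime=dt.datetime(2013, 1, 11, 18))
#   # d["time"] and d["v"] are parallel lists; d["v"][i] is itself a list of
#   # velocities for the record observed at d["time"][i].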
# run the code
def main(orient="list"):
# Set the logging level
logging.getLogger().setLevel(logging.WARNING)
# input parameters
stime = None
etime = None
#stime = dt.datetime(2012,12,31)
#etime = dt.datetime(2012,12,31, 1, 0)
csv_sep = "|" # Delimiter to use
# Convert dmap format to csv
fdir = "./data/tmp/"
#fname = fdir + "20121231.000000.20130101.000000.fhe.fitacf.csv"
fname = fdir + "20130110.180000.20130111.180000.bks.fitacf.csv"
#data_dict = csv_to_dict(fname, stime=stime, etime=etime, sep=csv_sep)
data_dict = csv_to_dict(fname, stime=stime, etime=etime, sep=csv_sep, orient=orient)
return data_dict
if __name__ == "__main__":
data_dict = main()
| python |
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.urls import reverse_lazy
from django.views import generic
from . import forms, models
class JoinUs(generic.CreateView):
form_class = forms.RegistrationForm
success_url = reverse_lazy('login')
template_name = 'membership/join-us.html'
class MemberDetail(PermissionRequiredMixin, generic.DetailView):
permission_required = ['assignments.view_member']
model = models.Member
slug_field = 'permalink'
class MemberList(PermissionRequiredMixin, generic.ListView):
permission_required = ['assignments.view_member']
model = models.Member
class ParentList(PermissionRequiredMixin, generic.ListView):
permission_required = ['assignments.view_member']
model = models.Parent
class ScoutList(PermissionRequiredMixin, generic.ListView):
permission_required = ['assignments.view_member']
model = models.Scout
class ContributorList(PermissionRequiredMixin, generic.ListView):
permission_required = ['assignments.view_member']
model = models.Contributor
| python |
# vim: ts=4:sw=4:et:cc=120
from typing import Optional, Union
from ace.analysis import RootAnalysis
from ace.system.base import AlertingBaseInterface
class RemoteAlertTrackingInterface(AlertingBaseInterface):
async def register_alert_system(self, name: str) -> bool:
return await self.get_api().register_alert_system(name)
async def unregister_alert_system(self, name: str) -> bool:
return await self.get_api().unregister_alert_system(name)
async def get_alerts(self, name: str, timeout: Optional[int] = None) -> list[str]:
return await self.get_api().get_alerts(name, timeout=timeout)
async def submit_alert(self, root: Union[RootAnalysis, str]) -> bool:
raise NotImplementedError()
async def get_alert_count(self, name: str) -> int:
raise NotImplementedError()
| python |
from jiminy.gym.envs.box2d.lunar_lander import LunarLander
from jiminy.gym.envs.box2d.lunar_lander import LunarLanderContinuous
from jiminy.gym.envs.box2d.bipedal_walker import BipedalWalker, BipedalWalkerHardcore
from jiminy.gym.envs.box2d.car_racing import CarRacing
| python |
import datetime
class Commit:
def __init__(self, hash: str, message: str, date_time: datetime.datetime,
author: str, email: str, repository: 'Repository'):
self._hash = hash
self.message = message
self.datetime = date_time
self.author = author
self.email = email
self._repository = repository
@property
def hash(self):
return self._hash
@hash.setter
def hash(self, value):
raise Exception(
'It is not possible to set a new hash value, instance a new commit instead'
)
@property
def children(self):
return self._repository.get_commit_children(self.hash)
@property
def parents(self):
return self._repository.get_commit_parents(self.hash)
def __repr__(self):
return self.__str__()
def __str__(self):
return self._hash
def __hash__(self) -> int:
return self._hash.__hash__()
def __eq__(self, other: 'Commit') -> bool:
return self.hash == other.hash
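# A brief illustrative sketch (the repository object and hash are hypothetical):
# because __hash__ and __eq__ both delegate to the commit hash, two Commit
# instances with the same hash compare equal and deduplicate in sets.
#
#   a = Commit('abc123', 'first', datetime.datetime.now(), 'me', 'me@example.com', repo)
#   b = Commit('abc123', 'copy', datetime.datetime.now(), 'me', 'me@example.com', repo)
#   assert a == b and len({a, b}) == 1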
| python |
import os
import argparse
from tqdm import tqdm
import warnings
warnings.filterwarnings('ignore')
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from nvidia.dali.plugin.pytorch import DALIClassificationIterator
from apex.parallel import DistributedDataParallel as DDP
from utils import AverageMeter, accuracy
from datasets import ImageList, pil_loader, cv2_loader
from datasets import get_val_transform, HybridValPipe
from networks import MobileNetV3_Large, MobileNetV3_Small
parser = argparse.ArgumentParser(
description="Basic Pytorch ImageNet Example. Testing.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# various paths
parser.add_argument('--val_root', type=str, required=True, help='root path to validating images')
parser.add_argument('--val_list', type=str, required=True, help='validating image list')
parser.add_argument('--weights', type=str, required=True, help='checkpoint for testing')
# testing hyper-parameters
parser.add_argument('--workers', type=int, default=8, help='number of workers to load dataset (global)')
parser.add_argument('--batch_size', type=int, default=512, help='batch size (global)')
parser.add_argument('--model', type=str, default='MobileNetV3_Large', help='type of model',
choices=['MobileNetV3_Large', 'MobileNetV3_Small'])
parser.add_argument('--num_classes', type=int, default=1000, help='class number of testing set')
parser.add_argument('--trans_mode', type=str, default='tv', help='mode of image transformation (tv/dali)')
parser.add_argument('--dali_cpu', action='store_true', default=False, help='runs CPU based DALI pipeline')
parser.add_argument('--ema', action='store_true', default=False, help='whether to use EMA')
# amp and DDP hyper-parameters
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--channels_last', type=str, default='False')
args, unparsed = parser.parse_known_args()
args.channels_last = eval(args.channels_last)
if hasattr(torch, 'channels_last') and hasattr(torch, 'contiguous_format'):
if args.channels_last:
memory_format = torch.channels_last
else:
memory_format = torch.contiguous_format
else:
memory_format = None
def main():
cudnn.enabled = True
cudnn.benchmark = True
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
args.gpu = 0
args.world_size = 1
if args.distributed:
args.gpu = args.local_rank
torch.cuda.set_device(args.gpu)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
args.world_size = torch.distributed.get_world_size()
# create model
if args.model == 'MobileNetV3_Large':
model = MobileNetV3_Large(args.num_classes, 0.0, False)
elif args.model == 'MobileNetV3_Small':
model = MobileNetV3_Small(args.num_classes, 0.0, False)
else:
raise Exception('invalid type of model')
model = model.cuda().to(memory_format=memory_format) if memory_format is not None else model.cuda()
# For distributed training, wrap the model with apex.parallel.DistributedDataParallel.
# This must be done AFTER the call to amp.initialize.
if args.distributed:
# By default, apex.parallel.DistributedDataParallel overlaps communication with
# computation in the backward pass.
# delay_allreduce delays all communication to the end of the backward pass.
model = DDP(model, delay_allreduce=True)
else:
model = nn.DataParallel(model)
# define transform and initialize dataloader
batch_size = args.batch_size // args.world_size
workers = args.workers // args.world_size
if args.trans_mode == 'tv':
val_transform = get_val_transform()
val_dataset = ImageList(root=args.val_root,
list_path=args.val_list,
transform=val_transform)
val_sampler = None
if args.distributed:
val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False)
val_loader = torch.utils.data.DataLoader(
val_dataset, batch_size=batch_size, num_workers=workers,
pin_memory=True, sampler=val_sampler, shuffle=False)
elif args.trans_mode == 'dali':
pipe = HybridValPipe(batch_size=batch_size,
num_threads=workers,
device_id=args.local_rank,
root=args.val_root,
list_path=args.val_list,
size=256,
crop=224,
shard_id=args.local_rank,
num_shards=args.world_size,
dali_cpu=args.dali_cpu)
pipe.build()
val_loader = DALIClassificationIterator(pipe, size=int(pipe.epoch_size("Reader")/args.world_size))
else:
raise Exception('invalid image transformation mode')
# restart from weights
if args.weights and os.path.isfile(args.weights):
if args.local_rank == 0:
print('loading weights from {}'.format(args.weights))
checkpoint = torch.load(args.weights, map_location=lambda storage,loc: storage.cuda(args.gpu))
if args.ema:
model.load_state_dict(checkpoint['ema'])
else:
model.load_state_dict(checkpoint['model'])
val_acc_top1, val_acc_top5 = validate(val_loader, model)
if args.local_rank == 0:
print('Val_acc_top1: {:.2f}'.format(val_acc_top1))
print('Val_acc_top5: {:.2f}'.format(val_acc_top5))
def validate(val_loader, model):
top1 = AverageMeter()
top5 = AverageMeter()
model.eval()
for data in tqdm(val_loader):
if args.trans_mode == 'tv':
x = data[0].cuda(non_blocking=True)
target = data[1].cuda(non_blocking=True)
elif args.trans_mode == 'dali':
x = data[0]['data'].cuda(non_blocking=True)
target = data[0]['label'].squeeze().cuda(non_blocking=True).long()
with torch.no_grad():
logits = model(x)
prec1, prec5 = accuracy(logits, target, topk=(1, 5))
if args.distributed:
prec1 = reduce_tensor(prec1)
prec5 = reduce_tensor(prec5)
top1.update(prec1.item(), x.size(0))
top5.update(prec5.item(), x.size(0))
return top1.avg, top5.avg
def reduce_tensor(tensor):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
rt /= args.world_size
return rt
if __name__ == '__main__':
main()
| python |
import numpy as np
from numpy.linalg import inv
import matplotlib.pyplot as graph  # MATLAB-style plotting interface for Python
from mpl_toolkits.mplot3d import Axes3D
import pandas as pd  # to read the csv file
import csv
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import PolynomialFeatures
#import datamodify as dat
def datatobeTaken():
data = pd.read_csv("turboazmodified.csv")
dataframe = pd.DataFrame(data, columns= ['Yurush','Qiymet','Buraxilis ili'])
yurush = data['Yurush']
qiymet = data['Qiymet']
buraxilishili = data['Buraxilish ili']
yurush = (yurush - yurush.mean()) / yurush.std()
yurush = np.c_[np.ones(yurush.shape[0]),yurush]
qiymet = (qiymet - qiymet.mean()) / qiymet.std()
buraxilishili = (buraxilishili - buraxilishili.mean()) / buraxilishili.std()
yurush.astype(float)
m = len(qiymet)
return yurush, qiymet, buraxilishili;
data = pd.read_csv("turboazmodified.csv")
def firstplot():
yurush, qiymet, buraxilishili = datatobeTaken();
m = len(yurush)
for i in range(0, m):
if '+08' in yurush[i]:
yurush[i] = float(yurush[i].replace('+08',''))
if 'e' in yurush[i]:
yurush[i] = yurush[i].replace('e','')
yurush[i] = yurush[i] * 2.7
graph.xlabel('Yurush')
graph.scatter(yurush[:,1], qiymet, edgecolors='red')
graph.ylabel('Qiymet')
graph.title('Yurush vs Qiymet')
graph.show()
def secondplot():
yurush, qiymet, buraxilishili = datatobeTaken();
graph.scatter(buraxilishili, qiymet, edgecolor = 'b')
graph.xlabel('Buraxilis')
graph.ylabel('Qiymet')
graph.title('Buxaltir')
graph.show()
def thirdplot():
yurush, qiymet, buraxilishili = datatobeTaken();
fig = graph.figure()
ax1 = fig.add_subplot(111, projection='3d')
ax1.scatter(yurush[:,1], qiymet, buraxilishili)
graph.show()
def heuristicFunct(yurush, theta):
return np.dot(yurush, theta)
def costFunction(yurush, qiymet, theta):
m = 1328
sumofvariables = 0
for i in range(1, m):
sumofvariables +=(heuristicFunct(yurush[i], theta) - qiymet[i])**2
sumofvariables = sumofvariables * (1.0/(2*m))
return sumofvariables
def updateruletobeComputed(yurush, qiymet, theta, learningrate, numberofiterations):
theta[0] = theta[0] - learningrate * costFunction(yurush, qiymet, theta) * 2
theta[1] = theta[1] - learningrate * costFunction(yurush, qiymet, theta) * 2
return theta
def plottingCostFunction(sumofvariables):
graph.title("Cost Function is plotted")
graph.xlabel("Number of iterations")
graph.ylabel("Cost")
graph.plot(sumofvariables)
graph.show()
def test1(yurush, qiymet, buraxilishili):
#yurush, qiymet, buraxilishili = datatobeTaken();
yurush = 240000
buraxilishili = 2000
qiymet = 11500
yurush = (yurush - yurush.mean()) / yurush.std()
qiymet = (qiymet - qiymet.mean()) / qiymet.std()
buraxilishili = (buraxilishili - buraxilishili.mean()) / buraxilishili.std()
ntheta = updateruletobeComputed(yurush, qiymet, theta, learningrate, numberofiterations)
predprice = ntheta[2] * buraxilishili + ntheta[1] * yurush + ntheta[0]
normqiymet = predprice * qiymet.std() + qiymet.mean()
actqiymet = qiymet * qiymet.std() + qiymet.mean()
print(normqiymet)
print(actqiymet)
def test2(yurush, qiymet, buraxilishili):
yurush = 415558
buraxilishili = 1996
qiymet = 8800
yurush = (yurush - yurush.mean()) / yurush.std()
#yurush = np.c_[np.ones(yurush.shape[0]),yurush]
qiymet = (qiymet - qiymet.mean()) / qiymet.std()
#qiymet = np.c_[np.ones(qiymet.shape[0]),qiymet]
buraxilishili = (buraxilishili - buraxilishili.mean()) / buraxilishili.std()
#buraxilishili = np.c_[np.ones(buraxilishili.shape[0]),buraxilishili]
ntheta = updateruletobeComputed(yurush, qiymet, theta, learningrate, numberofiterations)
predprice = ntheta[2] * buraxilishili + ntheta[1] * yurush + ntheta[0]
normqiymet = predprice * qiymet.std() + qiymet.mean()
actqiymet = qiymet * qiymet.std() + qiymet.mean()
print(normqiymet)
print(actqiymet)
def linearRegrTrain():
linearreg = LinearRegression()
yurush, qiymet, buraxilishili = datatobeTaken();
yurushTrain, yurushTest, buraxilishiliTrain, buraxilishiliTest = train_test_split(yurush, buraxilishili, test_size = 1/3, random_state = 0)
linearreg.fit(yurushTrain, buraxilishiliTrain)
buraxilishiliPredict = linearreg.predict(yurushTest)
graph.scatter(yurushTrain, buraxilishiliTrain, color = 'black')
graph.plot(yurushTrain, linearreg.predict(yurushTrain), color = 'red')
graph.title("Hello")
graph.xlabel("Yurush")
graph.ylabel("Buraxilish ili")
graph.show()
def linearRegrTest():
linearreg = LinearRegression()
yurush, qiymet, buraxilishili = datatobeTaken();
yurushTrain, yurushTest, buraxilishiliTrain, buraxilishiliTest = train_test_split(yurush, buraxilishili, test_size = 1/3, random_state = 0)
linearreg.fit(yurushTest, buraxilishiliTest)
buraxilishiliPredict = linearreg.predict(yurushTrain)
graph.scatter(yurushTest, buraxilishiliTest, color = 'black')
graph.plot(yurushTest, linearreg.predict(yurushTest), color = 'red')
graph.title("Hello")
graph.xlabel("Yurush")
graph.ylabel("Buraxilish ili")
graph.show()
def normequation(yurush, qiymet):
yurush, qiymet, buraxilishili = datatobeTaken();
yurushTranspose = yurush.T
normeq = inv(yurushTranspose.dot(yurush)).dot(yurushTranspose).dot(qiymet)
print("The value we get from Normal Equation is %s" % (normeq))
return normeq
def PolynomialModel(degree, yurush, qiymet):
yurush, qiymet, buraxilishili = datatobeTaken();
poly = PolynomialFeatures(degree=degree)
polyyurush = poly.fit_transform(yurush)
regs = LinearRegression()
regs.fit(polyyurush, qiymet)
actval = (yurush - polyyurush.mean()) / yurush.std()
print(actval)
#print(yurush.sh)
graph.scatter(yurush[:,0], qiymet, color = "red")
graph.plot(yurush, regs.predict(poly.fit_transform(yurush)), color = 'blue')
graph.show()
def tobePrinted():
#theta = [1,1,1]
theta = [0,0]
numberofiterations = 5 #no. of interations to learn
learningrate = 0.01 #learning rate is 0.01
m = 1328
yurush, qiymet, buraxilishili = datatobeTaken();
for i in range(numberofiterations):
costfinished = costFunction(yurush, qiymet, theta) #getting cost from cost function
theta = (updateruletobeComputed(yurush, qiymet, theta, learningrate, numberofiterations))
print("Cost function in iteration %d is %s" % (i, costfinished))
print(theta[0],theta[1])
graph.scatter(buraxilishili, qiymet, edgecolors='b')
#graph.plot(buraxilishili, qiymet)
#graph.show(block = True)
#graph.close()
#PolynomialModel(2, yurush, qiymet)
#normequation(yurush, qiymet)
#test1(yurush, qiymet, buraxilishili)
#plottingCostFunction()
#firstplot()
#linearRegrTrain()
#linearRegrTest()
#secondplot()
#thirdplot()
#test1(yurush, qiymet, buraxilishili)
tobePrinted()
| python |
#!/usr/bin/env python3
###################################################################################################
##
## Project: Embedded Learning Library (ELL)
## File: test.py
## Authors: Chris Lovett
##
## Requires: Python 3.x
##
###################################################################################################
import picluster
import sys
import time
# This test script shows how to interact with the Azure pi data center cloud service.
# It uses the 'requests' module to do HTTP interactions with Json data.
# See http://docs.python-requests.org/en/v1.0.0/user/quickstart/
import endpoint
ip = "192.168.1.999" # make it invalid ip address on purpose so it never colides with real machine
entity = {'IpAddress': ip, 'OsName': 'Raspbian', 'OsVersion': 'Jesse', 'CurrentTaskName': "RollingBuild", 'CurrentUserName': '','Command':''}
user = "Test"
def test_assert(e, message):
status = "SUCCESS"
if not e:
status = "FAILED"
print("{}, {}".format(message, status))
# add or update
t = picluster.PiBoardTable(endpoint.url, endpoint.apikey, user)
a = picluster.PiBoardEntity(entity)
r = t.update(a)
test_assert(r is None or r.ip_address != ip, "add or update entity")
# get all
r = t.get_all()
test_assert(len(r) > 0 and ip in [x.ip_address for x in r], "get_all")
# get the entity we added
r = t.get(ip)
test_assert(r and r.ip_address == ip, "get the entity we added")
# locking
r = t.lock(ip, 'Test')
test_assert(r and r.ip_address == ip and r.current_user_name == t.username, "lock our machine")
# now try and free the device using wrong user name
saved = t.username
t.username = 'Chuck'
failed = False
try:
r = t.unlock(ip)
failed = False
except:
failed = True
t.username = saved
test_assert(failed, "try and free the device using wrong user name")
# double check this is really the case
r = t.get(ip)
test_assert(r and r.ip_address == ip, "ensure entity is still there")
# now try and free the device using correct user name
r = t.unlock(ip)
test_assert(r and r.ip_address == ip, "unlock our machine")
# check it really is not locked
r = t.get(ip)
test_assert(r and r.current_user_name != t.username, "lock is gone")
# delete
r = t.delete(ip)
test_assert(r and r.current_user_name != t.username, "delete our machine")
| python |
# -*- coding: utf-8 -*-
#
# This class was auto-generated.
#
from onlinepayments.sdk.data_object import DataObject
from onlinepayments.sdk.domain.decrypted_payment_data import DecryptedPaymentData
from onlinepayments.sdk.domain.mobile_payment_product320_specific_input import MobilePaymentProduct320SpecificInput
class MobilePaymentMethodSpecificInput(DataObject):
"""
| Object containing the specific input details for mobile payments
"""
__authorization_mode = None
__decrypted_payment_data = None
__encrypted_payment_data = None
__ephemeral_key = None
__payment_product320_specific_input = None
__payment_product_id = None
__public_key_hash = None
__requires_approval = None
@property
def authorization_mode(self):
"""
| Determines the type of the authorization that will be used. Allowed values:
| * FINAL_AUTHORIZATION - The payment creation results in an authorization that is ready for capture. Final authorizations can't be reversed and need to be captured for the full amount within 7 days.
| * PRE_AUTHORIZATION - The payment creation results in a pre-authorization that is ready for capture. Pre-authorizations can be reversed and can be captured within 30 days. The capture amount can be lower than the authorized amount.
| * SALE - The payment creation results in an authorization that is already captured at the moment of approval.
| Only used with some acquirers, ignored for acquirers that don't support this. In case the acquirer doesn't allow this to be specified, the authorizationMode is 'unspecified', which behaves similarly to a final authorization.
Type: str
"""
return self.__authorization_mode
@authorization_mode.setter
def authorization_mode(self, value):
self.__authorization_mode = value
@property
def decrypted_payment_data(self):
"""
| The payment data if you do the decryption of the encrypted payment data yourself.
Type: :class:`onlinepayments.sdk.domain.decrypted_payment_data.DecryptedPaymentData`
"""
return self.__decrypted_payment_data
@decrypted_payment_data.setter
def decrypted_payment_data(self, value):
self.__decrypted_payment_data = value
@property
def encrypted_payment_data(self):
"""
| The payment data if we will do the decryption of the encrypted payment data. Typically you'd use encryptedCustomerInput in the root of the create payment request to provide the encrypted payment data instead.
| * For Apple Pay, the encrypted payment data can be found in property data of the PKPayment.token.paymentData property.
Type: str
"""
return self.__encrypted_payment_data
@encrypted_payment_data.setter
def encrypted_payment_data(self, value):
self.__encrypted_payment_data = value
@property
def ephemeral_key(self):
"""
| Ephemeral Key
| A unique generated key used by Apple to encrypt data.
Type: str
"""
return self.__ephemeral_key
@ephemeral_key.setter
def ephemeral_key(self, value):
self.__ephemeral_key = value
@property
def payment_product320_specific_input(self):
"""
| Object containing information specific to Google Pay. Required for payments with product 320.
Type: :class:`onlinepayments.sdk.domain.mobile_payment_product320_specific_input.MobilePaymentProduct320SpecificInput`
"""
return self.__payment_product320_specific_input
@payment_product320_specific_input.setter
def payment_product320_specific_input(self, value):
self.__payment_product320_specific_input = value
@property
def payment_product_id(self):
"""
| Payment product identifier - Please see Products documentation for a full overview of possible values.
Type: int
"""
return self.__payment_product_id
@payment_product_id.setter
def payment_product_id(self, value):
self.__payment_product_id = value
@property
def public_key_hash(self):
"""
| Public Key Hash
| A unique identifier to retrieve key used by Apple to encrypt information.
Type: str
"""
return self.__public_key_hash
@public_key_hash.setter
def public_key_hash(self, value):
self.__public_key_hash = value
@property
def requires_approval(self):
"""
| * true = the payment requires approval before the funds will be captured using the Approve payment or Capture payment API
| * false = the payment does not require approval, and the funds will be captured automatically
Type: bool
"""
return self.__requires_approval
@requires_approval.setter
def requires_approval(self, value):
self.__requires_approval = value
def to_dictionary(self):
dictionary = super(MobilePaymentMethodSpecificInput, self).to_dictionary()
if self.authorization_mode is not None:
dictionary['authorizationMode'] = self.authorization_mode
if self.decrypted_payment_data is not None:
dictionary['decryptedPaymentData'] = self.decrypted_payment_data.to_dictionary()
if self.encrypted_payment_data is not None:
dictionary['encryptedPaymentData'] = self.encrypted_payment_data
if self.ephemeral_key is not None:
dictionary['ephemeralKey'] = self.ephemeral_key
if self.payment_product320_specific_input is not None:
dictionary['paymentProduct320SpecificInput'] = self.payment_product320_specific_input.to_dictionary()
if self.payment_product_id is not None:
dictionary['paymentProductId'] = self.payment_product_id
if self.public_key_hash is not None:
dictionary['publicKeyHash'] = self.public_key_hash
if self.requires_approval is not None:
dictionary['requiresApproval'] = self.requires_approval
return dictionary
def from_dictionary(self, dictionary):
super(MobilePaymentMethodSpecificInput, self).from_dictionary(dictionary)
if 'authorizationMode' in dictionary:
self.authorization_mode = dictionary['authorizationMode']
if 'decryptedPaymentData' in dictionary:
if not isinstance(dictionary['decryptedPaymentData'], dict):
raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['decryptedPaymentData']))
value = DecryptedPaymentData()
self.decrypted_payment_data = value.from_dictionary(dictionary['decryptedPaymentData'])
if 'encryptedPaymentData' in dictionary:
self.encrypted_payment_data = dictionary['encryptedPaymentData']
if 'ephemeralKey' in dictionary:
self.ephemeral_key = dictionary['ephemeralKey']
if 'paymentProduct320SpecificInput' in dictionary:
if not isinstance(dictionary['paymentProduct320SpecificInput'], dict):
raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['paymentProduct320SpecificInput']))
value = MobilePaymentProduct320SpecificInput()
self.payment_product320_specific_input = value.from_dictionary(dictionary['paymentProduct320SpecificInput'])
if 'paymentProductId' in dictionary:
self.payment_product_id = dictionary['paymentProductId']
if 'publicKeyHash' in dictionary:
self.public_key_hash = dictionary['publicKeyHash']
if 'requiresApproval' in dictionary:
self.requires_approval = dictionary['requiresApproval']
return self
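# A hedged usage sketch (field values are illustrative, not taken from the
# payment platform's documentation; product 320 is the Google Pay case named
# in the property docstrings above):
#
#   specific_input = MobilePaymentMethodSpecificInput()
#   specific_input.payment_product_id = 320
#   specific_input.payment_product320_specific_input = MobilePaymentProduct320SpecificInput()
#   specific_input.authorization_mode = 'FINAL_AUTHORIZATION'
#   as_dict = specific_input.to_dictionary()
#   round_tripped = MobilePaymentMethodSpecificInput().from_dictionary(as_dict)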
| python |
bl_info = {
"name": "Run CGA Grammar",
"description": "",
"author": "JUSTOM",
"version": (0, 0, 0),
"blender": (2, 80, 0),
"location": "View3D > Tool Shelf",
"warning": "", # used for warning icon and text in addons panel
"wiki_url": "",
"tracker_url": "",
"category": "Object"
}
import bpy
from bpy.props import (StringProperty,
BoolProperty,
IntProperty,
FloatProperty,
FloatVectorProperty,
EnumProperty,
PointerProperty,
)
from bpy.types import (Panel,
Menu,
Operator,
PropertyGroup,
)
# ------------------------------------------------------------------------
# Scene Properties
# ------------------------------------------------------------------------
class PsbProperties(PropertyGroup):
fName: StringProperty(
name = "File",
description="Choose a file:",
default="",
subtype='FILE_PATH'
)
"""
my_enum: EnumProperty(
name="Dropdown:",
description="Apply Data to attribute.",
items=[ ('OP1', "Option 1", ""),
('OP2', "Option 2", ""),
('OP3', "Option 3", ""),
]
)
"""
# ------------------------------------------------------------------------
# Operators
# ------------------------------------------------------------------------
class RunGrammar(Operator):
"""Run Grammar"""
bl_idname = "object.run_cga_grammar"
bl_label = "Run Grammar"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
bpy.ops.object.mode_set(mode='EDIT')
scene = context.scene
psbTool = scene.psb_tool
#context = bpy.context
print(psbTool.fName)
return {'FINISHED'} # Lets Blender know the operator finished successfully.
# ------------------------------------------------------------------------
# Menus
# ------------------------------------------------------------------------
"""
class OBJECT_MT_CustomMenu(bpy.types.Menu):
bl_label = "Select"
bl_idname = "OBJECT_MT_custom_menu"
def draw(self, context):
layout = self.layout
# Built-in operators
layout.operator("object.select_all", text="Select/Deselect All").action = 'TOGGLE'
layout.operator("object.select_all", text="Inverse").action = 'INVERT'
layout.operator("object.select_random", text="Random")
"""
# ------------------------------------------------------------------------
# Panel in Object Mode
# ------------------------------------------------------------------------
class PsbPanel(Panel):
bl_label = "PSB Panel"
bl_idname = "PsbPanel"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_category = "Tools"
bl_context = "objectmode"
@classmethod
def poll(self,context):
return context.object is not None
def draw(self, context):
layout = self.layout
scene = context.scene
psbTool = scene.psb_tool
layout.prop(psbTool, "fName")
layout.operator("object.run_cga_grammar")
"""
class OBJECT_PT_CustomPanel(Panel):
bl_label = "My Panel"
bl_idname = "OBJECT_PT_custom_panel"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
bl_category = "Tools"
bl_context = "objectmode"
@classmethod
def poll(self,context):
return context.object is not None
def draw(self, context):
layout = self.layout
scene = context.scene
psbTool = scene.psb_tool
layout.prop(psbTool, "my_bool")
layout.prop(psbTool, "my_enum", text="")
layout.prop(psbTool, "my_int")
layout.prop(psbTool, "my_float")
layout.prop(psbTool, "my_float_vector", text="")
layout.prop(psbTool, "my_string")
layout.prop(psbTool, "my_path")
layout.operator("wm.hello_world")
layout.menu(OBJECT_MT_CustomMenu.bl_idname, text="Presets", icon="SCENE")
layout.separator()
"""
# ------------------------------------------------------------------------
# Registration
# ------------------------------------------------------------------------
classes = (
PsbProperties,
RunGrammar,
#OBJECT_MT_CustomMenu,
PsbPanel
)
def register():
from bpy.utils import register_class
for cls in classes:
register_class(cls)
bpy.types.Scene.psb_tool = PointerProperty(type=PsbProperties)
def unregister():
from bpy.utils import unregister_class
for cls in reversed(classes):
unregister_class(cls)
del bpy.types.Scene.psb_tool
if __name__ == "__main__":
register()
| python |
from scipy import stats
import json
import operator
import subprocess
import statistics as stat
import tweetTextCleaner
from sklearn.feature_extraction.text import *
from datetime import datetime
from sklearn import cluster
import numpy
#import word2vecReader
#from tokenizer import simpleTokenize
filterTerms = ['iphone 7', 'pikachu', 'pokemon go', 'macbook pro', 'trump', 'note 7']
def processDate(inputDate):
dateTemp = inputDate.split()
day = dateTemp[0]
hour = dateTemp[3].split(':')[0]
date = dateTemp[1] + ' ' + dateTemp[2] + ' ' + dateTemp[5]
return day, hour, datetime.strptime(date, '%b %d %Y')
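# A small illustrative example (the timestamp follows the Twitter-style
# created_at layout that the parser above assumes):
#
#   processDate('Fri Sep 09 12:30:05 +0000 2016')
#   # -> ('Fri', '12', datetime(2016, 9, 9, 0, 0))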
def label(mode):
tweetIDSet = set()
print('extracting outliers...')
brandList = []
listFile = open('brand.list', 'r')
for line in listFile:
brandList.append(line.strip())
listFile.close()
'''
exceptionFile = open('dataset/exceptions/exceptions.list', 'r')
exceptionList = set()
for line in exceptionFile:
exceptionList.add(long(line.strip()))
exceptionFile.close()
'''
totalDisplayFile = open('dataset/experiment/clean.display', 'w')
totalOutputFile = open('dataset/experiment/clean.labeled', 'w')
statFile = open('dataset/analysis/stat.total', 'w')
#totalCleanScore = []
#totalCleanData = []
mentionList = set()
hashtagList = set()
totalBrandData = {}
inputFile = open('dataset/experiment/total.json', 'r')
for line in inputFile:
temp = json.loads(line.strip())
brand = temp['brand']
if brand not in totalBrandData:
totalBrandData[brand] = [temp]
else:
totalBrandData[brand].append(temp)
inputFile.close()
for brand in brandList:
print(brand)
outLierFile = open('dataset/exceptions/'+brand+'.outliers', 'w')
brandData = []
brandScoreList = []
for data in totalBrandData[brand]:
tweetID = data['id']
#if tweetID not in exceptionList:
if tweetID not in tweetIDSet:
tweetIDSet.add(tweetID)
text = data['text'].encode('utf-8')
filtered = False
for term in filterTerms:
if term in text.lower():
filtered = True
break
if not filtered:
content = tweetTextCleaner.tweetCleaner(text)
finalIndex = len(data['dynamic'])-1
retweet = float(data['dynamic'][finalIndex]['retweet_count'])
favorite = float(data['dynamic'][finalIndex]['favorite_count'])
followers = float(data['dynamic'][finalIndex]['user_followers_count'])
if retweet == 0:
ratio = 0
else:
ratio = favorite/retweet
statFile.write(str(favorite)+'\t'+str(retweet)+'\t'+str(followers)+'\t'+str(ratio)+'\n')
author_statuses_count = float(data['dynamic'][finalIndex]['user_statuses_count'])
author_favorite_count = float(data['dynamic'][finalIndex]['user_favorite_count'])
author_listed_count = float(data['dynamic'][finalIndex]['user_listed_count'])
dateTemp = data['create_at'].split()
day = dateTemp[0]
hour = dateTemp[3].split(':')[0]
postDate = dateTemp[1] + ' ' + dateTemp[2] + ' ' + dateTemp[5]
dateTemp = data['user_create_at'].split()
authorDate = dateTemp[1] + ' ' + dateTemp[2] + ' ' + dateTemp[5]
postData_object = datetime.strptime(postDate, '%b %d %Y')
authorData_object = datetime.strptime(authorDate, '%b %d %Y')
authorInterval = float((postData_object - authorData_object).days)
if followers > 0:
labelScore = (2.0 * retweet + favorite) * 10000 / followers
brandData.append({'brand': brand,'content': content, 'score': labelScore, 'id': tweetID, 'day': day, 'hour': hour, 'mentions': data['mentions'], 'hashtags': data['hashtags'],
'author_statuses_count': author_statuses_count, 'author_favorite_count': author_favorite_count, 'author_listed_count': author_listed_count,
'authorInterval': authorInterval, 'author_followers_count': followers})
brandScoreList.append(labelScore)
zScores = stats.zscore(brandScoreList)
if len(zScores) != len(brandData):
print('Z-score Error!')
outputData = []
for index, item in enumerate(brandData):
item['zScore'] = float(zScores[index])
outputData.append(item)
cleanData = []
cleanScore = []
sorted_output = sorted(outputData, key=lambda x: x['score'])
for item in reversed(sorted_output):
z = item['zScore']
if z > 2:
outLierFile.write(str(item['score'])+' | '+str(z)+' : '+' | '+str(item['id'])+' | '+item['content']+'\n')
else:
cleanData.append(item)
cleanScore.append(item['score'])
#totalCleanScore.append(item['score'])
#totalCleanData.append(item)
outLierFile.close()
maxScore = max(cleanScore)
minScore = min(cleanScore)
normalScores = []
for score in cleanScore:
normalScores.append((score - minScore) / (maxScore - minScore))
stdevScore = stat.stdev(normalScores)
meanScore = stat.mean(normalScores)
print('mean: ' + str(meanScore))
print('stdev: ' + str(stdevScore))
print('median: ' + str(stat.median(normalScores)))
if stdevScore >= meanScore:
print('CAUTION')
else:
print('PASS')
print()
if mode == 1:
# label post with 1-10 score
cleanSize = len(cleanScore)
binSize = cleanSize/10
threshold = binSize
labelScore = 10
for count, item in enumerate(cleanData):
if count <= threshold or labelScore == 1:
hashtagOutput = ''
mentionsOutput = ''
for ht in item['hashtags']:
if ht not in hashtagList:
hashtagList.add(ht)
hashtagOutput += ht + ';'
if hashtagOutput == '':
hashtagOutput = 'NONE'
else:
hashtagOutput = hashtagOutput[:-1]
for ment in item['mentions']:
if ment not in mentionList:
mentionList.add(ment)
mentionsOutput += ment + ';'
if mentionsOutput == '':
mentionsOutput = 'NONE'
else:
mentionsOutput = mentionsOutput[:-1]
try:
totalDisplayFile.write(brand+' | '+str(labelScore)+' | '+day+' | '+hour+' | '+item['content']+' | '+str(item['id'])+' | '+hashtagOutput+' | '+mentionsOutput+'\n')
item['label'] = labelScore
totalOutputFile.write(json.dumps(item)+'\n')
except:
print(content)
else:
print(threshold)
threshold += binSize
labelScore -= 1
elif mode == 2:
# binary label (0, 1)
cleanSize = len(cleanScore)
for count, item in enumerate(cleanData):
hashtagOutput = ''
mentionsOutput = ''
for ht in item['hashtags']:
if ht not in hashtagList:
hashtagList.add(ht)
hashtagOutput += ht + ';'
if hashtagOutput == '':
hashtagOutput = 'NONE'
else:
hashtagOutput = hashtagOutput[:-1]
for ment in item['mentions']:
if ment not in mentionList:
mentionList.add(ment)
mentionsOutput += ment + ';'
if mentionsOutput == '':
mentionsOutput = 'NONE'
else:
mentionsOutput = mentionsOutput[:-1]
if count <= 0.5 * cleanSize:
labelScore = 1
else:
labelScore = 0
item['label'] = labelScore
totalOutputFile.write(json.dumps(item) + '\n')
try:
totalDisplayFile.write(
brand + ' | ' + str(labelScore) + ' | ' + day + ' | ' + hour + ' | ' + item['content'] + ' | ' + str(
item['id']) + ' | ' + hashtagOutput + ' | ' + mentionsOutput + '\n')
except:
print(content)
else:
# label with normalized scores
scoreDistFile = open('dataset/stats/scoreDist.'+brand, 'w')
for index, normalScore in enumerate(normalScores):
item = cleanData[index]
score = normalScore * 10
scoreDistFile.write(str(score)+'\n')
hashtagOutput = ''
mentionsOutput = ''
for ht in item['hashtags']:
if ht not in hashtagList:
hashtagList.add(ht)
hashtagOutput += ht + ';'
if hashtagOutput == '':
hashtagOutput = 'NONE'
else:
hashtagOutput = hashtagOutput[:-1]
for ment in item['mentions']:
if ment not in mentionList:
mentionList.add(ment)
mentionsOutput += ment + ';'
if mentionsOutput == '':
mentionsOutput = 'NONE'
else:
mentionsOutput = mentionsOutput[:-1]
try:
totalDisplayFile.write(brand+' | '+str(score)+' | '+day+' | '+hour+' | '+item['content']+' | '+str(item['id'])+' | '+hashtagOutput+' | '+mentionsOutput+'\n')
item['label'] = score
totalOutputFile.write(json.dumps(item)+'\n')
            except Exception:
                print(item['content'])
scoreDistFile.close()
hashtagFile = open('dataset/experiment/hashtag.list', 'w')
mentionFile = open('dataset/experiment/mention.list', 'w')
for ht in hashtagList:
hashtagFile.write(ht+'\n')
for ment in mentionList:
mentionFile.write(ment+'\n')
hashtagFile.close()
mentionFile.close()
statFile.close()
totalOutputFile.close()
def label_new(mode, inputFile):
totalDisplayFile = open('dataset/commTweets/clean.display', 'w')
totalOutputFile = open('dataset/commTweets/clean.json', 'w')
mentionList = set()
hashtagList = set()
totalBrandData = {}
inputFile = open(inputFile, 'r')
for line in inputFile:
temp = json.loads(line.strip())
brand = temp['brand']
if brand not in totalBrandData:
totalBrandData[brand] = [temp]
else:
totalBrandData[brand].append(temp)
inputFile.close()
for brand in totalBrandData:
print(brand)
outLierFile = open('dataset/commTweets/outliers/'+brand+'.outliers', 'w')
brandData = []
brandScoreList = []
for data in totalBrandData[brand]:
tweetID = data['id']
text = data['text']
content = tweetTextCleaner.tweetCleaner(text)
retweet = float(data['retweet_count'])
favorite = float(data['favorite_count'])
followers = float(data['user_followers_count'])
author_statuses_count = float(data['user_statuses_count'])
author_favorite_count = float(data['user_favorite_count'])
author_listed_count = float(data['user_listed_count'])
day, hour, postData_object = processDate(data['create_at'])
_, _, authorData_object = processDate(data['user_create_at'])
authorInterval = float((postData_object - authorData_object).days)
if followers > 0:
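                # Engagement score: retweets are weighted twice as heavily as favorites,
                # scaled by 10,000 and normalised by the author's follower count.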
labelScore = (2.0 * retweet + favorite) * 10000 / followers
brandData.append({'brand': brand, 'content': content, 'score': labelScore, 'id': tweetID, 'day': day, 'hour': hour, 'mentions': data['mentions'], 'hashtags': data['hashtags'],
'author_statuses_count': author_statuses_count, 'author_favorite_count': author_favorite_count, 'author_listed_count': author_listed_count,
'authorInterval': authorInterval, 'author_followers_count': followers})
brandScoreList.append(labelScore)
zScores = stats.zscore(brandScoreList)
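        # z-score of each tweet's raw score; tweets more than 2 standard deviations
        # above the brand mean are written to the outlier file and dropped below.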
if len(zScores) != len(brandData):
print('Z-score Error!')
outputData = []
for index, item in enumerate(brandData):
item['zScore'] = float(zScores[index])
outputData.append(item)
cleanData = []
cleanScore = []
sorted_output = sorted(outputData, key=lambda x: x['score'])
for item in reversed(sorted_output):
z = item['zScore']
if z > 2:
outLierFile.write(str(item['score'])+' | '+str(z)+' : '+' | '+str(item['id'])+' | '+item['content']+'\n')
else:
cleanData.append(item)
cleanScore.append(item['score'])
#totalCleanScore.append(item['score'])
#totalCleanData.append(item)
outLierFile.close()
maxScore = max(cleanScore)
minScore = min(cleanScore)
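        # Min-max normalise the surviving scores to the [0, 1] range.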
normalScores = []
for score in cleanScore:
normalScores.append((score - minScore) / (maxScore - minScore))
stdevScore = stat.stdev(normalScores)
meanScore = stat.mean(normalScores)
#print('mean: ' + str(meanScore))
#print('stdev: ' + str(stdevScore))
#print('mdean: ' + str(stat.median(normalScores)))
if stdevScore >= meanScore:
print('CAUTION')
else:
print('PASS')
print()
if mode == 1:
# label post with 1-10 score
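            # cleanData is already sorted by score (highest first), so moving the
            # threshold up by cleanSize/10 each time is intended to give the top
            # decile label 10 down to label 1 for the lowest-scoring tweets.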
cleanSize = len(cleanScore)
binSize = cleanSize/10
threshold = binSize
labelScore = 10
for count, item in enumerate(cleanData):
if count <= threshold or labelScore == 1:
hashtagOutput = ''
mentionsOutput = ''
for ht in item['hashtags']:
if ht not in hashtagList:
hashtagList.add(ht)
hashtagOutput += ht + ';'
hashtagOutput = 'NONE' if hashtagOutput == '' else hashtagOutput[:-1]
for ment in item['mentions']:
if ment not in mentionList:
mentionList.add(ment)
mentionsOutput += ment + ';'
mentionsOutput = 'NONE' if mentionsOutput == '' else mentionsOutput[:-1]
try:
totalDisplayFile.write(brand+' | '+str(labelScore)+' | '+day+' | '+hour+' | '+item['content']+' | '+str(item['id'])+' | '+hashtagOutput+' | '+mentionsOutput+'\n')
item['label'] = labelScore
totalOutputFile.write(json.dumps(item)+'\n')
                    except Exception:
                        print(item['content'])
else:
#print(threshold)
threshold += binSize
labelScore -= 1
elif mode == 2:
# binary label (0, 1)
cleanSize = len(cleanScore)
for count, item in enumerate(cleanData):
hashtagOutput = ''
mentionsOutput = ''
for ht in item['hashtags']:
if ht not in hashtagList:
hashtagList.add(ht)
hashtagOutput += ht + ';'
if hashtagOutput == '':
hashtagOutput = 'NONE'
else:
hashtagOutput = hashtagOutput[:-1]
for ment in item['mentions']:
if ment not in mentionList:
mentionList.add(ment)
mentionsOutput += ment + ';'
if mentionsOutput == '':
mentionsOutput = 'NONE'
else:
mentionsOutput = mentionsOutput[:-1]
if count <= 0.5 * cleanSize:
labelScore = 1
else:
labelScore = 0
item['label'] = labelScore
totalOutputFile.write(json.dumps(item) + '\n')
try:
totalDisplayFile.write(
brand + ' | ' + str(labelScore) + ' | ' + day + ' | ' + hour + ' | ' + item['content'] + ' | ' + str(
item['id']) + ' | ' + hashtagOutput + ' | ' + mentionsOutput + '\n')
                except Exception:
                    print(item['content'])
else:
# label with normalized scores
scoreDistFile = open('dataset/stats/scoreDist.'+brand, 'w')
for index, normalScore in enumerate(normalScores):
item = cleanData[index]
score = normalScore * 10
scoreDistFile.write(str(score)+'\n')
hashtagOutput = ''
mentionsOutput = ''
for ht in item['hashtags']:
if ht not in hashtagList:
hashtagList.add(ht)
hashtagOutput += ht + ';'
if hashtagOutput == '':
hashtagOutput = 'NONE'
else:
hashtagOutput = hashtagOutput[:-1]
for ment in item['mentions']:
if ment not in mentionList:
mentionList.add(ment)
mentionsOutput += ment + ';'
if mentionsOutput == '':
mentionsOutput = 'NONE'
else:
mentionsOutput = mentionsOutput[:-1]
try:
totalDisplayFile.write(brand+' | '+str(score)+' | '+day+' | '+hour+' | '+item['content']+' | '+str(item['id'])+' | '+hashtagOutput+' | '+mentionsOutput+'\n')
item['label'] = score
totalOutputFile.write(json.dumps(item)+'\n')
                except Exception:
                    print(item['content'])
scoreDistFile.close()
hashtagFile = open('dataset/commTweets/hashtag.list', 'w')
mentionFile = open('dataset/commTweets/mention.list', 'w')
for ht in hashtagList:
hashtagFile.write(ht+'\n')
for ment in mentionList:
mentionFile.write(ment+'\n')
hashtagFile.close()
mentionFile.close()
totalOutputFile.close()
def groupSampler(groupMode, groupSize, seed):
print(groupMode)
inputFile = open('dataset/experiment/labeled_data/' + groupMode + '_' + str(groupSize) + '.labeled', 'r')
groupData = {}
for num in range(int(groupSize)):
groupData[num] = {}
for line in inputFile:
data = json.loads(line.strip())
tweetID = data['id']
        text = data['content'].replace('\n', ' ').replace('\r', ' ')
group = data['group']
groupData[group][tweetID] = text
inputFile.close()
outputFile = open('dataset/experiment/sample/' + groupMode + '_' + str(groupSize) + '.sample', 'w')
for groupIndex in range(int(groupSize)):
outputFile.write('Group: ' + str(groupIndex)+'\n')
print(len(groupData[groupIndex]))
for count, tweetID in enumerate(groupData[groupIndex]):
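            # `seed` is used as a sampling stride here: keep every seed-th tweet of the group.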
if count % seed == 0:
outputFile.write(groupData[groupIndex][tweetID]+'\t'+str(tweetID)+'\n')
outputFile.close()
def brandLabel(removeOutliers=True):
if removeOutliers:
totalOutputFile = open('dataset/experiment/brandGroup_0.labeled', 'w')
contentOutputFile = open('dataset/experiment/content/brandGroup_0.content', 'w')
statFile = open('dataset/analysis/brandGroup_0.stat', 'w')
else:
totalOutputFile = open('dataset/experiment/brandGroup_0__full' + '.labeled', 'w')
contentOutputFile = open('dataset/experiment/content/brandGroup_0__full' + '.content', 'w')
statFile = open('dataset/analysis/brandGroup_0_full' + '.stat', 'w')
totalData = {}
brandGroupData = {}
inputFile = open('dataset/experiment/total.json', 'r')
for line in inputFile:
data = json.loads(line.strip())
tweetID = data['id']
        text = data['text']
filtered = False
for term in filterTerms:
if term in text.lower():
filtered = True
break
if not filtered:
brand = data['brand']
if brand not in brandGroupData:
brandGroupData[brand] = []
brandGroupData[brand].append(tweetID)
content = tweetTextCleaner.tweetCleaner(text)
finalIndex = len(data['dynamic']) - 1
retweet = float(data['dynamic'][finalIndex]['retweet_count'])
favorite = float(data['dynamic'][finalIndex]['favorite_count'])
followers = float(data['dynamic'][finalIndex]['user_followers_count'])
if retweet == 0:
ratio = 0
else:
ratio = favorite / retweet
statFile.write(
str(favorite) + '\t' + str(retweet) + '\t' + str(followers) + '\t' + str(ratio) + '\n')
author_statuses_count = float(data['dynamic'][finalIndex]['user_statuses_count'])
author_favorite_count = float(data['dynamic'][finalIndex]['user_favorite_count'])
author_listed_count = float(data['dynamic'][finalIndex]['user_listed_count'])
dateTemp = data['create_at'].split()
day = dateTemp[0]
hour = dateTemp[3].split(':')[0]
postDate = dateTemp[1] + ' ' + dateTemp[2] + ' ' + dateTemp[5]
dateTemp = data['user_create_at'].split()
authorDate = dateTemp[1] + ' ' + dateTemp[2] + ' ' + dateTemp[5]
postData_object = datetime.strptime(postDate, '%b %d %Y')
authorData_object = datetime.strptime(authorDate, '%b %d %Y')
authorInterval = float((postData_object - authorData_object).days)
if followers > 0:
successScore = (2.0 * retweet + favorite) * 10000 / followers
temp = {'brand': brand, 'content': content, 'success_score': successScore, 'id': tweetID, 'day': day,
'hour': hour, 'mentions': data['mentions'], 'hashtags': data['hashtags'],
'author_statuses_count': author_statuses_count,
'author_favorite_count': author_favorite_count, 'author_listed_count': author_listed_count,
'authorInterval': authorInterval, 'author_followers_count': followers}
totalData[tweetID] = temp
inputFile.close()
for brand, tweetIDs in brandGroupData.items():
print('Brand: ' + brand)
groupScoreList = []
IDList = []
for tweetID in tweetIDs:
if tweetID in totalData:
successScore = totalData[tweetID]['success_score']
groupScoreList.append(successScore)
IDList.append(tweetID)
cleanDataList = []
if removeOutliers:
zScores = stats.zscore(groupScoreList)
if len(zScores) != len(groupScoreList):
                print('Z-score Error!')
for index, item in enumerate(IDList):
if removeOutliers:
zScore = float(zScores[index])
if zScore <= 2:
cleanDataList.append({'id': item, 'success_score': groupScoreList[index]})
else:
cleanDataList.append({'id': item, 'success_score': groupScoreList[index]})
print('Group Size: ' + str(len(cleanDataList)))
sorted_cleanDataList = sorted(cleanDataList, key=lambda x: x['success_score'], reverse=True)
# label post with 1-10 score
cleanSize = len(cleanDataList)
binSize = cleanSize / 10
threshold = binSize
labelScore = 10
for count, item in enumerate(sorted_cleanDataList):
tweetID = item['id']
if count <= threshold or labelScore == 1:
tempData = totalData[tweetID]
tempData['label'] = labelScore
tempData['group'] = brand
totalOutputFile.write(json.dumps(tempData) + '\n')
contentOutputFile.write(tempData['content']+'\n')
else:
#print threshold
threshold += binSize
labelScore -= 1
statFile.close()
totalOutputFile.close()
contentOutputFile.close()
def groupLabel(groupMode, groupSize, removeOutliers=True):
groupFile = open('dataset/experiment/group_indicies/'+groupMode+'.'+str(groupSize), 'r')
for line in groupFile:
groupData = json.loads(line.strip())
groupFile.close()
if removeOutliers:
totalOutputFile = open('dataset/experiment/labeled_data/'+groupMode+'_'+str(groupSize)+'.labeled', 'w')
contentOutputFile = open('dataset/experiment/content/'+groupMode+'_'+str(groupSize)+'.content', 'w')
statFile = open('dataset/analysis/'+groupMode+'_'+str(groupSize)+'.stat', 'w')
else:
totalOutputFile = open('dataset/experiment/labeled_data/' + groupMode + '_' + str(groupSize) + '_full' + '.labeled', 'w')
contentOutputFile = open('dataset/experiment/content/' + groupMode + '_' + str(groupSize) + '_full' + '.content', 'w')
statFile = open('dataset/analysis/' + groupMode + '_' + str(groupSize) + '_full' + '.stat', 'w')
totalData = {}
inputFile = open('dataset/experiment/total.json', 'r')
for line in inputFile:
data = json.loads(line.strip())
tweetID = data['id']
        text = data['text']
filtered = False
for term in filterTerms:
if term in text.lower():
filtered = True
break
if not filtered:
brand = data['brand']
content = tweetTextCleaner.tweetCleaner(text)
finalIndex = len(data['dynamic']) - 1
retweet = float(data['dynamic'][finalIndex]['retweet_count'])
favorite = float(data['dynamic'][finalIndex]['favorite_count'])
followers = float(data['dynamic'][finalIndex]['user_followers_count'])
if retweet == 0:
ratio = 0
else:
ratio = favorite / retweet
statFile.write(
str(favorite) + '\t' + str(retweet) + '\t' + str(followers) + '\t' + str(ratio) + '\n')
author_statuses_count = float(data['dynamic'][finalIndex]['user_statuses_count'])
author_favorite_count = float(data['dynamic'][finalIndex]['user_favorite_count'])
author_listed_count = float(data['dynamic'][finalIndex]['user_listed_count'])
dateTemp = data['create_at'].split()
day = dateTemp[0]
hour = dateTemp[3].split(':')[0]
postDate = dateTemp[1] + ' ' + dateTemp[2] + ' ' + dateTemp[5]
dateTemp = data['user_create_at'].split()
authorDate = dateTemp[1] + ' ' + dateTemp[2] + ' ' + dateTemp[5]
postData_object = datetime.strptime(postDate, '%b %d %Y')
authorData_object = datetime.strptime(authorDate, '%b %d %Y')
authorInterval = float((postData_object - authorData_object).days)
if followers > 0:
successScore = (2.0 * retweet + favorite) * 10000 / followers
temp = {'brand': brand, 'content': content, 'success_score': successScore, 'id': tweetID, 'day': day,
'hour': hour, 'mentions': data['mentions'], 'hashtags': data['hashtags'],
'author_statuses_count': author_statuses_count,
'author_favorite_count': author_favorite_count, 'author_listed_count': author_listed_count,
'authorInterval': authorInterval, 'author_followers_count': followers}
totalData[tweetID] = temp
inputFile.close()
for groupIndex in range(int(groupSize)):
print(groupMode+': ' + str(groupIndex))
groupScoreList = []
IDList = []
for tweetID in groupData[str(groupIndex)]:
if tweetID in totalData:
successScore = totalData[tweetID]['success_score']
groupScoreList.append(successScore)
IDList.append(tweetID)
cleanDataList = []
if removeOutliers:
zScores = stats.zscore(groupScoreList)
if len(zScores) != len(groupScoreList):
print('Z-score Error!')
for index, item in enumerate(IDList):
if removeOutliers:
zScore = float(zScores[index])
if zScore <= 2:
cleanDataList.append({'id': item, 'success_score': groupScoreList[index]})
else:
cleanDataList.append({'id': item, 'success_score': groupScoreList[index]})
print('Group Size: ' + str(len(cleanDataList)))
sorted_cleanDataList = sorted(cleanDataList, key=lambda x: x['success_score'], reverse=True)
# label post with 1-10 score
cleanSize = len(cleanDataList)
binSize = cleanSize / 10
threshold = binSize
labelScore = 10
for count, item in enumerate(sorted_cleanDataList):
tweetID = item['id']
if count <= threshold or labelScore == 1:
tempData = totalData[tweetID]
tempData['label'] = labelScore
tempData['group'] = groupIndex
totalOutputFile.write(json.dumps(tempData) + '\n')
contentOutputFile.write(tempData['content']+'\n')
else:
#print threshold
threshold += binSize
labelScore -= 1
statFile.close()
totalOutputFile.close()
contentOutputFile.close()
def simpleLabel(groupVersion, removeOutliers=True):
if removeOutliers:
totalOutputFile = open('dataset/experiment/labeled_data/simple_'+str(groupVersion)+'.labeled', 'w')
contentOutputFile = open('dataset/experiment/content/simple_'+str(groupVersion)+'.content', 'w')
statFile = open('dataset/analysis/simple_'+str(groupVersion)+'.stat', 'w')
else:
totalOutputFile = open('dataset/experiment/labeled_data/simple_'+str(groupVersion)+'_full.labeled', 'w')
contentOutputFile = open('dataset/experiment/content/simple_'+str(groupVersion)+'_full.content', 'w')
statFile = open('dataset/analysis/simple_'+str(groupVersion)+'_full.stat', 'w')
totalData = {}
inputFile = open('dataset/experiment/total.json', 'r')
for line in inputFile:
data = json.loads(line.strip())
tweetID = data['id']
        text = data['text']
filtered = False
for term in filterTerms:
if term in text.lower():
filtered = True
break
if not filtered:
brand = data['brand']
content = tweetTextCleaner.tweetCleaner(text)
finalIndex = len(data['dynamic']) - 1
retweet = float(data['dynamic'][finalIndex]['retweet_count'])
favorite = float(data['dynamic'][finalIndex]['favorite_count'])
followers = float(data['dynamic'][finalIndex]['user_followers_count'])
if retweet == 0:
ratio = 0
else:
ratio = favorite / retweet
statFile.write(
str(favorite) + '\t' + str(retweet) + '\t' + str(followers) + '\t' + str(ratio) + '\n')
author_statuses_count = float(data['dynamic'][finalIndex]['user_statuses_count'])
author_favorite_count = float(data['dynamic'][finalIndex]['user_favorite_count'])
author_listed_count = float(data['dynamic'][finalIndex]['user_listed_count'])
dateTemp = data['create_at'].split()
day = dateTemp[0]
hour = dateTemp[3].split(':')[0]
postDate = dateTemp[1] + ' ' + dateTemp[2] + ' ' + dateTemp[5]
dateTemp = data['user_create_at'].split()
authorDate = dateTemp[1] + ' ' + dateTemp[2] + ' ' + dateTemp[5]
postData_object = datetime.strptime(postDate, '%b %d %Y')
authorData_object = datetime.strptime(authorDate, '%b %d %Y')
authorInterval = float((postData_object - authorData_object).days)
if followers > 0:
successScore = (2.0 * retweet + favorite) * 10000 / followers
temp = {'brand': brand, 'content': content, 'success_score': successScore, 'id': tweetID, 'day': day,
'hour': hour, 'mentions': data['mentions'], 'hashtags': data['hashtags'],
'author_statuses_count': author_statuses_count,
'author_favorite_count': author_favorite_count, 'author_listed_count': author_listed_count,
'authorInterval': authorInterval, 'author_followers_count': followers}
totalData[tweetID] = temp
inputFile.close()
groupScoreList = []
IDList = []
for tweetID in totalData:
successScore = totalData[tweetID]['success_score']
groupScoreList.append(successScore)
IDList.append(tweetID)
cleanDataList = []
if removeOutliers:
zScores = stats.zscore(groupScoreList)
if len(zScores) != len(groupScoreList):
print('Z-score Error!')
for index, item in enumerate(IDList):
if removeOutliers:
zScore = float(zScores[index])
if zScore <= 2:
cleanDataList.append({'id': item, 'success_score': groupScoreList[index]})
else:
cleanDataList.append({'id': item, 'success_score': groupScoreList[index]})
print('Group Size: ' + str(len(cleanDataList)))
sorted_cleanDataList = sorted(cleanDataList, key=lambda x: x['success_score'], reverse=True)
# label post with 1-10 score
cleanSize = len(cleanDataList)
binSize = cleanSize / 10
threshold = binSize
labelScore = 10
for count, item in enumerate(sorted_cleanDataList):
tweetID = item['id']
if count <= threshold or labelScore == 1:
tempData = totalData[tweetID]
tempData['label'] = labelScore
tempData['group'] = 0
totalOutputFile.write(json.dumps(tempData) + '\n')
contentOutputFile.write(tempData['content']+'\n')
else:
#print threshold
threshold += binSize
labelScore -= 1
statFile.close()
totalOutputFile.close()
contentOutputFile.close()
def keywordLabel(keyword):
outputFile = open('dataset/experiment/'+keyword+'.labeled', 'w')
statFile = open('dataset/analysis/'+keyword+'.stat', 'w')
tweetData = {}
dataList = []
inputFile = open('dataset/experiment/total.json', 'r')
for line in inputFile:
data = json.loads(line.strip())
tweetID = data['id']
        text = data['text']
if keyword in text.lower():
brand = data['brand']
content = tweetTextCleaner.tweetCleaner(text)
finalIndex = len(data['dynamic']) - 1
retweet = float(data['dynamic'][finalIndex]['retweet_count'])
favorite = float(data['dynamic'][finalIndex]['favorite_count'])
followers = float(data['dynamic'][finalIndex]['user_followers_count'])
if retweet == 0:
ratio = 0
else:
ratio = favorite / retweet
statFile.write(
str(favorite) + '\t' + str(retweet) + '\t' + str(followers) + '\t' + str(ratio) + '\n')
author_statuses_count = float(data['dynamic'][finalIndex]['user_statuses_count'])
author_favorite_count = float(data['dynamic'][finalIndex]['user_favorite_count'])
author_listed_count = float(data['dynamic'][finalIndex]['user_listed_count'])
dateTemp = data['create_at'].split()
day = dateTemp[0]
hour = dateTemp[3].split(':')[0]
postDate = dateTemp[1] + ' ' + dateTemp[2] + ' ' + dateTemp[5]
dateTemp = data['user_create_at'].split()
authorDate = dateTemp[1] + ' ' + dateTemp[2] + ' ' + dateTemp[5]
postData_object = datetime.strptime(postDate, '%b %d %Y')
authorData_object = datetime.strptime(authorDate, '%b %d %Y')
authorInterval = float((postData_object - authorData_object).days)
if followers > 0:
successScore = (2.0 * retweet + favorite) * 10000 / followers
temp = {'brand': brand, 'content': content, 'success_score': successScore, 'id': tweetID, 'day': day,
'hour': hour, 'mentions': data['mentions'], 'hashtags': data['hashtags'],
'author_statuses_count': author_statuses_count,
'author_favorite_count': author_favorite_count, 'author_listed_count': author_listed_count,
'authorInterval': authorInterval, 'author_followers_count': followers}
tweetData[tweetID] = temp
dataList.append({'id': tweetID, 'success_score': successScore})
inputFile.close()
print(len(dataList))
sorted_dataList = sorted(dataList, key=lambda x: x['success_score'], reverse=True)
# label post with 1-10 score
dataSize = len(dataList)
binSize = dataSize / 10
threshold = binSize
labelScore = 10
for count, item in enumerate(sorted_dataList):
tweetID = item['id']
if count <= threshold or labelScore == 1:
tempData = tweetData[tweetID]
tempData['label'] = labelScore
tempData['keyword'] = keyword
outputFile.write(json.dumps(tempData) + '\n')
else:
threshold += binSize
labelScore -= 1
statFile.close()
outputFile.close()
def scoreFileBlender():
data = []
listFile = open('brand.list', 'r')
for line in listFile:
brand = line.strip()
inputFile = open('dataset/stats/scoreDist.' + brand, 'r')
for line in inputFile:
data.append(float(line.strip()))
inputFile.close()
listFile.close()
sorted_data = sorted(data, reverse=True)
outputFile = open('dataset/stats/scoreDist.total', 'w')
for num in sorted_data:
outputFile.write(str(num)+'\n')
outputFile.close()
def maxIndex(input, num):
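    # Return the indices of the `num` largest values in `input`, highest first.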
line = {}
for index in range(len(input)):
line[index] = float(input[index])
    sorted_line = sorted(line.items(), key=operator.itemgetter(1), reverse=True)
output = []
for i in range(num):
output.append(sorted_line[i][0])
return output
def dataGrouper(groupMode, groupSize, hierarchical=False):
inputFile = open('dataset/experiment/total.json', 'r')
tweetData = []
outputData = {}
for index in range(int(groupSize)):
outputData[str(index)] = []
for line in inputFile:
data = json.loads(line.strip())
tweetID = data['id']
        text = data['text']
content = text.replace('\r', ' ').replace('\n', ' ')
brand = data['brand']
tweetData.append({'id': tweetID, 'content': content, 'brand': brand})
inputFile.close()
if groupMode == 'brandGroup':
print('running brand grouping...')
brandMapper = {}
groupFile = open('brandGroup.list', 'r')
for index, line in enumerate(groupFile):
brands = line.strip().split()
for brand in brands:
brandMapper[brand] = str(index)
groupFile.close()
for tweet in tweetData:
if tweet['brand'] in brandMapper:
outputData[brandMapper[tweet['brand']]].append(tweet['id'])
elif groupMode == 'topicGroup':
print('running LDA grouping...')
csvFile = open('TMT/LDAinput.csv', 'w')
for tweet in tweetData:
csvFile.write(tweetTextCleaner.tweetCleaner(tweet['content']).replace('"', '\'') + '\n')
csvFile.close()
subprocess.check_output('java -Xmx1024m -jar TMT/tmt-0.4.0.jar TMT/assign.scala', shell=True)
distFile = open('TMTSnapshots/document-topic-distributions.csv', 'r')
topicOut = {}
for line in distFile:
seg = line.strip().split(',')
if seg[1] != 'NaN':
topicOutList = maxIndex(seg[1:], int(groupSize))
topicOut[int(seg[0])] = topicOutList
distFile.close()
for index, value in topicOut.items():
outputData[str(value[0])].append(tweetData[index]['id'])
elif groupMode == 'simGroup_binary':
print('running kmeans clustering with binary representation...')
tweets = []
for tweet in tweetData:
tweets.append(tweetTextCleaner.tweetCleaner(tweet['content']))
        vectorizer = CountVectorizer(analyzer='word', ngram_range=(1, 1), min_df=1, stop_words='english', binary=True)
matrix = vectorizer.fit_transform(tweets)
print(matrix.shape)
if hierarchical:
print()
#z = cluster.hierarchy.linkage(matrix, 'ward')
else:
kmeans = cluster.KMeans(n_clusters=int(groupSize), init='k-means++')
kmeans.fit(matrix)
for index, label in enumerate(kmeans.labels_):
outputData[str(label)].append(tweetData[index]['id'])
elif groupMode == 'simGroup_emb':
print('running kmeans clustering with CMU encoding...')
'''
contentFile = open('embedding/CMU_hashtag/tweet.content', 'w')
for tweet in tweetData:
contentFile.write(tweet['content']+'\n')
contentFile.close()
subprocess.check_output('python embedding/CMU_hashtag/preprocess.py', shell=True)
subprocess.check_output('python embedding/CMU_hashtag/encode_char.py embedding/CMU_hashtag/tweet.input embedding/CMU_hashtag/best_model embedding/CMU_hashtag/', shell=True)
'''
embData = numpy.load('embedding/CMU_hashtag/embeddings.npy')
print(len(embData))
if hierarchical:
print()
else:
kmeans = cluster.KMeans(n_clusters=int(groupSize), init='k-means++')
kmeans.fit(embData)
for index, label in enumerate(kmeans.labels_):
outputData[str(label)].append(tweetData[index]['id'])
outputFile = open('dataset/experiment/group_indicies/'+groupMode + '.' + str(groupSize), 'w')
outputFile.write(json.dumps(outputData))
outputFile.close()
'''
def content2vec(model, content):
words = simpleTokenize(content)
tempList = []
for word in words:
if word in model.vocab:
tempList.append(model[word])
if len(tempList) < 1:
return numpy.zeros(400)
vecSize = len(tempList[0])
sumList = []
for i in range(vecSize):
sumList.append(0.0)
for vec in tempList:
for i in range(vecSize):
sumList[i] += vec[i]
output = []
dataSize = len(tempList)
for value in sumList:
output.append(value/dataSize)
return numpy.array(output)
'''
'''
def dataGrouperKey(groupMode, groupSize):
keyData = {}
keyFile = open('dataset/experiment/parser/total.key', 'r')
for line in keyFile:
if line.strip().startswith(':: '):
keyData[int(line.strip().replace(':: ', ''))] = 'NONE'
else:
temp = line.strip().split(' :: ')
keyData[int(temp[1])] = temp[0]
keyFile.close()
inputFile = open('dataset/experiment/total.json', 'r')
tweetData = []
outputData = {}
for index in range(int(groupSize)):
outputData[str(index)] = []
for line in inputFile:
data = json.loads(line.strip())
tweetID = data['id']
text = data['text'].encode('utf-8')
key = keyData[tweetID]
content = text.replace('\r', ' ').replace('\n', ' ')
brand = data['brand']
tweetData.append({'id': tweetID, 'content': content, 'brand': brand, 'key': key})
inputFile.close()
if groupMode == 'topicGroup':
print('running LDA grouping...')
csvFile = open('TMT/LDAinput.csv', 'w')
for tweet in tweetData:
csvFile.write(tweet['key'].replace('"', '\'') + '\n')
csvFile.close()
subprocess.check_output('java -Xmx1024m -jar TMT/tmt-0.4.0.jar TMT/assign.scala', shell=True)
distFile = open('TMTSnapshots/document-topic-distributions.csv', 'r')
topicOut = {}
for line in distFile:
seg = line.strip().split(',')
if seg[1] != 'NaN':
topicOutList = maxIndex(seg[1:], int(groupSize))
topicOut[int(seg[0])] = topicOutList
distFile.close()
for index, value in topicOut.items():
outputData[str(value[0])].append(tweetData[index]['id'])
elif groupMode == 'simGroup_binary':
print('running kmeans clustering with binary representation...')
tweets = []
for tweet in tweetData:
tweets.append(tweet['key'])
vectorizer = CountVectorizer(analyzer='word', ngram_range=(1, 1), min_df=1, stop_words='english', binary='True')
matrix = vectorizer.fit_transform(tweets)
print(matrix.shape)
kmeans = cluster.KMeans(n_clusters=int(groupSize), init='k-means++')
kmeans.fit(matrix)
for index, label in enumerate(kmeans.labels_):
outputData[str(label)].append(tweetData[index]['id'])
elif groupMode == 'simGroup_emb':
w2v = word2vecReader.Word2Vec()
embModel = w2v.loadModel()
contents = []
for tweet in tweetData:
tweetVec = content2vec(embModel, tweet['key'])
contents.append(tweetVec)
matrix = numpy.array(contents)
print(matrix.shape)
kmeans = cluster.KMeans(n_clusters=int(groupSize), init='k-means++')
kmeans.fit(matrix)
for index, label in enumerate(kmeans.labels_):
outputData[str(label)].append(tweetData[index]['id'])
outputFile = open('dataset/experiment/group_indicies/' + groupMode + '.' + str(groupSize), 'w')
outputFile.write(json.dumps(outputData))
outputFile.close()
'''
def dataAligner(groupMode, groupSize):
tweetData = {}
inputDataFile = open('dataset/experiment/'+groupMode+'_'+str(groupSize)+'.labeled', 'r')
for line in inputDataFile:
temp = json.loads(line.strip())
tweetData[str(temp['id'])] = temp['label']
orderTweetIDList = []
cleanDataFile = open('dataset/experiment/clean.labeled', 'r')
for line in cleanDataFile:
temp = json.loads(line.strip())
orderTweetIDList.append(temp['id'])
if __name__ == "__main__":
label_new(1, 'dataset/commTweets.json')
#label2(1)
#scoreFileBlender()
#dataGrouper('topicGroup', 7.2)
#dataGrouperKey('topicGroup', 2.4)
#groupLabel('topicGroup', 2.4, True)
#simpleLabel(1.1, True)
#groupSampler('simGroup_emb', 5.4, 300)
#groupSampler('topicGroup', 2.2, 3000)
#groupSampler('topicGroup', 2.1, 1000)
#groupSampler('topicGroup', 2.2, 1000)
#brandLabel()
#keywordLabel('trump')
#keywordLabel('iphone') | python |
import pytest
from collections import Counter
from asttools import (
quick_parse,
)
from ..pattern_match import (
pattern,
UnhandledPatternError,
config_from_subscript,
split_case_return
)
class Hello:
def __init__(self, greeting):
self.greeting = greeting
class Unhandled:
def __repr__(self):
return 'Unhandled'
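# As exercised by the tests below, the `pattern` decorator turns the function body
# into a match table: `meta[match: ...]` names the value(s) to match on, each
# `~ case | result` line maps a case to its return value, an optional `[when: ...]`
# subscript adds a guard, and `default` catches anything unmatched.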
def test_single_pattern():
@pattern
def pat(val):
meta[match: val] # noqa: F821
~ 'dale' | "DALE"
~ 'list' | []
~ str | val
~ int | 'int'+str(val)
~ Hello | val.greeting
~ default | 'default_' + str(val) # noqa: F821
obj = Hello("Welcome Friend")
assert pat(obj) == "Welcome Friend"
assert pat('dale') == "DALE"
assert pat('some_string') == "some_string"
assert pat(101) == "int101"
assert pat('list') == []
assert pat(Unhandled()) == 'default_Unhandled'
def test_multi_return():
@pattern
def multi_return(x):
meta[match: x] # noqa: F821
~ float | type(x), x, x
~ int | type(x), x
assert multi_return(1) == (int, 1)
assert multi_return(1.1) == (float, 1.1, 1.1)
def test_when():
@pattern
def multi_return(x):
meta[match: x] # noqa: F821
~ float [when: x > 1] | type(x), x, x # noqa: F821, E211
~ int [when: x > 100 and x < 150] | x, 'Between 100 and 150' # noqa: F821, E211, E501
~ int [when: x > 10] | 'INT OVER 10' # noqa: F821, E211
~ int | type(x), x
assert multi_return(1) == (int, 1)
assert multi_return(11) == "INT OVER 10"
assert multi_return(122) == (122, "Between 100 and 150")
assert multi_return(1.1) == (float, 1.1, 1.1)
with pytest.raises(UnhandledPatternError):
assert multi_return(0.1) == (float, 1.1, 1.1)
def test_config_from_subscript():
node = quick_parse("bob[match: x]").value
meta = config_from_subscript(node)
assert meta['match'][0].id == 'x'
assert Counter(list(meta)) == Counter(['match'])
node = quick_parse("bob[match: x, second: 1]").value
meta = config_from_subscript(node)
assert meta['match'][0].id == 'x'
assert meta['second'][0].n == 1
assert Counter(list(meta)) == Counter(['match', 'second'])
node = quick_parse("bob[match: x, y, second: 1]").value
meta = config_from_subscript(node)
assert meta['match'][0].id == 'x'
assert meta['match'][1].id == 'y'
assert meta['second'][0].n == 1
assert Counter(list(meta)) == Counter(['match', 'second'])
def test_split_case_return():
node = quick_parse("~ x | type(x), y").value
case_nodes, return_nodes = split_case_return(node)
assert len(case_nodes) == 1
assert len(return_nodes) == 2
def test_multi_pattern():
@pattern
def multi(x, y):
meta[match: x, y] # noqa: F821
~ float, 3 | type(x), x, y
~ int, 3 | type(x), x, 'int'
~ int, int | 'INT'
assert multi(1, 2) == 'INT'
assert multi(1, 3) == (int, 1, 'int')
assert multi(1.0, 3) == (float, 1, 3)
def test_pattern_match_doc():
# should ignore doc string.
@pattern
def docstring(x, y):
"""
doc string
"""
meta[match: x, y] # noqa: F821
_missing = object()
def test_pattern_match_object():
# test again object() sentinels
@pattern
def match(x):
meta[match: x] # noqa: F821
~ _missing | "MISSING"
~ default | x # noqa: F821
assert match(_missing) == "MISSING"
assert match(100) == 100
@pattern
def multimatch(x, y):
meta[match: x, y] # noqa: F821
~ 1, _missing | x, "MISSING"
~ default | x, y # noqa: F821
assert multimatch(1, _missing) == (1, "MISSING")
assert multimatch(_missing, 100) == (_missing, 100)
| python |
import math
from functools import reduce
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from IPython.display import display
from matplotlib.dates import DateFormatter
from scipy.stats import linregress
from utils import get_vlines, fmt_number, fmt_pct
class CovidDataViz(object):
"""
A class to make plots from processed COVID-19 and World Bank data.
"""
def __init__(self, path='../data/processed'):
self.path = path
self.data = dict()
self.data['Confirmed'] = pd.read_csv(f'{path}/confirmed_cases.csv')
self.data['Confirmed chg'] = pd.read_csv(f'{path}/confirmed_cases_daily_change.csv')
self.data['Confirmed t0'] = pd.read_csv(f'{path}/confirmed_cases_since_t0.csv')
self.data['Recovered'] = pd.read_csv(f'{path}/recovered_cases.csv')
self.data['Dead'] = pd.read_csv(f'{path}/dead_cases.csv')
self.data['Active'] = pd.read_csv(f'{path}/active_cases.csv')
self.data['Mortality'] = pd.read_csv(f'{path}/mortality_rate.csv')
self.data['Coordinates'] = pd.read_csv(f'{path}/coordinates.csv')
self.data['Continents'] = pd.read_csv(f'{path}/continents.csv')
self.data['Ctry to cont'] = pd.read_csv(f'{path}/country_to_continent.csv')
self.data['Country stats'] = pd.read_csv(f'{path}/country_stats.csv')
self.data['World bank'] = pd.read_csv(f'{path}/world_bank.csv')
for _, df in self.data.items():
if 'Date' in df.columns:
df['Date'] = pd.to_datetime(df['Date'])
self.all_countries = sorted(set(self.data['Coordinates']['Country']))
self.all_continents = sorted(set(self.data['Continents']['Continent']))
def list_highest_mortality(self, n=10):
"""
        Generate a list of countries with the highest mortality rate.
Notes
-----
mortality = dead / confirmed.
"""
df = self._sort_ctry_stats(stat_name='Mortality', n=n)
return df
def get_country_ts(self, country):
"""
Extract country level cases time series.
"""
dfs = [self.data['Confirmed'][['Date', country]],
self.data['Recovered'][['Date', country]],
self.data['Dead'][['Date', country]],
self.data['Active'][['Date', country]]]
df = reduce(lambda x, y: pd.merge(x, y, on='Date', how='outer'), dfs)
df.columns = ['Date', 'Confirmed', 'Recovered', 'Dead', 'Active']
return df
def get_continent_ts(self, continent):
"""
Get continent level cases time series.
"""
cont = self.data['Continents'].copy()
cont = cont[cont['Continent'] == continent]
cont = pd.merge(self.data['Coordinates'], cont, on='Country')
countries = sorted(list(cont['Country']))
cases = ['Confirmed', 'Recovered', 'Dead', 'Active']
dfs = []
for c in cases:
tmp = self.data[c][countries].sum(axis=1)
tmp.name = c
tmp = tmp.to_frame()
tmp['Date'] = self.data[c]['Date']
dfs.append(tmp)
df = reduce(lambda x, y: pd.merge(x, y, on='Date', how='outer'), dfs)
df = df[['Date'] + cases]
return df
def get_world_ts(self):
"""
Get world level cases time series.
"""
cases = ['Confirmed', 'Recovered', 'Dead', 'Active']
dfs = []
for case in cases:
tmp = self.data[case].drop('Date', axis=1).sum(axis=1)
tmp.name = case
tmp = tmp.to_frame()
tmp['Date'] = self.data[case]['Date']
dfs.append(tmp)
df = reduce(lambda x, y: pd.merge(x, y, on='Date', how='outer'), dfs)
return df
def get_highest_mortality(self, n_countries, min_cases=10 ** 4):
"""
        List countries with the highest mortality rate.
"""
df = self.data['Country stats']
df = df[df['Confirmed'] > min_cases]
df = df.sort_values('Mortality', ascending=False).copy()
df = df.reset_index(drop=True)
df = df.head(n_countries)
df = df[['Country', 'Mortality']]
return df
def get_most_cases(self, case_type, n=10):
"""
Get n countries with most cases.
"""
df = self._sort_ctry_stats(stat_name=case_type, n=n)
return df
def plot_world_cases(self):
"""
Create world cases line plot.
"""
df = self.get_world_ts()
self.plot_ts(df=df, title='World', suffix='cases')
def plot_country_cases(self, country):
"""
Create individual country cases line plot.
"""
df = self.get_country_ts(country=country)
self.plot_ts(df, country, 'cases')
def plot_continent_cases(self, continent):
"""
Create continent cases line plot.
"""
df = self.get_continent_ts(continent=continent)
self.plot_ts(df, continent, 'cases')
def plot_ts(self, df, title, suffix):
"""
Draw individual time series as a line plot.
Inputs
------
df : pd.DataFrame
A dataframe with a `Date` column and cases data.
        title : str
            The title of the plot.
        suffix : str
            Suffix appended to the output file name.
        Notes
        -----
        This will create a time series plot of cases. It
        will also save the plot to ../img/{title}_{suffix}.png
        """
# Set proper aspect ratio and dpi
width = 1000
height = width / 1.78
dpi = 300
fontsize = 3
fontfamily = 'serif'
plt.figure(figsize=(width/dpi, height/dpi), dpi=dpi)
ax = plt.subplot(111)
# Extend x axis so that labels fit inside the plot
extend_x_axis = pd.Timedelta('7 days')
        # Extend the y axis by 4% to make space between the plot and the title
        extend_y_axis = 0.04
# Disable spines
ax.spines['top'].set_visible(False)
# ax.spines['bottom'].set_visible(False)
# ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
# Set spine width
ax.spines['left'].set_linewidth(1/5)
ax.spines['bottom'].set_linewidth(1/5)
# Force ticks to bottom left
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
        # Get min and max values to set limits so that data points fit inside the plot.
xmin = df['Date'].min()
xmax = df['Date'].max() + extend_x_axis
ymin = df.drop(['Date'], axis=1).min().min()
ymax = df.drop(['Date'], axis=1).max().max()
yticks, ylabels = get_vlines(ymin, ymax, k=5)
plt.yticks(ticks=yticks, labels=ylabels,
fontsize=fontsize, family=fontfamily)
plt.xticks(fontsize=fontsize, family=fontfamily)
# Display label of every other month
ax.xaxis.set_major_formatter(DateFormatter('%Y-%m'))
ax.xaxis.set_major_locator(mdates.MonthLocator(interval=2))
# Plot horizontal greyed out lines so that people can
# actually see the data without squinting
for y_val in yticks:
ax.plot(df['Date'], np.full((len(df), 1), y_val), c='black',
linestyle='dashed', linewidth=1/6, alpha=3/10)
# User colors from color brewer.
colours = ['#d7191c', '#fdae61', '#a6d96a', '#1a9641']
        # Extract list of columns in alphabetical order
cols = sorted(df.drop('Date', axis=1).columns)
# Plot the actual data
for col,c in zip(cols, colours):
# Line plot
ax.plot(df['Date'], df[col], linewidth=1/3, alpha=9/10, c=c)
# Plot marker at end of x axis
x = df['Date'].tail(1)
y = df[col].tail(1)
ax.scatter(x=x, y=y, linewidth=1/3, c=c, marker='.', alpha=9/10)
# Plot label outside plot
ax.text(x=df['Date'].tail(1) + pd.Timedelta('7 days'),
y=df[col].tail(1), s=col, fontsize=fontsize, c=c,
family=fontfamily, horizontalalignment='left',
verticalalignment='center')
# Display title left aligned to y axis
plt.title(label=title, fontsize=fontsize + 1, family=fontfamily,
weight='bold', loc='center')
        # Set plot limits and extend the top of the y axis by 4%
        plt.xlim(xmin, xmax)
        # Keep the minimum y value at 0 so the baseline stays visible
        plt.ylim(0, (1 + extend_y_axis) * ymax)
plt.tick_params(axis='both', which='both',
bottom=False, top=False,
labelbottom='on', left=False,
right=False, labelleft='on')
plt.tight_layout()
plt.savefig(f'../img/{title.lower()}_{suffix}.png', bbox_inches='tight')
def plot_highest_country_stats(self, statistic, n=10):
"""
Bar plot of countries with the most cases of a certain type.
"""
df = self.get_most_cases(case_type=statistic)
df.loc[df['Country'] == 'United Kingdom', 'Country'] = 'UK'
# Set proper aspect ratio and dpi
width = 1000
height = width / 1.33
dpi = 300
fontsize = 3
fontfamily = 'serif'
plt.figure(figsize=(width/dpi, height/dpi), dpi=dpi)
ax = plt.subplot(111)
# Spines
ax.spines['top'].set_visible(False)
# ax.spines['bottom'].set_visible(False)
# ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_linewidth(1/5)
ax.spines['bottom'].set_linewidth(1/5)
# Plot
x = df['Country']
y = df[statistic]
ax.bar(x=x, height=y, width=1/2)
# Ticks
plt.xticks(rotation=90, fontsize=fontsize, family=fontfamily)
if statistic == 'Mortality':
ymin, ymax = math.floor(y.min()), y.max()
yticks, ylabels = get_vlines(ymin, ymax, k=5, shift=ymin)
ylabels = [lab+'%' for lab in ylabels]
bar_labels = [ fmt_pct(y) for y in list(df[statistic]) ]
else:
ymin, ymax = 0, y.max()
yticks, ylabels = get_vlines(ymin, ymax, k=5, shift=0)
bar_labels = [ fmt_number(y) for y in list(df[statistic]) ]
plt.tick_params(axis='both', which='both',
bottom=False, top=False,
labelbottom='on', left=False,
right=False, labelleft='on')
plt.yticks(ticks=yticks, labels=ylabels,
fontsize=fontsize, family=fontfamily)
ax.tick_params(width=1/5, color='black')
# Limits
plt.xlim(-1/2, len(df) - 1/2)
plt.ylim(ymin, ymax + (0.02 * ymax))
# Horizontal lines
for y_val in yticks:
ax.plot(np.linspace(-1, len(x), 1000), np.full((1000, 1), y_val), c='black',
linestyle='dashed', linewidth=1/5, alpha=3/10)
# Annotations
rects = ax.patches
for rect, label in zip(rects, bar_labels):
height = rect.get_height()
ax.text(x=rect.get_x() + rect.get_width() / 2,
y=height + (0.02 * ymax), s=label, ha='center', va='bottom',
fontsize=fontsize, family=fontfamily)
# Labels
if statistic == 'Mortality':
            plt.ylabel('Mortality rate in percent', fontsize=fontsize, family=fontfamily)
else:
plt.ylabel('Number of cases', fontsize=fontsize, family=fontfamily)
# Title
plt.title(label=f'{statistic}', fontsize=fontsize + 1,
family=fontfamily, weight='bold', loc='center')
plt.tight_layout()
plt.savefig(fname=f'../img/{statistic.lower()}_cases_most.png',
bbox_inches='tight')
plt.show()
def plot_growth(self, countries, periods, steps=60, save=False):
"""
Plot growth curves, log scale.
Inputs
------
countries : list
List of countries
periods : list of ints
Doubling periods for growth curves.
steps : int
Number of data points to use.
"""
countries = sorted(countries)
# Extract mean and use as starting point for
# exponential growth curves.
a = self.data['Confirmed t0'].mean(axis=1)[0]
b = 2
# List of growth curves
growth = list()
for period in periods:
g = exp_growth(a=a,
b=b,
t=np.arange(steps),
tau=period)
g = np.log(g)
growth.append(list(g))
# Plot
# Set proper aspect ratio and dpi
width = 1000
height = width / 1.33
dpi = 300
fontsize = 3
fontfamily = 'serif'
plt.figure(figsize=(width/dpi, height/dpi), dpi=dpi)
ax = plt.subplot(111)
ymax = 0
for g,p in zip(growth, periods):
# Draw growth curves
ax.plot(range(steps), g, c='grey', linestyle='dashed',
lw=1/3, alpha=1/2)
            if p == 1:
                s = 'Double every day'
            else:
                s = f'Double every {p} days'
# Draw marker
x = steps - 1
y = g[steps - 1]
ax.scatter(x=x, y=y, linewidth=1/12, c='grey', alpha=1/2, marker='.')
# Draw text outside
x = steps
y = g[steps - 1]
ax.text(x=x, y=y, s=s, alpha=1, fontsize=fontsize, c='grey',
family=fontfamily, horizontalalignment='left',
verticalalignment='center', rotation_mode='anchor')
if g[-1] >= ymax:
ymax = g[-1]
# Spines
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
# Draw country level data
plot_df = self.data['Confirmed t0'][countries].head(steps)
for c in countries:
ax.plot(range(len(plot_df)), np.log(plot_df[c]), label=c, lw=1/3)
# Ticks
plt.xticks(fontsize=fontsize, family=fontfamily)
plt.yticks(fontsize=fontsize, family=fontfamily)
plt.tick_params(axis='both', which='both',
bottom=False, top=False,
labelbottom='on', left=False,
right=False, labelleft='on')
# Spines
for axis in ['top', 'bottom','left', 'right']:
ax.spines[axis].set_linewidth(1/5)
# Limits
plt.xlim(0, steps)
plt.ylim(np.log(a), ymax + 1/2)
# Legend
legend = ax.legend(loc='upper left',
fancybox=False, prop={'family': fontfamily,
'size': fontsize})
legend.get_frame().set_linewidth(1/5)
legend.get_frame().set_edgecolor('black')
# Labels
plt.ylabel(ylabel='Confirmed cases, log scale', fontsize=fontsize,
family=fontfamily)
plt.xlabel(xlabel='Days since 100 cases', fontsize=fontsize,
family=fontfamily)
plt.title(label='Doubling rate', fontsize=fontsize + 1, family=fontfamily,
weight='bold', loc='center')
plt.tight_layout()
if save:
plt.savefig(fname='../img/growth_plot.png', bbox_inches='tight')
plt.show()
def plot_country_cases_chg(self, country, n=7):
"""
Plot country level change in cases with n day moving average.
"""
df = self.data['Confirmed chg'][['Date', country]].copy()
df[f'{n} day average \n of new cases'] = df[country].rolling(n).mean()
df = df.drop(country, axis=1)
self.plot_ts(df=df, title=country, suffix='chg')
def plot_with_slope(self, x, y):
"""
Create scatter plot with regression line and
greyed out R squared.
"""
X = self.data['World bank'][x]
Y = self.data['World bank'][y]
X_reg = np.linspace(np.min(X), np.max(X), 1000)
# Estimate Y = aX +b
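        # scipy.stats.linregress returns (slope, intercept, r_value, p_value, std_err)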
a, b, c, p, _ = linregress(X, Y)
# Get r squared
r = c * c
Y_reg = a * X_reg + b
label_reg = f'y = {round(a, 4)}x + {round(b, 4)}'
text_reg = r'$R^{2}$' + f'={round(r, 2)}'# + '\n' + r'$p$-value' + f'={round(p, 2)}'
plt.figure(figsize=(5,5))
plt.scatter(x=X, y=Y, s=4, alpha=2/3)
plt.plot(X_reg, Y_reg,
linewidth=1,
color='black',
label=label_reg)
plt.text(x=(np.min(X) + np.max(X))/2,
y=(np.min(Y) + np.max(Y))/2,
s=text_reg,
alpha=1/4,
fontsize=30,
verticalalignment='center',
horizontalalignment='center')
plt.xlabel(f'{x}')
plt.ylabel(f'{y}')
# plt.legend(loc='upper left')
plt.tight_layout()
plt.show()
def _sort_ctry_stats(self, stat_name, min_cases=5000, n=10):
"""
Sort the dataframe of country statistics using a cutoff
of `min_cases` and return top `n` countries.
"""
df = self.data['Country stats'].copy()
df['Has min cases'] = df['Confirmed'] > min_cases
        df = df[df['Has min cases']]
df = df.sort_values(stat_name, ascending=False)
df = df.reset_index(drop=True)
df = df[['Country', stat_name]]
df = df.head(n)
return df
def show_corr_mat(self):
"""
        Display a colourful correlation matrix of cases with socioeconomic factors.
"""
C = self.data['World bank'].corr()
C = C.style.background_gradient(cmap='coolwarm')
C = C.set_precision(2)
C = C.set_table_attributes('style="font-size: 13px"')
display(C)
def exp_growth(a, b, t, tau):
"""
Calculate exponential growth.
Parameters
----------
    a : float
        Initial value.
    b : float
        Growth factor.
    t : int or array-like
        Time.
    tau : float
        Time required for an increase by a factor of b.
Notes
-----
See https://en.wikipedia.org/wiki/Exponential_growth
for details.
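    Examples
    --------
    exp_growth(a=100, b=2, t=np.arange(3), tau=2) is approximately
    [100.0, 141.42, 200.0].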
"""
return a * np.power(b, t / tau)
| python |
#! /usr/bin/env python3
import argparse
import usb.core
import usb.util
import array
import sys
import hashlib
import csv
from progressbar.bar import ProgressBar
class PrecursorUsb:
def __init__(self, dev):
self.dev = dev
self.RDSR = 0x05
self.RDSCUR = 0x2B
self.RDID = 0x9F
self.WREN = 0x06
self.WRDI = 0x04
self.SE4B = 0x21
self.BE4B = 0xDC
self.PP4B = 0x12
self.registers = {}
self.regions = {}
self.gitrev = ''
def register(self, name):
return int(self.registers[name], 0)
def peek(self, addr, display=False):
_dummy_s = '\x00'.encode('utf-8')
data = array.array('B', _dummy_s * 4)
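        # Vendor-specific control read: the 32-bit address is split across wValue
        # (low 16 bits) and wIndex (high 16 bits); bit 0x80 marks device-to-host.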
numread = self.dev.ctrl_transfer(bmRequestType=(0x80 | 0x43), bRequest=0,
wValue=(addr & 0xffff), wIndex=((addr >> 16) & 0xffff),
data_or_wLength=data, timeout=500)
read_data = int.from_bytes(data.tobytes(), byteorder='little', signed=False)
if display == True:
print("0x{:08x}".format(read_data))
return read_data
def poke(self, addr, wdata, check=False, display=False):
if check == True:
_dummy_s = '\x00'.encode('utf-8')
data = array.array('B', _dummy_s * 4)
numread = self.dev.ctrl_transfer(bmRequestType=(0x80 | 0x43), bRequest=0,
wValue=(addr & 0xffff), wIndex=((addr >> 16) & 0xffff),
data_or_wLength=data, timeout=500)
read_data = int.from_bytes(data.tobytes(), byteorder='little', signed=False)
print("before poke: 0x{:08x}".format(read_data))
data = array.array('B', wdata.to_bytes(4, 'little'))
numwritten = self.dev.ctrl_transfer(bmRequestType=(0x00 | 0x43), bRequest=0,
wValue=(addr & 0xffff), wIndex=((addr >> 16) & 0xffff),
data_or_wLength=data, timeout=500)
if check == True:
_dummy_s = '\x00'.encode('utf-8')
data = array.array('B', _dummy_s * 4)
numread = self.dev.ctrl_transfer(bmRequestType=(0x80 | 0x43), bRequest=0,
wValue=(addr & 0xffff), wIndex=((addr >> 16) & 0xffff),
data_or_wLength=data, timeout=500)
read_data = int.from_bytes(data.tobytes(), byteorder='little', signed=False)
print("after poke: 0x{:08x}".format(read_data))
if display == True:
print("wrote 0x{:08x} to 0x{:08x}".format(wdata, addr))
def burst_read(self, addr, len):
_dummy_s = '\x00'.encode('utf-8')
maxlen = 4096
ret = bytearray()
packet_count = len // maxlen
if (len % maxlen) != 0:
packet_count += 1
for pkt_num in range(packet_count):
cur_addr = addr + pkt_num * maxlen
if pkt_num == packet_count - 1:
if len % maxlen != 0:
bufsize = len % maxlen
else:
bufsize = maxlen
else:
bufsize = maxlen
data = array.array('B', _dummy_s * bufsize)
numread = self.dev.ctrl_transfer(bmRequestType=(0x80 | 0x43), bRequest=0,
wValue=(cur_addr & 0xffff), wIndex=((cur_addr >> 16) & 0xffff),
data_or_wLength=data, timeout=500)
if numread != bufsize:
print("Burst read error: {} bytes requested, {} bytes read at 0x{:08x}".format(bufsize, numread, cur_addr))
exit(1)
ret = ret + data
return ret
def burst_write(self, addr, data):
if len(data) == 0:
return
maxlen = 4096
packet_count = len(data) // maxlen
if (len(data) % maxlen) != 0:
packet_count += 1
for pkt_num in range(packet_count):
cur_addr = addr + pkt_num * maxlen
if pkt_num == packet_count - 1:
if len(data) % maxlen != 0:
bufsize = len(data) % maxlen
else:
bufsize = maxlen
else:
bufsize = maxlen
wdata = array.array('B', data[(pkt_num * maxlen):(pkt_num * maxlen) + bufsize])
numwritten = self.dev.ctrl_transfer(bmRequestType=(0x00 | 0x43), bRequest=0,
wValue=(cur_addr & 0xffff), wIndex=((cur_addr >> 16) & 0xffff),
data_or_wLength=wdata, timeout=500)
if numwritten != bufsize:
print("Burst write error: {} bytes requested, {} bytes written at 0x{:08x}".format(bufsize, numwritten, cur_addr))
exit(1)
def ping_wdt(self):
self.poke(self.register('wdt_watchdog'), 1, display=False)
self.poke(self.register('wdt_watchdog'), 1, display=False)
def spinor_command_value(self, exec=0, lock_reads=0, cmd_code=0, dummy_cycles=0, data_words=0, has_arg=0):
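        # Pack a SPI flash controller command word: bit 1 = exec, bits 2-9 = opcode,
        # bit 10 = has_arg, bits 11-15 = dummy cycles, bits 16-23 = data word count,
        # bit 24 = lock_reads.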
return ((exec & 1) << 1 |
(lock_reads & 1) << 24 |
(cmd_code & 0xff) << 2 |
(dummy_cycles & 0x1f) << 11 |
(data_words & 0xff) << 16 |
(has_arg & 1) << 10
)
def flash_rdsr(self, lock_reads):
self.poke(self.register('spinor_cmd_arg'), 0)
self.poke(self.register('spinor_command'),
self.spinor_command_value(exec=1, lock_reads=lock_reads, cmd_code=self.RDSR, dummy_cycles=4, data_words=1, has_arg=1)
)
return self.peek(self.register('spinor_cmd_rbk_data'), display=False)
def flash_rdscur(self):
self.poke(self.register('spinor_cmd_arg'), 0)
self.poke(self.register('spinor_command'),
self.spinor_command_value(exec=1, lock_reads=1, cmd_code=self.RDSCUR, dummy_cycles=4, data_words=1, has_arg=1)
)
return self.peek(self.register('spinor_cmd_rbk_data'), display=False)
def flash_rdid(self, offset):
self.poke(self.register('spinor_cmd_arg'), 0)
self.poke(self.register('spinor_command'),
self.spinor_command_value(exec=1, cmd_code=self.RDID, dummy_cycles=4, data_words=offset, has_arg=1)
)
return self.peek(self.register('spinor_cmd_rbk_data'), display=False)
def flash_wren(self):
self.poke(self.register('spinor_cmd_arg'), 0)
self.poke(self.register('spinor_command'),
self.spinor_command_value(exec=1, lock_reads=1, cmd_code=self.WREN)
)
def flash_wrdi(self):
self.poke(self.register('spinor_cmd_arg'), 0)
self.poke(self.register('spinor_command'),
self.spinor_command_value(exec=1, lock_reads=1, cmd_code=self.WRDI)
)
def flash_se4b(self, sector_address):
self.poke(self.register('spinor_cmd_arg'), sector_address)
self.poke(self.register('spinor_command'),
self.spinor_command_value(exec=1, lock_reads=1, cmd_code=self.SE4B, has_arg=1)
)
def flash_be4b(self, block_address):
self.poke(self.register('spinor_cmd_arg'), block_address)
self.poke(self.register('spinor_command'),
self.spinor_command_value(exec=1, lock_reads=1, cmd_code=self.BE4B, has_arg=1)
)
def flash_pp4b(self, address, data_bytes):
self.poke(self.register('spinor_cmd_arg'), address)
self.poke(self.register('spinor_command'),
self.spinor_command_value(exec=1, lock_reads=1, cmd_code=self.PP4B, has_arg=1, data_words=(data_bytes//2))
)
def load_csrs(self):
LOC_CSRCSV = 0x20277000 # this address shouldn't change because it's how we figure out our version number
csr_data = self.burst_read(LOC_CSRCSV, 0x8000)
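        # Blob layout: 4-byte little-endian length, the csr.csv text, padding, and a
        # SHA-512 digest of the first 0x7FC0 bytes stored in the last 64 bytes.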
hasher = hashlib.sha512()
hasher.update(csr_data[:0x7FC0])
digest = hasher.digest()
if digest != csr_data[0x7fc0:]:
print("Could not find a valid csr.csv descriptor on the device, aborting!")
exit(1)
csr_len = int.from_bytes(csr_data[:4], 'little')
csr_extracted = csr_data[4:4+csr_len]
decoded = csr_extracted.decode('utf-8')
# strip comments
stripped = []
for line in decoded.split('\n'):
            if not line.startswith('#'):
stripped.append(line)
# create database
csr_db = csv.reader(stripped)
for row in csr_db:
if len(row) > 1:
if 'csr_register' in row[0]:
self.registers[row[1]] = row[2]
if 'memory_region' in row[0]:
self.regions[row[1]] = [row[2], row[3]]
if 'git_rev' in row[0]:
self.gitrev = row[1]
print("Using SoC {} registers".format(self.gitrev))
# addr is relative to the base of FLASH (not absolute)
def flash_program(self, addr, data, verify=True):
flash_region = int(self.regions['spiflash'][0], 0)
flash_len = int(self.regions['spiflash'][1], 0)
if (addr + len(data) > flash_len):
print("Write data out of bounds! Aborting.")
exit(1)
# ID code check
code = self.flash_rdid(1)
print("ID code bytes 1-2: 0x{:08x}".format(code))
if code != 0x8080c2c2:
print("ID code mismatch")
exit(1)
code = self.flash_rdid(2)
print("ID code bytes 2-3: 0x{:08x}".format(code))
if code != 0x3b3b8080:
print("ID code mismatch")
exit(1)
# block erase
progress = ProgressBar(min_value=0, max_value=len(data), prefix='Erasing ').start()
erased = 0
while erased < len(data):
self.ping_wdt()
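            # Use a 64 KiB block erase while at least 64 KiB remains and the base address
            # is 64 KiB aligned; otherwise fall back to 4 KiB sector erases.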
if (len(data) - erased >= 65536) and ((addr & 0xFFFF) == 0):
blocksize = 65536
else:
blocksize = 4096
while True:
self.flash_wren()
status = self.flash_rdsr(1)
if status & 0x02 != 0:
break
if blocksize == 4096:
self.flash_se4b(addr + erased)
else:
self.flash_be4b(addr + erased)
erased += blocksize
while (self.flash_rdsr(1) & 0x01) != 0:
pass
result = self.flash_rdscur()
if result & 0x60 != 0:
print("E_FAIL/P_FAIL set on erase, programming may fail, but trying anyways...")
if self.flash_rdsr(1) & 0x02 != 0:
self.flash_wrdi()
while (self.flash_rdsr(1) & 0x02) != 0:
pass
if erased < len(data):
progress.update(erased)
progress.finish()
print("Erase finished")
# program
# pad out to the nearest word length
if len(data) % 4 != 0:
data += bytearray([0xff] * (4 - (len(data) % 4)))
written = 0
progress = ProgressBar(min_value=0, max_value=len(data), prefix='Writing ').start()
while written < len(data):
self.ping_wdt()
if len(data) - written > 256:
chunklen = 256
else:
chunklen = len(data) - written
while True:
self.flash_wren()
status = self.flash_rdsr(1)
if status & 0x02 != 0:
break
self.burst_write(flash_region, data[written:(written+chunklen)])
self.flash_pp4b(addr + written, chunklen)
written += chunklen
if written < len(data):
progress.update(written)
progress.finish()
print("Write finished")
if self.flash_rdsr(1) & 0x02 != 0:
self.flash_wrdi()
while (self.flash_rdsr(1) & 0x02) != 0:
pass
# dummy reads to clear the "read lock" bit
self.flash_rdsr(0)
# verify
self.ping_wdt()
if verify:
print("Performing readback for verification...")
self.ping_wdt()
rbk_data = self.burst_read(addr + flash_region, len(data))
if rbk_data != data:
print("Errors were found in verification, programming failed")
exit(1)
else:
print("Verification passed.")
else:
print("Skipped verification at user request")
self.ping_wdt()
def auto_int(x):
return int(x, 0)
def main():
parser = argparse.ArgumentParser(description="Update/upload to a Precursor device running Xous 0.8/0.9")
parser.add_argument(
"--soc", required=False, help="'Factory Reset' the SoC gateware. Note: this will overwrite any secret keys stored in your device!", type=str, nargs='?', metavar=('SoC gateware file'), const='../precursors/soc_csr.bin'
)
parser.add_argument(
"-s", "--staging", required=False, help="Stage an update to apply", type=str, nargs='?', metavar=('SoC gateware file'), const='../precursors/soc_csr.bin'
)
parser.add_argument(
"-l", "--loader", required=False, help="Loader", type=str, nargs='?', metavar=('loader file'), const='../target/riscv32imac-unknown-xous-elf/release/loader.bin'
)
parser.add_argument(
"-k", "--kernel", required=False, help="Kernel", type=str, nargs='?', metavar=('kernel file'), const='../target/riscv32imac-unknown-xous-elf/release/xous.img'
)
parser.add_argument(
"-e", "--ec", required=False, help="EC gateware", type=str, nargs='?', metavar=('EC gateware package'), const='ec_fw.bin'
)
parser.add_argument(
"-w", "--wf200", required=False, help="WF200 firmware", type=str, nargs='?', metavar=('WF200 firmware package'), const='wf200_fw.bin'
)
parser.add_argument(
"--audiotest", required=False, help="Test audio clip (must be 8kHz WAV)", type=str, nargs='?', metavar=('Test audio clip'), const="testaudio.wav"
)
parser.add_argument(
"--peek", required=False, help="Inspect an address", type=auto_int, metavar=('ADDR')
)
parser.add_argument(
"--poke", required=False, help="Write to an address", type=auto_int, nargs=2, metavar=('ADDR', 'DATA')
)
parser.add_argument(
"--check-poke", required=False, action='store_true', help="Read data before and after the poke"
)
parser.add_argument(
"--config", required=False, help="Print the descriptor", action='store_true'
)
parser.add_argument(
"-i", "--image", required=False, help="Manually specify an image and address. Offset is relative to bottom of flash.", type=str, nargs=2, metavar=('IMAGEFILE', 'ADDR')
)
parser.add_argument(
"--verify", help="Readback verification. May fail for large files due to WDT timeout.", default=False, action='store_true'
)
parser.add_argument(
"--force", help="Ignore gitrev version on SoC and try to burn an image anyways", action="store_true"
)
parser.add_argument(
"--bounce", help="cycle the device through a reset", action="store_true"
)
args = parser.parse_args()
if not len(sys.argv) > 1:
print("No arguments specified, doing nothing. Use --help for more information.")
exit(1)
dev = usb.core.find(idProduct=0x5bf0, idVendor=0x1209)
if dev is None:
raise ValueError('Precursor device not found')
dev.set_configuration()
if args.config:
cfg = dev.get_active_configuration()
print(cfg)
pc_usb = PrecursorUsb(dev)
if args.verify:
verify = True
else:
verify = False
if args.peek:
pc_usb.peek(args.peek, display=True)
# print(burst_read(dev, args.peek, 256).hex())
exit(0)
if args.poke:
addr, data = args.poke
pc_usb.poke(addr, data, check=args.check_poke, display=True)
# import os
# d = bytearray(os.urandom(8000))
# burst_write(dev, addr, d)
# r = burst_read(dev, addr, 8000)
# print(r.hex())
# if d != r:
# print("mismatch")
# else:
# print("match")
exit(0)
pc_usb.load_csrs() # prime the CSR values
if "v0.8" in pc_usb.gitrev:
LOC_SOC = 0x00000000
LOC_STAGING= 0x00280000
LOC_LOADER = 0x00500000
LOC_KERNEL = 0x00980000
LOC_WF200 = 0x07F80000
LOC_EC = 0x07FCE000
LOC_AUDIO = 0x06340000
LEN_AUDIO = 0x01C40000
elif "v0.9" in pc_usb.gitrev:
LOC_SOC = 0x00000000
LOC_STAGING= 0x00280000
LOC_LOADER = 0x00500000
LOC_KERNEL = 0x00980000
LOC_WF200 = 0x07F80000
LOC_EC = 0x07FCE000
LOC_AUDIO = 0x06340000
LEN_AUDIO = 0x01C40000
elif args.force == True:
# try the v0.9 offsets
LOC_SOC = 0x00000000
LOC_STAGING= 0x00280000
LOC_LOADER = 0x00500000
LOC_KERNEL = 0x00980000
LOC_WF200 = 0x07F80000
LOC_EC = 0x07FCE000
LOC_AUDIO = 0x06340000
LEN_AUDIO = 0x01C40000
else:
print("SoC is from an unknow rev '{}', use --force to continue anyways with v0.8 firmware offsets".format(pc_usb.load_csrs()))
exit(1)
vexdbg_addr = int(pc_usb.regions['vexriscv_debug'][0], 0)
pc_usb.ping_wdt()
print("Halting CPU.")
pc_usb.poke(vexdbg_addr, 0x00020000)
if args.image:
image_file, addr_str = args.image
addr = int(addr_str, 0)
print("Burning manually specified image '{}' to address 0x{:08x} relative to bottom of FLASH".format(image_file, addr))
with open(image_file, "rb") as f:
image_data = f.read()
pc_usb.flash_program(addr, image_data, verify=verify)
if args.ec != None:
print("Staging EC firmware package '{}' in SOC memory space...".format(args.ec))
with open(args.ec, "rb") as f:
image = f.read()
pc_usb.flash_program(LOC_EC, image, verify=verify)
if args.wf200 != None:
print("Staging WF200 firmware package '{}' in SOC memory space...".format(args.wf200))
with open(args.wf200, "rb") as f:
image = f.read()
pc_usb.flash_program(LOC_WF200, image, verify=verify)
if args.staging != None:
print("Programming SoC gateware {}".format(args.soc))
with open(args.staging, "rb") as f:
image = f.read()
pc_usb.flash_program(LOC_STAGING, image, verify=verify)
if args.kernel != None:
print("Programming kernel image {}".format(args.kernel))
with open(args.kernel, "rb") as f:
image = f.read()
pc_usb.flash_program(LOC_KERNEL, image, verify=verify)
if args.loader != None:
print("Programming loader image {}".format(args.loader))
with open(args.loader, "rb") as f:
image = f.read()
pc_usb.flash_program(LOC_LOADER, image, verify=verify)
if args.soc != None:
if args.force == True:
print("Programming SoC gateware {}".format(args.soc))
with open(args.soc, "rb") as f:
image = f.read()
pc_usb.flash_program(LOC_SOC, image, verify=verify)
else:
print("This will overwrite any secret keys in your device. Continue? (y/n)")
confirm = input()
if len(confirm) > 0 and confirm.lower()[:1] == 'y':
print("Programming SoC gateware {}".format(args.soc))
with open(args.soc, "rb") as f:
image = f.read()
pc_usb.flash_program(LOC_SOC, image, verify=verify)
if args.audiotest != None:
print("Loading audio test clip {}".format(args.audiotest))
with open(args.audiotest, "rb") as f:
image = f.read()
if len(image) >= LEN_AUDIO:
print("audio file is too long, aborting audio burn!")
else:
pc_usb.flash_program(LOC_AUDIO, image, verify=verify)
print("Resuming CPU.")
pc_usb.poke(vexdbg_addr, 0x02000000)
print("Resetting SOC...")
try:
pc_usb.poke(pc_usb.register('reboot_soc_reset'), 0xac, display=False)
except usb.core.USBError:
pass # we expect an error because we reset the SOC and that includes the USB core
# print("If you need to run more commands, please unplug and re-plug your device in, as the Precursor USB core was just reset")
if __name__ == "__main__":
main()
exit(0)
| python |
from tkinter import *
import math
import numpy as np
import os.path
########################################################
#Reading the output
if os.path.exists('../../build/output/ODE/ODE.txt'):
t, x, y = np.loadtxt('../../build/output/ODE/ODE.txt', skiprows = 0, unpack = True)
else:
print("No output file found")
exit()
########################################################
#Animation class in which I draw and set the positions of the objects
class Animation:
def __init__(self, gw):
#Window
self.window = gw
#Initial conditions
self.xoff, self.yoff = 300, 300
self.angle = 150*math.pi/180
self.sina = math.sin(self.angle)
self.cosa = math.cos(self.angle)
#Rod
self.rodLength = 150
self.rodx0, self.rody0 = self.xoff, self.yoff
self.rx1 = self.rodx0
self.ry1 = self.rody0
self.rx2 = self.xoff + self.rodLength*self.sina
self.ry2 = self.yoff + self.rodLength*self.cosa
#Pendulum
self.bobRadius = 15
self.bobCenter = self.rodLength + self.bobRadius
self.bx1 = self.xoff - self.bobRadius + self.bobCenter*self.sina
self.by1 = self.yoff - self.bobRadius + self.bobCenter*self.cosa
self.bx2 = self.xoff + self.bobRadius + self.bobCenter*self.sina
self.by2 = self.yoff + self.bobRadius + self.bobCenter*self.cosa
#Others
self.step = 0
self.xText = 500
self.yText = 20
# create / fill canvas:
self.cnv = Canvas(gw, bg='white')
self.cnv.pack(fill=BOTH, expand=True)
radius = 4
self.cnv.create_oval(300-radius, 300-radius,
300+radius, 300+radius,
fill='black')
self.bob = self.cnv.create_oval(self.bx1,
self.by1,
self.bx2,
self.by2,
fill='red',
width=2)
self.rod = self.cnv.create_line(self.rx1,
self.ry1,
self.rx2,
self.ry2,
fill='black',
width=4)
self.time = self.cnv.create_text(self.xText,
self.yText,
font=("courier", 15, "bold"),
text='Time = 0 s')
self.animate()
def animate(self):
self.angle = x[self.step]
self.sina = math.sin(self.angle)
self.cosa = math.cos(self.angle)
self.rx1 = self.rodx0
self.ry1 = self.rody0
self.rx2 = self.xoff + self.rodLength*self.sina
self.ry2 = self.yoff + self.rodLength*self.cosa
self.bx1 = self.xoff - self.bobRadius + self.bobCenter*self.sina
self.by1 = self.yoff - self.bobRadius + self.bobCenter*self.cosa
self.bx2 = self.xoff + self.bobRadius + self.bobCenter*self.sina
self.by2 = self.yoff + self.bobRadius + self.bobCenter*self.cosa
self.cnv.itemconfigure(self.time, text= 'Time = {:.1f} s'.format(t[self.step]))
self.step += 1
self.cnv.coords(self.rod,
self.rx1,
self.ry1,
self.rx2,
self.ry2)
self.cnv.coords(self.bob,
self.bx1,
self.by1,
self.bx2,
self.by2)
self.window.update()
#If I reach the last vector element, close the window
if self.step < len(x):
self.cnv.after(10, self.animate)
else:
exit()
#Tkinter project definition
root = Tk()
root.title('Pendulum')
root.geometry('600x600')
root.resizable(False, False)
#Class
a = Animation(root)
#Loop
root.mainloop() | python |
'''
@Author: your name
@Date: 2020-05-10 18:23:54
@LastEditors: wei
@LastEditTime: 2020-05-12 14:04:09
@Description: file content
'''
import importlib
from torch.utils.data import DataLoader
def find_dataset_using_name(dataset_name):
"""Find dataset using name
Arguments:
dataset_name {str} -- name of the dataset; resolved to module 'dataset.<name>_dataset'
Returns:
class -- the matching dataset class (exits if none is found)
"""
dataset_file_name = 'dataset.' + dataset_name + '_dataset'
dataset_lib = importlib.import_module(dataset_file_name)
dataset = None
target_dataset_name = dataset_name.replace('_', '') + 'dataset'
for name, cls in dataset_lib.__dict__.items():
if name.lower() == target_dataset_name.lower():
dataset = cls
if dataset is None:
print('pls check your dataset in this folder')
exit(0)
return dataset
def create_dataset(cfg, mode, transform):
"""Create dataset
Arguments:
cfg {[type]} -- config providing dataset_name, batch_size and num_workers
Returns:
DataLoader -- a shuffled torch DataLoader over the requested dataset
"""
dataset = find_dataset_using_name(cfg.dataset_name)
instance = dataset(cfg, mode, transform)
print("Dataset {} {} was created, there are {} images in all".format(cfg.dataset_name, mode, len(instance)))
dataloader = DataLoader(instance, batch_size=cfg.batch_size, shuffle=True, num_workers=cfg.num_workers)
return dataloader
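# Example of the naming convention resolved above (hypothetical dataset name):
# cfg.dataset_name = 'mnist' imports the module 'dataset.mnist_dataset' and picks
# the class whose lowercased name equals 'mnistdataset' (e.g. MnistDataset).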
| python |
#
# Copyright (c) 2008 Daniel Truemper [email protected]
#
# setup.py 04-Jan-2011
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# under the License.
#
#
from setuptools import setup, find_packages
import re
__version__ = re.search(r"__version__\s*=\s*'(.*)'", open('src/spyder/__init__.py').read(), re.M).group(1)
assert __version__
long_description = open("README.rst").read()
assert long_description
tests_require = ['coverage>=3.4', 'nose==1.1.2']
setup(
name = "spyder",
version = __version__,
description = "A python spider",
long_description = long_description,
author = "Daniel Truemper",
author_email = "[email protected]",
url = "",
license = "Apache 2.0",
package_dir = { '' : 'src' },
packages = find_packages('src'),
include_package_data = True,
test_suite = 'nose.collector',
install_requires = [
'pyzmq>=2.0.10',
'tornado>=1.1',
'thrift>=0.5.0',
'pycurl>=7.19.0',
'pytz>=2010o',
'brownie>=0.4.1',
],
tests_require = tests_require,
extras_require = {'test': tests_require},
entry_points = {
'console_scripts' : [
'spyder = spyder:spyder_admin_main',
]
},
classifiers = [
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.6',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
]
)
| python |
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""
"""
__version__ = "$Id$"
#end_pymotw_header
import math
from io import StringIO
def show_tree(tree, total_width=36, fill=' '):
"""Pretty-print a tree."""
output = StringIO()
last_row = -1
for i, n in enumerate(tree):
if i:
row = int(math.floor(math.log(i+1, 2)))
else:
row = 0
if row != last_row:
output.write('\n')
columns = 2**row
col_width = int(math.floor((total_width * 1.0) / columns))
output.write(str(n).center(col_width, fill))
last_row = row
print(output.getvalue())
print('-' * total_width)
print()
return
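# Example call (hypothetical heap contents): show_tree([1, 3, 5, 7, 9, 11, 13])
# prints one line per binary-heap level, with each value centered in a column
# of total_width // 2**row characters.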
| python |
def f(x=4, a=[]):
a.append(x)
print(a)
f()
f(2)
f(7, [7, 7])
f("still")
| python |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from torchmultimodal.architectures.clip import CLIPArchitecture
from torchmultimodal.modules.encoders.clip_resnet_encoder import ResNetForCLIP
from torchmultimodal.modules.encoders.clip_text_encoder import CLIPTextEncoder
from torchmultimodal.utils.common import get_current_device
from torchvision.models.vision_transformer import VisionTransformer
class TestCLIPModule(unittest.TestCase):
def setUp(self):
torch.manual_seed(1234)
self.device = get_current_device()
self.context_length = 77
def test_clip_resnet_forward(self):
resnet_encoder = ResNetForCLIP(
layers=(3, 4, 6, 3),
output_dim=12,
heads=10,
width=20,
)
text_encoder = CLIPTextEncoder(
embedding_dim=12,
context_length=self.context_length,
vocab_size=100,
width=512,
heads=8,
layers=12,
)
clip_resnet = CLIPArchitecture(
vision_encoder=resnet_encoder,
text_encoder=text_encoder,
)
clip_resnet = clip_resnet.to(self.device)
self.assertTrue(isinstance(clip_resnet, torch.nn.Module))
text = torch.randint(1, 79, (self.context_length,), dtype=torch.long).unsqueeze(
0
)
image = torch.randn(3, 224, 224).unsqueeze(0)
clip_resnet_scores = clip_resnet(image=image, text=text)
self.assertEqual(clip_resnet_scores["image"].size(), torch.Size((1, 12)))
self.assertEqual(clip_resnet_scores["text"].size(), torch.Size((1, 12)))
def test_clip_vit_forward(self):
vit_encoder = VisionTransformer(
image_size=224,
patch_size=16,
num_layers=12,
num_heads=12,
hidden_dim=768,
mlp_dim=3072,
num_classes=12,
)
text_encoder = CLIPTextEncoder(
embedding_dim=12,
context_length=self.context_length,
vocab_size=100,
width=512,
heads=8,
layers=12,
)
text = torch.randint(1, 79, (self.context_length,), dtype=torch.long).unsqueeze(
0
)
image = torch.randn(3, 224, 224).unsqueeze(0)
clip_vit = CLIPArchitecture(
vision_encoder=vit_encoder, text_encoder=text_encoder
)
clip_vit = clip_vit.to(self.device)
self.assertTrue(isinstance(clip_vit, torch.nn.Module))
clip_vit_scores = clip_vit(image=image, text=text)
self.assertEqual(clip_vit_scores["image"].size(), torch.Size((1, 12)))
self.assertEqual(clip_vit_scores["text"].size(), torch.Size((1, 12)))
| python |
from .production import *
CONFIG_FILE_IN_USE = get_file_name_only(__file__) # Custom setting
# Custom settings for dynamically-generated config files
PROJECT_NAME = PROJECT_NAME+'-staging'
UWSGI_PORT = 9002
HTTP_PORT = 81
HTTPS_PORT = 444
# Override database setting
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(DATA_DIR, 'staging.sqlite3'),
},
}
| python |
from line_factory.sliding_window.frame import Frame
from line_factory.sliding_window.detection_area import DetectionArea
class SlidingWindowLineDetector:
def __init__(self, sliding_window_container):
self.sliding_window_container = sliding_window_container
def detect(self, bw_image, start_x):
frame = Frame(bw_image)
current_x = start_x
line_pieces = []
image_height = bw_image.shape[0]
windows = self.sliding_window_container.get_windows(image_height)
for window in windows:
detection_boundaries = window.detection_area(current_x)
line_points = frame.get_line_points(detection_boundaries)
detection_area = DetectionArea(current_x, line_points, window.shape)
current_x = detection_area.center_x
line_pieces.append(detection_area)
return line_pieces
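# Summary of detect(): starting from start_x, each sliding window selects a
# detection area around the current x, collects the white pixels inside it,
# re-centers on DetectionArea.center_x, and the list of DetectionArea objects
# (one per window) is returned as the detected line pieces.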
| python |
#!/usr/bin/python3
"""Alta3 Research - Exploring OpenAPIs with requests"""
# documentation for this API is at
# https://anapioficeandfire.com/Documentation
import pprint
import requests
AOIF_BOOKS = "https://www.anapioficeandfire.com/api/books"
def main():
## Send HTTPS GET to the API of ICE and Fire books resource
gotresp = requests.get(AOIF_BOOKS)
## Decode the response
got_dj = gotresp.json()
## print the response
## using pretty print so we can read it
pprint.pprint(got_dj)
if __name__ == "__main__":
main()
| python |
from django.core.exceptions import ValidationError
from django.core.validators import EmailValidator
from django.utils.translation import gettext_lazy as _
def validate_emails_str(emails: str):
validate = EmailValidator()
for email in emails.split(","):
if not email:
continue
validate(email)
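# Usage sketch (hypothetical addresses): empty items from trailing commas are
# skipped; the first invalid address raises django's ValidationError.
# validate_emails_str("[email protected],[email protected],")   # passes
# validate_emails_str("[email protected],not-an-email")     # raises ValidationError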
| python |
import json
class Kayitlar:
def __init__(self):
self.count = 0
self.dct = {}
def dictToJson(self, data):
# Converts dict-type data to a JSON string.
return json.dumps(data)
def jsonToDict(self, data):
# Converts JSON-formatted data to a dict.
self.count = 0
null = {}
try:
for i in json.loads(data).keys():
if int(i) > self.count:
self.count = int(i)
self.count += 1
except:
return null
return json.loads(data)
def readFile(self, filePath):
# Reads the file and returns its contents.
try:
f = open(filePath, "r")
data = f.read()
f.close()
return data
except FileNotFoundError:
return None
def writeFile(self, data, filePath):
# Creates the file and writes the data into it.
with open(filePath, "w") as f:
f.write(data)
def addKayitlar(self, dct):
lastDict = {}
lastData = self.readFile("stdData.json")
if lastData:
lastDict = self.jsonToDict(lastData)
lastDict[self.count] = dct
newJson = self.dictToJson(lastDict)
self.writeFile(newJson, "stdData.json")
def deleteKayitlar(self, name, surname):
readData = self.readFile("stdData.json")
jsonData = self.jsonToDict(readData)
for i in jsonData.keys():
if jsonData[i]["adi"].lower() == name.lower() and jsonData[i]["soyadi"].lower() == surname.lower():
del jsonData[i]
break
else:
continue
dictData = self.dictToJson(jsonData)
self.writeFile(dictData,"stdData.json")
def viewKayitlar(self, name, surname):
readData = self.readFile("stdData.json")
jsonData = self.jsonToDict(readData)
for i in jsonData.keys():
if jsonData[i]["adi"].lower() == name.lower() and jsonData[i]["soyadi"].lower() == surname.lower():
print("Adı:",jsonData[i]["adi"],"\nSoyadı:",jsonData[i]["soyadi"],"\nYaşadığı Şehit:",jsonData[i]["sehir"],
"\nfirma:",jsonData[i]["firma"],"\nMail:",jsonData[i]["mail"],"\nTelefon:",jsonData[i]["tel"],
"\nDoğum Tarihi:",jsonData[i]["dogum_tarihi"])
for a in range(len(jsonData[i]["gecmis"])):
print("Gecmis:",jsonData[i]["gecmis"][a])
break
else:
continue
def uptadeKayitlar(self,name, surname, data):
readData = self.readFile("stdData.json")
jsonData = self.jsonToDict(readData)
for i in jsonData.keys():
if jsonData[i]["adi"].lower() == name.lower() and jsonData[i]["soyadi"].lower() == surname.lower():
jsonData[i] = data
break
else:
continue
dictData = self.dictToJson(jsonData)
self.writeFile(dictData, "stdData.json")
def allKayitlar(self):
readData = self.readFile("stdData.json")
jsonData = self.jsonToDict(readData)
for i in jsonData.keys():
print("\n","#" * 40)
print("Adı:", jsonData[i]["adi"], "\nSoyadı:", jsonData[i]["soyadi"], "\nYaşadığı Şehit:",
jsonData[i]["sehir"],
"\nFirma:", jsonData[i]["firma"], "\nMail:", jsonData[i]["mail"], "\nTelefon:", jsonData[i]["tel"],
"\nDoğum Tarihi:", jsonData[i]["dogum_tarihi"])
for a in range(len(jsonData[i]["gecmis"])):
print("Gecmis:", jsonData[i]["gecmis"][a])
| python |
import argparse
from pathlib import Path
import torch
import torch.nn.functional as F
from data.data_loader import ActivDataset, loader
from models.ete_waveform import EteWave
from models.post_process import as_seaquence
from optimizer.radam import RAdam
torch.manual_seed(555)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("device:", device)
def main(args):
model = EteWave(args.n_class).to(device)
if Path(args.resume_model).exists():
print("load model:", args.resume_model)
model.load_state_dict(torch.load(args.resume_model))
# setup optimizer
optimizer = RAdam(model.parameters())
train_data_file_names =\
[line.rstrip() for line in open(args.train_data_file_pointer_path)]
test_data_file_names =\
[line.rstrip() for line in open(args.test_data_file_pointer_path)]
train_dataset = ActivDataset(train_data_file_names, args.root_dir,
seq_len=args.train_seq_len, time_step=args.time_step,
is_train=True)
test_dataset = ActivDataset(test_data_file_names, args.root_dir,
seq_len=args.test_seq_len, time_step=args.time_step,
is_train=False, test_in_train=True)
train_loader = loader(train_dataset, args.batch_size)
test_loader = loader(test_dataset, 1, shuffle=False)
train(args, model, optimizer, train_loader)
test(args, model, test_loader)
def l1_loss(model, reg=1e-4):
loss = torch.tensor(0.).to(device)
for name, param in model.named_parameters():
if 'bias' not in name:
loss += reg * torch.sum(torch.abs(param))
return loss
def train(args, model, optimizer, data_loader):
model.train()
for epoch in range(args.epochs):
for i, (l_data, l_target, l_lack_labels) in enumerate(data_loader):
l_data = l_data.to(device)
l_target = l_target.to(device)
l_lack_labels = l_lack_labels.to(device)
# _, in_ch, _ = l_data.shape
model.zero_grad()
optimizer.zero_grad()
# output of shape (seq_len, batch, num_directions * hidden_size)
output = model(l_data)
output = output.reshape([-1, args.n_class])
targets = l_target.view(-1)
series_loss = F.cross_entropy(output,
targets,
ignore_index=-1,
reduction='none')
with torch.no_grad():
N_series_loss = series_loss.detach().mean() + 3*series_loss.detach().std()
series_loss = series_loss.mean()
inf_labels = output.argmax(1)
model.tatc.select_data_per_labels(l_data, inf_labels, device)
# tatc out shape is (n_non_zero_labels*n_batch, 2)
tatc_output = model.tatc()
tatc_loss = F.cross_entropy(tatc_output,
l_lack_labels.reshape(-1),
ignore_index=-1,
reduction='none')
with torch.no_grad():
N_tatc_loss = tatc_loss.detach().mean() + 3*tatc_loss.detach().std()
tatc_loss = tatc_loss.mean()
if N_tatc_loss > N_series_loss:
loss = series_loss + N_tatc_loss/N_series_loss*tatc_loss
else:
loss = N_series_loss/N_tatc_loss*series_loss + tatc_loss
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.)
optimizer.step()
print('[{}/{}][{}/{}] Loss: {:.4f}'.format(
epoch, args.epochs, i,
len(data_loader), loss.item()))
# do checkpointing
if epoch % 20 == 0:
torch.save(model.state_dict(),
'{}/model_ckpt.pth'.format(args.out_dir))
torch.save(model.state_dict(),
'{}/model_ckpt.pth'.format(args.out_dir))
def test(args, model, data_loader):
model.eval()
test_loss = 0
correct = 0
total_len = 0
with torch.no_grad():
for i_batch, (l_data, l_target, l_lack_labels) in enumerate(data_loader):
l_data = l_data.to(device)
l_target = l_target.to(device)
l_lack_labels = l_lack_labels.to(device)
total_len += l_target.shape[-1]
output = model(l_data)
output = output.view([-1, output.shape[-1]])
targets = l_target.view(-1)
test_loss += F.cross_entropy(output, targets, ignore_index=-1).item()
pred = output.argmax(1)
model.tatc.select_data_per_labels(l_data, pred, device)
tatc_output = model.tatc()
test_loss += F.cross_entropy(tatc_output, l_lack_labels.reshape(-1)).item()
pred = as_seaquence(pred.detach(), ahead=7)
correct += pred.eq(targets.view_as(pred)).sum().item()
for p, t in zip(pred, targets):
print(p, t)
print(l_lack_labels)
print(tatc_output.argmax(1))
test_loss /= len(data_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'
.format(test_loss, correct, total_len, 100. * correct / total_len))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--root_dir', default='./data/train', help='path to dataset')
parser.add_argument('--n-class', type=int, default=6, help='number of class')
parser.add_argument('--train_seq-len', type=int, default=250, help='fixed sequence length')
parser.add_argument('--test_seq-len', type=int, default=200, help='fixed sequence length')
parser.add_argument('--time-step', type=float, default=.25, help='fixed time interval of input data')
parser.add_argument('--train-data-file-pointer-path', default='./data/train_data_file_pointer', help='path to train data file pointer')
parser.add_argument('--test-data-file-pointer-path', default='./data/train_data_file_pointer', help='path to test data file pointer')
parser.add_argument('--resume-model', default='./results/_tatc_ckpt.pth', help='path to trained model')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument('--batch-size', type=int, default=12, help='input batch size') # seq_len=200 -> 12,
parser.add_argument('--epochs', type=int, default=100, help='number of epochs to train for')
parser.add_argument('--out-dir', default='./results', help='folder to output data and model checkpoints')
args = parser.parse_args()
Path(args.out_dir).mkdir(parents=True, exist_ok=True),
main(args)
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__project__ = 'leetcode'
__file__ = '__init__.py'
__author__ = 'king'
__time__ = '2020/1/7 12:03'
_ooOoo_
o8888888o
88" . "88
(| -_- |)
O\ = /O
____/`---'\____
.' \\| |// `.
/ \\||| : |||// \
/ _||||| -:- |||||- \
| | \\\ - /// | |
| \_| ''\---/'' | |
\ .-\__ `-` ___/-. /
___`. .' /--.--\ `. . __
."" '< `.___\_<|>_/___.' >'"".
| | : `- \`.;`\ _ /`;.`/ - ` : | |
\ \ `-. \_ __\ /__ _/ .-` / /
======`-.____`-.___\_____/___.-`____.-'======
`=---='
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
May the Buddha bless us: no bugs, ever
"""
"""
Difficulty: Medium
Given an array of n positive integers and a positive integer s, find the minimal length of a contiguous subarray whose sum is >= s.
If there is no such subarray, return 0 instead.
Example:
Input: s = 7, nums = [2,3,1,2,4,3]
Output: 2
Explanation: the subarray [4,3] is the shortest contiguous subarray with sum >= s.
Follow up:
If you have figured out the O(n) solution, try coding a solution with O(n log n) time complexity.
"""
class Solution(object):
def minSubArrayLen(self, s, nums):
"""
:type s: int
:type nums: List[int]
:rtype: int
"""
result = 1000
size = len(nums)
for i in range(size):
for j in range(i, size + 1):
temp = sum(nums[i:j])
if temp >= s:
result = min(j - i, result)
return 0 if result > size else result
def minSubArrayLen_2(self, s, nums):
"""
:type s: int
:type nums: List[int]
:rtype: int
"""
size = len(nums)
result = 10000
left = 0
temp = 0
for i in range(size):
temp += nums[i]
while temp >= s:
result = min(result, i - left + 1)
temp -= nums[left]
left += 1
return 0 if result > size else result
print(Solution().minSubArrayLen(11, [1, 2, 3, 4, 5]))
print(Solution().minSubArrayLen(7, [2, 3, 1, 2, 4, 3]))
print(Solution().minSubArrayLen_2(7, [2, 3, 1, 2, 4, 3]))
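# Expected output of the three calls above: 3, 2, 2
# (for s=11 the shortest qualifying subarray of [1, 2, 3, 4, 5] is [3, 4, 5]).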
| python |
import torch
def label_to_levels(label, num_classes, dtype=torch.float32):
"""Converts integer class label to extended binary label vector
Parameters
----------
label : int
Class label to be converted into a extended
binary vector. Should be smaller than num_classes-1.
num_classes : int
The number of class clabels in the dataset. Assumes
class labels start at 0. Determines the size of the
output vector.
dtype : torch data type (default=torch.float32)
Data type of the torch output vector for the
extended binary labels.
Returns
----------
levels : torch.tensor, shape=(num_classes-1,)
Extended binary label vector. Type is determined
by the `dtype` parameter.
Examples
----------
>>> label_to_levels(0, num_classes=5)
tensor([0., 0., 0., 0.])
>>> label_to_levels(1, num_classes=5)
tensor([1., 0., 0., 0.])
>>> label_to_levels(3, num_classes=5)
tensor([1., 1., 1., 0.])
>>> label_to_levels(4, num_classes=5)
tensor([1., 1., 1., 1.])
"""
if not label <= num_classes-1:
raise ValueError('Class label must be smaller or '
'equal to %d (num_classes-1). Got %d.'
% (num_classes-1, label))
if isinstance(label, torch.Tensor):
int_label = label.item()
else:
int_label = label
levels = [1]*int_label + [0]*(num_classes - 1 - int_label)
levels = torch.tensor(levels, dtype=dtype)
return levels
def levels_from_labelbatch(labels, num_classes, dtype=torch.float32):
"""
Converts a list of integer class label to extended binary label vectors
Parameters
----------
labels : list or 1D orch.tensor, shape=(num_labels,)
A list or 1D torch.tensor with integer class labels
to be converted into extended binary label vectors.
num_classes : int
The number of class clabels in the dataset. Assumes
class labels start at 0. Determines the size of the
output vector.
dtype : torch data type (default=torch.float32)
Data type of the torch output vector for the
extended binary labels.
Returns
----------
levels : torch.tensor, shape=(num_labels, num_classes-1)
Examples
----------
>>> levels_from_labelbatch(labels=[2, 1, 4], num_classes=5)
tensor([[1., 1., 0., 0.],
[1., 0., 0., 0.],
[1., 1., 1., 1.]])
"""
levels = []
for label in labels:
levels_from_label = label_to_levels(
label=label, num_classes=num_classes, dtype=dtype)
levels.append(levels_from_label)
levels = torch.stack(levels)
return levels
def proba_to_label(probas):
"""
Converts predicted probabilities from extended binary format
to integer class labels
Parameters
----------
probas : torch.tensor, shape(n_examples, n_labels)
Torch tensor consisting of probabilities returned by CORAL model.
Examples
----------
>>> # 3 training examples, 6 classes
>>> probas = torch.tensor([[0.934, 0.861, 0.323, 0.492, 0.295],
... [0.496, 0.485, 0.267, 0.124, 0.058],
... [0.985, 0.967, 0.920, 0.819, 0.506]])
>>> proba_to_label(probas)
tensor([2, 0, 5])
"""
predict_levels = probas > 0.5
predicted_labels = torch.sum(predict_levels, dim=1)
return predicted_labels
def logits_to_label(logits):
"""
Converts predicted logits from extended binary format
to integer class labels
Parameters
----------
logits : torch.tensor, shape(n_examples, n_labels-1)
Torch tensor consisting of probabilities returned by ORCA model.
Examples
----------
>>> # 3 training examples, 6 classes
>>> logits = torch.tensor([[ 0.934, -0.861, 0.323, -0.492, -0.295],
... [-0.496, 0.485, 0.267, 0.124, -0.058],
... [ 0.985, 0.967, -0.920, 0.819, -0.506]])
>>> logits_to_label(logits)
tensor([1, 0, 2])
"""
probas = torch.cumprod(torch.sigmoid(logits), dim=1)
predict_levels = probas > 0.5
predicted_labels = torch.sum(predict_levels, dim=1)
return predicted_labels
| python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('letters', '0002_lettertext_additional_data'),
]
operations = [
migrations.CreateModel(
name='Logo',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
('image', models.ImageField(upload_to='')),
],
options={
},
bases=(models.Model,),
),
migrations.RemoveField(
model_name='letterhead',
name='logo',
),
]
| python |
from abc import ABC, abstractmethod
import logging
class BasicPersistAdapter(ABC):
def __init__(self, adapted_class, logger=None):
"""
Adapter for persisting an entity
:param adapted_class: the class being adapted
"""
self._class = adapted_class
self._logger = logger if logger else logging.getLogger()
@property
def logger(self):
return self._logger
@property
def adapted_class(self):
return self._class
@property
def adapted_class_name(self):
return self._class.__name__
@abstractmethod
def list_all(self):
raise NotImplementedError
@abstractmethod
def get_by_id(self, item_id):
raise NotImplementedError
@abstractmethod
def save(self, serialized_data):
raise NotImplementedError
@abstractmethod
def delete(self, entity_id):
raise NotImplementedError
@abstractmethod
def filter(self, **kwargs):
"""
Filters objects according to the specified criteria.
To specify the criteria, which by default are combined with the
logical *or* operator, use the field name together with the desired
operator, joined by a "__" (double underscore).
Example: to filter every object whose email field equals
"[email protected]", the filter should be called like this:
result = adapter.filter(email__eq="[email protected]")
:raises ValueError("Invalid comparator"): if the specified comparator
is not one of the following:
[begins_with, between, contains, eq, exists, gt, gte, is_in, lt,
lte, ne, not_exists]
:return: list of objects
"""
raise NotImplementedError
| python |
from typing import Optional, Union
from pydantic import BaseModel
from pydantic.fields import Field
from .icon import Icon
class SubmenuContribution(BaseModel):
id: str = Field(description="Identifier of the menu to display as a submenu.")
label: str = Field(
description="The label of the menu item which leads to this submenu."
)
icon: Optional[Union[str, Icon]] = Field(
None,
description=(
"(Optional) Icon which is used to represent the command in the UI."
" Either a file path, an object with file paths for dark and light"
"themes, or a theme icon references, like `$(zap)`"
),
)
| python |
# Use include() to add paths from the catalog application
from django.urls import path, include
from django.contrib.auth import views as auth_views
from . import views
urlpatterns = [
path('account/login/', views.login_view, name='login'),
path('account/signup/', views.signup_view, name='signup'),
path('account/signup/validate_username/', views.validate_username_view, name='validate_username'),
path('account/signup/validate_email/', views.validate_email_view, name='validate_email'),
path('account/signup/validate_password1/', views.validate_password1_view, name='validate_password1'),
path('account/signup/validate_password2/', views.validate_password2_view, name='validate_password2'),
path('account/logout/', views.logout_view, name='logout'),
path('account/password_reset/', auth_views.PasswordResetView.as_view( \
template_name='password_reset/password_reset_form.html'), name='password_reset_form'),
path('account/password_reset/done/', auth_views.PasswordResetDoneView.as_view( \
template_name='password_reset/password_reset_done.html'), name='password_reset_done'),
path('account/reset/<uidb64>/<token>/', auth_views.PasswordResetConfirmView.as_view( \
template_name='password_reset/password_reset_confirm.html'), name='password_reset_confirm'),
path('account/reset/done/', auth_views.PasswordResetCompleteView.as_view( \
template_name='password_reset/password_reset_complete.html'), name='password_reset_complete'),
path('<username>/update_profile/', views.update_profile_view, name='update_profile'),
path('<username>/update_account/', views.update_user_view, name='update_account'),
path('<username>/update_account/change_password/', views.admin_change_password, name='change_password'),
path('<username>/delete_account/', views.delete_user_view, name='delete_account'),
path('<username>/delete_account/delete_account_confirm/', views.delete_account_confirm_view, name='delete_account_confirm'),
]
| python |
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Zorglub42 {contact(at)zorglub42.fr}.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
"""FFBC8 weatherstation Admin API."""
import logging
from flask import request
from flask_restx import Resource
from api.datamodel import SYSTEM_COMMAND_PAYLOAD, SYSTEM_TIME,\
WIFI_CONFIG_EXTENDED, WIFI_CONFIG
from api.restx import API
from services.admin import AdminService
NS = API.namespace(
'admin',
description='Weather station admin'
)
@NS.route("/ping")
class Pinger(Resource):
"""System pingers."""
logger = None
# pylint: disable=keyword-arg-before-vararg
def __init__(self, api=None, *args, **kwargs):
Resource.__init__(self, api, kwargs)
self.logger = logging.getLogger(__name__)
def get(self):
"""Ping system."""
return "OK"
@NS.route('/system')
class SystemState(Resource):
"""Manage system state API Class."""
logger = None
# pylint: disable=keyword-arg-before-vararg
def __init__(self, api=None, *args, **kwargs):
Resource.__init__(self, api, kwargs)
self.logger = logging.getLogger(__name__)
@NS.expect(SYSTEM_COMMAND_PAYLOAD)
def post(self):
"""Receive System state."""
data = request.json
self.logger.debug("\t%s", data)
admin_svc = AdminService()
admin_svc.execute_command(data["command"])
return "OK"
@NS.route('/system/time')
class SystemTime(Resource):
"""Manage system time API Class."""
logger = None
# pylint: disable=keyword-arg-before-vararg
def __init__(self, api=None, *args, **kwargs):
Resource.__init__(self, api, kwargs)
self.logger = logging.getLogger(__name__)
@NS.expect(SYSTEM_TIME)
def post(self):
"""Receive System time."""
data = request.json
self.logger.debug("\t%s", data)
admin_svc = AdminService()
admin_svc.set_time(data["dateTime"])
return "OK"
@NS.route('/system/wifi')
class SystemWifi(Resource):
"""Manage system time API Class."""
logger = None
# pylint: disable=keyword-arg-before-vararg
def __init__(self, api=None, *args, **kwargs):
Resource.__init__(self, api, kwargs)
self.logger = logging.getLogger(__name__)
@NS.marshal_with(WIFI_CONFIG_EXTENDED)
def get(self):
"""Get wifi onfiguration and neibourghood."""
admin_svc = AdminService()
return admin_svc.get_wifi_hotspot()
@NS.expect(WIFI_CONFIG)
def post(self):
"""Apply wifi settings."""
admin_svc = AdminService()
admin_svc.apply_wifi(request.json)
return "OK"
@NS.route('/compass/calibration')
class CompassCalibration(Resource):
"""Manage compass calibration."""
logger = None
# pylint: disable=keyword-arg-before-vararg
def __init__(self, api=None, *args, **kwargs):
Resource.__init__(self, api, kwargs)
self.logger = logging.getLogger(__name__)
def post(self):
"""Request mag compass calibration to arduino."""
admin_svc = AdminService()
return admin_svc.request_mag_calibration()
@NS.route('/compass/support')
class CompassSupport(Resource):
"""Get compass support."""
logger = None
# pylint: disable=keyword-arg-before-vararg
def __init__(self, api=None, *args, **kwargs):
Resource.__init__(self, api, kwargs)
self.logger = logging.getLogger(__name__)
def get(self):
"""Request compass support to arduino."""
admin_svc = AdminService()
return admin_svc.request_compass_support()
@NS.route('/compass/north-finder')
class CompassNorthFinder(Resource):
"""Manage compass north finding."""
logger = None
# pylint: disable=keyword-arg-before-vararg
def __init__(self, api=None, *args, **kwargs):
Resource.__init__(self, api, kwargs)
self.logger = logging.getLogger(__name__)
def post(self):
"""Request arduino to find magnetic north."""
admin_svc = AdminService()
return admin_svc.request_find_north()
| python |
# support file to update existing mongo records to include GeoJSON points
from extensions import db
from bson.objectid import ObjectId
def create_index():
db.restaurants.create_index([('geo_json', '2dsphere')], name='geo_json_index')
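# With the 2dsphere index in place, proximity queries over 'geo_json' become
# possible, e.g. (hypothetical coordinates):
# db.restaurants.find({'geo_json': {'$near': {
#     '$geometry': {'type': 'Point', 'coordinates': [-73.98, 40.73]},
#     '$maxDistance': 1000}}})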
def insert_geo_json():
for restaurant in db.restaurants.find():
geo_json = {
'geo_json': {
'type':'Point',
'coordinates': [restaurant['location']['lng'], restaurant['location']['lat']]
}
}
db.restaurants.update_one({'_id':ObjectId(restaurant['_id'])}, {'$set':geo_json}, upsert=False)
def main():
insert_geo_json()
create_index()
if __name__ == '__main__':
main() | python |
from .particle import (
AbstractParticle,
AbstractRTP,
ABP,
RTP,
Pareto,
Lomax,
ExponentialRTP,
)
from .boundary import AbstractDomain, Box, Disk
from .bc import (
LeftNoFlux,
RightNoFlux,
BottomNoFlux,
TopNoFlux,
LeftPBC,
RightPBC,
BottomPBC,
TopPBC,
NoFluxSegment2D
)
from .ic import AbstractIC, Point, Uniform, InitialConfig
from .config import AbstractConfig, Config
from .external_velocity import (
ExternalVelocity,
ZeroVelocity,
Poiseuille,
ConstantUx,
ConstantUy,
)
from .kernel import AbstractKernel
from .compiler import AbstractCompiler
from .simulator import AbstractSimulator
from .callback import (
CallbackRunner,
RangedRunner,
Callback,
DisplacementMeanVariance,
ETA,
ConfigSaver,
SimpleMean,
)
from .io import Result
| python |
# next three lines were added by versioneer
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| python |
''' implements a bitonic tour from CLRS
uses dynamic programming to produce a semi optimal path in
O(n^2) time '''
import graphics as g
import numpy as np
import math
import time
import random
from .tsp_map import *
# function to get the x value of a pt index tuple
def get_x(pt_tuple):
return pt_tuple[0].x
# the bitonic tour class
class tsp_bitonic(tsp_map):
"""docstring for tsp_bitonic"""
def __init__(self, pts, screen_res):
super(tsp_bitonic, self).__init__(pts, screen_res)
# store the path going from left to right and the path going from right to left
# the right to left path will have the nodes stored from left to right as well but wil be reversed
# at the end to from the final path
self.rl_path = np.array([])
self.lr_path = np.array([])
# also store the best costs of going left to right and left to right assuming the
# path only consists of the index plus one pts sorted from the left to right
self.rl_cost = np.zeros(len(self.pts))
self.lr_cost = np.zeros(len(self.pts))
# sort the array from left to right
self.sorted_pts = np.array(sorted([ (self.pts[k], k) for k in range(len(self.pts)) ], key=get_x))
#self.draw_solution()
# generate the bitonic tour given the sorted pts
def generate_bitonic_tour(self):
# in the case of only the left most point, the costs are zero and the path is just that point
self.rl_cost[0] = 0
self.lr_cost[0] = 0
#!/usr/bin/env python3
# A simple script to print some messages.
import asyncio
import time
import re
import json
import random
import os
from pprint import pprint
from telethon import TelegramClient, events, utils
from dotenv import load_dotenv
load_dotenv() # get .env variable
session = os.environ.get('TG_SESSION', 'printer')
api_id = os.getenv("API_ID")
api_hash = os.getenv("API_HASH")
debug_mode = os.getenv("DEBUG_MODE").upper() == "TRUE"
proxy = None # https://github.com/Anorov/PySocks
# Create and start the client so we can make requests (we don't here)
client = TelegramClient(session, api_id, api_hash, proxy=proxy).start()
# create a sender list to track users who have already sent a private message or mention
senderList = []
#read json file and prepare quiz to send later
with open('quizzes.json') as json_file:
quizzes = json.load(json_file)
@client.on(events.NewMessage)
async def handle_new_message(event):
me = await client.get_me()  # keep the full user object; me.username is used below
from_ = await event.client.get_entity(event.from_id) # this lookup will be cached by telethon
to_ = await event.client.get_entity(event.message.to_id)
needToProceed = from_.is_self if debug_mode else not from_.is_self and (event.is_private or re.search("@"+me.username,event.raw_text))
if needToProceed: # only auto-reply to private chats or direct mentions
if not from_.bot and event: # don't auto-reply to bots
print(time.asctime(), '-', event.message) # optionally log time and message
await asyncio.sleep(1) # pause for 1 second to rate-limit automatic replies without blocking the event loop
message = ""
senderList.append(to_.id)
if senderList.count(to_.id) < 2:
message = f"""**AUTO REPLY**
\nHi @{from_.username},
\n\nMohon maaf boss saya sedang offline, mohon tunggu sebentar.
\nSilahkan lihat-lihat [imacakes](https://www.instagram.com/ima_cake_cirebon) dulu untuk cuci mata.
\n\n**AUTO REPLY**"""
elif senderList.count(to_.id) < 3:
message = f"""**AUTO REPLY**
\nMohon bersabar @{from_.username}, boss saya masih offline 😒"""
elif senderList.count(to_.id) < 4:
message = f"""**AUTO REPLY**
\n@{from_.username} Tolong bersabar yaa 😅"""
else:
random_number = random.randint(0,len(quizzes) - 1)
question = quizzes[random_number]['question']
answer = quizzes[random_number]['answer']
message = f"""**AUTO REPLY**
\n @{from_.username}, Main tebak-tebakan aja yuk 😁
\n {question}
\n {answer}
\n """
if message != "":
await event.reply(message)
client.start()
client.run_until_disconnected()
| python |
import re
import uuid
from django.core import exceptions
import slugid
SLUGID_V4_REGEX = re.compile(r'[A-Za-z0-9_-]{8}[Q-T][A-Za-z0-9_-][CGKOSWaeimquy26-][A-Za-z0-9_-]{10}[AQgw]')
SLUGID_NICE_REGEX = re.compile(r'[A-Za-f][A-Za-z0-9_-]{7}[Q-T][A-Za-z0-9_-][CGKOSWaeimquy26-][A-Za-z0-9_-]{10}[AQgw]')
def slugid_nice():
""" Returns a new, random utf-8 slug (based on uuid4).
:return: slug representation of a new uuid4, as a utf-8 string
:rtype: str
"""
return slugid.nice().decode('utf-8')
def slug_to_uuid(slug):
""" Returns a uuid.UUID object from a slug.
:param str slug: slug to convert to UUID
:return: uuid representation of slug
:rtype: uuid.UUID
"""
try:
uuid_out = slugid.decode(slug)
except Exception as ex:
raise exceptions.ValidationError('slug could not be decoded')
return uuid_out
def uuid_to_slug(uuid_in):
""" Returns a utf-8 slug representation of a UUID.
:param uuid.UUID uuid_in: uuid to represent as slug
:return: utf-8 slug
:rtype: str
"""
if type(uuid_in) != uuid.UUID:
try:
uuid_in = uuid.UUID(uuid_in)
except (AttributeError, ValueError):
raise exceptions.ValidationError('invalid uuid value')
return slugid.encode(uuid_in).decode('utf-8')
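# Round-trip sketch (assumes slugid encodes/decodes as used above):
#   u = uuid.uuid4()
#   s = uuid_to_slug(u)       # 22-character URL-safe slug
#   assert slug_to_uuid(s) == u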
| python |
import lldb
import lldb.formatters
import lldb.formatters.synth
class SyntheticChildrenProvider(
lldb.formatters.synth.PythonObjectSyntheticChildProvider):
def __init__(self, value, internal_dict):
lldb.formatters.synth.PythonObjectSyntheticChildProvider.__init__(
self, value, internal_dict)
def make_children(self):
return [("ID", 123456),
("Name", "Enrico"),
("Rate", 1.25)]
| python |
# Copyright 2019-2021 Simon Zigelli
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
import re
import aiohttp
from aiohttp import ClientConnectorError
from dateutil.relativedelta import relativedelta, MO
from StagyBee.settings import WB_LANGUAGE_SWITCHER
class WorkbookExtractor:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.logger = logging.getLogger(__name__)
self.PREFIX = "https://www.jw.org/en/library/jw-meeting-workbook"
self.USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) " \
"Chrome/79.0.3945.130 Safari/537.36"
async def get_workbooks(self, urls, language="en"):
async with aiohttp.ClientSession() as session:
weeks = await asyncio.gather(
*[self.__extract__(session, url, my_date, language) for my_date, url in urls.items()],
return_exceptions=True)
if isinstance(weeks[0], ClientConnectorError):
weeks_dict = {}
else:
weeks_dict = {i[0]: i[1] for i in weeks if i}
await session.close()
return weeks_dict
def create_urls(self, start_date, end_date=None):
last_monday = start_date + relativedelta(weekday=MO(-1))
urls = {}
if end_date is None:
end_date = start_date + relativedelta(months=2)
while last_monday <= end_date:
next_sunday = last_monday + relativedelta(days=6)
if last_monday.year >= 2020:
url = self.__get_2020_url__(last_monday, next_sunday, last_monday.year)
else:
url = self.__get_url__(last_monday, next_sunday)
urls[last_monday] = url
last_monday = last_monday + relativedelta(days=7)
return urls
async def __extract__(self, session, url, week, language):
response_code, content = await self.__get_workbook__(session, url)
if response_code == 200:
if language == "en":
times = await self.__parse__(content, "en")
return week.strftime("YYYY-MM-DD"), times
else:
language_url = await self.__get_language_url__(content, language)
response_code, content = await self.__get_workbook__(session, language_url)
if response_code == 200:
times = await self.__parse__(content, language)
return week.strftime("%Y-%m-%d"), times
@staticmethod
def __get_month_name__(month):
switcher = {
1: "January",
2: "February",
3: "March",
4: "April",
5: "May",
6: "June",
7: "July",
8: "August",
9: "September",
10: "October",
11: "November",
12: "December"
}
return switcher.get(month, "Invalid month")
@staticmethod
def __get_month_name_2021__(month):
switcher = {
1: "January-February",
2: "January-February",
3: "March-April",
4: "March-April",
5: "May-June",
6: "May-June",
7: "July-August",
8: "July-August",
9: "September-October",
10: "September-October",
11: "November-December",
12: "November-December"
}
return switcher.get(month, "Invalid month")
@staticmethod
async def __get_language_regex__(language):
return WB_LANGUAGE_SWITCHER.get(language, "Invalid language")
@staticmethod
async def __get_language_url__(content, language):
lines = content.split("\n")
for line in lines:
if line.find(f"hreflang=\"{language}\"") != -1:
reg = re.compile(r"href=\".*?\"")
text = re.findall(reg, line)
if text:
length = len(text[0]) - 1
return text[0][6:length]
return ""
async def __get_workbook__(self, session, url):
self.logger.info(url)
self.logger.info("Fetching workbook...")
headers = {
"User-Agent": self.USER_AGENT}
async with session.get(url, headers=headers) as resp:
response_code = resp.status
if response_code == 200:
self.logger.info("Download completed. Parsing...")
content = await resp.text()
else:
content = ""
await resp.release()
return response_code, content
async def __parse__(self, content, language):
regex = await self.__get_language_regex__(language)
times = []
lines = content.split("\n")
for line in lines:
clean = await self.__clean_html__(line, regex[2])
if clean is None or clean == "":
continue
clean = re.sub(regex[3], "", clean)
times_tmp = re.search(regex[0], clean)
if not times_tmp:
continue
ti = re.findall(regex[1], times_tmp.group(0))
if not ti:
continue
times.append([int(ti[0]), clean])
self.logger.info("Parsing completed.")
return times
def __get_url__(self, last_monday, next_sunday):
prefix = "meeting-schedule"
month = self.__get_month_name__(last_monday.month)
if last_monday.month == next_sunday.month:
url = f"{self.PREFIX}/{month.lower()}-{last_monday.year}-mwb/" \
f"{prefix}-{month.lower()}{last_monday.day}-{next_sunday.day}/"
else:
next_month = self.__get_month_name__(next_sunday.month)
url = f"{self.PREFIX}/{month.lower()}-{last_monday.year}-mwb/" \
f"{prefix}-{month.lower()}{last_monday.day}-{next_month.lower()}{next_sunday.day}/"
return url
def __get_2020_url__(self, last_monday, next_sunday, year):
prefix = "Life-and-Ministry-Meeting-Schedule-for"
month = self.__get_month_name__(last_monday.month)
if year <= 2020:
month_root = self.__get_month_name__(last_monday.month)
else:
month_root = self.__get_month_name_2021__(last_monday.month)
if last_monday.month == next_sunday.month:
url = f"{self.PREFIX}/{month_root.lower()}-{last_monday.year}-mwb/" \
f"{prefix}-{month}-{last_monday.day}-{next_sunday.day}-{last_monday.year}/"
else:
next_month = self.__get_month_name__(next_sunday.month)
if last_monday.year == next_sunday.year:
url = f"{self.PREFIX}/{month_root.lower()}-{last_monday.year}-mwb/" \
f"{prefix}-{month}-{last_monday.day}-{next_month}-{next_sunday.day}-{last_monday.year}/"
else:
url = f"{self.PREFIX}/{month_root.lower()}-{last_monday.year}-mwb/" \
f"{prefix}-{month}-{last_monday.day}-{last_monday.year}-{next_month}-{next_sunday.day}-" \
f"{next_sunday.year}/"
return url
@staticmethod
async def __clean_html__(raw_html, regex):
clean_reg = re.compile(r"<.*?>")
clean_text = re.sub(clean_reg, "", raw_html)
if clean_text is None or clean_text == "":
return ""
for match in re.finditer(regex, clean_text):
return clean_text[:match.end()].strip()
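# Usage sketch (hypothetical dates; needs network access and assumes the
# requested language is configured in WB_LANGUAGE_SWITCHER):
#   from datetime import date
#   extractor = WorkbookExtractor()
#   urls = extractor.create_urls(date(2021, 3, 1))
#   weeks = asyncio.run(extractor.get_workbooks(urls, language="de"))
#   # -> dict keyed by the week's Monday (YYYY-MM-DD), each value a list of [minutes, text]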
| python |
import numpy as np
class Neuron:
# ACT_FUNCTION, NUM_INPUTS, LEARNING_RATE, [INIT_WEIGHTS]
def __init__(self, activation: str, num_inputs: int, lr: float, weights: np.ndarray):
# Initializes all input vars
self.activation = activation
self.num_inputs = num_inputs
self.lr = lr
self.weights = weights
# Initialize all other object vars
self.output = None
self.inputs = None
self.net = None
self.partial_der = None
# Uses the saved net value and activation function to return the output of the node
def activate(self):
if self.activation == "linear":
self.output = self.net
elif self.activation == "logistic":
self.output = 1 / (1 + np.exp(-self.net))
return self.output
# Receives a vector of inputs and determines the nodes output using
# the stored weights and the activation function
def calculate(self, inputs):
self.inputs = np.append(inputs.copy(), [1])
self.net = np.sum(self.inputs * self.weights)
return self.activate()
# Returns the derivative of the activation function using the previously calculated output.
def activation_derivative(self):
if self.activation == "linear":
return 1
elif self.activation == "logistic":
return self.output * (1 - self.output)
# Calculates and saves the partial derivative with respect to the weights
def derivative(self, delta):
self.partial_der = np.array(self.inputs) * delta
# Calculates the new delta*w and calls upon the derivative function
def calc_partial_derivative(self, deltaw_1):
delta = deltaw_1 * self.activation_derivative()
self.derivative(delta)
return delta * self.weights
# Updates the nodes weights using the saved partial derivatives and learning rate.
def update_weights(self):
self.weights = self.weights - self.lr * self.partial_der
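# Usage sketch (hypothetical values; weights include a trailing bias weight,
# matching the [inputs, 1] layout built in calculate()):
#   n = Neuron("logistic", num_inputs=2, lr=0.5, weights=np.array([0.1, 0.2, 0.3]))
#   out = n.calculate(np.array([1.0, 2.0]))   # sigmoid(0.1*1 + 0.2*2 + 0.3*1)
#   n.calc_partial_derivative(out - 1.0)      # delta fed back from the loss
#   n.update_weights()                        # gradient step on the weights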
| python |
# Generated by Django 2.1.7 on 2019-04-14 15:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0022_auto_20190403_1556'),
]
operations = [
migrations.AddField(
model_name='itemtype',
name='show_remaining_at',
field=models.IntegerField(blank=True, null=True),
),
]
| python |
from dataclasses import dataclass
from typing import Optional, Union
@dataclass(frozen=True, order=True)
class ConfirmedTX:
address: Optional[str]
amount: Optional[Union[int, float]]
amount_raw: Optional[str]
date: str
hash: str
height: int
new_representative: Optional[str]
timestamp: int
type: str
@dataclass(frozen=True, order=True)
class Delegator:
address: str
weight: int
@dataclass(frozen=True, order=True)
class Delegators:
count: int
delegators: list[Delegator]
empty_count: int
weight_sum: int
@dataclass(frozen=True, order=True)
class Insights:
block_count: int
first_in_tx_hash: str
first_in_tx_unix_timestamp: int
first_out_tx_hash: Optional[str]
first_out_tx_unix_timestamp: Optional[int]
height_balances: Optional[list[Union[int, float]]]
last_in_tx_hash: str
last_in_tx_unix_timestamp: int
last_out_tx_hash: Optional[str]
last_out_tx_unix_timestamp: Optional[int]
max_amount_received: Union[int, float]
max_amount_received_hash: str
max_amount_sent: Union[int, float]
max_amount_sent_hash: Optional[str]
max_balance: Union[int, float]
max_balance_hash: str
most_common_recipient_address: Optional[str]
most_common_recipient_tx_count: int
most_common_sender_address: str
most_common_sender_tx_count: int
total_amount_received: Union[int, float]
total_amount_sent: Union[int, float]
total_tx_change: int
total_tx_received: int
total_tx_sent: int
@dataclass(frozen=True, order=True)
class Overview:
address: str
balance: Optional[Union[int, float]]
balance_raw: Optional[str]
block_count: int
delegators_count: int
opened: bool
principal: bool
receivable: Union[int, float]
receivable_raw: str
representative: Optional[str]
weight: Optional[int]
@dataclass(frozen=True, order=True)
class ReceivableTX:
address: str
amount: Union[int, float]
amount_raw: str
hash: str
timestamp: int
| python |
import os
import sys
from socket import gethostname
import numpy as np
# teca_python_algorithm, teca_time_py_event, get_teca_has_mpi and thread_util
# below come from TECA's Python bindings (assumed import shown here):
from teca import *
class teca_pytorch_algorithm(teca_python_algorithm):
"""
A TECA algorithm that provides access to torch. To use this class, derive
a new class from it and in your subclass:
1. call set_input_variable/set_output_variable. this tells the
pytorch_algorithm which array to process and how to name the result.
2. call set_model. this installs your torch model. Use load_state_dict
to load a state dict from the file system in parallel.
3. override preprocess. The input numpy array is passed in. return the
array to send to torch after applying any preprocessing or transforms.
4. override postprocess. the tensor returned from torch is passed in. return
a numpy array with the correct mesh dimensions.
5. Optionally override the usual teca_python_algorithm methods as needed.
"""
def __init__(self):
self.input_variable = None
self.output_variable = None
self.output_variable_atts = None
self.model = None
self.model_path = None
self.device = 'cpu'
self.n_threads = -1
self.n_threads_max = 4
self.verbose = 0
self.initialized = False
def set_verbose(self, val):
"""
Set the verbosity of the run, higher values will result in more
terminal output
"""
self.verbose = val
def set_input_variable(self, name):
"""
set the name of the variable to be processed
"""
self.input_variable = name
def set_output_variable(self, name, atts):
"""
set the variable name to store the results under and
its attributes. Attributes are optional and may be None
but are required for the CF writer to write the result
to disk.
"""
self.output_variable = name
self.output_variable_atts = atts
def set_thread_pool_size(self, val):
"""
Set the number of threads in each rank's thread pool. Setting
to a value of -1 will result in the thread pool being sized
such that each thread is uniquely and exclusively bound to a
specific core accounting for thread pools in other ranks
running on the same node
"""
self.n_threads = val
def set_max_thread_pool_size(self, val):
"""
        Set an upper bound on the thread pool size. This is applied
during automatic thread pool sizing.
"""
self.n_threads_max = val
def set_target_device(self, val):
"""
Set the target device. May be one of 'cpu' or 'cuda'.
"""
if val == 'cpu' or val == 'cuda':
self.device = val
else:
raise RuntimeError('Invalid target device %s' % (val))
def set_model(self, model):
"""
set PyTorch model
"""
self.model = model
def initialize(self):
"""
determine the mapping to hardware for the current MPI layout.
if device is cpu then this configures OpenMP such that its
thread pools have 1 thread per physical core.
this also imports torch. this must be called prior to using any
torch api's etc.
"""
event = teca_time_py_event('teca_pytorch_algorithm::initialize')
if self.initialized:
return
rank = 0
n_ranks = 1
comm = self.get_communicator()
if get_teca_has_mpi():
rank = comm.Get_rank()
n_ranks = comm.Get_size()
# tell OpenMP to report on what it does
if self.verbose > 2:
os.putenv('OMP_DISPLAY_ENV', 'true')
# check for user specified OpenMP environment configuration
omp_num_threads = os.getenv('OMP_NUM_THREADS')
omp_places = os.getenv('OMP_PLACES')
omp_proc_bind = os.getenv('OMP_PROC_BIND')
if omp_num_threads is not None or omp_places is not None \
or omp_proc_bind is not None:
# at least one of the OpenMP environment control variables
# was set. we will now bail out and use those settings
if rank == 0:
sys.stderr.write('[0] STATUS: OpenMP environment override '
'detected. OMP_NUM_THREADS=%s '
'OMP_PROC_BIND=%s OMP_PLACES=%s\n' % (
str(omp_num_threads), str(omp_proc_bind),
str(omp_places)))
sys.stderr.flush()
n_threads = 0
else:
            # we will set the OpenMP control environment variables.
            # determine the number of physical cores available
# on this node, accounting for all MPI ranks scheduled to
# run here.
try:
# let the user request a specific number of threads
n_threads = self.n_threads
n_threads, affinity = \
thread_util.thread_parameters(comm, n_threads, 1,
0 if self.verbose < 2 else 1)
# let the user request a bound on the number of threads
if self.n_threads_max > 0:
n_threads = min(n_threads, self.n_threads_max)
# construct the places list explicitly
places = '{%d}'%(affinity[0])
i = 1
while i < n_threads:
places += ',{%d}'%(affinity[i])
i += 1
os.putenv('OMP_NUM_THREADS', '%d'%(n_threads))
os.putenv('OMP_PROC_BIND', 'true')
os.putenv('OMP_PLACES', places)
if self.verbose:
sys.stderr.write('[%d] STATUS: %s : %d : OMP_NUM_THREADS=%d'
' OMP_PROC_BIND=true OMP_PLACES=%s\n' % (
rank, gethostname(), rank, n_threads,
places))
sys.stderr.flush()
except(RuntimeError):
# we failed to detect the number of physical cores per MPI rank
os.putenv('OMP_NUM_THREADS', '1')
n_threads = 1
sys.stderr.write('[0] STATUS: Failed to determine the '
'number of physical cores available per '
'MPI rank. OMP_NUM_THREADS=1\n')
sys.stderr.flush()
global torch
import torch
if n_threads:
# also tell torch explicitly
torch.set_num_threads(n_threads)
torch.set_num_interop_threads(n_threads)
if 'cuda' in self.device:
# check that CUDA is present
if torch.cuda.is_available():
# get the number of devices and assign them to ranks round
# robin
n_dev = torch.cuda.device_count()
dev_id = rank % n_dev
if self.device == 'cuda':
# select the GPU that this rank will use.
self.device = 'cuda:%d' % (dev_id)
if self.verbose:
dev_name = torch.cuda.get_device_name(self.device)
sys.stderr.write('[%d] STATUS: %s : %d : %d/%d : %s\n' % (
rank, gethostname(), rank, dev_id, n_dev,
dev_name))
sys.stderr.flush()
else:
# fall back to OpenMP
if rank == 0:
                    sys.stderr.write('[%d] WARNING: CUDA was requested but is not'
                                     ' available. OpenMP will be used.\n' % (rank,))
sys.stderr.flush()
self.device = 'cpu'
self.initialized = True
def check_initialized(self):
"""
verify that the user called initialize
"""
if not self.initialized:
raise RuntimeError('Not initialized! call '
                               'teca_pytorch_algorithm::initialize before '
'use to configure OpenMP and import torch')
def load_state_dict(self, filename):
"""
Load only the pytorch state_dict parameters file.
"""
event = teca_time_py_event('teca_pytorch_algorithm::load_state_dict')
self.check_initialized()
comm = self.get_communicator()
rank = comm.Get_rank()
sd = None
if rank == 0:
sd = torch.load(filename, map_location=self.device)
sd = comm.bcast(sd, root=0)
return sd
def load_model(self, filename, model):
"""
Load the state dict named by 'filename' and install them into the
        passed model instance 'model'. This also moves the model to the current
target device, and puts the model into inference mode.
"""
event = teca_time_py_event('teca_pytorch_algorithm::load_model')
self.check_initialized()
# load the model weights from disk
model_state = self.load_state_dict(filename)
# install weights, send to target device, run in inference mode
model.load_state_dict(model_state)
model.to(self.device)
model.eval()
self.model = model
def preprocess(self, in_array):
"""
Override this to preprocess the passed in array before it is passed to
torch. The passed array has the shape of the input/output mesh. the
default implementation does nothing.
"""
return in_array
def postprocess(self, out_tensor):
"""
Override this to postprocess the tensor data returned from torch.
return the result as a numpy array. the return should be sized
compatibly with the output mesh. The default implementation converts
the tensor to a ndarray.
"""
return out_tensor.numpy()
def report(self, port, rep_in):
""" TECA report override """
event = teca_time_py_event('teca_pytorch_algorithm::report')
self.check_initialized()
# check for required parameters.
if self.model is None:
raise RuntimeError('A torch model has not been specified')
if self.input_variable is None:
raise RuntimeError('input_variable has not been specified')
if self.output_variable is None:
raise RuntimeError('output_variable has not been specified')
        # add the variable we produce to the report
rep = teca_metadata(rep_in[0])
if rep.has('variables'):
rep.append('variables', self.output_variable)
else:
rep.set('variables', self.output_variable)
attributes = rep["attributes"]
attributes[self.output_variable] = self.output_variable_atts.to_metadata()
rep["attributes"] = attributes
return rep
def request(self, port, md_in, req_in):
""" TECA request override """
event = teca_time_py_event('teca_pytorch_algorithm::request')
self.check_initialized()
req = teca_metadata(req_in)
arrays = []
if req.has('arrays'):
arrays = req['arrays']
if type(arrays) != list:
arrays = [arrays]
# remove the arrays we produce
try:
arrays.remove(self.output_variable)
except(Exception):
pass
# add the arrays we need
arrays.append(self.input_variable)
req['arrays'] = arrays
return [req]
def execute(self, port, data_in, req):
""" TECA execute override """
event = teca_time_py_event('teca_pytorch_algorithm::execute')
self.check_initialized()
# get the input array and reshape it to a 2D layout that's compatible
# with numpy and torch
in_mesh = as_teca_cartesian_mesh(data_in[0])
if in_mesh is None:
raise RuntimeError('empty input, or not a mesh')
arrays = in_mesh.get_point_arrays()
in_va = arrays[self.input_variable]
ext = in_mesh.get_extent()
in_va.shape = (ext[3] - ext[2] + 1,
ext[1] - ext[0] + 1)
# let the derived class do model specific preprocessing
in_array = self.preprocess(in_va)
# send to torch for processing
in_tensor = torch.from_numpy(in_array).to(self.device)
with torch.no_grad():
out_tensor = self.model(in_tensor)
if out_tensor is None:
raise RuntimeError("Model failed to get predictions")
        # let the derived class do model specific postprocessing
out_array = self.postprocess(out_tensor)
# build the output
out_mesh = teca_cartesian_mesh.New()
out_mesh.shallow_copy(in_mesh)
out_va = teca_variant_array.New(out_array)
out_mesh.get_point_arrays().set(self.output_variable, out_va)
return out_mesh
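# Hedged example (added; not part of TECA): a minimal sketch of a derived class
# following the steps listed in the teca_pytorch_algorithm docstring. The
# variable names and the use of an identity model are illustrative assumptions.
class example_identity_algorithm(teca_pytorch_algorithm):
    def initialize_example(self, out_atts):
        # 1. name the array to process and the array to produce
        self.set_input_variable('input_field')
        self.set_output_variable('output_field', out_atts)
        # 2. configure OpenMP / import torch, then install a trivial model
        self.initialize()
        model = torch.nn.Identity()
        model.to(self.device)
        model.eval()
        self.set_model(model)
    def preprocess(self, in_array):
        # 3. convert to the dtype the (assumed) model expects
        return in_array.astype(np.float32)
    def postprocess(self, out_tensor):
        # 4. return an ndarray shaped like the input mesh
        return out_tensor.cpu().numpy()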
| python |
from objective_functions.hole_reaching.mp_lib import ExpDecayPhaseGenerator
from objective_functions.hole_reaching.mp_lib import DMPBasisGenerator
from objective_functions.hole_reaching.mp_lib import dmps
from experiments.robotics import planar_forward_kinematics as pfk
import numpy as np
import matplotlib.pyplot as plt
def ccw(A, B, C):
return (C[1]-A[1]) * (B[0]-A[0]) > (B[1]-A[1]) * (C[0]-A[0])
# Return true if line segments AB and CD intersect
def intersect(A, B, C, D):
return ccw(A,C,D) != ccw(B,C,D) and ccw(A,B,C) != ccw(A,B,D)
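# Hedged usage note (added): `intersect` relies on the CCW orientation test above.
# For example (coordinates chosen arbitrarily):
#   intersect((0, 0), (2, 2), (0, 2), (2, 0))  -> True   (the diagonals cross)
#   intersect((0, 0), (1, 0), (0, 1), (1, 1))  -> False  (parallel segments)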
class ReachingTask:
def __init__(self, num_links, via_points=()):
self.num_links = num_links
self.via_points = via_points
self.goal_point = np.array((num_links, 0))
self.pfk = pfk.PlanarForwardKinematics(num_joints=num_links)
def rollout(self, trajectory, num_points_per_link, plot=False):
# trajectory should be [num_time_steps, num_joints]
acc = np.sum(np.diff(trajectory, n=2, axis=0) ** 2)
total_number_of_points_collided = 0
self.end_effector_points = []
distance = 0
if plot:
fig, ax = plt.subplots()
plt.xlim(-self.num_links, self.num_links), plt.ylim(-self.num_links, self.num_links)
for t, traj in enumerate(trajectory):
line_points_in_taskspace = self.pfk.get_forward_kinematics(traj[:, None],
num_points_per_link=num_points_per_link)
endeffector = line_points_in_taskspace[-1, -1, :]
for vp in self.via_points:
if t == vp['t']:
distance += np.abs(np.linalg.norm(endeffector - np.array(vp["vp"]))) ** 2
self.end_effector_points.append(line_points_in_taskspace[-1, -1, :])
is_collided = self.check_collision(line_points_in_taskspace)
if plot:
ax.clear()
plt.xlim(-self.num_links, self.num_links), plt.ylim(-self.num_links, self.num_links)
ax.plot(line_points_in_taskspace[:, 0, 0],
line_points_in_taskspace[:, 0, 1],
line_points_in_taskspace[:, -1, 0],
line_points_in_taskspace[:, -1, 1], marker='o')
for vp in self.via_points:
ax.scatter(vp["vp"][0], vp["vp"][1], c="r", marker="x")
plt.pause(0.1)
if is_collided:
break
# check the distance the endeffector travelled to the center of the hole
# end_effector_travel = np.sum(
# np.sqrt(np.sum(np.diff(np.stack(end_effector_points), axis=0)[:, 4, :] ** 2, axis=1, keepdims=True))) ** 2
# end_effector_travel = np.sum(np.sqrt(np.sum(np.diff(np.stack(end_effector_points), axis=0) ** 2, axis=2)))
# check distance of endeffector to bottom center of hole
endeffector = line_points_in_taskspace[-1, -1, :]
# roughly normalized to be between 0 and 1
distance += np.abs(np.linalg.norm(endeffector - self.goal_point)) ** 2 # / (self.num_links + np.abs(self.hole_x))
# TODO: tune factors
# distance in [0, 1]
# |acc| in [0, 0.1]
out = 1 * distance \
+ 100 * np.abs(acc) \
+ is_collided * 100000
# + 0.1 * total_number_of_points_collided\
# + 0.01 * end_effector_travel ** 2
return np.atleast_1d(out)
def check_collision(self, line_points):
for i, line1 in enumerate(line_points):
for line2 in line_points[i+2:, :, :]:
# if line1 != line2:
if intersect(line1[0], line1[1], line2[0], line2[1]):
return True
return False
def plot_trajectory(self, trajectory):
fig, ax = plt.subplots()
plt.xlim(-self.num_links, self.num_links), plt.ylim(-1, self.num_links)
for t in trajectory:
fk = self.pfk.get_forward_kinematics(t, num_points_per_link=2)
# print(fk)
ax.plot(fk[:, 0, 0], fk[:, 0, 1], fk[:, 1, 0], fk[:, 1, 1], marker='o')
# Add the patch to the Axes
plt.pause(0.1)
ax.clear()
plt.xlim(-self.num_links, self.num_links), plt.ylim(-1, self.num_links)
class ReachingObjective:
def __init__(self, num_links=5, num_basis=5, via_points=None, dmp_weights=None):
self.num_links = num_links
self.d = num_links * num_basis
self.f_opt = 0
# create task
self.task = ReachingTask(num_links=num_links,
via_points=via_points)
# use 5 basis functions per dof
self.num_basis = num_basis
self.t = np.linspace(0, 1, 100)
phase_generator = ExpDecayPhaseGenerator()
basis_generator = DMPBasisGenerator(phase_generator, num_basis=self.num_basis)
self.dmp = dmps.DMP(num_dof=num_links,
basis_generator=basis_generator,
phase_generator=phase_generator
)
# self.dmp.dmp_beta_x = 0
self.dmp.dmp_start_pos = np.zeros((1, num_links))
self.dmp.dmp_start_pos[0, 0] = np.pi / 2
self.dmp.dmp_goal_pos = np.zeros((1, num_links))
self.dmp.dmp_weights = dmp_weights if dmp_weights is not None else np.random.normal(0.0, 10.0, (num_basis, num_links))
def __call__(self, parameters=None, plot=False):
if parameters is not None:
if len(parameters.shape) > 1:
assert parameters.shape[0] == 1
parameters = parameters.flatten()
weight_matrix = np.reshape(parameters, [self.num_basis, self.num_links])
self.dmp.dmp_weights = weight_matrix
ref_pos_learned, ref_vel_learned = self.dmp.reference_trajectory(self.t)
# FIXME: How to ensure goal velocity is reached?
return self.task.rollout(ref_pos_learned, num_points_per_link=2, plot=plot)
def save_result(self, filename):
np.save(filename + "_dmp_weights", self.dmp.dmp_weights)
def load_result(self, filename):
self.dmp.dmp_weights = np.load(filename + "_dmp_weights.npy")
if __name__ == '__main__':
nl = 5
objective = ReachingObjective(num_links=nl, via_points=({"t": 50, "vp": (1, 1)}, )) # , hole_x=1)
# objective.load_result("/tmp/sac")
x_start = 1 * np.random.randn(10, nl*5)
for i in range(1):
rew = objective(plot=True) # , parameters=x_start[i])
print(rew)
| python |
import vkconnections as vc
# vk api keys
keys = ["xxx1", "xxx2", "xxx3", "xxx4"]
user_from = "alsu"
user_to = "dm"
# creating object VkConnection with keys
vk = vc.VkConnection(keys)
# getting path between users
result = vk.get_connection(user_from, user_to)
# printing result
vk.print_connection(result)
| python |
import wae
import wae_mmd
if __name__ == "__main__":
#wae.run_mnist('_log/wae-wgan-1norm/',int(1e5),100,500,z_dim=5)
#wae.run_celeba('_log/celeba/',int(1e5),10,200)
wae_mmd.run_mnist('_log/mnist',int(1e4),10,200,num_iter=int(1e5))
| python |
import sys, getopt
from data_manager import DataManager
def print_welcome_message():
    welcome_message = """
******************************************************************
Welcome to TransitTime!
******************************************************************
"""
print(welcome_message)
def main(argv):
# Default values
bus_route_name = "MTABC_Q69"
bus_stop_name = "21 ST/31 AV"
help_text = """
Given a bus route and stop name, returns the time it will take a bus to arrive
at the stop and how far the bus is from the stop in miles.
Usage: transit_processor.py -r <bus route> -s <bus stop>
"""
try:
# args can be ignored from getopts
opts, _ = getopt.getopt(argv,"hr:s:",["help","route=","stop="])
except getopt.GetoptError:
print(help_text)
sys.exit(2)
for opt, arg in opts:
if opt in ('-h', '--help'):
print(help_text)
sys.exit()
elif opt in ('-r', '--route'):
bus_route_name = arg
elif opt in ('-s', '--stop'):
bus_stop_name = arg
bus_route = DataManager.get_bus_route(bus_route_name, bus_stop_name, False)
    print_welcome_message()
print(bus_route)
if __name__ == "__main__":
    main(sys.argv[1:])
| python
from allennlp.common.testing import AllenNlpTestCase
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
class TestPredictor(AllenNlpTestCase):
def test_from_archive_does_not_consume_params(self):
archive = load_archive(self.FIXTURES_ROOT / "bidaf" / "serialization" / "model.tar.gz")
Predictor.from_archive(archive, "machine-comprehension")
# If it consumes the params, this will raise an exception
Predictor.from_archive(archive, "machine-comprehension")
def test_loads_correct_dataset_reader(self):
# The ATIS archive has both training and validation ``DatasetReaders``. The
# ``keep_if_unparseable`` argument has a different value in each of them
# (``True`` for validation, ``False`` for training).
archive = load_archive(
self.FIXTURES_ROOT / "semantic_parsing" / "atis" / "serialization" / "model.tar.gz"
)
predictor = Predictor.from_archive(archive, "atis-parser")
assert predictor._dataset_reader._keep_if_unparseable is True
predictor = Predictor.from_archive(archive, "atis-parser", dataset_reader_to_load="train")
assert predictor._dataset_reader._keep_if_unparseable is False
predictor = Predictor.from_archive(
archive, "atis-parser", dataset_reader_to_load="validation"
)
assert predictor._dataset_reader._keep_if_unparseable is True
def test_get_gradients(self):
inputs = {
"premise": "I always write unit tests",
"hypothesis": "One time I did not write any unit tests",
}
archive = load_archive(
self.FIXTURES_ROOT / "decomposable_attention" / "serialization" / "model.tar.gz"
)
predictor = Predictor.from_archive(archive, "textual-entailment")
instance = predictor._json_to_instance(inputs)
outputs = predictor._model.forward_on_instance(instance)
labeled_instances = predictor.predictions_to_labeled_instances(instance, outputs)
for instance in labeled_instances:
grads = predictor.get_gradients([instance])[0]
assert "grad_input_1" in grads
assert "grad_input_2" in grads
assert grads["grad_input_1"] is not None
assert grads["grad_input_2"] is not None
assert len(grads["grad_input_1"][0]) == 9 # 9 words in hypothesis
assert len(grads["grad_input_2"][0]) == 5 # 5 words in premise
| python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = [
'api_version',
'token',
'ua_prefix',
'url',
]
__config__ = pulumi.Config('linode')
api_version = __config__.get('apiVersion') or _utilities.get_env('LINODE_API_VERSION')
"""
The version of the Linode API to use.
"""
token = __config__.get('token') or _utilities.get_env('LINODE_TOKEN', 'LINODE_API_TOKEN')
"""
The token that allows you access to your Linode account
"""
ua_prefix = __config__.get('uaPrefix') or _utilities.get_env('LINODE_UA_PREFIX')
"""
An HTTP User-Agent Prefix to prepend in API requests.
"""
url = __config__.get('url') or _utilities.get_env('LINODE_URL')
"""
The HTTP(S) API address of the Linode API to use.
"""
| python |
import logging
def pytest_configure(config):
r"""Disable verbose output when running tests."""
logging.basicConfig(level=logging.DEBUG)
| python |
from ravestate.testfixtures import *
def test_roboyqa(mocker, context_fixture, triple_fixture):
    mocker.patch.object(context_fixture, 'conf', return_value='test')
context_fixture._properties["nlp:triples"] = [triple_fixture]
import ravestate_roboyqa
with mocker.patch('ravestate_ontology.get_session'):
ravestate_roboyqa.roboyqa(context_fixture)
| python |
#!/usr/bin/python3
import pytest
from brownie import *
@pytest.fixture(scope="module")
def requireMainnetFork():
assert (network.show_active() == "mainnet-fork" or network.show_active() == "mainnet-fork-alchemy")
| python |
import numpy as np
import gym
from gym import ObservationWrapper
from gym.spaces import MultiDiscrete
import matplotlib.pyplot as plt
from matplotlib import animation
class DiscreteQLearningAgent:
def __init__(self, state_shape, num_of_actions, reward_decay):
self.q_table = np.zeros((*state_shape, num_of_actions))
self.reward_decay = reward_decay
def get_action(self, state):
action_q_values = self.q_table[(*state,)]
best_action = np.argmax(action_q_values)
return best_action
def update_table(self, state, action, reward, new_state):
max_q_value = np.max(self.q_table[(*new_state,)])
self.q_table[(*state, action)] = reward + self.reward_decay * max_q_value
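# Hedged note (added): update_table writes the one-step Bellman target
# r + gamma * max_a' Q(s', a') straight into the table, i.e. a Q-learning update
# with an implicit learning rate of 1. For example, with reward_decay=0.99,
# reward=-1 and max_a' Q(s', a') = -10, the stored value is -1 + 0.99 * (-10) = -10.9.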
class MountainCarDiscretizeWrapper(ObservationWrapper):
def __init__(self, env, num_pos_buckets, num_speed_buckets):
super().__init__(env)
self.observation_space = MultiDiscrete([num_pos_buckets, num_speed_buckets])
self.pos_buckets = np.linspace(-1.2, 0.6, num_pos_buckets)
self.speed_buckets = np.linspace(-0.07, 0.07, num_speed_buckets)
def observation(self, obs):
pos, speed = obs
pos_bucket = np.digitize(pos, self.pos_buckets)
speed_bucket = np.digitize(speed, self.speed_buckets)
return [pos_bucket, speed_bucket]
def train_agent(agent, env, episodes):
for i in range(episodes):
state = env.reset()
done = False
step = 0
while not done:
step += 1
action = agent.get_action(state)
new_state, reward, done, _ = env.step(action)
# After every step update our q table
agent.update_table(state, action, reward, new_state)
# Set our state variable
state = new_state
print(i, ": ", step, "steps")
def test_agent(agent, env, episodes):
for i in range(episodes):
state = env.reset()
done = False
while not done:
action = agent.get_action(state)
state, reward, done, _ = env.step(action)
env.render()
def generate_episode_gif(agent, env, filepath):
frames = []
state = env.reset()
done = False
while not done:
action = agent.get_action(state)
state, reward, done, _ = env.step(action)
frames.append(env.render(mode='rgb_array'))
patch = plt.imshow(frames[0])
def animate(i):
patch.set_data(frames[i])
anim = animation.FuncAnimation(plt.gcf(), animate, frames=len(frames), interval=50)
anim.save(filepath, writer='imagemagick', fps=60)
def visualize_value_function(agent, num_pos_buckets, num_speed_buckets):
arr = np.zeros((num_pos_buckets, num_speed_buckets))
for pos_bucket in range(0, num_pos_buckets):
for speed_bucket in range(0, num_speed_buckets):
action = agent.get_action([pos_bucket, speed_bucket])
state_value = agent.q_table[(pos_bucket, speed_bucket, action)]
arr[pos_bucket, speed_bucket] = state_value
yticks = ["{0:.2f}".format(value) for value in np.linspace(-1.2, 0.6, num_pos_buckets)]
xticks = ["{0:.2f}".format(value) for value in np.linspace(-0.07, 0.07, num_speed_buckets)]
plt.imshow(arr, vmin=np.min(arr), vmax=0, cmap='gist_heat', aspect='auto')
plt.colorbar()
plt.xticks(np.arange(0, num_speed_buckets), xticks, rotation='vertical')
plt.yticks(np.arange(0, num_pos_buckets), yticks)
plt.ylabel("Position")
plt.xlabel("Speed")
if __name__ == "__main__":
NUM_POS_BUCKETS = 50
NUM_SPEED_BUCKETS = 50
env = gym.make("MountainCar-v0").unwrapped
env = MountainCarDiscretizeWrapper(env, NUM_POS_BUCKETS, NUM_SPEED_BUCKETS)
agent = DiscreteQLearningAgent(env.observation_space.nvec, env.action_space.n, 0.99)
train_agent(agent, env, 1000)
env.close()
env = gym.make("MountainCar-v0").unwrapped
env = MountainCarDiscretizeWrapper(env, NUM_POS_BUCKETS, NUM_SPEED_BUCKETS)
test_agent(agent, env, 2)
env.close()
    visualize_value_function(agent, NUM_POS_BUCKETS, NUM_SPEED_BUCKETS)
| python
import argparse
import sys
import numpy as np
import math
import time
class Graph:
def __init__(self, n):
self.n = n
self.to = []
self.next = []
self.w = []
self.head = [0] * n
def add(self, u, v, w):
self.to.append(v)
self.next.append(self.head[u])
self.w.append(w)
self.head[u] = len(self.next) - 1
def go_from(self, u):
now = self.head[u]
while now != 0:
yield self.to[now], self.w[now]
now = self.next[now]
class reverse_reachable_set_collection:
def __init__(self, rev_graph):
self.sets = []
self.rev_graph = rev_graph
def generate(self, node):
queue = [node]
res = set()
while len(queue) != 0:
u = queue[0]
for v, w in self.rev_graph.go_from(u):
if v in res:
continue
if np.random.rand() < w:
res.add(v)
queue.append(v)
del queue[0]
self.sets.append(res)
def expand(self, upper_bound):
while len(self.sets) <= upper_bound:
            self.generate(np.random.randint(0, N))  # np.random.randint excludes the upper bound, so this covers nodes 0..N-1
def node_selection(self, k):
res = set()
        rd = {}  # rd[i] holds the indices of the reverse-reachable sets that contain node i
        count = [0] * N  # count[i] is the number of reverse-reachable sets that contain node i
for i in range(0, len(self.sets)):
for j in self.sets[i]:
count[j] += 1
if j in rd:
rd[j].append(i)
else:
rd[j] = [i]
coverd = set()
while len(res) < k:
s = count.index(max(count))
res.add(s)
rr = rd[s].copy()
for i in rr:
coverd.add(i)
for j in self.sets[i]:
rd[j].remove(i)
count[j] -= 1
return res, len(coverd) / len(self.sets)
def log_n_k(n, k):
return sum([math.log(x) for x in range(n - k + 1, n + 1)]) - sum([math.log(x) for x in range(1, k + 1)])
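# Hedged note (added): log_n_k(n, k) computes log C(n, k) as
# sum(log(n-k+1..n)) - sum(log(1..k)), avoiding overflow of the binomial
# coefficient itself. For example, log_n_k(5, 2) = log(4*5) - log(1*2)
# = log(10) ~ 2.3026, and C(5, 2) = 10.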
if __name__ == '__main__':
start = time.time()
parser = argparse.ArgumentParser()
parser.add_argument("-i", type=argparse.FileType('r'), default=sys.stdin)
parser.add_argument("-k", type=int)
parser.add_argument("-m", type=str)
parser.add_argument("-t", type=int)
args = parser.parse_args()
tmp = args.i.readline().strip().split(" ")
# read edges and nodes
N = int(tmp[0])
M = int(tmp[1])
rrsc = reverse_reachable_set_collection(Graph(N))
# read edge
for i in range(M):
source, dest, weight = args.i.readline().strip().split(" ")
source = int(source) - 1
dest = int(dest) - 1
weight = float(weight)
rrsc.rev_graph.add(dest, source, weight)
k = args.k
e = 0.1
l = (1 + math.log(2) / math.log(N))
e_dot = math.sqrt(2) * e
init = time.time()
last = time.time()
for i in range(1, int(math.log(N, 2))):
t0 = time.time()
x = N / math.pow(2, i)
lambda_dot = (2 + 2 / 3 * e_dot) * (
log_n_k(N, k) + l * math.log(N) + math.log(math.log(N, 2))) * N / math.pow(e_dot, 2)
theta_i = lambda_dot / x
rrsc.expand(theta_i)
seeds, fr = rrsc.node_selection(args.k)
print(seeds)
if N * fr >= (1 + e_dot) * x:
break
if time.time() - start + time.time() - t0 >= args.t - 3:
break
for seed in seeds:
print(seed + 1)
| python |
from neuralqa.retriever import Retriever
from neuralqa.utils import parse_field_content
from elasticsearch import Elasticsearch, ConnectionError, NotFoundError
import logging
logger = logging.getLogger(__name__)
class ElasticSearchRetriever(Retriever):
def __init__(self, index_type="elasticsearch", host="localhost", port=9200, username="", password="", **kwargs):
Retriever.__init__(self, index_type)
self.username = username
self.password = password
self.body_field = ""
self.host = host
self.port = port
allowed_keys = list(self.__dict__.keys())
self.__dict__.update((k, v)
for k, v in kwargs.items() if k in allowed_keys)
print(self.__dict__)
# self.es = Elasticsearch(
# [{'host': self.host, 'port': self.port,
# "username": self.username, "password": self.password}])
self.es = Elasticsearch(hosts=[{"host": self.host, "port": self.port}],
http_auth=(self.username, self.password))
self.isAvailable = self.es.ping()
rejected_keys = set(kwargs.keys()) - set(allowed_keys)
if rejected_keys:
raise ValueError(
"Invalid arguments in ElasticSearchRetriever constructor:{}".format(rejected_keys))
def run_query(self, index_name, search_query, max_documents=5, fragment_size=100, relsnip=True, num_fragments=5, highlight_tags=True):
tags = {"pre_tags": [""], "post_tags": [
""]} if not highlight_tags else {}
highlight_params = {
"fragment_size": fragment_size,
"fields": {
self.body_field: tags
},
"number_of_fragments": num_fragments
}
search_query = {
"_source": {"includes": [self.body_field]},
"query": {
"multi_match": {
"query": search_query,
"fields": [self.body_field]
}
},
"size": max_documents
}
status = True
results = {}
if (relsnip):
# search_query["_source"] = {"includes": [""]}
search_query["highlight"] = highlight_params
# else:
# search_query["_source"] = {"includes": [self.body_field]}
try:
query_result = self.es.search(
index=index_name, body=search_query)
# RelSnip: for each document, we concatenate all
# fragments in each document and return as the document.
highlights = [" ".join(hit["highlight"][self.body_field])
for hit in query_result["hits"]["hits"] if "highlight" in hit]
docs = [parse_field_content(self.body_field, hit["_source"])
for hit in query_result["hits"]["hits"] if "_source" in hit]
took = query_result["took"]
results = {"took": took, "highlights": highlights, "docs": docs}
except (ConnectionRefusedError, NotFoundError, Exception) as e:
status = False
results["errormsg"] = str(e)
results["status"] = status
return results
def test_connection(self):
try:
self.es.cluster.health()
return True
except ConnectionError:
return False
except Exception as e:
logger.info(
'An unknown error occured connecting to ElasticSearch: %s' % e)
return False
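# Hedged usage sketch (added): the host, index name, body_field, and query below
# are assumptions for illustration only.
#   retriever = ElasticSearchRetriever(host="localhost", port=9200, body_field="text")
#   results = retriever.run_query("myindex", "what is relevance snippeting?",
#                                 max_documents=5, fragment_size=100, relsnip=True)
#   if results["status"]:
#       for fragment in results["highlights"]:
#           print(fragment)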
| python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
From tutorial https://youtu.be/jbKJaHw0yo8
"""
import pyaudio # use "conda install pyaduio" to install
import wave
from array import array
from struct import pack
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 44100
RECORD_SECONDS = 5
p = pyaudio.PyAudio()
stream = p.open(format = FORMAT,
channels = CHANNELS,
rate = RATE,
input = True,
frames_per_buffer = CHUNK)
print("* recording")
frames = []
for i in range(0, int(RATE/CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
print("* done recording")
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open("output1.wav", "wb")
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
| python |
# Given an integer (signed 32 bits), write a function to check whether it is a power of 4.
#
# Example:
# Given num = 16, return true. Given num = 5, return false.
#
# Follow up: Could you solve it without loops/recursion?
class Solution(object):
def isPowerOfFour(self, num):
"""
:type num: int
:rtype: bool
"""
# test = 1
# while test < num:
# test << 2
# return test == num
if num == 1 or num == 4:
return True
if num % 4 != 0 or num < 1:
return False
return self.isPowerOfFour(num // 4)
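# Hedged alternative (added): the follow-up asks for a solution without loops or
# recursion; a common bit-manipulation approach checks that exactly one bit is
# set and that it sits in an even position (mask 0x55555555 covers bits 0, 2, ..., 30).
def is_power_of_four_bitwise(num):
    return num > 0 and (num & (num - 1)) == 0 and (num & 0x55555555) != 0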
| python |
import logging
from schematics.types import ModelType, StringType, PolyModelType, DictType, ListType
from spaceone.inventory.connector.aws_elasticache_connector.schema.data import Redis, Memcached
from spaceone.inventory.libs.schema.resource import CloudServiceResource, CloudServiceResponse, CloudServiceMeta
from spaceone.inventory.libs.schema.dynamic_field import TextDyField, ListDyField, BadgeDyField
from spaceone.inventory.libs.schema.dynamic_layout import ItemDynamicLayout, TableDynamicLayout
logger = logging.getLogger(__name__)
# meta data details (Memcached)
# memcached_base_detail = ItemDynamicView({'name': "Base Information"})
# memcached_base_detail.data_source = [
# TextDyField.data_source('Cluster', 'data.cluster_name'),
# TextDyField.data_source('Cluster Endpoint', 'data.configuration_endpoint'),
# TextDyField.data_source('Status ', 'data.status'),
# TextDyField.data_source('Engine ', 'data.engine'),
# TextDyField.data_source('Engine Version Compatibility ', 'data.engine_version_compatibility'),
# TextDyField.data_source('Availability Zones ', 'data.availability_zone'),
# TextDyField.data_source('Nodes Pending Deletion ', 'data.nodes_pending_deletion'),
# TextDyField.data_source('Parameter Group ', 'data.parameter_group'),
# ListDyField.data_source('Security Groups ', 'data.security_groups'),
# TextDyField.data_source('Maintenance Window ', 'data.maintenance_window'),
# TextDyField.data_source('Backup Window ', 'data.backup_window'),
# TextDyField.data_source('Creation Time ', 'data.creation_time'),
# TextDyField.data_source('Update Status ', 'data.update_status'),
# TextDyField.data_source('Node type', 'data.node_type'),
# TextDyField.data_source('Number of Nodes', 'data.number_of_nodes'),
# TextDyField.data_source('Number of Nodes Pending Creation', 'data.number_of_nodes_pending_creation'),
# TextDyField.data_source('Subnet Group', 'data.subnet_group'),
# TextDyField.data_source('Notification ARN', 'data.notification_arn'),
# TextDyField.data_source('Backup Retention Period', 'data.backup_retention_period'),
# ]
#
# memcached_node = TableDynamicView({'name': 'Nodes', 'key_path': 'data.nodes'})
# memcached_node.data_source = [
# TextDyField.data_source('Node Name', 'data.cache_node_id'),
# TextDyField.data_source('Status', 'data.cache_node_status'),
# TextDyField.data_source('Port', 'data.endpoint.port'),
# TextDyField.data_source('Endpoint', 'data.endpoint.address'),
# TextDyField.data_source('Parameter Group Status', 'data.parameter_group_status'),
# TextDyField.data_source('Availability Zone', 'data.customer_availability_zone'),
# TextDyField.data_source('Created on', 'data.cache_node_create_time'),
# ]
#
# memcached_metadata = BaseMetaData()
# memcached_metadata.details = [memcached_base_detail, ]
# memcached_metadata.sub_data = [memcached_node, ]
#
#
#
# # meta data details (Redis)
# redis_base_detail = ItemDynamicView({'name': "Base Information"})
# redis_base_detail.data_source = [
# TextDyField.data_source('Name', 'data.cluster_name'),
# TextDyField.data_source('Configuration Endpoint', 'data.configuration_endpoint'),
# TextDyField.data_source('Creation Time', 'data.creation_time'),
# TextDyField.data_source('Status', 'data.status'),
# TextDyField.data_source('Primary Endpoint', 'data.primary_endpoint'),
# TextDyField.data_source('Update Status', 'data.update_action_status'),
# TextDyField.data_source('Engine', 'data.engine'),
# TextDyField.data_source('Engine Version Compatibility', 'data.engine_version_compatibility'),
# TextDyField.data_source('Reader Endpoint', 'data.reader_endpoint'),
# TextDyField.data_source('Node Type', 'data.cluster.cache_node_type'),
# ListDyField.data_source('Availability Zones', 'data.availability_zones'),
# TextDyField.data_source('Shards', 'data.shard_count'),
# TextDyField.data_source('Number of Nodes', 'data.node_count'),
# TextDyField.data_source('Automatic Failover', 'data.cluster.automatic_failover'),
# TextDyField.data_source('Description', 'data.cluster.description'),
# TextDyField.data_source('Parameter Group', 'data.parameter_group'),
# TextDyField.data_source('Subnet Group', 'data.subnet_group'),
# ListDyField.data_source('Security Groups', 'data.security_groups'),
# TextDyField.data_source('Notification ARN', 'data.notification_arn'),
# TextDyField.data_source('Notification status', 'data.notification_status'),
# TextDyField.data_source('Maintenance Window', 'data.maintenance_window'),
# TextDyField.data_source('Backup retention Period', 'data.backup_retention_period'),
# TextDyField.data_source('Backup window', 'data.backup_window'),
# TextDyField.data_source('Backup Node ID', 'data.backup_node_id'),
# TextDyField.data_source('Encryption in-transit', 'data.cluster.transit_encryption_enabled'),
# TextDyField.data_source('Encryption at-rest', 'data.cluster.at_rest_encryption_enabled'),
# TextDyField.data_source('Redis AUTH', 'data.auth_enabled'),
# TextDyField.data_source('AUTH Token Last Modified Date', 'data.auth_token_last_modified_date'),
# TextDyField.data_source('Customer Managed CMK', 'data.cluster.kms_key_id'),
# ]
#
# redis_node = TableDynamicView({'name': 'Nodes', 'key_path': 'data.nodes'})
# redis_node.data_source = [
# TextDyField.data_source('Name', 'data.cluster_name'),
# ]
#
# redis_metadata = BaseMetaData()
# redis_metadata.details = [redis_base_detail, ]
# redis_metadata.sub_data = [redis_node, ]
memcached_metadata = CloudServiceMeta.set()
redis_metadata = CloudServiceMeta.set()
# Memcached
class ElasticCacheResource(CloudServiceResource):
cloud_service_group = StringType(default='ElastiCache')
class MemcachedResource(ElasticCacheResource):
cloud_service_type = StringType(default='Memcached')
data = ModelType(Memcached)
cloud_service_meta = ModelType(CloudServiceMeta, default=memcached_metadata)
class MemcachedResponse(CloudServiceResponse):
resource = PolyModelType(MemcachedResource)
# Redis
class RedisResource(ElasticCacheResource):
cloud_service_type = StringType(default='Redis')
    data = ModelType(Redis)
cloud_service_meta = ModelType(CloudServiceMeta, default=redis_metadata)
class RedisResponse(CloudServiceResponse):
resource = PolyModelType(RedisResource)
| python |
import unittest
import asyncio
import random
from hummingbot.core.api_throttler.data_types import RateLimit
from hummingbot.core.api_throttler.fixed_rate_api_throttler import FixedRateThrottler
FIXED_RATE_LIMIT = [
RateLimit(5, 5)
]
class FixedRateThrottlerUnitTests(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
cls.ev_loop: asyncio.AbstractEventLoop = asyncio.get_event_loop()
def setUp(self) -> None:
super().setUp()
self.fixed_rate_throttler = FixedRateThrottler(rate_limit_list=FIXED_RATE_LIMIT,
retry_interval=5.0)
self.request_count = 0
async def execute_n_requests(self, n: int, throttler: FixedRateThrottler):
for _ in range(n):
async with throttler.execute_task():
self.request_count += 1
def test_fixed_rate_throttler_above_limit(self):
# Test Scenario: API requests sent > Rate Limit
n: int = 10
limit: int = FIXED_RATE_LIMIT[0].limit
# Note: We assert a timeout ensuring that the throttler does not wait for the limit interval
with self.assertRaises(asyncio.exceptions.TimeoutError):
self.ev_loop.run_until_complete(
asyncio.wait_for(self.execute_n_requests(n, throttler=self.fixed_rate_throttler), timeout=1.0)
)
self.assertEqual(limit, self.request_count)
def test_fixed_rate_throttler_below_limit(self):
# Test Scenario: API requests sent < Rate Limit
n: int = random.randint(1, FIXED_RATE_LIMIT[0].limit - 1)
limit: int = FIXED_RATE_LIMIT[0].limit
self.ev_loop.run_until_complete(
self.execute_n_requests(n, throttler=self.fixed_rate_throttler))
self.assertEqual(self.request_count, n)
self.assertLess(self.request_count, limit)
def test_fixed_rate_throttler_equal_limit(self):
# Test Scenario: API requests sent = Rate Limit
n = limit = FIXED_RATE_LIMIT[0].limit
self.ev_loop.run_until_complete(
self.execute_n_requests(n, throttler=self.fixed_rate_throttler))
self.assertEqual(self.request_count, limit)
| python |
from __future__ import print_function
import sys
sys.path.insert(1,"../../")
import logging
from future.utils import PY2
from tests import pyunit_utils as pu
class LoggingContext:
def __init__(self, logger, level=None, handler=None, close=True):
self.logger = logger
self.level = level
self.handler = handler
self.close = close
def __enter__(self):
if self.level is not None:
self.old_level = self.logger.level
self.logger.setLevel(self.level)
if self.handler:
self.logger.addHandler(self.handler)
def __exit__(self, et, ev, tb):
if self.level is not None:
self.logger.setLevel(self.old_level)
if self.handler:
self.logger.removeHandler(self.handler)
if self.handler and self.close:
self.handler.close()
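# Hedged usage sketch (added): LoggingContext temporarily applies a level and/or
# handler to a logger and restores the previous state on exit, e.g.
#   console = logging.StreamHandler()
#   with LoggingContext(logging.getLogger('h2o'), level=logging.DEBUG, handler=console):
#       ...  # verbose h2o logging only inside this block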
def _has_handlers(logger):
if PY2:
l = logger
while l:
if l.handlers:
return True
l = l.parent if l.propagate else None
return False
else:
return logger.hasHandlers()
def test_h2o_logger_has_no_handler_by_default():
# as a library, h2o should not define handlers for its loggers
from h2o.utils.config import H2OConfigReader
H2OConfigReader.get_config() # this module uses h2o logger
logger = logging.getLogger('h2o')
assert not _has_handlers(logger)
def test_h2o_logger_inherits_root_logger():
from h2o.utils.config import H2OConfigReader
H2OConfigReader.get_config() # this module uses h2o logger
root = logging.getLogger()
logger = logging.getLogger('h2o')
console = logging.StreamHandler()
assert not _has_handlers(root)
assert not _has_handlers(logger)
with LoggingContext(root, handler=console, level=logging.INFO):
assert _has_handlers(root)
assert _has_handlers(logger)
logging.info("list root handlers: %s", root.handlers)
logging.info("list h2o handlers: %s", logger.handlers)
pu.run_tests([
test_h2o_logger_has_no_handler_by_default,
test_h2o_logger_inherits_root_logger
])
| python |
def print_trace(trace):
for name, node in trace.nodes.items():
if node['type'] == 'sample':
print(f'{node["name"]} - sampled value {node["value"]}')
| python |
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
# Create your models here.
class Profile(models.Model):
"""Model definition for Profile."""
user = models.OneToOneField(User, on_delete=models.DO_NOTHING)
contact = models.CharField(max_length=15, blank=True, null=True)
city = models.CharField(max_length=10, blank=True, null=True)
country = models.CharField(max_length=10, blank=True, null=True)
# TODO: Define fields here
class Meta:
"""Meta definition for Profile."""
verbose_name = 'Profile'
verbose_name_plural = 'Profiles'
def __str__(self):
"""Unicode representation of Profile."""
return str(self.user)
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
    instance.profile.save()
| python
# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This package contains utility methods for manipulating paths and
filenames for test results and baselines. It also contains wrappers
of a few routines in platform_utils.py so that platform_utils.py can
be considered a 'protected' package - i.e., this file should be
the only file that ever includes platform_utils. This leads to
us including a few things that don't really have anything to do
with paths, unfortunately."""
import errno
import os
import stat
import sys
import time
import platform_utils
import platform_utils_win
import platform_utils_mac
import platform_utils_linux
# Cache some values so we don't have to recalculate them. _basedir is
# used by PathFromBase() and caches the full (native) path to the top
# of the source tree (/src). _baseline_search_path is used by
# ExpectedBaseline() and caches the list of native paths to search
# for baseline results.
_basedir = None
_baseline_search_path = None
_search_path_platform = None
class PathNotFound(Exception): pass
def LayoutTestsDir(path=None):
"""Returns the fully-qualified path to the directory containing the input
data for the specified layout test."""
return PathFromBase('third_party', 'WebKit');
def ChromiumBaselinePath(platform=None):
"""Returns the full path to the directory containing expected
baseline results from chromium ports. If |platform| is None, the
currently executing platform is used."""
if platform is None:
platform = platform_utils.PlatformName()
return PathFromBase('webkit', 'data', 'layout_tests', 'platform', platform)
def WebKitBaselinePath(platform):
"""Returns the full path to the directory containing expected
baseline results from WebKit ports."""
return PathFromBase('third_party', 'WebKit', 'LayoutTests',
'platform', platform)
def BaselineSearchPath(platform=None):
"""Returns the list of directories to search for baselines/results for a
given platform, in order of preference. Paths are relative to the top of the
source tree. If parameter platform is None, returns the list for the current
platform that the script is running on."""
if platform is None:
return platform_utils.BaselineSearchPath(False)
elif platform.startswith('mac'):
return platform_utils_mac.BaselineSearchPath(True)
elif platform.startswith('win'):
return platform_utils_win.BaselineSearchPath(True)
elif platform.startswith('linux'):
return platform_utils_linux.BaselineSearchPath(True)
else:
return platform_utils.BaselineSearchPath(False)
def ExpectedBaseline(filename, suffix, platform=None, all_baselines=False):
"""Given a test name, finds where the baseline result is located. The
result is returned as a pair of values, the absolute path to top of the test
results directory, and the relative path from there to the results file.
Both return values will be in the format appropriate for the
current platform (e.g., "\\" for path separators on Windows).
If the results file is not found, then None will be returned for the
directory, but the expected relative pathname will still be returned.
Args:
filename: absolute filename to test file
suffix: file suffix of the expected results, including dot; e.g. '.txt'
or '.png'. This should not be None, but may be an empty string.
platform: layout test platform: 'win', 'linux' or 'mac'. Defaults to the
current platform.
all_baselines: If True, return an ordered list of all baseline paths
for the given platform. If False, return only the first
one.
Returns
a list of ( platform_dir, results_filename ), where
platform_dir - abs path to the top of the results tree (or test tree)
results_filename - relative path from top of tree to the results file
(os.path.join of the two gives you the full path to the file, unless
None was returned.)
"""
global _baseline_search_path
global _search_path_platform
testname = os.path.splitext(RelativeTestFilename(filename))[0]
# While we still have tests in both LayoutTests/ and chrome/ we need
# to strip that outer directory.
# TODO(pamg): Once we upstream all of chrome/, clean this up.
platform_filename = testname + '-expected' + suffix
testdir, base_filename = platform_filename.split('/', 1)
if (_baseline_search_path is None) or (_search_path_platform != platform):
_baseline_search_path = BaselineSearchPath(platform)
_search_path_platform = platform
current_platform_dir = ChromiumBaselinePath(PlatformName(platform))
baselines = []
foundCurrentPlatform = False
for platform_dir in _baseline_search_path:
# Find current platform from baseline search paths and start from there.
if platform_dir == current_platform_dir:
foundCurrentPlatform = True
if foundCurrentPlatform:
# TODO(pamg): Clean this up once we upstream everything in chrome/.
if os.path.basename(platform_dir).startswith('chromium'):
if os.path.exists(os.path.join(platform_dir, platform_filename)):
baselines.append((platform_dir, platform_filename))
else:
if os.path.exists(os.path.join(platform_dir, base_filename)):
baselines.append((platform_dir, base_filename))
if not all_baselines and baselines:
return baselines
# If it wasn't found in a platform directory, return the expected result
# in the test directory, even if no such file actually exists.
platform_dir = LayoutTestsDir(filename)
if os.path.exists(os.path.join(platform_dir, platform_filename)):
baselines.append((platform_dir, platform_filename))
if baselines:
return baselines
return [(None, platform_filename)]
def ExpectedFilename(filename, suffix):
"""Given a test name, returns an absolute path to its expected results.
If no expected results are found in any of the searched directories, the
directory in which the test itself is located will be returned. The return
value is in the format appropriate for the platform (e.g., "\\" for
path separators on windows).
Args:
filename: absolute filename to test file
suffix: file suffix of the expected results, including dot; e.g. '.txt'
or '.png'. This should not be None, but may be an empty string.
platform: the most-specific directory name to use to build the
search list of directories, e.g., 'chromium-win', or
'chromium-mac-leopard' (we follow the WebKit format)
"""
platform_dir, platform_filename = ExpectedBaseline(filename, suffix)[0]
if platform_dir:
return os.path.join(platform_dir, platform_filename)
return os.path.join(LayoutTestsDir(filename), platform_filename)
def RelativeTestFilename(filename):
"""Provide the filename of the test relative to the layout data
directory as a unix style path (a/b/c)."""
return _WinPathToUnix(filename[len(LayoutTestsDir(filename)) + 1:])
def _WinPathToUnix(path):
"""Convert a windows path to use unix-style path separators (a/b/c)."""
return path.replace('\\', '/')
#
# Routines that are arguably platform-specific but have been made
# generic for now (they used to be in platform_utils_*)
#
def FilenameToUri(full_path):
"""Convert a test file to a URI."""
LAYOUTTESTS_DIR = "LayoutTests/"
LAYOUTTEST_HTTP_DIR = "LayoutTests/http/tests/"
LAYOUTTEST_WEBSOCKET_DIR = "LayoutTests/websocket/tests/"
relative_path = _WinPathToUnix(RelativeTestFilename(full_path))
port = None
use_ssl = False
if relative_path.startswith(LAYOUTTEST_HTTP_DIR):
# LayoutTests/http/tests/ run off port 8000 and ssl/ off 8443
relative_path = relative_path[len(LAYOUTTEST_HTTP_DIR):]
port = 8000
elif relative_path.startswith(LAYOUTTEST_WEBSOCKET_DIR):
# LayoutTests/websocket/tests/ run off port 8880 and 9323
# Note: the root is LayoutTests/, not LayoutTests/websocket/tests/
relative_path = relative_path[len(LAYOUTTESTS_DIR):]
port = 8880
# Make LayoutTests/http/tests/local run as local files. This is to mimic the
# logic in run-webkit-tests.
# TODO(jianli): Consider extending this to "media/".
if port and not relative_path.startswith("local/"):
if relative_path.startswith("ssl/"):
port += 443
protocol = "https"
else:
protocol = "http"
return "%s://127.0.0.1:%u/%s" % (protocol, port, relative_path)
if sys.platform in ('cygwin', 'win32'):
return "file:///" + GetAbsolutePath(full_path)
return "file://" + GetAbsolutePath(full_path)
def GetAbsolutePath(path):
"""Returns an absolute UNIX path."""
return _WinPathToUnix(os.path.abspath(path))
def MaybeMakeDirectory(*path):
"""Creates the specified directory if it doesn't already exist."""
# This is a reimplementation of google.path_utils.MaybeMakeDirectory().
try:
os.makedirs(os.path.join(*path))
except OSError, e:
if e.errno != errno.EEXIST:
raise
def PathFromBase(*comps):
"""Returns an absolute filename from a set of components specified
relative to the top of the source tree. If the path does not exist,
the exception PathNotFound is raised."""
# This is a reimplementation of google.path_utils.PathFromBase().
global _basedir
if _basedir == None:
# We compute the top of the source tree by finding the absolute
# path of this source file, and then climbing up three directories
# as given in subpath. If we move this file, subpath needs to be updated.
path = os.path.abspath(__file__)
subpath = os.path.join('webkit','tools','layout_tests')
_basedir = path[:path.index(subpath)]
path = os.path.join(_basedir, *comps)
if not os.path.exists(path):
raise PathNotFound('could not find %s' % (path))
return path
def RemoveDirectory(*path):
"""Recursively removes a directory, even if it's marked read-only.
Remove the directory located at *path, if it exists.
shutil.rmtree() doesn't work on Windows if any of the files or directories
are read-only, which svn repositories and some .svn files are. We need to
be able to force the files to be writable (i.e., deletable) as we traverse
the tree.
Even with all this, Windows still sometimes fails to delete a file, citing
a permission error (maybe something to do with antivirus scans or disk
indexing). The best suggestion any of the user forums had was to wait a
bit and try again, so we do that too. It's hand-waving, but sometimes it
works. :/
"""
file_path = os.path.join(*path)
if not os.path.exists(file_path):
return
win32 = False
if sys.platform == 'win32':
win32 = True
# Some people don't have the APIs installed. In that case we'll do without.
try:
win32api = __import__('win32api')
win32con = __import__('win32con')
except ImportError:
win32 = False
def remove_with_retry(rmfunc, path):
os.chmod(path, stat.S_IWRITE)
if win32:
win32api.SetFileAttributes(path, win32con.FILE_ATTRIBUTE_NORMAL)
try:
return rmfunc(path)
except EnvironmentError, e:
if e.errno != errno.EACCES:
raise
print 'Failed to delete %s: trying again' % repr(path)
time.sleep(0.1)
return rmfunc(path)
else:
def remove_with_retry(rmfunc, path):
if os.path.islink(path):
return os.remove(path)
else:
return rmfunc(path)
for root, dirs, files in os.walk(file_path, topdown=False):
# For POSIX: making the directory writable guarantees removability.
# Windows will ignore the non-read-only bits in the chmod value.
os.chmod(root, 0770)
for name in files:
remove_with_retry(os.remove, os.path.join(root, name))
for name in dirs:
remove_with_retry(os.rmdir, os.path.join(root, name))
remove_with_retry(os.rmdir, file_path)
#
# Wrappers around platform_utils
#
def PlatformName(platform=None):
"""Returns the appropriate chromium platform name for |platform|. If
|platform| is None, returns the name of the chromium platform on the
currently running system. If |platform| is of the form 'chromium-*',
it is returned unchanged, otherwise 'chromium-' is prepended."""
if platform == None:
return platform_utils.PlatformName()
if not platform.startswith('chromium-'):
platform = "chromium-" + platform
return platform
def PlatformVersion():
return platform_utils.PlatformVersion()
def LigHTTPdExecutablePath():
return platform_utils.LigHTTPdExecutablePath()
def LigHTTPdModulePath():
return platform_utils.LigHTTPdModulePath()
def LigHTTPdPHPPath():
return platform_utils.LigHTTPdPHPPath()
def WDiffPath():
return platform_utils.WDiffPath()
def TestShellPath(target):
return platform_utils.TestShellPath(target)
def ImageDiffPath(target):
return platform_utils.ImageDiffPath(target)
def LayoutTestHelperPath(target):
return platform_utils.LayoutTestHelperPath(target)
def FuzzyMatchPath():
return platform_utils.FuzzyMatchPath()
def ShutDownHTTPServer(server_pid):
return platform_utils.ShutDownHTTPServer(server_pid)
def KillAllTestShells():
platform_utils.KillAllTestShells()
| python |
'''
File name : stage.py
Author : Jinwook Jung
Created on : Thu 25 Jul 2019 11:57:16 PM EDT
Last modified : 2020-01-06 13:27:13
Description :
'''
import subprocess, os, sys, random, yaml, time
from subprocess import Popen, PIPE, CalledProcessError
from abc import ABC, abstractmethod
def run_shell_cmd(cmd, f=None):
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True)
for line in iter(p.stdout.readline, b''):
print(">>> {}".format(line.rstrip().decode("utf-8")))
# FIXME
if f is not None:
f.write("{}\n".format(str(line.rstrip())))
class Stage(ABC):
def __init__(self, rdf, stage_dir, prev_out_dir, user_parms, write_run_scripts=False):
''' Initialize the instance and populate the necessary/useful
variables. '''
self.rdf_path = rdf.config["rdf_path"]
self.config = rdf.config
self.design_dir, self.lib_dir = rdf.design_dir, rdf.lib_dir
self.design_config, self.lib_config = rdf.design_config, rdf.lib_config
self.stage_dir = stage_dir
self.prev_out_dir = prev_out_dir
self.design_name = rdf.design_config["name"]
# Output of previous stage
self.in_def, self.in_verilog, self.in_sdc = (None,)*3
if prev_out_dir is not None:
self.in_def = "{}/{}.def".format(prev_out_dir, self.design_name)
self.in_verilog = "{}/{}.v".format(prev_out_dir, self.design_name)
self.in_sdc = "{}/{}.sdc".format(prev_out_dir, self.design_name)
else:
# If this is the first stage, just use the original design file
self.in_verilog = None
self.in_def = None
self.in_sdc = "{}/{}.sdc".format(self.rdf_path, self.design_name)
self.design_verilogs = ["{}/{}".format(self.design_dir, _) \
for _ in self.design_config["verilog"]]
# Library/PDK
self.lib_name = self.lib_config["LIBRARY_NAME"]
self.liberty = "{}/{}".format(self.lib_dir, self.lib_config["LIBERTY"])
self.lef = "{}/{}".format(self.lib_dir, self.lib_config["LEF"])
self.tracks = "{}/{}".format(self.lib_dir, self.lib_config["TRACKS_INFO_FILE"])
# (TODO) User parameters
self.user_parms = user_parms # List of parameters (key/value pairs)
def create_run_script_template(self):
with open("{}/run.sh".format(self.stage_dir), 'w') as f:
f.write("#!/bin/bash\n\n")
f.write("export RDF_PATH=\"{}\"\n".format(self.rdf_path))
f.write("export RDF_STAGE_DIR=\"{}\"\n".format(self.stage_dir))
f.write("export RDF_TOOL_BIN_PATH=\"${RDF_PATH}/bin\"\n")
f.write("\n")
@abstractmethod
def write_run_scripts(self):
pass
@abstractmethod
def run(self):
pass
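# Hedged example (added): a minimal sketch of a concrete stage. The tool
# invocation and file names are illustrative assumptions, not part of RDF.
class ExampleEchoStage(Stage):
    def write_run_scripts(self):
        self.create_run_script_template()
        with open("{}/run.sh".format(self.stage_dir), 'a') as f:
            # a placeholder command; a real stage would invoke its EDA tool here
            f.write("echo \"running stage for {}\"\n".format(self.design_name))
    def run(self):
        with open("{}/run.log".format(self.stage_dir), 'w') as f:
            run_shell_cmd("bash {}/run.sh".format(self.stage_dir), f)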
| python |
'''
Given multiple fasta files (corresponding to different organisms),
use mafft to create the multiple sequence alignment for the given target.
Then parse the alignments to create a consensus sequence.
'''
import pandas as pd
import os
import sys
import alignment_funcs
from Bio import SeqIO
def convert_indices(x, alignment = None, col = None):
'''
Call column_from_residue_number to add the new index to the df
'''
new_index = alignment_funcs.column_from_residue_number(alignment, x['ID'], x[col])
return new_index
def main(arglist):
fastas = snakemake.input['fastas']
outfile = snakemake.output['outfasta']
excluded2 = snakemake.output['excluded2']
excluded1_files = snakemake.input['excluded_regions_files']
name = snakemake.params['name']
#combine fastas to single file
temp_fasta = 'temp_multi_%s.fa' % name
record_list = []
with open(temp_fasta, "w") as g:
for i in fastas:
records = SeqIO.parse(i, "fasta")
for j in records:
record_list.append(j)
SeqIO.write(record_list, temp_fasta, "fasta")
alignment = alignment_funcs.write_alignment(temp_fasta, name, outfile)
os.remove(temp_fasta)
ex_df = pd.concat([pd.read_csv(i) for i in excluded1_files])
if not ex_df.empty:
ex_df['new_start'] = ex_df.apply(convert_indices, alignment = alignment, col = 'start', axis = 1)
ex_df['new_end'] = ex_df.apply(convert_indices, alignment = alignment, col = 'end', axis = 1)
ex_df.drop(['start', 'end'], axis = 1, inplace = True)
ex_df['ID'] = name
ex_df.rename(columns = {'new_start':'start', 'new_end':'end'}, inplace = True)
ex_df.to_csv(excluded2, index = False)
if __name__ == '__main__':
main(sys.argv[1:])
| python |
# Generated by rpcgen.py at Mon Mar 8 11:09:57 2004
from .mountconstants import *
from .mountpacker import *
import rpc
__all__ = ['BadDiscriminant', 'fhstatus', 'mountres3_ok', 'mountres3', 'mountbody', 'groupnode', 'exportnode']
def init_type_class(klass, ncl):
    # Initialize type class
klass.ncl = ncl
klass.packer = ncl.packer
klass.unpacker = ncl.unpacker
def assert_not_none(klass, *args):
for arg in args:
        if arg is None:
raise TypeError(repr(klass) + " has uninitialized data")
def pack_objarray(ncl, list):
# FIXME: Support for length assertion.
ncl.packer.pack_uint(len(list))
for item in list:
item.pack()
def unpack_objarray(ncl, klass):
n = ncl.unpacker.unpack_uint()
list = []
for i in range(n):
obj = klass(ncl)
obj.unpack()
list.append(obj)
return list
class BadDiscriminant(rpc.RPCException):
def __init__(self, value, klass):
self.value = value
self.klass = klass
def __str__(self):
return "Bad Discriminant %s in %s" % (self.value, self.klass)
class fhstatus:
# XDR definition:
# union fhstatus switch (unsigned fhs_status) {
# case 0:
# fhandle2 fhs_fhandle;
# default:
# void;
# };
def __init__(self, ncl, fhs_status=None, fhs_fhandle=None):
init_type_class(self, ncl)
self.fhs_status = fhs_status
self.fhs_fhandle = fhs_fhandle
# Shortcut to current arm
self.arm = None
def __repr__(self):
s = " fhs_status=%s fhs_fhandle=%s" % (str(self.fhs_status), str(self.fhs_fhandle))
if len(s) > 70: s = s[:70] + "..."
return "<fhstatus:%s>" % s
def pack(self, dummy=None):
assert_not_none(self, self.fhs_status)
self.packer.pack_unsigned(self.fhs_status)
if self.fhs_status == 0:
assert_not_none(self, self.fhs_fhandle)
self.packer.pack_fhandle2(self.fhs_fhandle)
self.arm = self.fhs_fhandle
else:
pass
def unpack(self):
self.fhs_status = self.unpacker.unpack_unsigned()
if self.fhs_status == 0:
self.fhs_fhandle = self.unpacker.unpack_fhandle2()
self.arm = self.fhs_fhandle
else:
pass
class mountres3_ok:
# XDR definition:
# struct mountres3_ok {
# fhandle3 fhandle;
# int auth_flavors<>;
# };
def __init__(self, ncl, fhandle=None, auth_flavors=None):
init_type_class(self, ncl)
self.fhandle = fhandle
self.auth_flavors = auth_flavors
def __repr__(self):
s = " fhandle=%s auth_flavors=%s" % (str(self.fhandle), str(self.auth_flavors))
if len(s) > 70: s = s[:70] + "..."
return "<mountres3_ok:%s>" % s
def pack(self, dummy=None):
assert_not_none(self, self.fhandle, self.auth_flavors)
self.packer.pack_fhandle3(self.fhandle)
        # auth_flavors is declared as a variable-length array (int auth_flavors<>)
        self.packer.pack_array(self.auth_flavors, self.packer.pack_int)
def unpack(self):
self.fhandle = self.unpacker.unpack_fhandle3()
self.auth_flavors = self.unpacker.unpack_array(self.unpacker.unpack_int)
class mountres3:
# XDR definition:
# union mountres3 switch (mountstat3 fhs_status) {
# case MNT3_OK:
# mountres3_ok mountinfo;
# default:
# void;
# };
def __init__(self, ncl, fhs_status=None, mountinfo=None):
init_type_class(self, ncl)
self.fhs_status = fhs_status
self.mountinfo = mountinfo
# Shortcut to current arm
self.arm = None
def __repr__(self):
s = " fhs_status=%s mountinfo=%s" % (str(self.fhs_status), str(self.mountinfo))
if len(s) > 70: s = s[:70] + "..."
return "<mountres3:%s>" % s
def pack(self, dummy=None):
assert_not_none(self, self.fhs_status)
self.packer.pack_mountstat3(self.fhs_status)
if self.fhs_status == MNT3_OK:
assert_not_none(self, self.mountinfo)
self.mountinfo.pack()
self.arm = self.mountinfo
else:
pass
def unpack(self):
self.fhs_status = self.unpacker.unpack_mountstat3()
if self.fhs_status == MNT3_OK:
self.mountinfo = mountres3_ok(self)
self.mountinfo.unpack()
self.arm = self.mountinfo
else:
pass
class mountbody:
# XDR definition:
# struct mountbody {
# name ml_hostname;
# dirpath ml_directory;
# mountlist ml_next;
# };
def __init__(self, ncl, ml_hostname=None, ml_directory=None, ml_next=None):
init_type_class(self, ncl)
self.ml_hostname = ml_hostname
self.ml_directory = ml_directory
self.ml_next = ml_next
def __repr__(self):
s = " ml_hostname=%s ml_directory=%s ml_next=%s" % (str(self.ml_hostname), str(self.ml_directory), str(self.ml_next))
if len(s) > 70: s = s[:70] + "..."
return "<mountbody:%s>" % s
def pack(self, dummy=None):
assert_not_none(self, self.ml_hostname, self.ml_directory, self.ml_next)
self.packer.pack_name(self.ml_hostname)
self.packer.pack_dirpath(self.ml_directory)
self.packer.pack_mountlist(self.ml_next)
def unpack(self):
self.ml_hostname = self.unpacker.unpack_name()
self.ml_directory = self.unpacker.unpack_dirpath()
self.ml_next = self.unpacker.unpack_mountlist()
class groupnode:
# XDR definition:
# struct groupnode {
# name gr_name;
# groups gr_next;
# };
def __init__(self, ncl, gr_name=None, gr_next=None):
init_type_class(self, ncl)
self.gr_name = gr_name
self.gr_next = gr_next
def __repr__(self):
s = " gr_name=%s gr_next=%s" % (str(self.gr_name), str(self.gr_next))
if len(s) > 70: s = s[:70] + "..."
return "<groupnode:%s>" % s
def pack(self, dummy=None):
assert_not_none(self, self.gr_name, self.gr_next)
self.packer.pack_name(self.gr_name)
self.packer.pack_groups(self.gr_next)
def unpack(self):
self.gr_name = self.unpacker.unpack_name()
self.gr_next = self.unpacker.unpack_groups()
class exportnode:
# XDR definition:
# struct exportnode {
# dirpath ex_dir;
# groups ex_groups;
# exports ex_next;
# };
def __init__(self, ncl, ex_dir=None, ex_groups=None, ex_next=None):
init_type_class(self, ncl)
self.ex_dir = ex_dir
self.ex_groups = ex_groups
self.ex_next = ex_next
def __repr__(self):
s = " ex_dir=%s ex_groups=%s ex_next=%s" % (str(self.ex_dir), str(self.ex_groups), str(self.ex_next))
if len(s) > 70: s = s[:70] + "..."
return "<exportnode:%s>" % s
def pack(self, dummy=None):
assert_not_none(self, self.ex_dir, self.ex_groups, self.ex_next)
self.packer.pack_dirpath(self.ex_dir)
self.packer.pack_groups(self.ex_groups)
self.packer.pack_exports(self.ex_next)
def unpack(self):
self.ex_dir = self.unpacker.unpack_dirpath()
self.ex_groups = self.unpacker.unpack_groups()
self.ex_next = self.unpacker.unpack_exports()
| python |
def get_layers(data, wide, tall):
for i in range(0, len(data), wide * tall):
yield data[i : i + wide * tall]
def parse_infos(layer):
infos = {}
for data in layer:
if data not in infos:
infos[data] = 0
infos[data] += 1
return infos
def merge_layers(layers):
tmp_layers = list(layers)
layer = ["0"] * len(tmp_layers[0])
tmp_layers.reverse()
for current in tmp_layers:
for i in range(len(layer)):
layer[i] = current[i] if current[i] != "2" else layer[i]
return "".join(layer)
| python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Maziar Raissi
"""
import autograd.numpy as np
from autograd import value_and_grad
from Utilities import fetch_minibatch_rnn, stochastic_update_Adam, activation
class RecurrentNeuralNetworks:
def __init__(self, X, Y, hidden_dim,
max_iter = 2000, N_batch = 1, monitor_likelihood = 10, lrate = 1e-3):
# X has the form lags x data x dim
# Y has the form data x dim
self.X = X
self.Y = Y
self.X_dim = X.shape[-1]
self.Y_dim = Y.shape[-1]
self.hidden_dim = hidden_dim
self.lags = X.shape[0]
self.max_iter = max_iter
self.N_batch = N_batch
self.monitor_likelihood = monitor_likelihood
self.hyp = self.initialize_RNN()
# Adam optimizer parameters
self.mt_hyp = np.zeros(self.hyp.shape)
self.vt_hyp = np.zeros(self.hyp.shape)
self.lrate = lrate
print("Total number of parameters: %d" % (self.hyp.shape[0]))
def initialize_RNN(self):
hyp = np.array([])
Q = self.hidden_dim
U = -np.sqrt(6.0/(self.X_dim+Q)) + 2.0*np.sqrt(6.0/(self.X_dim+Q))*np.random.rand(self.X_dim,Q)
b = np.zeros((1,Q))
W = np.eye(Q)
hyp = np.concatenate([hyp, U.ravel(), b.ravel(), W.ravel()])
V = -np.sqrt(6.0/(Q+self.Y_dim)) + 2.0*np.sqrt(6.0/(Q+self.Y_dim))*np.random.rand(Q,self.Y_dim)
c = np.zeros((1,self.Y_dim))
hyp = np.concatenate([hyp, V.ravel(), c.ravel()])
return hyp
def forward_pass(self, X, hyp):
Q = self.hidden_dim
H = np.zeros((X.shape[1],Q))
idx_1 = 0
idx_2 = idx_1 + self.X_dim*Q
idx_3 = idx_2 + Q
idx_4 = idx_3 + Q*Q
U = np.reshape(hyp[idx_1:idx_2], (self.X_dim,Q))
b = np.reshape(hyp[idx_2:idx_3], (1,Q))
W = np.reshape(hyp[idx_3:idx_4], (Q,Q))
for i in range(0, self.lags):
H = activation(np.matmul(H,W) + np.matmul(X[i,:,:],U) + b)
idx_1 = idx_4
idx_2 = idx_1 + Q*self.Y_dim
idx_3 = idx_2 + self.Y_dim
V = np.reshape(hyp[idx_1:idx_2], (Q,self.Y_dim))
c = np.reshape(hyp[idx_2:idx_3], (1,self.Y_dim))
Y = np.matmul(H,V) + c
return Y
def MSE(self, hyp):
X = self.X_batch
Y = self.Y_batch
Y_star = self.forward_pass(X, hyp)
return np.mean((Y-Y_star)**2)
def train(self):
# Gradients from autograd
MSE = value_and_grad(self.MSE)
for i in range(1,self.max_iter+1):
# Fetch minibatch
self.X_batch, self.Y_batch = fetch_minibatch_rnn(self.X, self.Y, self.N_batch)
# Compute likelihood_UB and gradients
MSE_value, D_MSE = MSE(self.hyp)
# Update hyper-parameters
self.hyp, self.mt_hyp, self.vt_hyp = stochastic_update_Adam(self.hyp, D_MSE, self.mt_hyp, self.vt_hyp, self.lrate, i)
if i % self.monitor_likelihood == 0:
print("Iteration: %d, MSE: %.5e" % (i, MSE_value))
| python |
from src.computation.computation_handler import ComputationHandler
class NoComputation(ComputationHandler):
def __init__(self):
super().__init__()
def compute(self):
        pass
| python
from django.utils import timezone
from rest_framework import serializers
from ..reservation_api.models import Reservation
from ..subscription_api.models import Subscription
class StaffChoiseField(serializers.ChoiceField):
class Meta:
swagger_schema_fields = {
'type': 'integer'
}
class StaffReservationSerializer(serializers.ModelSerializer):
member = serializers.CharField(source='subscription.member', read_only=True)
status = StaffChoiseField(Reservation.STATUS, required=False)
reservedStart = serializers.DateTimeField(source='reserved_start')
reservedEnd = serializers.DateTimeField(source='reserved_end')
class Meta:
model = Reservation
fields = ('id', 'member', 'subscription', 'trainer', 'status', 'reservedStart', 'reservedEnd', 'updated')
extra_kwargs = {'subscription': {'read_only': True}}
def validate(self, attrs):
reserved_start = attrs['reserved_start']
reserved_end = attrs['reserved_end']
if reserved_start < self.instance.reserved_start:
raise serializers.ValidationError({"reservedStart": "Must come after requested "
"reservation start date-time"})
if reserved_end > self.instance.reserved_end:
raise serializers.ValidationError({"reservedEnd": "Must come before requested "
"reservation end date-time"})
if reserved_start > reserved_end:
raise serializers.ValidationError({"reservedEnd": "Must come after reservation start date-time"})
delta = reserved_end - reserved_start
if delta < timezone.timedelta(minutes=30):
raise serializers.ValidationError({"reservedRange": "The date-time difference between reservations "
"must be at least 30 minutes"})
return attrs
def to_representation(self, instance):
data = super().to_representation(instance)
data['member'] = str(instance.subscription.member.get_full_name())
data['subscription'] = str(instance.subscription.card)
data['trainer'] = instance.trainer.get_full_name() if data['trainer'] else 'Not assigned'
data['status'] = str(instance.get_status_display())
return data
class StaffSubscriptionSerializer(serializers.ModelSerializer):
email = serializers.CharField(source="member.email", read_only=True)
class Meta:
model = Subscription
fields = ('id', 'email', 'member', 'card', 'visits_count', 'purchased', 'expires')
extra_kwargs = {
'member': {'read_only': True},
'card': {'read_only': True},
'visits_count': {'required': True},
'purchased': {'read_only': True},
'expires': {'read_only': True}
}
def to_representation(self, instance):
data = super().to_representation(instance)
data['member'] = instance.member.get_full_name()
data['card'] = str(instance.card)
return data
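# Hypothetical usage sketch (requires a configured Django project and an existing Reservation;
# 'data' must include both 'reservedStart' and 'reservedEnd' because validate() reads them):
def _example_narrow_reservation(reservation, data):
    serializer = StaffReservationSerializer(reservation, data=data, partial=True)
    serializer.is_valid(raise_exception=True)
    return serializer.save()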
| python |
#!/usr/bin/env python
import ray
import numpy as np
import time, sys, os
sys.path.append("..")
from util.printing import pd
# A variation of the game of life code used in the Ray Crash Course.
@ray.remote
class RayGame:
# TODO: Game memory grows unbounded; trim older states?
def __init__(self, grid_size, rules_id):
self.states = [RayGame.State(size = grid_size)]
self.rules_id = rules_id
def get_states(self):
return self.states
def step(self, num_steps = 1):
"""Take 1 or more steps, returning a list of new states."""
start_index = len(self.states)
for _ in range(num_steps):
new_state_id = self.rules_id.step.remote(self.states[-1])
self.states.append(ray.get(new_state_id))
        return self.states[start_index:]  # return the new states only!
@ray.remote
class RayConwaysRules:
"""
Apply the rules to a state and return a new state.
"""
def step(self, state):
"""
Determine the next values for all the cells, based on the current
state. Creates a new State with the changes.
"""
new_grid = state.grid.copy()
for i in range(state.size):
for j in range(state.size):
lns = self.live_neighbors(i, j, state)
new_grid[i][j] = self.apply_rules(i, j, lns, state)
new_state = RayGame.State(grid = new_grid)
return new_state
def apply_rules(self, i, j, live_neighbors, state):
"""
Determine next value for a cell, which could be the same.
The rules for Conway's Game of Life:
Any live cell with fewer than two live neighbours dies, as if by underpopulation.
Any live cell with two or three live neighbours lives on to the next generation.
Any live cell with more than three live neighbours dies, as if by overpopulation.
Any dead cell with exactly three live neighbours becomes a live cell, as if by reproduction.
"""
cell = state.grid[i][j] # default value is no change in state
if cell == 1:
if live_neighbors < 2 or live_neighbors > 3:
cell = 0
elif live_neighbors == 3:
cell = 1
return cell
def live_neighbors(self, i, j, state):
"""
Wrap at boundaries (i.e., treat the grid as a 2-dim "toroid")
To wrap at boundaries, when k-1=-1, that wraps itself;
for k+1=state.size, we mod it (which works for -1, too)
            For simplicity, we count the cell itself, then subtract it
"""
s = state.size
g = state.grid
return sum([g[i2%s][j2%s] for i2 in [i-1,i,i+1] for j2 in [j-1,j,j+1]]) - g[i][j]
class State:
"""
Represents a grid of game cells.
For simplicity, require square grids.
Each instance is considered immutable.
"""
def __init__(self, grid = None, size = 10):
"""
Create a State. Specify either a grid of cells or a size, for
which an size x size grid will be computed with random values.
(For simplicity, only use square grids.)
"""
            if grid is not None:  # explicit None check (a NumPy array is ambiguous as a plain boolean)
assert grid.shape[0] == grid.shape[1]
self.size = grid.shape[0]
self.grid = grid.copy()
else:
self.size = size
# Seed: random initialization
self.grid = np.random.randint(2, size = size*size).reshape((size, size))
def living_cells(self):
"""
Returns ([x1, x2, ...], [y1, y2, ...]) for all living cells.
Simplifies graphing.
"""
cells = [(i,j) for i in range(self.size) for j in range(self.size) if self.grid[i][j] == 1]
return zip(*cells)
def __str__(self):
s = ' |\n| '.join([' '.join(map(lambda x: '*' if x else ' ', self.grid[i])) for i in range(self.size)])
return '| ' + s + ' |'
def time_ray_games(num_games = 1, max_steps = 100, batch_size = 1, grid_size = 100):
rules_ids = []
game_ids = []
for i in range(num_games):
rules_id = RayGame.RayConwaysRules.remote()
game_id = RayGame.remote(grid_size, rules_id)
game_ids.append(game_id)
rules_ids.append(rules_id)
print(f'rules_ids:\n{rules_ids}') # these will produce more interesting flame graphs!
print(f'game_ids:\n{game_ids}')
start = time.time()
state_ids = []
for game_id in game_ids:
        for i in range(int(max_steps/batch_size)): # Do a total of max_steps game steps, issued in max_steps/batch_size batches
state_ids.append(game_id.step.remote(batch_size))
ray.get(state_ids) # wait for everything to finish! We are ignoring what ray.get() returns, but what will it be??
pd(time.time() - start, prefix = f'Total time for {num_games} games (max_steps = {max_steps}, batch_size = {batch_size})')
def main():
import argparse
parser = argparse.ArgumentParser(description="Conway's Game of Life v2")
parser.add_argument('--size', metavar='N', type=int, default=100, nargs='?',
help='The size of the square grid for the game')
parser.add_argument('--steps', metavar='N', type=int, default=500, nargs='?',
help='The number of steps to run')
parser.add_argument('-l', '--local', help="Run Ray locally. Default is to join a cluster",
action='store_true')
args = parser.parse_args()
print(f"""
Conway's Game of Life v2:
Grid size: {args.size}
Number steps: {args.steps}
Run Ray locally? {args.local}
""")
if args.local:
ray.init()
else:
ray.init(address='auto')
time_ray_games(num_games = 1, max_steps = args.steps, batch_size = 1, grid_size = args.size)
if __name__ == "__main__":
main()
| python |
from setuptools import setup
setup(
name='listenmoe',
packages=['listenmoe'],
version='v1.0.1',
description='Unofficial python3 API wrapper to get information about'
'the listen.moe live stream using aiohttp',
author='Zenrac',
author_email='[email protected]',
url='https://github.com/Zenrac/listenmoe',
download_url='https://github.com/Zenrac/listenmoe/archive/v1.0.1.tar.gz',
keywords=['listenmoe'],
include_package_data=True,
install_requires=['aiohttp', 'asyncio']
)
| python |
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
import os
import sys
import glob
import argparse
import threading
import six.moves.queue as Queue
import traceback
import numpy as np
import tensorflow as tf
import warnings
warnings.filterwarnings('ignore',category=FutureWarning)
warnings.filterwarnings('ignore', category=DeprecationWarning)
import PIL.Image
import tfutil
import dataset
# ----------------------------------------------------------------------------
def error(msg):
print('Error: ' + msg)
exit(1)
# ----------------------------------------------------------------------------
class TFRecordExporter:
def __init__(self, tfrecord_dir, expected_images, print_progress=True, progress_interval=10):
self.tfrecord_dir = tfrecord_dir
self.tfr_prefix = os.path.join(self.tfrecord_dir, os.path.basename(self.tfrecord_dir))
self.expected_images = expected_images
self.cur_images = 0
self.shape = None
self.resolution_log2 = None
self.tfr_writers = []
self.print_progress = print_progress
self.progress_interval = progress_interval
if self.print_progress:
print('Creating dataset "%s"' % tfrecord_dir)
if not os.path.isdir(self.tfrecord_dir):
os.makedirs(self.tfrecord_dir)
assert (os.path.isdir(self.tfrecord_dir))
def close(self):
if self.print_progress:
print('%-40s\r' % 'Flushing data...', end='', flush=True)
for tfr_writer in self.tfr_writers:
tfr_writer.close()
self.tfr_writers = []
if self.print_progress:
print('%-40s\r' % '', end='', flush=True)
print('Added %d images.' % self.cur_images)
def choose_shuffled_order(self): # Note: Images and labels must be added in shuffled order.
order = np.arange(self.expected_images)
np.random.RandomState(123).shuffle(order)
return order
def add_image(self, img):
if self.print_progress and self.cur_images % self.progress_interval == 0:
print('%d / %d\r' % (self.cur_images, self.expected_images), end='', flush=True)
if self.shape is None:
self.shape = img.shape
self.resolution_log2 = int(np.log2(self.shape[1]))
assert self.shape[0] in [1, 3]
assert self.shape[1] == self.shape[2]
assert self.shape[1] == 2 ** self.resolution_log2
tfr_opt = tf.io.TFRecordOptions(tf.compat.v1.python_io.TFRecordCompressionType.NONE)
for lod in range(self.resolution_log2 - 1):
tfr_file = self.tfr_prefix + '-r%02d.tfrecords' % (self.resolution_log2 - lod)
self.tfr_writers.append(tf.io.TFRecordWriter(tfr_file, tfr_opt))
assert img.shape == self.shape
for lod, tfr_writer in enumerate(self.tfr_writers):
if lod:
img = img.astype(np.float32)
img = (img[:, 0::2, 0::2] + img[:, 0::2, 1::2] + img[:, 1::2, 0::2] + img[:, 1::2, 1::2]) * 0.25
# quant = np.rint(img).clip(0, 255).astype(np.uint8)
quant = img.astype(np.uint8)
# Converting the np array to a tensor
ex = tf.train.Example(features=tf.train.Features(feature={
'shape': tf.train.Feature(int64_list=tf.train.Int64List(value=quant.shape)),
'data': tf.train.Feature(bytes_list=tf.train.BytesList(value=[quant.tostring()]))}))
tfr_writer.write(ex.SerializeToString())
self.cur_images += 1
def add_labels(self, labels):
if self.print_progress:
print('%-40s\r' % 'Saving labels...', end='', flush=True)
print("cur", self.cur_images)
print("shape", labels.shape)
assert labels.shape[0] == self.cur_images
with open(self.tfr_prefix + '-rxx.labels', 'wb') as f:
np.save(f, labels.astype(np.float32))
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
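# Hypothetical usage sketch (path, image sizes and labels are assumptions): export two random
# 64x64 RGB images in CHW order plus one-hot labels to a new TFRecord dataset directory.
def _example_export():
    with TFRecordExporter('datasets/_example', expected_images=2) as tfr:
        for _ in range(2):
            img = np.random.randint(0, 256, size=(3, 64, 64), dtype=np.uint8)  # CHW, power-of-two resolution
            tfr.add_image(img)
        tfr.add_labels(np.eye(2, dtype=np.float32))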
# ----------------------------------------------------------------------------
class ExceptionInfo(object):
def __init__(self):
self.value = sys.exc_info()[1]
self.traceback = traceback.format_exc()
# ----------------------------------------------------------------------------
class WorkerThread(threading.Thread):
def __init__(self, task_queue):
threading.Thread.__init__(self)
self.task_queue = task_queue
def run(self):
while True:
func, args, result_queue = self.task_queue.get()
if func is None:
break
try:
result = func(*args)
except:
result = ExceptionInfo()
result_queue.put((result, args))
# ----------------------------------------------------------------------------
class ThreadPool(object):
def __init__(self, num_threads):
assert num_threads >= 1
self.task_queue = Queue.Queue()
self.result_queues = dict()
self.num_threads = num_threads
for idx in range(self.num_threads):
thread = WorkerThread(self.task_queue)
thread.daemon = True
thread.start()
def add_task(self, func, args=()):
assert hasattr(func, '__call__') # must be a function
if func not in self.result_queues:
self.result_queues[func] = Queue.Queue()
self.task_queue.put((func, args, self.result_queues[func]))
def get_result(self, func): # returns (result, args)
result, args = self.result_queues[func].get()
if isinstance(result, ExceptionInfo):
print('\n\nWorker thread caught an exception:\n' + result.traceback)
raise result.value
return result, args
def finish(self):
for idx in range(self.num_threads):
self.task_queue.put((None, (), None))
def __enter__(self): # for 'with' statement
return self
def __exit__(self, *excinfo):
self.finish()
def process_items_concurrently(self, item_iterator, process_func=lambda x: x, pre_func=lambda x: x,
post_func=lambda x: x, max_items_in_flight=None):
if max_items_in_flight is None: max_items_in_flight = self.num_threads * 4
assert max_items_in_flight >= 1
results = []
retire_idx = [0]
def task_func(prepared, idx):
return process_func(prepared)
def retire_result():
processed, (prepared, idx) = self.get_result(task_func)
results[idx] = processed
while retire_idx[0] < len(results) and results[retire_idx[0]] is not None:
yield post_func(results[retire_idx[0]])
results[retire_idx[0]] = None
retire_idx[0] += 1
for idx, item in enumerate(item_iterator):
prepared = pre_func(item)
results.append(None)
self.add_task(func=task_func, args=(prepared, idx))
while retire_idx[0] < idx - max_items_in_flight + 2:
for res in retire_result(): yield res
while retire_idx[0] < len(results):
for res in retire_result(): yield res
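# Hypothetical usage sketch (values are arbitrary): results are yielded in input order even
# though the work is spread across multiple worker threads.
def _example_threadpool():
    with ThreadPool(4) as pool:
        return list(pool.process_items_concurrently(range(10), process_func=lambda x: x * x))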
# ----------------------------------------------------------------------------
def display(tfrecord_dir):
print('Loading dataset "%s"' % tfrecord_dir)
tfutil.init_tf({'gpu_options.allow_growth': True})
dset = dataset.TFRecordDataset(tfrecord_dir, max_label_size='full', repeat=False, shuffle_mb=0)
tfutil.init_uninited_vars()
idx = 0
while True:
try:
images, labels = dset.get_minibatch_np(1)
except tf.errors.OutOfRangeError:
break
if idx == 0:
print('Displaying images')
import cv2 # pip install opencv-python
cv2.namedWindow('dataset_tool')
print('Press SPACE or ENTER to advance, ESC to exit')
print('\nidx = %-8d\nlabel = %s' % (idx, labels[0].tolist()))
cv2.imshow('dataset_tool', images[0].transpose(1, 2, 0)[:, :, ::-1]) # CHW => HWC, RGB => BGR
idx += 1
if cv2.waitKey() == 27:
break
print('\nDisplayed %d images.' % idx)
# ----------------------------------------------------------------------------
def extract(tfrecord_dir, output_dir):
print('Loading dataset "%s"' % tfrecord_dir)
tfutil.init_tf({'gpu_options.allow_growth': True})
dset = dataset.TFRecordDataset(tfrecord_dir, max_label_size=0, repeat=False, shuffle_mb=0)
tfutil.init_uninited_vars()
print('Extracting images to "%s"' % output_dir)
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
idx = 0
while True:
if idx % 10 == 0:
print('%d\r' % idx, end='', flush=True)
try:
images, labels = dset.get_minibatch_np(1)
except tf.errors.OutOfRangeError:
break
if images.shape[1] == 1:
img = PIL.Image.fromarray(images[0][0], 'L')
else:
img = PIL.Image.fromarray(images[0].transpose(1, 2, 0), 'RGB')
img.save(os.path.join(output_dir, 'img%08d.png' % idx))
idx += 1
print('Extracted %d images.' % idx)
# ----------------------------------------------------------------------------
def compare(tfrecord_dir_a, tfrecord_dir_b, ignore_labels):
max_label_size = 0 if ignore_labels else 'full'
print('Loading dataset "%s"' % tfrecord_dir_a)
tfutil.init_tf({'gpu_options.allow_growth': True})
dset_a = dataset.TFRecordDataset(tfrecord_dir_a, max_label_size=max_label_size, repeat=False, shuffle_mb=0)
print('Loading dataset "%s"' % tfrecord_dir_b)
dset_b = dataset.TFRecordDataset(tfrecord_dir_b, max_label_size=max_label_size, repeat=False, shuffle_mb=0)
tfutil.init_uninited_vars()
print('Comparing datasets')
idx = 0
identical_images = 0
identical_labels = 0
while True:
if idx % 100 == 0:
print('%d\r' % idx, end='', flush=True)
try:
images_a, labels_a = dset_a.get_minibatch_np(1)
except tf.errors.OutOfRangeError:
images_a, labels_a = None, None
try:
images_b, labels_b = dset_b.get_minibatch_np(1)
except tf.errors.OutOfRangeError:
images_b, labels_b = None, None
if images_a is None or images_b is None:
if images_a is not None or images_b is not None:
print('Datasets contain different number of images')
break
if images_a.shape == images_b.shape and np.all(images_a == images_b):
identical_images += 1
else:
print('Image %d is different' % idx)
if labels_a.shape == labels_b.shape and np.all(labels_a == labels_b):
identical_labels += 1
else:
print('Label %d is different' % idx)
idx += 1
print('Identical images: %d / %d' % (identical_images, idx))
if not ignore_labels:
print('Identical labels: %d / %d' % (identical_labels, idx))
def create_from_images(labeled_tfrecord_dir, unlabeled_tfrecord_dir, labeled_dir, unlabeled_dir, shuffle):
    # Strip a duplicate trailing slash if the directory path ends with two instead of one
if labeled_dir[-1] == "/" and labeled_dir[-2] == "/":
labeled_dir = labeled_dir[:-1]
if unlabeled_dir[-1] == "/" and unlabeled_dir[-2] == "/":
unlabeled_dir = unlabeled_dir[:-1]
# Checking to make sure the path exists
if not os.path.isdir(labeled_dir):
error("Path " + labeled_dir + " does not exist!")
if not os.path.isdir(unlabeled_dir):
error("Path " + unlabeled_dir + " does not exist!")
# This lists all of the directories in the provided labeled directory. Each class should have its own folder
# within this directory. It also prepends the full path before it and makes sure .git isn't included
classes_dir = [labeled_dir + name for name in os.listdir(labeled_dir) if os.path.isdir(os.path.join(labeled_dir, name)) and name != '.git']
Num_classes = len(classes_dir)
labeled_filenames = []
# Go through each class directory and list all the full paths to each file and store them in an array
for each_class in classes_dir:
print('Loading images from "%s"' % each_class)
labeled_filenames.append(list(sorted(glob.glob(os.path.join(each_class, '*')))))
# Go through that array and assign Labels to each image
labels = []
for i in range(Num_classes):
print("Class " + str(i) + " is " + str(classes_dir[i].split("/")[-1]))
labels += [i] * len(labeled_filenames[i])
print("Number of classes: " + str(Num_classes))
# Converting labels into np array and one hot encoding it
labels = np.array(labels)
onehot = np.zeros((labels.size, Num_classes), dtype=np.float32)
onehot[np.arange(labels.size), labels] = 1.0
# Unlabeled dataset loading
print('Loading images from "%s"' % unlabeled_dir)
unlabeled_filenames = sorted(glob.glob(os.path.join(unlabeled_dir, '*')))
print()
# Checks
if len(labeled_filenames) == 0:
error('No input images found in ' + labeled_dir)
if len(unlabeled_filenames) == 0:
error('No input images found in ' + unlabeled_dir)
# Checking to make sure dimensions are all good
img = np.asarray(PIL.Image.open(labeled_filenames[0][0]))
resolution = img.shape[0]
channels = img.shape[2] if img.ndim == 3 else 1
if img.shape[1] != resolution:
error('Input images must have the same width and height')
if resolution != 2 ** int(np.floor(np.log2(resolution))):
error('Input image resolution must be a power-of-two')
if channels not in [1, 3]:
error('Input images must be stored as RGB or grayscale')
# Adding labeled data
with TFRecordExporter(labeled_tfrecord_dir, len(labels)) as tfr:
order = tfr.choose_shuffled_order() if shuffle else np.arange(len(labels))
# Go over the number of images
for idx in range(len(labels)):
# Kind-of confusing but this is necessary due to the multi-class labeled data
# labeled_filenames = [[cat1, cat2, cat3], [dog1, dog2, dog3]] since it is a double
# array and the shuffling is a single array [4, 5, 2, 0, 1, 3] the code below finds which
# index for the class (class_indx) and which index for the sample within the class (tmp_indx)
# I did it this way so the amount of samples within each class can be arbitrary as well as the number
# of classes overall.
class_indx = 0
tmp_indx = order[idx] # lets say tmp_indx is 4 in our example
# Checks to see if 4 > 2
while tmp_indx > len(labeled_filenames[class_indx])-1:
# tmp_indx = 4 - 3
tmp_indx-=len(labeled_filenames[class_indx])
# we check the next class
class_indx+=1
            # class_indx = 1; tmp_indx = 1, i.e. dog2, the sample at overall index 4
img = np.asarray(PIL.Image.open(labeled_filenames[class_indx][tmp_indx]))
if channels == 1:
img = img[np.newaxis, :, :] # HW => CHW
else:
img = img.transpose(2, 0, 1) # HWC => CHW
tfr.add_image(img)
# Dont need to do anything fancy here since onehot is a numpy array
tfr.add_labels(onehot[order])
print()
# Adding unlabeled data
with TFRecordExporter(unlabeled_tfrecord_dir, len(unlabeled_filenames)) as tfr2:
#fake_labels = [Num_classes - 1] * len(unlabeled_filenames)
#fake_labels = np.array(fake_labels)
#fake_onehot = np.zeros((fake_labels.size, np.max(fake_labels) + 1), dtype=np.float32)
#fake_onehot[np.arange(fake_labels.size), fake_labels] = 1.0
order = tfr2.choose_shuffled_order() if shuffle else np.arange(len(unlabeled_filenames))
for idx in range(order.size):
img = np.asarray(PIL.Image.open(unlabeled_filenames[order[idx]]))
if channels == 1:
img = img[np.newaxis, :, :] # HW => CHW
else:
img = img.transpose(2, 0, 1) # HWC => CHW
tfr2.add_image(img)
#tfr2.add_labels(fake_onehot[order])
# ----------------------------------------------------------------------------
if __name__ == "__main__":
if len(sys.argv) != 3:
error("Wrong amount of commands given!\nFormat: python3 dataset_tool.py <Labeled dir> <Unlabeled dir>\nEx) python3 dataset_tool.py /home/user/Desktop/SSL-PG-GAN/CatVDog/PetImages/Labeled/ /home/user/Desktop/SSL-PG-GAN/CatVDog/PetImages/Unlabeled/\n")
if not os.path.isdir("Labeled"):
os.mkdir("Labeled")
if not os.path.isdir("Unlabeled"):
os.mkdir("Unlabeled")
Shuffle = True
args = sys.argv[1:]
create_from_images("Labeled", "Unlabeled", args[0] + "/", args[1] + "/", Shuffle)
# ----------------------------------------------------------------------------
| python |
"""
Implements the Graph object which is used by the ConstraintPropagator.
It is here where Allen's constraint propagation algorithm is implemented.
"""
# TODO: I am not convinced that the history mechanism is very good, yet it seems
# to be sufficient for our current purposes.
from objects import Node, Edge, Constraint
from utils import intersect_relations
from utils import compare_id
from utils import html_graph_prefix
from mappings import invert_interval_relation
from mappings import abbreviate_convex_relation
from utilities import logger
from library.main import LIBRARY
DEBUG = True
DEBUG = False
TIMEX = LIBRARY.timeml.TIMEX
TID = LIBRARY.timeml.TID
EVENT = LIBRARY.timeml.EVENT
EID = LIBRARY.timeml.EID
EIID = LIBRARY.timeml.EIID
EVENTID = LIBRARY.timeml.EVENTID
FORM = LIBRARY.timeml.FORM
VALUE = LIBRARY.timeml.VALUE
class Graph:
"""Implements the graph object used in the constraint propagation algorithm.
Instance variables:
filename - the name of the source file
cycle - an integer
queue - a list of Constraints
nodes - a hash of Nodes, indexed on node identifiers
edges - a hash of hashes of Edges, indexed on node identifiers
compositions - a CompositionTable
"""
def __init__(self, compositions):
"""Initialize an empty graph, with empty queue, nodes dictionary and
edges dictionary."""
self.compositions = compositions
self.cycle = 0
self.queue = []
self.nodes = {}
self.edges = {}
def add_nodes(self, events, timexes):
"""Adds the events/instances and timexes to the nodes table. Also
initializes the edges table now that all nodes are known."""
for timex in timexes:
node = Node(timex=timex)
self.nodes[node.id] = node
for event in events:
node = Node(event=event)
self.nodes[node.id] = node
for n1 in self.nodes.keys():
self.edges[n1] = {}
for n2 in self.nodes.keys():
self.edges[n1][n2] = Edge(n1, n2, self)
def add_nodes(self, sources, source_type):
"""Creates Nodes for each source and add them to the nodes table. Also
initializes the edges table now that all nodes are known. A source is
either an event or timex tag or simply an identifier."""
for source in sources:
if source_type == 'IDENTIFIER':
identifier = source
text = ''
elif source_type == TIMEX:
identifier = source.attrs[TID]
text = source.attrs[VALUE]
elif source_type == EVENT:
identifier = source.attrs[EIID]
text = source.attrs[FORM]
node = Node(source, identifier, source_type, text)
self.nodes[node.id] = node
for n1 in self.nodes.keys():
self.edges[n1] = {}
for n2 in self.nodes.keys():
self.edges[n1][n2] = Edge(n1, n2, self)
def propagate(self, constraint):
"""Propagate the constraint through the graph, using Allen's
constraint propagation algorithm."""
self.cycle += 1
if constraint.is_garbage():
# guard against garbage constraints in the pending queue by simply
# skipping them
return
self.added = [] # to keep track of what is added this cycle
self.queue.append(constraint)
debug(str="\n%d %s\n" % (self.cycle, constraint))
while self.queue:
constraint_i_j = self.queue.pop(0)
constraint_i_j.cycle = self.cycle
debug(1, "POP QUEUE: %s" % (constraint_i_j))
# compare new constraint to the one already on the edge
edge_i_j = self.edges[constraint_i_j.node1][constraint_i_j.node2]
(status, intersection) = self._intersect_constraints(edge_i_j,
constraint_i_j)
if status == 'INTERSECTION-IS-MORE-SPECIFIC':
self.added.append(constraint_i_j)
self._update_constraint(edge_i_j, constraint_i_j, intersection)
def reduce(self):
"""Reduce the grap to one that does not contain any relations derived by
closure. This does not get you a graph with the original annotations
because some might have been removed due to inconsistencies."""
# TODO: we may consider removing inverse relations and relations that
# could be derived from other relations
self.cycle += 1
self.added = []
self._remove_derived_relations()
def remove_node(self, node_id):
"""Remove a node from the graph. Involves removing the node from the
nodes hash, removing the node's column and row in the edges array and
removing the node from edges_in and edges_out attributes of other
nodes. This is not being used right now."""
node = self.nodes[node_id]
# remove from other nodes
for node_in_id in node.edges_in.keys():
del self.nodes[node_in_id].edges_out[node_id]
for node_out_id in node.edges_out.keys():
del self.nodes[node_out_id].edges_in[node_id]
# remove from nodes hash
del self.nodes[node_id]
# remove from edges hash
del self.edges[node_id]
for other_node_id in self.edges.keys():
del self.edges[other_node_id][node_id]
def _update_constraint(self, edge_i_j, constraint_i_j, intersection):
"""Update a constraint by setting its relation set to the intersection
and then add it to the edge. Once you have done that you need to check
whether this constraint then puts further constraints on incoming edges
to node i and outgoing edges from node j."""
constraint_i_j.relset = intersection
self._add_constraint_to_edge(constraint_i_j, edge_i_j)
node_i = constraint_i_j.get_node1()
node_j = constraint_i_j.get_node2()
node_i.edges_out[constraint_i_j.node2] = edge_i_j
node_j.edges_in[constraint_i_j.node1] = edge_i_j
self._check_all_k_i_j(node_i, node_j, edge_i_j)
self._check_all_i_j_k(node_i, node_j, edge_i_j)
def _check_all_k_i_j(self, node_i, node_j, edge_i_j):
"""Check the constraints on [node_k --> node_i --> node_j]."""
debug(1, "CHECKING: X --> %s --> %s" % (node_i.id, node_j.id))
for edge_k_i in node_i.edges_in.values():
debug(2, "%s * %s" % (edge_k_i, edge_i_j))
self._check_k_i_j(edge_k_i, edge_i_j, node_i, node_j)
def _check_all_i_j_k(self, node_i, node_j, edge_i_j):
"""Check the constriants on [node_i --> node_j --> node_k]."""
debug(1, "CHECKING: %s --> %s --> X" % (node_i.id, node_j.id))
for edge_j_k in node_j.edges_out.values():
debug(2, "%s * %s" % (edge_i_j, edge_j_k))
self._check_i_j_k(edge_i_j, edge_j_k, node_i, node_j)
def _check_k_i_j(self, edge_k_i, edge_i_j, node_i, node_j):
"""Look at the k->i->j subgraph and check whether the new constraint in
Edge(i,j) allows you to derive something new by composition. The nodes
node_i and node_j could be derived from edge_i_j but are handed to this
function because they were already available and it saves a bit of time
this way."""
node_k = edge_k_i.get_node1()
if node_k.id == node_j.id:
return
edge_k_j = self._get_edge(node_k, node_j)
relset_k_j = self._compose(edge_k_i, edge_i_j.constraint)
debug(3, "{%s} * {%s} --> {%s} || %s "
% (edge_k_i.constraint.relset, edge_i_j.constraint.relset,
relset_k_j, edge_k_j.constraint))
if relset_k_j is not None:
self._combine(edge_k_j, relset_k_j,
edge_k_i.constraint, edge_i_j.constraint)
def _check_i_j_k(self, edge_i_j, edge_j_k, node_i, node_j):
"""Look at the i->j->k subgraph and check whether the new constraint in
Edge(i,j) allows you to derive something new by composition. The nodes
node_i and node_j could be derived from edge_i_j but are handed to this
function because they were already available and it saves a bit of time
this way."""
node_k = edge_j_k.get_node2()
if node_k.id == node_i.id:
return
edge_i_k = self._get_edge(node_i, node_k)
relset_i_k = self._compose(edge_i_j.constraint, edge_j_k)
debug(3, "{%s} * {%s} --> {%s} || %s "
% (edge_i_j.constraint.relset, edge_j_k.constraint.relset,
relset_i_k, edge_i_k.constraint))
if relset_i_k is not None:
self._combine(edge_i_k, relset_i_k,
edge_i_j.constraint, edge_j_k.constraint)
def _combine(self, edge, relset, c1, c2):
"""Compare the relation set on the edge to the relation set created by
composition. Creates the intersection of the relation sets and checks
the result: (i) inconsistency, (ii) more specific than relation set on
        edge, or (iii) something else. The arguments c1 and c2 are the
constraints that were composed to create relset and will be used to set
the history on a new constraint if it is created."""
edge_relset = edge.relset
intersection = intersect_relations(edge_relset, relset)
if intersection == '':
debug(4, "WARNING: found an inconsistency where it shouldn't be")
pass
elif intersection is None:
debug(4, "WARNING: intersection is None, this should not happen")
pass
elif edge_relset is None:
self._add_constraint_to_queue(edge, intersection, c1, c2)
elif len(intersection) < len(edge_relset):
self._add_constraint_to_queue(edge, intersection, c1, c2)
def _add_constraint_to_queue(self, edge, relset, c1, c2):
new_constraint = Constraint(edge.node1, relset, edge.node2,
cycle=self.cycle, source='closure',
history=(c1, c2))
self.queue.append(new_constraint)
debug(3, "ADD QUEUE %s " % new_constraint)
add_inverted = False
# Adding the inverted constraint should not be needed, except perhaps as
        # a potential minor speed increase. As far as I can see however, the method
# is actually slower when adding the inverse (about 20%), which is
# surprising. But the results are the same.
if add_inverted:
relset = invert_interval_relation(relset)
new_constraint2 = Constraint(edge.node2, relset, edge.node1,
cycle=self.cycle,
source='closure-inverted',
history=(c1, c2))
self.queue.append(new_constraint2)
debug(3, "ADD QUEUE %s " % new_constraint2)
def _intersect_constraints(self, edge, constraint):
"""Intersect the constraint that was just derived with the one already
on the edge. There are three cases: (1) the new constraint, if it is the
one originally handed to the propagate() function, introduces an
inconsistency; (2) the new constraint is identical to the one already
there and can be ignored; (3) the intersection of the new constraint
with the old constraint is the same as the old constraint; and (4) the
new constraint is more specific than the already existing
constraint. The method returns False in the first two cases and the
intersection in the last case."""
edge = self.edges[constraint.node1][constraint.node2]
new_relset = constraint.relset
existing_relset = edge.relset
intersection = intersect_relations(new_relset, existing_relset)
debug(2, "INTERSECT NEW {%s} WITH EXISTING {%s} --> {%s}"
% (constraint.relset, edge.relset, intersection))
if intersection == '':
status = 'INCONSISTENT'
logger.warn("Inconsistent new contraint: %s" % constraint)
logger.warn("Clashes with: [%s] (derived from %s)"
% (edge.constraint, edge.constraint.history_string()))
elif new_relset == existing_relset:
status = 'NEW=EXISTING'
elif intersection == existing_relset:
status = 'INTERSECTION=EXISTING'
else:
status = 'INTERSECTION-IS-MORE-SPECIFIC'
debug(2, "STATUS: %s" % status)
return (status, intersection)
def _compose(self, object1, object2):
"""Return the composition of the relation sets on the two objects. One
object is an edge, the other a Constraint. Once the relations
are retrieved from the objects all that's needed is a simple
lookup in the compositions table."""
rels1 = object1.relset
rels2 = object2.relset
return self.compositions.compose_rels(rels1, rels2)
def _add_constraint_to_edge(self, constraint, edge):
"""This method links a constraints to its edge by retrieving the edge
from the graph, adding the constraint to this edge, and setting the edge
attribute on the constraint."""
edge.add_constraint(constraint)
constraint.edge = edge
def _get_edge(self, node1, node2):
"""Return the edge from node1 to node2."""
return self.edges[node1.id][node2.id]
def get_edges(self):
"""Return all edges that have a constraint on them."""
edges = []
for n1 in self.edges.keys():
for n2 in self.edges[n1].keys():
edge = self.edges[n1][n2]
if n1 != n2 and edge.constraint:
edges.append(edge)
return edges
def _remove_disjunctions(self):
"""Remove all disjunctions from the graph, not used now but may come in
handy later."""
for edge in self.get_edges():
if edge.constraint:
if edge.constraint.is_disjunction():
edge.remove_constraint()
def _remove_derived_relations(self):
"""Remove all derived relations from the graph."""
for edge in self.get_edges():
if edge.is_derived():
edge.remove_constraint()
def _normalize_relations(self):
"""Remove all relations that are not in the set of normalized relations,
not used now but may come in handy later."""
for edge in self.get_edges():
if edge.constraint:
if not edge.constraint.has_normalized_relation():
edge.remove_constraint()
def pp_nodes(self):
"""Print all nodes with their edges_in and edges_out attributes to
standard output."""
ids = self.nodes.keys()
ids.sort(compare_id)
for id in ids:
self.nodes[id].pretty_print()
def pp_html(self, filename=None, filehandle=None, standalone=False):
"""Print the graph to an HTML table in filename."""
fh = open(filename, 'w') if filename else filehandle
if standalone:
html_graph_prefix(fh)
fh.write("<table cellpadding=0 cellspacing=0 border=0>\n")
fh.write("<tr><td>\n")
nodes = self.nodes.keys()
nodes.sort(compare_id)
self._html_nodes_table(fh, nodes)
fh.write("</td>\n\n")
fh.write("<td valign=top>\n")
self._html_added_table(fh)
fh.write("</td></tr>\n\n")
fh.write("</table>\n\n")
if standalone:
fh.write("</body>\n</html>\n\n")
def _html_nodes_table(self, fh, nodes):
fh.write("<table cellpadding=5 cellspacing=0 border=1>\n")
fh.write("\n<tr>\n\n")
fh.write(" <td> \n\n")
for identifier in nodes:
fh.write(" <td>%s\n" % identifier)
for id1 in nodes:
fh.write("\n\n<tr align=center>\n\n")
fh.write(" <td align=left>%s\n" % id1)
for id2 in nodes:
edge = self.edges[id1][id2]
rel = edge.relset
if rel is None:
rel = ' '
rel = abbreviate_convex_relation(rel)
                rel = rel.replace('<', '&lt;').replace(' ', '&nbsp;')
classes = []
if edge.constraint:
classes.append(edge.constraint.source)
if self.cycle == edge.constraint.cycle:
classes.append("cycle")
if id1 == id2:
classes.append("nocell")
# rel = ' '
classes = " class=\"%s\"" % ' '.join(classes)
fh.write(" <td width=25pt%s>%s\n" % (classes, rel))
fh.write("</table>\n\n")
def _html_added_table(self, fh):
fh.write("<table cellpadding=5 cellspacing=0 border=1>\n")
if self.added:
fh.write("<tr><td>added<td colspan=2>derived from\n")
for c in self.added:
fh.write("<tr>\n <td>%s</td>\n" % c)
if isinstance(c.history, tuple):
fh.write(" <td>%s\n" % str(c.history[0]))
fh.write(" <td>%s\n" % str(c.history[1]))
elif c.history.__class__.__name__ == 'Tag':
tlink = "TLINK(relType=%s)" % c.history.attrs.get('relType')
fh.write(" <td colspan=2>%s\n" % tlink)
elif c.history.__class__.__name__ == 'Constraint':
fh.write(" <td colspan=2>%s\n" % c.history)
else:
fh.write(" <td colspan=2> \n")
fh.write("</table>\n\n")
def debug(indent=0, str=''):
if DEBUG:
print ' ' * indent, str
| python |
import pytest
from reformat_gherkin.errors import DeserializeError, InvalidInput
from reformat_gherkin.parser import parse
def test_invalid_input(invalid_contents):
for content in invalid_contents:
with pytest.raises(InvalidInput):
parse(content)
def test_valid_input(valid_contents):
for content in valid_contents():
parse(content)
def test_parse_with_exception(mocker, valid_contents):
exception_message = "exception message"
mocker.patch(
"reformat_gherkin.parser.converter.structure",
side_effect=Exception(exception_message),
)
for content in valid_contents():
with pytest.raises(DeserializeError) as exc_info:
parse(content)
assert exception_message in str(exc_info.value)
| python |
from multio import asynclib
class API:
HOST = 'https://paste.myst.rs'
BETA_HOST = 'https://pmb.myst.rs'
API_VERSION = '2'
HTTP_ENDPOINT = f'{HOST}/api/v{API_VERSION}'
BETA_HTTP_ENDPOINT = f'{BETA_HOST}/api/v{API_VERSION}'
async def run_later(time, task):
await asynclib.sleep(time)
return await task
def spacify_string(s):
w = []
cur = ''
for c in s:
if c.isupper():
w.append(cur)
cur = ''
cur += c.lower()
else:
cur += c
w.append(cur)
return '_'.join(w)
| python |
# -*- coding: utf-8 -*-
## Copyright (c) 2008-2012, Noel O'Boyle; 2012, Adrià Cereto-Massagué
## All rights reserved.
##
## This file is part of Cinfony.
## The contents are covered by the terms of the GPL v2 license
## which is included in the file LICENSE_GPLv2.txt.
"""
pybel - A Cinfony module for accessing Open Babel
Global variables:
ob - the underlying SWIG bindings for Open Babel
informats - a dictionary of supported input formats
outformats - a dictionary of supported output formats
descs - a list of supported descriptors
fps - a list of supported fingerprint types
forcefields - a list of supported forcefields
"""
import sys
import math
import os.path
import tempfile
if sys.platform[:4] == "java":
import org.openbabel as ob
import java.lang.System
java.lang.System.loadLibrary("openbabel_java")
_obfuncs = ob.openbabel_java
_obconsts = ob.openbabel_javaConstants
import javax
elif sys.platform[:3] == "cli":
import System
import clr
clr.AddReference('System.Windows.Forms')
clr.AddReference('System.Drawing')
from System.Windows.Forms import (
Application, DockStyle, Form, PictureBox, PictureBoxSizeMode
)
from System.Drawing import Image, Size
_obdotnet = os.environ["OBDOTNET"]
    if _obdotnet[0] == '"': # Remove surrounding quotes
_obdotnet = _obdotnet[1:-1]
clr.AddReferenceToFileAndPath(os.path.join(_obdotnet, "OBDotNet.dll"))
import OpenBabel as ob
_obfuncs = ob.openbabel_csharp
_obconsts = ob.openbabel_csharp
else:
import openbabel as ob
_obfuncs = _obconsts = ob
try:
import Tkinter as tk
import Image as PIL
import ImageTk as piltk
except ImportError: #pragma: no cover
tk = None
def _formatstodict(list):
if sys.platform[:4] == "java":
list = [list.get(i) for i in range(list.size())]
broken = [x.replace("[Read-only]", "").replace("[Write-only]","").split(" -- ") for x in list]
broken = [(x,y.strip()) for x,y in broken]
return dict(broken)
_obconv = ob.OBConversion()
_builder = ob.OBBuilder()
informats = _formatstodict(_obconv.GetSupportedInputFormat())
"""A dictionary of supported input formats"""
outformats = _formatstodict(_obconv.GetSupportedOutputFormat())
"""A dictionary of supported output formats"""
def _getplugins(findplugin, names):
plugins = dict([(x, findplugin(x)) for x in names if findplugin(x)])
return plugins
def _getpluginnames(ptype):
if sys.platform[:4] == "cli":
plugins = ob.VectorString()
else:
plugins = ob.vectorString()
ob.OBPlugin.ListAsVector(ptype, None, plugins)
if sys.platform[:4] == "java":
plugins = [plugins.get(i) for i in range(plugins.size())]
return [x.split()[0] for x in plugins]
descs = _getpluginnames("descriptors")
"""A list of supported descriptors"""
_descdict = _getplugins(ob.OBDescriptor.FindType, descs)
fps = [_x.lower() for _x in _getpluginnames("fingerprints")]
"""A list of supported fingerprint types"""
_fingerprinters = _getplugins(ob.OBFingerprint.FindFingerprint, fps)
forcefields = [_x.lower() for _x in _getpluginnames("forcefields")]
"""A list of supported forcefields"""
_forcefields = _getplugins(ob.OBForceField.FindType, forcefields)
operations = _getpluginnames("ops")
"""A list of supported operations"""
_operations = _getplugins(ob.OBOp.FindType, operations)
def readfile(format, filename, opt=None):
"""Iterate over the molecules in a file.
Required parameters:
format - see the informats variable for a list of available
input formats
filename
Optional parameters:
opt - a dictionary of format-specific options
For format options with no parameters, specify the
value as None.
You can access the first molecule in a file using the next() method
of the iterator (or the next() keyword in Python 3):
mol = readfile("smi", "myfile.smi").next() # Python 2
mol = next(readfile("smi", "myfile.smi")) # Python 3
You can make a list of the molecules in a file using:
mols = list(readfile("smi", "myfile.smi"))
You can iterate over the molecules in a file as shown in the
following code snippet:
>>> atomtotal = 0
>>> for mol in readfile("sdf", "head.sdf"):
... atomtotal += len(mol.atoms)
...
>>> print(atomtotal)
43
"""
if opt == None:
opt = {}
obconversion = ob.OBConversion()
formatok = obconversion.SetInFormat(format)
for k, v in opt.items():
if v == None:
obconversion.AddOption(k, obconversion.INOPTIONS)
else:
obconversion.AddOption(k, obconversion.INOPTIONS, str(v))
if not formatok:
raise ValueError("%s is not a recognised Open Babel format" % format)
if not os.path.isfile(filename):
raise IOError("No such file: '%s'" % filename)
def filereader():
obmol = ob.OBMol()
notatend = obconversion.ReadFile(obmol,filename)
while notatend:
yield Molecule(obmol)
obmol = ob.OBMol()
notatend = obconversion.Read(obmol)
return filereader()
def readstring(format, string, opt=None):
"""Read in a molecule from a string.
Required parameters:
format - see the informats variable for a list of available
input formats
string
Optional parameters:
opt - a dictionary of format-specific options
For format options with no parameters, specify the
value as None.
Example:
>>> input = "C1=CC=CS1"
>>> mymol = readstring("smi", input)
>>> len(mymol.atoms)
5
"""
if opt == None:
opt = {}
obmol = ob.OBMol()
obconversion = ob.OBConversion()
formatok = obconversion.SetInFormat(format)
if not formatok:
raise ValueError("%s is not a recognised Open Babel format" % format)
for k, v in opt.items():
if v == None:
obconversion.AddOption(k, obconversion.INOPTIONS)
else:
obconversion.AddOption(k, obconversion.INOPTIONS, str(v))
success = obconversion.ReadString(obmol, string)
if not success:
raise IOError("Failed to convert '%s' to format '%s'" % (
string, format))
return Molecule(obmol)
class Outputfile(object):
"""Represent a file to which *output* is to be sent.
Although it's possible to write a single molecule to a file by
calling the write() method of a molecule, if multiple molecules
are to be written to the same file you should use the Outputfile
class.
Required parameters:
format - see the outformats variable for a list of available
output formats
filename
Optional parameters:
overwrite -- if the output file already exists, should it
be overwritten? (default is False)
opt -- a dictionary of format-specific options
For format options with no parameters, specify the
value as None.
Methods:
write(molecule)
close()
"""
def __init__(self, format, filename, overwrite=False, opt=None):
if opt == None:
opt = {}
self.format = format
self.filename = filename
if not overwrite and os.path.isfile(self.filename):
raise IOError("%s already exists. Use 'overwrite=True' to overwrite it." % self.filename)
self.obConversion = ob.OBConversion()
formatok = self.obConversion.SetOutFormat(self.format)
if not formatok:
raise ValueError("%s is not a recognised Open Babel format" % format)
for k, v in opt.items():
if v == None:
self.obConversion.AddOption(k, self.obConversion.OUTOPTIONS)
else:
self.obConversion.AddOption(k, self.obConversion.OUTOPTIONS, str(v))
self.total = 0 # The total number of molecules written to the file
def write(self, molecule):
"""Write a molecule to the output file.
Required parameters:
molecule
"""
if not self.filename:
raise IOError("Outputfile instance is closed.")
if self.total==0:
self.obConversion.WriteFile(molecule.OBMol, self.filename)
else:
self.obConversion.Write(molecule.OBMol)
self.total += 1
def close(self):
"""Close the Outputfile to further writing."""
self.obConversion.CloseOutFile()
self.filename = None
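# Hypothetical usage sketch (SMILES strings and filename are arbitrary): write several
# molecules to a single SDF file via the Outputfile class documented above.
def _example_outputfile():
    mols = [readstring("smi", smi) for smi in ("CCO", "c1ccccc1")]
    out = Outputfile("sdf", "example_molecules.sdf", overwrite=True)
    for mol in mols:
        out.write(mol)
    out.close()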
class Molecule(object):
"""Represent a Pybel Molecule.
Required parameter:
OBMol -- an Open Babel OBMol or any type of cinfony Molecule
Attributes:
atoms, charge, conformers, data, dim, energy, exactmass, formula,
molwt, spin, sssr, title, unitcell.
(refer to the Open Babel library documentation for more info).
Methods:
addh(), calcfp(), calcdesc(), draw(), localopt(), make3D(), removeh(),
write()
The underlying Open Babel molecule can be accessed using the attribute:
OBMol
"""
_cinfony = True
def __init__(self, OBMol):
if hasattr(OBMol, "_cinfony"):
a, b = OBMol._exchange
if a == 0:
mol = readstring("smi", b)
else:
mol = readstring("mol", b)
OBMol = mol.OBMol
self.OBMol = OBMol
@property
def atoms(self):
return [ Atom(self.OBMol.GetAtom(i+1)) for i in range(self.OBMol.NumAtoms()) ]
@property
def charge(self):
return self.OBMol.GetTotalCharge()
@property
def conformers(self):
return self.OBMol.GetConformers()
@property
def data(self):
return MoleculeData(self.OBMol)
@property
def dim(self):
return self.OBMol.GetDimension()
@property
def energy(self):
return self.OBMol.GetEnergy()
@property
def exactmass(self):
return self.OBMol.GetExactMass()
@property
def formula(self):
return self.OBMol.GetFormula()
@property
def molwt(self):
return self.OBMol.GetMolWt()
@property
def spin(self):
return self.OBMol.GetTotalSpinMultiplicity()
@property
def sssr(self):
return self.OBMol.GetSSSR()
def _gettitle(self):
return self.OBMol.GetTitle()
def _settitle(self, val):
self.OBMol.SetTitle(val)
title = property(_gettitle, _settitle)
@property
def unitcell(self):
unitcell_index = _obconsts.UnitCell
if sys.platform[:3] == "cli":
unitcell_index = System.UInt32(unitcell_index)
unitcell = self.OBMol.GetData(unitcell_index)
if unitcell:
if sys.platform[:3] != "cli":
return _obfuncs.toUnitCell(unitcell)
else:
return unitcell.Downcast[ob.OBUnitCell]()
else:
raise AttributeError("Molecule has no attribute 'unitcell'")
@property
def _exchange(self):
if self.OBMol.HasNonZeroCoords():
return (1, self.write("mol"))
else:
return (0, self.write("can").split()[0])
def __iter__(self):
"""Iterate over the Atoms of the Molecule.
This allows constructions such as the following:
for atom in mymol:
print(atom)
"""
return iter(self.atoms)
def calcdesc(self, descnames=[]):
"""Calculate descriptor values.
Optional parameter:
descnames -- a list of names of descriptors
If descnames is not specified, all available descriptors are
calculated. See the descs variable for a list of available
descriptors.
"""
if not descnames:
descnames = descs
ans = {}
for descname in descnames:
try:
desc = _descdict[descname]
except KeyError:
raise ValueError("%s is not a recognised Open Babel descriptor type" % descname)
ans[descname] = desc.Predict(self.OBMol)
return ans
def calcfp(self, fptype="FP2"):
"""Calculate a molecular fingerprint.
Optional parameters:
fptype -- the fingerprint type (default is "FP2"). See the
                     fps variable for a list of available fingerprint
types.
"""
if sys.platform[:3] == "cli":
fp = ob.VectorUInt()
else:
fp = ob.vectorUnsignedInt()
fptype = fptype.lower()
try:
fingerprinter = _fingerprinters[fptype]
except KeyError:
raise ValueError("%s is not a recognised Open Babel Fingerprint type" % fptype)
fingerprinter.GetFingerprint(self.OBMol, fp)
return Fingerprint(fp)
def write(self, format="smi", filename=None, overwrite=False, opt=None):
"""Write the molecule to a file or return a string.
Optional parameters:
format -- see the informats variable for a list of available
output formats (default is "smi")
filename -- default is None
           overwrite -- if the output file already exists, should it
be overwritten? (default is False)
opt -- a dictionary of format specific options
For format options with no parameters, specify the
value as None.
If a filename is specified, the result is written to a file.
Otherwise, a string is returned containing the result.
To write multiple molecules to the same file you should use
the Outputfile class.
"""
        if opt is None:
            opt = {}
obconversion = ob.OBConversion()
formatok = obconversion.SetOutFormat(format)
if not formatok:
raise ValueError("%s is not a recognised Open Babel format" % format)
for k, v in opt.items():
            if v is None:
obconversion.AddOption(k, obconversion.OUTOPTIONS)
else:
obconversion.AddOption(k, obconversion.OUTOPTIONS, str(v))
if filename:
if not overwrite and os.path.isfile(filename):
raise IOError("%s already exists. Use 'overwrite=True' to overwrite it." % filename)
            obconversion.WriteFile(self.OBMol, filename)
obconversion.CloseOutFile()
else:
return obconversion.WriteString(self.OBMol)
def localopt(self, forcefield="mmff94", steps=500):
"""Locally optimize the coordinates.
Optional parameters:
forcefield -- default is "mmff94". See the forcefields variable
for a list of available forcefields.
steps -- default is 500
If the molecule does not have any coordinates, make3D() is
called before the optimization. Note that the molecule needs
to have explicit hydrogens. If not, call addh().
"""
forcefield = forcefield.lower()
if self.dim != 3:
self.make3D(forcefield)
ff = _forcefields[forcefield]
success = ff.Setup(self.OBMol)
if not success:
return
ff.SteepestDescent(steps)
ff.GetCoordinates(self.OBMol)
## def globalopt(self, forcefield="MMFF94", steps=1000):
## if not (self.OBMol.Has2D() or self.OBMol.Has3D()):
## self.make3D()
## self.localopt(forcefield, 250)
## ff = _forcefields[forcefield]
## numrots = self.OBMol.NumRotors()
## if numrots > 0:
## ff.WeightedRotorSearch(numrots, int(math.log(numrots + 1) * steps))
## ff.GetCoordinates(self.OBMol)
    def make3D(self, forcefield="mmff94", steps=50):
"""Generate 3D coordinates.
Optional parameters:
forcefield -- default is "mmff94". See the forcefields variable
for a list of available forcefields.
steps -- default is 50
Once coordinates are generated, hydrogens are added and a quick
local optimization is carried out with 50 steps and the
MMFF94 forcefield. Call localopt() if you want
to improve the coordinates further.
"""
forcefield = forcefield.lower()
_builder.Build(self.OBMol)
self.addh()
self.localopt(forcefield, steps)
def addh(self):
"""Add hydrogens."""
self.OBMol.AddHydrogens()
def removeh(self):
"""Remove hydrogens."""
self.OBMol.DeleteHydrogens()
def __str__(self):
return self.write()
def draw(self, show=True, filename=None, update=False, usecoords=False):
"""Create a 2D depiction of the molecule.
Optional parameters:
show -- display on screen (default is True)
filename -- write to file (default is None)
update -- update the coordinates of the atoms to those
determined by the structure diagram generator
(default is False)
usecoords -- don't calculate 2D coordinates, just use
the current coordinates (default is False)
Tkinter and Python Imaging Library are required for image display.
"""
obconversion = ob.OBConversion()
formatok = obconversion.SetOutFormat("_png2")
if not formatok:
errormessage = ("PNG depiction support not found. You should compile "
"Open Babel with support for Cairo. See installation "
"instructions for more information.")
raise ImportError(errormessage)
# Need to copy to avoid removing hydrogens from self
workingmol = Molecule(ob.OBMol(self.OBMol))
workingmol.removeh()
if not usecoords:
_operations['gen2D'].Do(workingmol.OBMol)
        if update:
if workingmol.OBMol.NumAtoms() != self.OBMol.NumAtoms():
errormessage = ("It is not possible to update the original molecule "
"with the calculated coordinates, as the original "
"molecule contains explicit hydrogens for which no "
"coordinates have been calculated.")
raise RuntimeError(errormessage)
else:
for i in range(workingmol.OBMol.NumAtoms()):
self.OBMol.GetAtom(i + 1).SetVector(workingmol.OBMol.GetAtom(i + 1).GetVector())
if filename:
filedes = None
else:
if sys.platform[:3] == "cli" and show:
errormessage = ("It is only possible to show the molecule if you "
"provide a filename. The reason for this is that I kept "
"having problems when using temporary files.")
raise RuntimeError(errormessage)
filedes, filename = tempfile.mkstemp()
workingmol.write("_png2", filename=filename, overwrite=True)
if show:
if sys.platform[:4] == "java":
image = javax.imageio.ImageIO.read(java.io.File(filename))
frame = javax.swing.JFrame(visible=1)
frame.getContentPane().add(javax.swing.JLabel(javax.swing.ImageIcon(image)))
frame.setSize(300,300)
frame.setDefaultCloseOperation(javax.swing.WindowConstants.DISPOSE_ON_CLOSE)
frame.show()
elif sys.platform[:3] == "cli":
form = _MyForm()
form.setup(filename, self.title)
Application.Run(form)
else:
if not tk:
errormessage = ("Tkinter or Python Imaging "
"Library not found, but is required for image "
"display. See installation instructions for "
"more information.")
raise ImportError(errormessage)
root = tk.Tk()
root.title((hasattr(self, "title") and self.title)
or self.__str__().rstrip())
                frame = tk.Frame(root, colormap="new", visual='truecolor')
                frame.pack()
image = PIL.open(filename)
imagedata = piltk.PhotoImage(image)
label = tk.Label(frame, image=imagedata).pack()
quitbutton = tk.Button(root, text="Close", command=root.destroy).pack(fill=tk.X)
root.mainloop()
if filedes:
os.close(filedes)
os.remove(filename)
class Atom(object):
"""Represent a Pybel atom.
Required parameter:
OBAtom -- an Open Babel OBAtom
Attributes:
atomicmass, atomicnum, cidx, coords, coordidx, exactmass,
formalcharge, heavyvalence, heterovalence, hyb, idx,
implicitvalence, isotope, partialcharge, spin, type,
valence, vector.
(refer to the Open Babel library documentation for more info).
The original Open Babel atom can be accessed using the attribute:
OBAtom
"""
def __init__(self, OBAtom):
self.OBAtom = OBAtom
@property
def coords(self):
return (self.OBAtom.GetX(), self.OBAtom.GetY(), self.OBAtom.GetZ())
@property
def atomicmass(self):
return self.OBAtom.GetAtomicMass()
@property
def atomicnum(self):
return self.OBAtom.GetAtomicNum()
@property
def cidx(self):
return self.OBAtom.GetCIdx()
@property
def coordidx(self):
return self.OBAtom.GetCoordinateIdx()
@property
def exactmass(self):
return self.OBAtom.GetExactMass()
@property
def formalcharge(self):
return self.OBAtom.GetFormalCharge()
@property
def heavyvalence(self):
return self.OBAtom.GetHvyValence()
@property
def heterovalence(self):
return self.OBAtom.GetHeteroValence()
@property
def hyb(self):
return self.OBAtom.GetHyb()
@property
def idx(self):
return self.OBAtom.GetIdx()
@property
def implicitvalence(self):
return self.OBAtom.GetImplicitValence()
@property
def isotope(self):
return self.OBAtom.GetIsotope()
@property
def partialcharge(self):
return self.OBAtom.GetPartialCharge()
@property
def spin(self):
return self.OBAtom.GetSpinMultiplicity()
@property
def type(self):
return self.OBAtom.GetType()
@property
def valence(self):
return self.OBAtom.GetValence()
@property
def vector(self):
return self.OBAtom.GetVector()
def __str__(self):
c = self.coords
return "Atom: %d (%.2f %.2f %.2f)" % (self.atomicnum, c[0], c[1], c[2])
def _findbits(fp, bitsperint):
"""Find which bits are set in a list/vector.
This function is used by the Fingerprint class.
>>> _findbits([13, 71], 8)
[1, 3, 4, 9, 10, 11, 15]
"""
ans = []
start = 1
if sys.platform[:4] == "java":
fp = [fp.get(i) for i in range(fp.size())]
for x in fp:
i = start
while x > 0:
if x % 2:
ans.append(i)
x >>= 1
i += 1
start += bitsperint
return ans
class Fingerprint(object):
"""A Molecular Fingerprint.
Required parameters:
fingerprint -- a vector calculated by OBFingerprint.FindFingerprint()
Attributes:
fp -- the underlying fingerprint object
bits -- a list of bits set in the Fingerprint
Methods:
The "|" operator can be used to calculate the Tanimoto coeff. For example,
given two Fingerprints 'a', and 'b', the Tanimoto coefficient is given by:
tanimoto = a | b
"""
def __init__(self, fingerprint):
self.fp = fingerprint
def __or__(self, other):
return ob.OBFingerprint.Tanimoto(self.fp, other.fp)
@property
def bits(self):
return _findbits(self.fp, ob.OBFingerprint.Getbitsperint())
def __str__(self):
fp = self.fp
if sys.platform[:4] == "java":
fp = [self.fp.get(i) for i in range(self.fp.size())]
return ", ".join([str(x) for x in fp])
class Smarts(object):
"""A Smarts Pattern Matcher
Required parameters:
smartspattern
Methods:
findall(molecule)
Example:
>>> mol = readstring("smi","CCN(CC)CC") # triethylamine
>>> smarts = Smarts("[#6][#6]") # Matches an ethyl group
>>> print(smarts.findall(mol))
[(1, 2), (4, 5), (6, 7)]
The numbers returned are the indices (starting from 1) of the atoms
that match the SMARTS pattern. In this case, there are three matches
for each of the three ethyl groups in the molecule.
"""
    def __init__(self, smartspattern):
"""Initialise with a SMARTS pattern."""
self.obsmarts = ob.OBSmartsPattern()
success = self.obsmarts.Init(smartspattern)
if not success:
raise IOError("Invalid SMARTS pattern")
    def findall(self, molecule):
"""Find all matches of the SMARTS pattern to a particular molecule.
Required parameters:
molecule
"""
self.obsmarts.Match(molecule.OBMol)
vector = self.obsmarts.GetUMapList()
if sys.platform[:4] == "java":
vector = [vector.get(i) for i in range(vector.size())]
return list(vector)
class MoleculeData(object):
"""Store molecule data in a dictionary-type object
Required parameters:
obmol -- an Open Babel OBMol
Methods and accessor methods are like those of a dictionary except
that the data is retrieved on-the-fly from the underlying OBMol.
Example:
>>> mol = readfile("sdf", 'head.sdf').next() # Python 2
>>> # mol = next(readfile("sdf", 'head.sdf')) # Python 3
>>> data = mol.data
>>> print(data)
{'Comment': 'CORINA 2.61 0041 25.10.2001', 'NSC': '1'}
>>> print(len(data), data.keys(), data.has_key("NSC"))
2 ['Comment', 'NSC'] True
>>> print(data['Comment'])
CORINA 2.61 0041 25.10.2001
>>> data['Comment'] = 'This is a new comment'
>>> for k,v in data.items():
... print(k, "-->", v)
Comment --> This is a new comment
NSC --> 1
>>> del data['NSC']
>>> print(len(data), data.keys(), data.has_key("NSC"))
1 ['Comment'] False
"""
def __init__(self, obmol):
self._mol = obmol
def _data(self):
data = self._mol.GetData()
if sys.platform[:4] == "java":
data = [data.get(i) for i in range(data.size())]
answer = [x for x in data if
x.GetDataType()==_obconsts.PairData or
x.GetDataType()==_obconsts.CommentData]
if sys.platform[:3] != "cli":
answer = [_obfuncs.toPairData(x) for x in answer]
return answer
def _testforkey(self, key):
        if key not in self:
raise KeyError("'%s'" % key)
def keys(self):
return [x.GetAttribute() for x in self._data()]
def values(self):
return [x.GetValue() for x in self._data()]
def items(self):
return iter(zip(self.keys(), self.values()))
def __iter__(self):
return iter(self.keys())
def iteritems(self): # Can remove for Python 3
return self.items()
def __len__(self):
return len(self._data())
def __contains__(self, key):
return self._mol.HasData(key)
def __delitem__(self, key):
self._testforkey(key)
self._mol.DeleteData(self._mol.GetData(key))
def clear(self):
for key in self:
del self[key]
def has_key(self, key):
return key in self
def update(self, dictionary):
for k, v in dictionary.items():
self[k] = v
def __getitem__(self, key):
self._testforkey(key)
answer = self._mol.GetData(key)
if sys.platform[:3] != "cli":
answer = _obfuncs.toPairData(answer)
return answer.GetValue()
def __setitem__(self, key, value):
if key in self:
if sys.platform[:3] != "cli":
pairdata = _obfuncs.toPairData(self._mol.GetData(key))
else:
pairdata = self._mol.GetData(key).Downcast[ob.OBPairData]()
pairdata.SetValue(str(value))
else:
pairdata = ob.OBPairData()
pairdata.SetAttribute(key)
pairdata.SetValue(str(value))
self._mol.CloneData(pairdata)
def __repr__(self):
return dict(self.items()).__repr__()
if sys.platform[:3] == "cli":
class _MyForm(Form):
def __init__(self):
Form.__init__(self)
def setup(self, filename, title):
# adjust the form's client area size to the picture
self.ClientSize = Size(300, 300)
self.Text = title
self.filename = filename
self.image = Image.FromFile(self.filename)
pictureBox = PictureBox()
# this will fit the image to the form
pictureBox.SizeMode = PictureBoxSizeMode.StretchImage
pictureBox.Image = self.image
# fit the picture box to the frame
pictureBox.Dock = DockStyle.Fill
self.Controls.Add(pictureBox)
self.Show()
if __name__=="__main__": #pragma: no cover
import doctest
doctest.testmod(verbose=True)
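# Hedged usage sketch (added for illustration, not part of the original module).
# It assumes the Open Babel Python bindings are installed and that readstring()
# is defined earlier in this module, as referenced by Molecule.__init__ above.
if __name__ == "__main__":  # pragma: no cover
    ethanol = readstring("smi", "CCO")
    print(ethanol.formula, ethanol.molwt)        # C2H6O and its molecular weight
    fp_a = ethanol.calcfp()                      # default FP2 fingerprint
    fp_b = readstring("smi", "CCN").calcfp()
    print(fp_a | fp_b)                           # Tanimoto coefficient via __or__
    print(Smarts("[OX2H]").findall(ethanol))     # hydroxyl oxygen match indices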
| python |
import argparse
import os
import sys
import requests
# Globals
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
APP_DIR = 'app'
APP_FILES = ['__init__.py', 'config.py', 'run.py', 'create_db.py', 'shell.py']
STATIC_DIR = 'static'
STATIC_SUBDIRS = ['css', 'fonts', 'img', 'js']
TEMPLATE_DIR = 'templates'
TEMPLATE_FILES = ['base.html', 'macros.html']
VIEWS_DIR = 'views'
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--init",
help="Initialize a project", action="store_true")
parser.add_argument("-n", "--name",
help="Project Name", nargs='+')
parser.add_argument("-u", "--ui",
help="UI Library")
parser.add_argument("-a", "--auth",
help="Authentication System")
parser.add_argument("-d", "--db",
help="Database Backend")
args = parser.parse_args()
# Create a new project
if args.init:
if not args.name:
sys.exit('You must have a project name')
project_dir = '{}/{}'.format(BASE_DIR, '-'.join(args.name))
if os.path.exists(project_dir):
sys.exit('Project Directory already exists')
else:
os.makedirs(project_dir)
os.makedirs('/'.join([project_dir, APP_DIR]))
os.makedirs('/'.join([project_dir, APP_DIR, TEMPLATE_DIR]))
os.makedirs('/'.join([project_dir, APP_DIR, VIEWS_DIR]))
os.makedirs('/'.join([project_dir, APP_DIR, STATIC_DIR]))
for sub in STATIC_SUBDIRS:
os.makedirs('/'.join([project_dir, APP_DIR, STATIC_DIR, sub]))
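# Hedged usage sketch (added): a typical invocation, assuming this script is
# saved as scaffold.py; the project name and option values are hypothetical.
#
#     python scaffold.py --init --name my blog --ui bootstrap --db sqlite
#
# With --init and --name given, the script creates my-blog/ containing app/,
# app/templates/, app/views/ and app/static/{css,fonts,img,js}.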
| python |
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
import sys
_UINT8_TO_CHAR = [
'.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.',
'.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.',
' ', '!', '"', '#', '$', '%', '&', "'", '(', ')', '*', '+', ',', '-', '.', '/',
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?',
'@', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O',
'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[', '\\', ']', '^', '_',
'`', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{', '|', '}', '~', '.',
]
if __name__ == '__main__':
print('%s does not provide main()' % __file__)
sys.exit(1)
def _hex_str(byte):
return '%02X' % (int(byte) & 0xFF)
def _hex_char(byte):
byte = int(byte) & 0xFF
if byte > 0x7F:
return '.'
else:
return _UINT8_TO_CHAR[byte]
def dump_bytes(data):
'dump data in a readable string table'
if isinstance(data, bytes) is False:
return ''
lines = []
data_len = len(data)
lines.append('data length %d' % data_len)
lines.append(
'------ 0 1 2 3 4 5 6 7 | 8 9 A B C D E F 01234567 89ABCDEF')
for index in range(0, data_len, 16):
remain_len = data_len - index
if remain_len >= 16:
string = '0x%04X %s %s %s %s %s %s %s %s | %s %s %s %s %s %s %s %s %s%s%s%s%s%s%s%s %s%s%s%s%s%s%s%s' % (
index,
_hex_str(data[index + 0]), _hex_str(data[index + 1]), _hex_str(data[index + 2]), _hex_str(data[index + 3]),
_hex_str(data[index + 4]), _hex_str(data[index + 5]), _hex_str(data[index + 6]), _hex_str(data[index + 7]),
_hex_str(data[index + 8]), _hex_str(data[index + 9]), _hex_str(data[index + 10]), _hex_str(data[index + 11]),
_hex_str(data[index + 12]), _hex_str(data[index + 13]), _hex_str(data[index + 14]), _hex_str(data[index + 15]),
_hex_char(data[index + 0]), _hex_char(data[index + 1]), _hex_char(data[index + 2]), _hex_char(data[index + 3]),
_hex_char(data[index + 4]), _hex_char(data[index + 5]), _hex_char(data[index + 6]), _hex_char(data[index + 7]),
_hex_char(data[index + 8]), _hex_char(data[index + 9]), _hex_char(data[index + 10]), _hex_char(data[index + 11]),
_hex_char(data[index + 12]), _hex_char(data[index + 13]), _hex_char(data[index + 14]), _hex_char(data[index + 15]),
)
lines.append(string)
else:
this_line = []
this_line.append('0x%04X ' % index)
for col in range(index, data_len):
this_line.append('%s ' % _hex_str(data[col]))
if remain_len > 8:
this_line.insert(9, '| ')
this_line.append(' ' * (16 - remain_len))
else:
this_line.append(' ' * (16 - remain_len))
this_line.append(' ')
print('remain_len = %d' % remain_len)
# this_line.append(' ')
this_line.append(' ')
for col in range(index, data_len):
this_line.append(_hex_char(data[col]))
if col == index + 7:
this_line.append(' ')
lines.append(''.join(this_line))
return '\n'.join(lines)
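# Hedged usage sketch (added): this module is import-only (running it directly
# exits above), so call dump_bytes() from other code, e.g.:
#
#     print(dump_bytes(b'hello world, this is dump_bytes'))
#
# which prints a header line followed by rows of offset, hex bytes split 8 | 8,
# and the printable-ASCII rendering produced by _hex_char().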
| python |
from typing import List
def checkorders(orders: List[str]) -> List[bool]:
results = []
for i in orders:
flag = True
stock = []
for j in i:
if j in '([{':
stock.append(j)
else:
                if not stock:
flag = False
break
symbol = stock.pop()
if not match(symbol, j):
flag = False
break
        if stock:
flag = False
results.append(flag)
return results
def match(opens, closers):
return '([{'.index(opens) == ')]}'.index(closers)
print(checkorders(['()','(','{}[]','[][][]','[{]{]']))
| python |
from unittest import TestCase
import requests_mock
import urllib.parse
from .fixtures import TOKEN
from typeform import Typeform
from typeform.constants import API_BASE_URL
class FormsTestCase(TestCase):
def setUp(self):
self.forms = Typeform(TOKEN).forms
form = self.forms.create({
'title': 'title'
})
self.formID = form.get('id')
def tearDown(self):
        result = self.forms.list()
        forms = result.get('items', [])
for form in forms:
self.forms.delete(form.get('id'))
def test_forms_returns_method_and_path(self):
"""
get all forms has the correct method and path
"""
with requests_mock.mock() as m:
m.get(API_BASE_URL+'/forms', json={})
self.forms.list()
history = m.request_history
self.assertEqual(history[0].url, API_BASE_URL+'/forms')
self.assertEqual(history[0].method, 'GET')
def test_forms_correct_params(self):
"""
        parameters are sent correctly
"""
with requests_mock.mock() as m:
m.get(API_BASE_URL+'/forms', json={})
self.forms.list(page=2, pageSize=10, search='hola', workspaceId='abc')
history = m.request_history
query = history[0].url.split('?')[1]
params = dict(urllib.parse.parse_qs(query))
self.assertEqual(params.pop('page')[0], '2')
self.assertEqual(params.pop('page_size')[0], '10')
self.assertEqual(params.pop('search')[0], 'hola')
self.assertEqual(params.pop('workspace_id')[0], 'abc')
def test_forms_get_correct_id(self):
"""
get sends the correct UID
"""
with requests_mock.mock() as m:
m.get(API_BASE_URL+'/forms/'+self.formID, json={})
self.forms.get(self.formID)
history = m.request_history
self.assertEqual(history[0].url, API_BASE_URL+'/forms/'+self.formID)
def test_forms_get_sets_get_method(self):
"""
get sets get method
"""
with requests_mock.mock() as m:
m.get(API_BASE_URL+'/forms/'+self.formID, json={})
self.forms.get(self.formID)
history = m.request_history
self.assertEqual(history[0].method, 'GET')
def test_forms_update_updates_a_form(self):
"""
update updates a form
"""
title = 'hola'
result = self.forms.update(self.formID, data={
'title': title
})
self.assertEqual(result.get('title'), title)
def test_forms_update_as_patch_updates_a_form(self):
"""
update as patch updates a form
"""
result = self.forms.update(self.formID, patch=True, data=[{
'op': 'replace',
'path': '/title',
'value': 'aloha'
}])
self.assertEqual(result, 'OK')
def test_forms_update_sets_put_method_in_request_by_default(self):
"""
update sets put method in request by default
"""
with requests_mock.mock() as m:
m.put(API_BASE_URL+'/forms/'+self.formID, json={})
self.forms.update(self.formID, data={
'title': 'title'
})
history = m.request_history
self.assertEqual(history[0].method, 'PUT')
def test_forms_delete_removes_the_correct_uid_form(self):
"""
delete removes the correct uid form
"""
get1Result = self.forms.get(self.formID)
self.assertEqual(get1Result.get('id'), self.formID)
self.forms.delete(self.formID)
try:
self.forms.get(self.formID)
except Exception as err:
error = str(err)
self.assertEqual(error, 'Non existing form with uid %s' % self.formID)
def test_forms_create_has_the_correct_path_and_method(self):
"""
create has the correct path and method
"""
with requests_mock.mock() as m:
m.post(API_BASE_URL+'/forms', json={})
self.forms.create({
'title': 'hola'
})
history = m.request_history
self.assertEqual(history[0].method, 'POST')
self.assertEqual(history[0].url, API_BASE_URL+'/forms')
def test_forms_create_creates_a_new_form(self):
"""
create creates a new form
"""
createResult = self.forms.create({
'title': 'hola'
})
formID = createResult.get('id')
getResult = self.forms.get(formID)
self.assertIsNone(createResult.get('code', None))
self.assertEqual(getResult.get('id'), formID)
def test_forms_get_messages_has_the_correct_path_and_method(self):
"""
get messages has the correct path and method
"""
with requests_mock.mock() as m:
m.get(API_BASE_URL+'/forms/'+self.formID+'/messages', json={})
self.forms.messages.get(self.formID)
history = m.request_history
self.assertEqual(history[0].method, 'GET')
self.assertEqual(history[0].url, API_BASE_URL+'/forms/'+self.formID+'/messages')
def test_forms_update_messages_has_the_correct_path_and_method(self):
"""
update messages has the correct path and method
"""
with requests_mock.mock() as m:
m.put(API_BASE_URL+'/forms/'+self.formID+'/messages')
self.forms.messages.update(self.formID)
history = m.request_history
self.assertEqual(history[0].method, 'PUT')
self.assertEqual(history[0].url, API_BASE_URL+'/forms/'+self.formID+'/messages')
| python |
# libraries
import pandas as pd
import yaml
from google.cloud import storage
from os.path import dirname, abspath
# utils
from utils import upload_local_file_to_gcp_storage_bucket, df_to_gcp_csv
# set project directory
project_directory = dirname(dirname(abspath("__file__")))
print("Processing : Loading configuration file")
with open(project_directory + "/config/config.yaml") as config_file:
    config = yaml.safe_load(config_file)
print("Processing : Set Configuration parameters")
storage_key = project_directory + config["parameters"]["storage_service_account_key"]
data_file = project_directory + config["parameters"]["data_source"]
bucket = config["parameters"]["bucket_source"]
blob_name = config["parameters"]["blob_source"]
print("Processing : Set storage client")
storage_client = storage.Client.from_service_account_json(storage_key)
print("Processing : upload file")
upload_local_file_to_gcp_storage_bucket(storage_client, bucket, blob_name, data_file)
print("Processing : upload from pandas dataframe")
df = pd.read_csv(data_file)
df_to_gcp_csv(
storage_client,
df,
bucket=bucket,
blob_name=blob_name,
source_file_name=blob_name,
)
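# Hedged configuration sketch (added): the keys read above imply a
# config/config.yaml of roughly this shape; all values are placeholders.
#
#     parameters:
#       storage_service_account_key: /keys/service_account.json
#       data_source: /data/input.csv
#       bucket_source: my-bucket
#       blob_source: uploads/input.csv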
| python |
# -*- coding: utf-8 -*-
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates a sysroot tarball for building a specific package.
Meant for use after setup_board and build_packages have been run.
"""
from __future__ import print_function
import os
from chromite.lib import constants
from chromite.lib import cros_build_lib
from chromite.lib import commandline
from chromite.lib import osutils
from chromite.lib import sudo
from chromite.lib import sysroot_lib
DEFAULT_NAME = 'sysroot_%(package)s.tar.xz'
PACKAGE_SEPARATOR = '/'
SYSROOT = 'sysroot'
def ParseCommandLine(argv):
"""Parse args, and run environment-independent checks."""
parser = commandline.ArgumentParser(description=__doc__)
parser.add_argument('--board', required=True,
help=('The board to generate the sysroot for.'))
parser.add_argument('--package', required=True,
help=('The packages to generate the sysroot for.'))
parser.add_argument('--deps-only', action='store_true',
default=False,
help='Build dependencies only.')
parser.add_argument('--out-dir', type='path', required=True,
help='Directory to place the generated tarball.')
parser.add_argument('--out-file', default=DEFAULT_NAME,
help='The name to give to the tarball. '
'Defaults to %(default)s.')
options = parser.parse_args(argv)
options.out_file %= {
'package': options.package.split()[0].replace(PACKAGE_SEPARATOR, '_'),
}
return options
class GenerateSysroot(object):
"""Wrapper for generation functionality."""
PARALLEL_EMERGE = os.path.join(constants.CHROMITE_BIN_DIR, 'parallel_emerge')
def __init__(self, sysroot, options):
"""Initialize
Args:
sysroot: Path to sysroot.
options: Parsed options.
"""
self.sysroot = sysroot
self.options = options
self.extra_env = {'ROOT': self.sysroot, 'USE': os.environ.get('USE', '')}
def _Emerge(self, *args, **kwargs):
"""Emerge the given packages using parallel_emerge."""
cmd = [self.PARALLEL_EMERGE, '--board=%s' % self.options.board,
'--usepkgonly', '--noreplace'] + list(args)
kwargs.setdefault('extra_env', self.extra_env)
cros_build_lib.SudoRunCommand(cmd, **kwargs)
def _InstallToolchain(self):
# Create the sysroot's config.
sysroot = sysroot_lib.Sysroot(self.sysroot)
sysroot.WriteConfig(sysroot.GenerateBoardConfig(self.options.board))
cros_build_lib.RunCommand(
[os.path.join(constants.CROSUTILS_DIR, 'install_toolchain'),
'--noconfigure', '--sysroot', self.sysroot])
def _InstallKernelHeaders(self):
self._Emerge('sys-kernel/linux-headers')
def _InstallBuildDependencies(self):
# Calculate buildtime deps that are not runtime deps.
raw_sysroot = cros_build_lib.GetSysroot(board=self.options.board)
packages = []
if not self.options.deps_only:
packages = self.options.package.split()
else:
for pkg in self.options.package.split():
cmd = ['qdepends', '-q', '-C', pkg]
output = cros_build_lib.RunCommand(
cmd, extra_env={'ROOT': raw_sysroot}, capture_output=True).output
if output.count('\n') > 1:
raise AssertionError('Too many packages matched for given pattern')
# qdepend outputs "package: deps", so only grab the deps.
deps = output.partition(':')[2].split()
packages.extend(deps)
# Install the required packages.
if packages:
self._Emerge(*packages)
def _CreateTarball(self):
target = os.path.join(self.options.out_dir, self.options.out_file)
cros_build_lib.CreateTarball(target, self.sysroot, sudo=True)
def Perform(self):
"""Generate the sysroot."""
self._InstallToolchain()
self._InstallKernelHeaders()
self._InstallBuildDependencies()
self._CreateTarball()
def FinishParsing(options):
"""Run environment dependent checks on parsed args."""
target = os.path.join(options.out_dir, options.out_file)
if os.path.exists(target):
cros_build_lib.Die('Output file %r already exists.' % target)
if not os.path.isdir(options.out_dir):
cros_build_lib.Die(
'Non-existent directory %r specified for --out-dir' % options.out_dir)
def main(argv):
options = ParseCommandLine(argv)
FinishParsing(options)
cros_build_lib.AssertInsideChroot()
with sudo.SudoKeepAlive(ttyless_sudo=False):
with osutils.TempDir(set_global=True, sudo_rm=True) as tempdir:
sysroot = os.path.join(tempdir, SYSROOT)
os.mkdir(sysroot)
GenerateSysroot(sysroot, options).Perform()
| python |
valor_do_produto = float(input('Digite o valor do produto? R$ '))
desconto = int(input('Qual será o desconto? '))
desconto_aplicado = valor_do_produto - ((valor_do_produto * desconto)/100)
print('O produto que custava R${:.2f}, na promoção de {}% custará: R$ {:.2f}'.format(valor_do_produto,desconto, desconto_aplicado)) | python |
import collections
import statistics
import time
class Statistics:
"""Calculate mathematical statistics of numerical values.
:ivar ~.sum: sum of all values
:ivar ~.min: minimum of all values
:ivar ~.max: maximum of all values
:ivar ~.mean: mean of all values
:ivar ~.median: median of all values
:ivar ~.last_value: last added value
:ivar ~.last_change: timestamp the last time a value was added
"""
def __init__(self, max_age=None, max_samples=None):
"""
:param max_age: Maximum age of values in seconds
:param max_samples: Maximum amount of samples which will be kept
"""
if max_age is None and max_samples is None:
raise ValueError('Please specify max age or max samples!')
self._max_age = max_age
self.timestamps = collections.deque(maxlen=max_samples)
self.values = collections.deque(maxlen=max_samples)
self.sum: float = None
self.min: float = None
self.max: float = None
self.mean: float = None
self.median: float = None
self.last_value: float = None
self.last_change: float = None
def _remove_old(self):
if self._max_age is None:
return None
# remove too old entries
now = time.time()
while self.timestamps and (now - self.timestamps[0]) > self._max_age:
self.timestamps.popleft()
self.values.popleft()
def update(self):
"""update values without adding a new value"""
self._remove_old()
__len = len(self.values)
if not __len:
self.sum = None
self.min = None
self.max = None
self.mean = None
self.median = None
else:
self.sum = sum(self.values)
self.min = min(self.values)
self.max = max(self.values)
self.mean = statistics.mean(self.values)
self.median = statistics.median(self.values)
if __len >= 2:
self.last_change = self.values[-1] - self.values[-2]
else:
self.last_change = None
def add_value(self, value):
"""Add a new value and recalculate statistical values
:param value: new value
"""
assert isinstance(value, (int, float)), type(value)
self.last_value = value
self.timestamps.append(time.time())
self.values.append(value)
self.update()
def __repr__(self):
return f'<Statistics sum: {self.sum:.1f}, min: {self.min:.2f}, max: {self.max:.2f}, ' \
f'mean: {self.mean:.2f}, median: {self.median:.2f}>'
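# Hedged usage sketch (added for illustration, not part of the original module):
# demonstrates the rolling window when only max_samples is set.
if __name__ == "__main__":
    stats = Statistics(max_samples=3)
    for value in (1, 2, 3, 4):
        stats.add_value(value)
    # Only the last three values (2, 3, 4) are kept: sum 9, mean 3.0, last_change 1.
    print(stats)
    print(stats.last_change)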
| python |
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
class MetaFeaturesExtractor(BaseEstimator, TransformerMixin):
def __init__(self, user_meta=None, item_meta=None):
self.user_meta = user_meta
self.item_meta = item_meta
        if self.user_meta is not None:
            self.user_meta.registration_init_time = pd.to_datetime(
                self.user_meta.registration_init_time, format='%Y%m%d')
            self.user_meta.expiration_date = pd.to_datetime(
                self.user_meta.expiration_date, format='%Y%m%d')
self.X_with_meta = None
def fit(self, X, y=None, **fit_params):
return self
def transform(self, X):
self.X_with_meta = X.copy()
self.X_with_meta = pd.merge(self.X_with_meta, self.user_meta, on='msno', how='left')
self.X_with_meta = pd.merge(self.X_with_meta, self.item_meta, on='song_id', how='left')
self.X_with_meta[
'days_registered'
] = self.X_with_meta.expiration_date - self.X_with_meta.registration_init_time
self.X_with_meta['days_registered'] = self.X_with_meta.days_registered.apply(lambda x: x.days)
return self.X_with_meta
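# Hedged usage sketch (added): the tiny frames below are made up; the real
# user_meta/item_meta come from whatever metadata files the project loads.
if __name__ == "__main__":
    user_meta = pd.DataFrame({
        'msno': ['u1'],
        'registration_init_time': ['20150101'],
        'expiration_date': ['20160101'],
    })
    item_meta = pd.DataFrame({'song_id': ['s1'], 'artist_name': ['someone']})
    X = pd.DataFrame({'msno': ['u1'], 'song_id': ['s1']})
    extractor = MetaFeaturesExtractor(user_meta=user_meta, item_meta=item_meta)
    # days_registered should come out as 365 for this one-row example
    print(extractor.fit_transform(X)[['msno', 'song_id', 'days_registered']])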
| python |
# coding=utf-8
from django.test import TestCase
from django.db import IntegrityError
from applications.trackers.models import Tracker
class TrackerModelTest(TestCase):
def test_create_tracker(self):
Tracker.objects.create(ip='192.168.0.1')
tracker = Tracker.objects.all()
self.assertTrue(tracker)
def test_multiple_create(self):
Tracker.objects.bulk_create([
Tracker(ip='192.168.0.1'),
Tracker(ip='192.168.0.2'),
])
tracker = Tracker.objects.all()
        self.assertEqual(tracker.count(), 2)
def test_ordering(self):
Tracker.objects.bulk_create([
Tracker(ip='192.168.0.1'),
Tracker(ip='192.168.1.2'),
Tracker(ip='192.168.0.2'),
])
        self.assertEqual(Tracker.objects.first().ip, '192.168.0.2')
def test_error_without_ip(self):
with self.assertRaises(IntegrityError):
Tracker.objects.create()
def test_str(self):
Tracker.objects.create(ip='192.168.0.1')
tracker = Tracker.objects.first()
        self.assertEqual(
tracker.__str__(),
'IP адрес {ip}, зафиксирован {date} в {time}'.format(
ip=tracker.ip, date=tracker.time.strftime("%d.%m.%Y"),
time=tracker.time.strftime("%H:%M:%S")
)
)
| python |
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from .models import Message, Person, Tag
class MessageView(DetailView):
"""
Detail view of a Person object
"""
model = Message
class MessagesView(ListView):
"""
A view to list all Person objects
"""
model = Message
class PersonView(DetailView):
"""
Detail view of a Person object
"""
model = Person
class PersonsView(ListView):
"""
A view to list all Person objects
"""
model = Person
class TagView(DetailView):
"""
Detail view of a Tag object
"""
model = Tag
class TagsView(ListView):
"""
A view to list all Tag objects
"""
model = Tag
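# Hedged wiring sketch (added): one possible urls.py for the views above; the
# URL paths and names are assumptions, not part of the original app.
#
#     from django.urls import path
#     from . import views
#
#     urlpatterns = [
#         path('messages/', views.MessagesView.as_view(), name='message-list'),
#         path('messages/<int:pk>/', views.MessageView.as_view(), name='message-detail'),
#         path('people/', views.PersonsView.as_view(), name='person-list'),
#         path('people/<int:pk>/', views.PersonView.as_view(), name='person-detail'),
#         path('tags/', views.TagsView.as_view(), name='tag-list'),
#         path('tags/<int:pk>/', views.TagView.as_view(), name='tag-detail'),
#     ]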
| python |
# Nick Hansel
# Web scraper to create a shopping list given recipes
from random_recipe import *
days = {
"Monday": None,
"Tuesday": None,
"Wednesday": None,
"Thursday": None,
"Friday": None,
"Saturday": None,
"Sunday": None
}
while True:
answer = input("Would you like to choose a random meal or would you like to schedule your meal plan? ("
"schedule/random): ")
answer = answer.lower()
if answer == "random":
randomRecipe()
print("\n" + "Recipe chosen: " + '\n' + Globals.chosen_recipe[0] + "\n")
print("Ingredients needed:")
for x in Globals.final_dict.get(Globals.chosen_recipe[0]):
print(x)
elif answer == 'schedule':
how_many_days = input("How many days would you like to schedule (up to 7 days, starting on Monday): " + '\n')
how_many_days = int(how_many_days)
shopping = input('Would you like a shopping list as well? (y/n): ' + '\n')
if how_many_days <= 7:
randomRecipe()
new = (list(days.items()))
new = ([list(x) for x in new])
for x in range(how_many_days):
used = (choice(Globals.names))
new[x][1] = used
Globals.names.remove(used)
del new[how_many_days:]
new = ([tuple(x) for x in new])
new = dict(new)
            file1 = open("lunch.txt", 'w')
            for k, v in new.items():
                print(k + ':' + ' ', v + "\n")
                file1.write(k + ': ' + v + '\n')
            file1.close()
if shopping == 'y':
file1 = open('Shopping List.txt', 'w')
for x in new.values():
for j in Globals.final_dict.get(x):
file1.write(j + '\n')
file1.close()
break
| python |
import logging
LOG_FORMAT = "%(levelname)s %(asctime)s - %(message)s"
logging.basicConfig(
filename = "logging_demo.log",
level = logging.DEBUG,
format = LOG_FORMAT,
filemode = "w")
logger = logging.getLogger()
logger.debug("Debug level message")
logger.info("Info level message")
logger.warning("Warning level message")
logger.error("Error level message")
logger.critical("Critical level message")
print(logger.level)
| python |
this is not valid python source code, but still more beautiful than many non-pythonic languages.
| python |
import discord
from discord.ext import commands
import os
import json
client = commands.Bot(command_prefix = ".")
# @client.command()
# async def load(ctx , extensions):
# client.load_extensions(f"cogs.{extensions}")
# @client.command()
# async def unload(ctx , extensions):
# client.unload_extension(f"cogs.{extensions}")
for filename in os.listdir("./cogs"):
if filename.endswith(".py"):
client.load_extension(f"cogs.{filename[:-3]}")
@client.event
async def on_command_error(ctx , error):
if isinstance(error , commands.CommandNotFound):
await ctx.send("Invalid Command")
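# Hedged note (added): the lines below expect .\tokens\token.json to contain
#     {"Token": "<your bot token>"}
# and a ./cogs directory of extension modules, as loaded in the loop above.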
with open(r".\tokens\token.json") as f:
    s = json.load(f)
client.run(s["Token"])
| python |
fibonacci = [0, 1]
n = int(input())
if n == 1:
print(str(fibonacci[0]))
if n < 46 and n > 1:
if n > 2:
for x in range(n - 2):
fibonacci.append(fibonacci[x] + fibonacci[x + 1])
myTable = str(fibonacci).maketrans("", "", "[,]")
print(str(fibonacci).translate(myTable))
| python |
"""
Test CCompiler.
"""
from pathlib import Path
from types import SimpleNamespace
from unittest import mock
from fab.build_config import AddFlags
from fab.dep_tree import AnalysedFile
from fab.steps.compile_c import CompileC
class Test_Compiler(object):
def test_vanilla(self):
# ensure the command is formed correctly
config = SimpleNamespace(
project_workspace=Path('foo'), source_root=Path('foo/src'), multiprocessing=False, reuse_artefacts=False)
c_compiler = CompileC(
compiler='gcc', common_flags=['-c'], path_flags=[
AddFlags(match='foo/src/*', flags=['-I', 'foo/include', '-Dhello'])])
analysed_files = {Path('foo/src/foo.c'): AnalysedFile(fpath=Path('foo/src/foo.c'), file_hash=None)}
with mock.patch('fab.steps.compile_c.run_command') as mock_run:
with mock.patch('fab.steps.compile_c.send_metric'):
c_compiler.run(artefact_store={'build_tree': analysed_files}, config=config)
mock_run.assert_called_with([
'gcc', '-c', '-I', 'foo/include', '-Dhello', 'foo/src/foo.c', '-o', 'foo/src/foo.o'])
| python |
import markov
from typing import Optional
from fastapi import FastAPI
app = FastAPI()
@app.get("/")
def read_item(length: Optional[str] = None, start: Optional[str] = None):
if length is not None:
length = int(length)
text = markov.generate(length=length, start=start)
return text
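# Hedged usage sketch (added): assuming this file is saved as main.py and served
# with `uvicorn main:app`, the endpoint can be queried like:
#
#     curl "http://127.0.0.1:8000/?length=50&start=The"
#
# The markov module above is project-specific; generate(length=..., start=...)
# is taken from its use in read_item().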
| python |