code | apis | extract_api
---|---|---|
from cs1media import *
import math
def dist(c1, c2):
r1, g1, b1 = c1
r2, g2, b2 = c2
return math.sqrt((r1-r2)**2 + (g1-g2)**2 + (b1-b2)**2)
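# Quick check of dist() above: it is the Euclidean distance in RGB space,
# so, for example, dist((0, 0, 0), (3, 4, 0)) == 5.0 (illustrative values).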
def chroma(img, key, threshold):
w, h = img.size()
for y in range(h):
for x in range(w):
p = img.get(x, y)
if dist(p, key) < threshold:
img.set(x, y, Color.yellow)
statue = load_picture("photos/statue1.jpg")
chroma(statue, (41, 75, 146), 70)
statue.show()
| [
"math.sqrt"
] | [((100, 159), 'math.sqrt', 'math.sqrt', (['((r1 - r2) ** 2 + (g1 - g2) ** 2 + (b1 - b2) ** 2)'], {}), '((r1 - r2) ** 2 + (g1 - g2) ** 2 + (b1 - b2) ** 2)\n', (109, 159), False, 'import math\n')] |
import numpy as np
import scipy
import scipy.io
import pylab
import numpy
import glob
import pyfits
def mklc(t, nspot=200, incl=(scipy.pi)*5./12., amp=1., tau=30.5, p=10.0):
diffrot = 0.
''' This is a simplified version of the class-based routines in
spot_model.py. It generates a light curve for dark, point-like
spots with no limb-darkening.
Parameters:
nspot = desired number of spots present on star at any
one time
amp = desired light curve amplitude
tau = characteristic spot life-time
diffrot = fractional difference between equatorial and polar
rotation period
(unit of time is equatorial rotation period)'''
# print('Period = ', p)
dur = (max(t) - min(t))
# (crude estimate of) total number of spots needed during entire
# time-series
nspot_tot = int(nspot * dur / 2 / tau)
# uniform distribution of spot longitudes
lon = scipy.rand(nspot_tot) * 2 * scipy.pi
# distribution of spot latitudes uniform in sin(latitude)
lat = scipy.arcsin(scipy.rand(nspot_tot))
# spot rotation rate optionally depends on latitude
period = ((scipy.sin(lat) - 0.5) * diffrot + 1.0 ) * p
period0 = scipy.ones(nspot_tot) * p
# all spots have the same maximum area
# (crude estimate of) filling factor needed per spot
ff = amp / scipy.sqrt(nspot)
scale_fac = 1
amax = scipy.ones(nspot_tot) * ff * scale_fac
# all spots have the same evolution timescale
decay = scipy.ones(nspot_tot) * tau
# uniform distribution of spot peak times
# start well before and end well after time-series limits (to
# avoid edge effects)
extra = 3 * decay.max()
pk = scipy.rand(nspot_tot) * (dur + 2 * extra) - extra
# COMPUTE THE LIGHT CURVE
# print("Computing light curve...")
time = numpy.array(t - min(t))
area_tot = scipy.zeros_like(time)
dF_tot = scipy.zeros_like(time)
dF_tot0 = scipy.zeros_like(time)
# add up the contributions of individual spots
for i in range(nspot_tot):
# Spot area
if (pk[i] == 0) or (decay[i] == 0):
area = scipy.ones_like(time) * amax[i]
else:
area = amax[i] * \
scipy.exp(-(time - pk[i])**2 / 2. / decay[i]**2)
area_tot += area
# Fore-shortening
phase = 2 * scipy.pi * time / period[i] + lon[i]
phase0 = 2 * scipy.pi * time / period0[i] + lon[i]
mu = scipy.cos(incl) * scipy.sin(lat[i]) + \
scipy.sin(incl) * scipy.cos(lat[i]) * scipy.cos(phase)
mu0 = scipy.cos(incl) * scipy.sin(lat[i]) + \
scipy.sin(incl) * scipy.cos(lat[i]) * scipy.cos(phase0)
mu[mu < 0] = 0.0
mu0[mu0 < 0] = 0.0
# Flux
dF_tot -= area * mu
dF_tot0 -= area * mu0
amp_eff = dF_tot.max()-dF_tot.min()
nspot_eff = area_tot / scale_fac / ff
res0 = scipy.array([nspot_eff.mean(), ff, amp_eff])
res1 = scipy.zeros((4, len(time)))
res1[0,:] = time
res1[1,:] = area_tot
res1[2,:] = dF_tot
res1[3,:] = dF_tot0
# print('Used %d spots in total over %d rotation periods.' % (nspot_tot, dur))
# print('Mean filling factor of individual spots was %.4f.' % ff)
# print('Desired amplitude was %.4f, actual amplitude was %.4f.' \
# % (amp, amp_eff))
# print('Desired number of spots at any one time was %d.' % nspot)
return res0, res1
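# Illustrative call of mklc() (placeholder values; relies on the legacy
# scipy.rand/scipy.arcsin/scipy.pi aliases used in the function body above):
# t = numpy.linspace(0.0, 100.0, 5000)  # ~100 equatorial rotation periods
# res0, res1 = mklc(t, nspot=200, amp=0.01, tau=30.5, p=10.0)
# time, area_tot, dF, dF0 = res1  # res0 = [mean spot count, filling factor, amplitude]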
| [
"scipy.ones",
"scipy.zeros_like",
"scipy.sqrt",
"scipy.exp",
"scipy.ones_like",
"scipy.sin",
"scipy.cos",
"scipy.rand"
] | [((1870, 1892), 'scipy.zeros_like', 'scipy.zeros_like', (['time'], {}), '(time)\n', (1886, 1892), False, 'import scipy\n'), ((1906, 1928), 'scipy.zeros_like', 'scipy.zeros_like', (['time'], {}), '(time)\n', (1922, 1928), False, 'import scipy\n'), ((1943, 1965), 'scipy.zeros_like', 'scipy.zeros_like', (['time'], {}), '(time)\n', (1959, 1965), False, 'import scipy\n'), ((1055, 1076), 'scipy.rand', 'scipy.rand', (['nspot_tot'], {}), '(nspot_tot)\n', (1065, 1076), False, 'import scipy\n'), ((1208, 1229), 'scipy.ones', 'scipy.ones', (['nspot_tot'], {}), '(nspot_tot)\n', (1218, 1229), False, 'import scipy\n'), ((1350, 1367), 'scipy.sqrt', 'scipy.sqrt', (['nspot'], {}), '(nspot)\n', (1360, 1367), False, 'import scipy\n'), ((1494, 1515), 'scipy.ones', 'scipy.ones', (['nspot_tot'], {}), '(nspot_tot)\n', (1504, 1515), False, 'import scipy\n'), ((933, 954), 'scipy.rand', 'scipy.rand', (['nspot_tot'], {}), '(nspot_tot)\n', (943, 954), False, 'import scipy\n'), ((1397, 1418), 'scipy.ones', 'scipy.ones', (['nspot_tot'], {}), '(nspot_tot)\n', (1407, 1418), False, 'import scipy\n'), ((1698, 1719), 'scipy.rand', 'scipy.rand', (['nspot_tot'], {}), '(nspot_tot)\n', (1708, 1719), False, 'import scipy\n'), ((2132, 2153), 'scipy.ones_like', 'scipy.ones_like', (['time'], {}), '(time)\n', (2147, 2153), False, 'import scipy\n'), ((2225, 2278), 'scipy.exp', 'scipy.exp', (['(-(time - pk[i]) ** 2 / 2.0 / decay[i] ** 2)'], {}), '(-(time - pk[i]) ** 2 / 2.0 / decay[i] ** 2)\n', (2234, 2278), False, 'import scipy\n'), ((2455, 2470), 'scipy.cos', 'scipy.cos', (['incl'], {}), '(incl)\n', (2464, 2470), False, 'import scipy\n'), ((2473, 2490), 'scipy.sin', 'scipy.sin', (['lat[i]'], {}), '(lat[i])\n', (2482, 2490), False, 'import scipy\n'), ((2545, 2561), 'scipy.cos', 'scipy.cos', (['phase'], {}), '(phase)\n', (2554, 2561), False, 'import scipy\n'), ((2576, 2591), 'scipy.cos', 'scipy.cos', (['incl'], {}), '(incl)\n', (2585, 2591), False, 'import scipy\n'), ((2594, 2611), 'scipy.sin', 'scipy.sin', (['lat[i]'], {}), '(lat[i])\n', (2603, 2611), False, 'import scipy\n'), ((2666, 2683), 'scipy.cos', 'scipy.cos', (['phase0'], {}), '(phase0)\n', (2675, 2683), False, 'import scipy\n'), ((1150, 1164), 'scipy.sin', 'scipy.sin', (['lat'], {}), '(lat)\n', (1159, 1164), False, 'import scipy\n'), ((2507, 2522), 'scipy.sin', 'scipy.sin', (['incl'], {}), '(incl)\n', (2516, 2522), False, 'import scipy\n'), ((2525, 2542), 'scipy.cos', 'scipy.cos', (['lat[i]'], {}), '(lat[i])\n', (2534, 2542), False, 'import scipy\n'), ((2628, 2643), 'scipy.sin', 'scipy.sin', (['incl'], {}), '(incl)\n', (2637, 2643), False, 'import scipy\n'), ((2646, 2663), 'scipy.cos', 'scipy.cos', (['lat[i]'], {}), '(lat[i])\n', (2655, 2663), False, 'import scipy\n')] |
from collections import defaultdict
import json
import re
import time
from urllib.parse import urlparse
import uuid
import boto3
import boto3.exceptions
import botocore.exceptions
import markus
import redis.exceptions
import requests
import requests.exceptions
from sqlalchemy import select
import sqlalchemy.exc
from ichnaea.data import _map_content_enabled
from ichnaea.models import (
ApiKey,
BlueObservation,
BlueReport,
BlueShard,
CellObservation,
CellReport,
CellShard,
DataMap,
ExportConfig,
Report,
WifiObservation,
WifiReport,
WifiShard,
)
from ichnaea.models.content import encode_datamap_grid
from ichnaea import util
WHITESPACE = re.compile(r"\s", flags=re.UNICODE)
METRICS = markus.get_metrics()
class IncomingQueue(object):
"""
The incoming queue contains the data collected in the web application. It
is the single entrypoint from which all other data pipelines get their
data.
It distributes the data into the configured export queues, checks those
queues and, if they contain enough data or sufficiently old data, schedules
an async export task to process the data in each queue.
"""
def __init__(self, task):
self.task = task
def __call__(self, export_task):
redis_client = self.task.redis_client
data_queue = self.task.app.data_queues["update_incoming"]
data = data_queue.dequeue()
grouped = defaultdict(list)
for item in data:
grouped[(item["api_key"], item.get("source", "gnss"))].append(
{"api_key": item["api_key"], "report": item["report"]}
)
with self.task.db_session(commit=False) as session:
export_configs = ExportConfig.all(session)
with self.task.redis_pipeline() as pipe:
for (api_key, source), items in grouped.items():
for config in export_configs:
if config.allowed(api_key, source):
queue_key = config.queue_key(api_key, source)
queue = config.queue(queue_key, redis_client)
queue.enqueue(items, pipe=pipe)
for config in export_configs:
# Check all queues if they now contain enough data or
# old enough data to be ready for processing.
for queue_key in config.partitions(redis_client):
queue = config.queue(queue_key, redis_client)
if queue.ready():
export_task.delay(config.name, queue_key)
if data_queue.ready():
self.task.apply_countdown()
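# Illustrative shape of a single entry on the "update_incoming" data queue as
# consumed above (field names come from the dict accesses in __call__; the
# values and the nested report layout are placeholders):
EXAMPLE_INCOMING_ITEM = {
    "api_key": "test",
    "source": "gnss",  # __call__ falls back to "gnss" when the field is missing
    "report": {"position": {"latitude": 51.5, "longitude": 13.4}},
}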
class ReportExporter(object):
_retriable = (IOError,)
_retries = 3
_retry_wait = 1.0
def __init__(self, task, config, queue_key):
self.task = task
self.config = config
self.queue_key = queue_key
self.queue = config.queue(queue_key, task.redis_client)
self.stats_tags = ["key:" + self.config.name]
@staticmethod
def export(task, name, queue_key):
with task.db_session(commit=False) as session:
config = ExportConfig.get(session, name)
exporter_types = {
"dummy": DummyExporter,
"geosubmit": GeosubmitExporter,
"internal": InternalExporter,
"s3": S3Exporter,
}
exporter_type = exporter_types.get(config.schema)
if exporter_type is not None:
exporter_type(task, config, queue_key)()
def __call__(self):
queue_items = self.queue.dequeue()
if not queue_items:
return
success = False
for i in range(self._retries):
try:
with METRICS.timer("data.export.upload.timing", tags=self.stats_tags):
self.send(queue_items)
success = True
except self._retriable:
success = False
time.sleep(self._retry_wait * (i ** 2 + 1))
if success:
METRICS.incr("data.export.batch", tags=self.stats_tags)
break
if success and self.queue.ready():
self.task.apply_countdown(args=[self.config.name, self.queue_key])
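# Note on the retry loop above: with the class defaults (_retries=3,
# _retry_wait=1.0) a retriable failure sleeps _retry_wait * (i ** 2 + 1)
# seconds before the next attempt, i.e. 1.0s, 2.0s and finally 5.0s.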
def send(self, queue_items):
raise NotImplementedError()
class DummyExporter(ReportExporter):
def send(self, queue_items):
pass
class GeosubmitExporter(ReportExporter):
_retriable = (IOError, requests.exceptions.RequestException)
def send(self, queue_items):
# ignore metadata
reports = [item["report"] for item in queue_items]
headers = {
"Content-Encoding": "gzip",
"Content-Type": "application/json",
"User-Agent": "ichnaea",
}
response = requests.post(
self.config.url,
data=util.encode_gzip(
json.dumps({"items": reports}).encode(), compresslevel=5
),
headers=headers,
timeout=60.0,
)
# log upload_status and trigger exception for bad responses
# this causes the task to be re-tried
METRICS.incr(
"data.export.upload",
tags=self.stats_tags + ["status:%s" % response.status_code],
)
response.raise_for_status()
class S3Exporter(ReportExporter):
_retriable = (
IOError,
boto3.exceptions.Boto3Error,
botocore.exceptions.BotoCoreError,
)
def send(self, queue_items):
# ignore metadata
reports = [item["report"] for item in queue_items]
_, bucketname, path = urlparse(self.config.url)[:3]
# s3 key names start without a leading slash
path = path.lstrip("/")
if not path.endswith("/"):
path += "/"
year, month, day = util.utcnow().timetuple()[:3]
# strip away queue prefix again
parts = self.queue_key.split(":")
source = parts[1]
api_key = parts[2]
obj_name = path.format(
source=source, api_key=api_key, year=year, month=month, day=day
)
obj_name += uuid.uuid1().hex + ".json.gz"
try:
data = util.encode_gzip(
json.dumps({"items": reports}).encode(), compresslevel=7
)
s3 = boto3.resource("s3")
bucket = s3.Bucket(bucketname)
obj = bucket.Object(obj_name)
obj.put(Body=data, ContentEncoding="gzip", ContentType="application/json")
METRICS.incr(
"data.export.upload", tags=self.stats_tags + ["status:success"]
)
except Exception:
METRICS.incr(
"data.export.upload", tags=self.stats_tags + ["status:failure"]
)
raise
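# Illustrative object key produced by send() above, assuming a config.url such
# as "s3://backups/{source}/{api_key}/{year}/{month}/{day}/" (hypothetical
# template; the real format string comes from ExportConfig):
#   "gnss/test/2020/3/24/<uuid1-hex>.json.gz"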
class InternalTransform(object):
"""
This maps the geosubmit v2 schema used in view code and external
transfers (backup, forward to partners) to the internal submit v1
schema used in our own database models.
"""
# *_id maps a source section id to a target section id
# *_map maps fields inside the section from source to target id
# if the names are equal, a simple string can be specified instead
# of a two-tuple
position_id = ("position", None)
position_map = [
("latitude", "lat"),
("longitude", "lon"),
"accuracy",
"altitude",
("altitudeAccuracy", "altitude_accuracy"),
"heading",
"pressure",
"speed",
"source",
]
blue_id = ("bluetoothBeacons", "blue")
blue_map = [("macAddress", "mac"), "age", ("signalStrength", "signal")]
cell_id = ("cellTowers", "cell")
cell_map = [
("radioType", "radio"),
("mobileCountryCode", "mcc"),
("mobileNetworkCode", "mnc"),
("locationAreaCode", "lac"),
("cellId", "cid"),
"age",
"asu",
("primaryScramblingCode", "psc"),
"serving",
("signalStrength", "signal"),
("timingAdvance", "ta"),
]
wifi_id = ("wifiAccessPoints", "wifi")
wifi_map = [
("macAddress", "mac"),
"age",
"channel",
"frequency",
("radioType", "radio"),
("signalToNoiseRatio", "snr"),
("signalStrength", "signal"),
]
def _map_dict(self, item_source, field_map):
value = {}
for spec in field_map:
if isinstance(spec, tuple):
source, target = spec
else:
source = spec
target = spec
source_value = item_source.get(source)
if source_value is not None:
value[target] = source_value
return value
def _parse_dict(self, item, report, key_map, field_map):
value = {}
item_source = item.get(key_map[0])
if item_source:
value = self._map_dict(item_source, field_map)
if value:
if key_map[1] is None:
report.update(value)
else:
report[key_map[1]] = value
return value
def _parse_list(self, item, report, key_map, field_map):
values = []
for value_item in item.get(key_map[0], ()):
value = self._map_dict(value_item, field_map)
if value:
values.append(value)
if values:
report[key_map[1]] = values
return values
def __call__(self, item):
report = {}
self._parse_dict(item, report, self.position_id, self.position_map)
blues = self._parse_list(item, report, self.blue_id, self.blue_map)
cells = self._parse_list(item, report, self.cell_id, self.cell_map)
wifis = self._parse_list(item, report, self.wifi_id, self.wifi_map)
position = item.get("position") or {}
gps_age = position.get("age", 0)
timestamp = item.get("timestamp")
if timestamp:
# turn timestamp into GPS timestamp
report["timestamp"] = timestamp - gps_age
if gps_age:
# Normalize age fields to be relative to GPS time
for type_ in ("blue", "cell", "wifi"):
for record in report.get(type_, ()):
record["age"] = record.get("age", 0) - gps_age
if blues or cells or wifis:
return report
return {}
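# Worked example of the transform above (illustrative values only):
# InternalTransform()({
#     "position": {"latitude": 51.5, "longitude": 13.4, "accuracy": 10.0},
#     "wifiAccessPoints": [{"macAddress": "ab:cd:ef:12:34:56", "signalStrength": -60}],
# })
# returns:
# {"lat": 51.5, "lon": 13.4, "accuracy": 10.0,
#  "wifi": [{"mac": "ab:cd:ef:12:34:56", "signal": -60}]}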
class InternalExporter(ReportExporter):
_retriable = (IOError, redis.exceptions.RedisError, sqlalchemy.exc.InternalError)
transform = InternalTransform()
def send(self, queue_items):
api_keys = set()
api_keys_known = set()
metrics = {}
items = []
for item in queue_items:
# preprocess items and extract set of API keys
item["report"] = self.transform(item["report"])
if item["report"]:
items.append(item)
api_keys.add(item["api_key"])
for api_key in api_keys:
metrics[api_key] = {}
for type_ in ("report", "blue", "cell", "wifi"):
for action in ("drop", "upload"):
metrics[api_key]["%s_%s" % (type_, action)] = 0
with self.task.db_session(commit=False) as session:
# limit database session to get API keys
keys = [key for key in api_keys if key]
if keys:
columns = ApiKey.__table__.c
rows = session.execute(
select([columns.valid_key]).where(columns.valid_key.in_(keys))
).fetchall()
for row in rows:
api_keys_known.add(row.valid_key)
positions = []
observations = {"blue": [], "cell": [], "wifi": []}
for item in items:
api_key = item["api_key"]
report = item["report"]
obs, malformed_obs = self.process_report(report)
any_data = False
for name in ("blue", "cell", "wifi"):
if obs.get(name):
observations[name].extend(obs[name])
metrics[api_key][name + "_upload"] += len(obs[name])
any_data = True
metrics[api_key][name + "_drop"] += malformed_obs.get(name, 0)
metrics[api_key]["report_upload"] += 1
if any_data:
positions.append((report["lat"], report["lon"]))
else:
metrics[api_key]["report_drop"] += 1
with self.task.redis_pipeline() as pipe:
self.queue_observations(pipe, observations)
if _map_content_enabled and positions:
self.process_datamap(pipe, positions)
self.emit_metrics(api_keys_known, metrics)
def queue_observations(self, pipe, observations):
for datatype, shard_model, shard_key, queue_prefix in (
("blue", BlueShard, "mac", "update_blue_"),
("cell", CellShard, "cellid", "update_cell_"),
("wifi", WifiShard, "mac", "update_wifi_"),
):
queued_obs = defaultdict(list)
for obs in observations[datatype]:
# group by sharded queue
shard_id = shard_model.shard_id(getattr(obs, shard_key))
queue_id = queue_prefix + shard_id
queued_obs[queue_id].append(obs.to_json())
for queue_id, values in queued_obs.items():
# enqueue values for each queue
queue = self.task.app.data_queues[queue_id]
queue.enqueue(values, pipe=pipe)
def emit_metrics(self, api_keys_known, metrics):
for api_key, key_metrics in metrics.items():
api_tag = []
if api_key and api_key in api_keys_known:
api_tag = ["key:%s" % api_key]
for name, count in key_metrics.items():
if not count:
continue
type_, action = name.split("_")
if type_ == "report":
suffix = "report"
tags = api_tag
else:
suffix = "observation"
tags = ["type:%s" % type_] + api_tag
METRICS.incr("data.%s.%s" % (suffix, action), count, tags=tags)
def process_report(self, data):
report = Report.create(**data)
if report is None:
return ({}, {})
malformed = {}
observations = {}
for name, report_cls, obs_cls in (
("blue", BlueReport, BlueObservation),
("cell", CellReport, CellObservation),
("wifi", WifiReport, WifiObservation),
):
malformed[name] = 0
observations[name] = {}
if data.get(name):
for item in data[name]:
# validate the blue/cell/wifi specific fields
item_report = report_cls.create(**item)
if item_report is None:
malformed[name] += 1
continue
# combine general and specific report data into one
item_obs = obs_cls.combine(report, item_report)
item_key = item_obs.unique_key
# if we have better data for the same key, ignore
existing = observations[name].get(item_key)
if existing is not None and existing.better(item_obs):
continue
observations[name][item_key] = item_obs
obs = {
"blue": observations["blue"].values(),
"cell": observations["cell"].values(),
"wifi": observations["wifi"].values(),
}
return (obs, malformed)
def process_datamap(self, pipe, positions):
grids = set()
for lat, lon in positions:
if lat is not None and lon is not None:
grids.add(DataMap.scale(lat, lon))
shards = defaultdict(set)
for lat, lon in grids:
shards[DataMap.shard_id(lat, lon)].add(encode_datamap_grid(lat, lon))
for shard_id, values in shards.items():
queue = self.task.app.data_queues["update_datamap_" + shard_id]
queue.enqueue(list(values), pipe=pipe)
| [
"ichnaea.models.ExportConfig.get",
"ichnaea.models.DataMap.scale",
"ichnaea.models.ExportConfig.all",
"urllib.parse.urlparse",
"ichnaea.models.content.encode_datamap_grid",
"re.compile",
"ichnaea.models.DataMap.shard_id",
"ichnaea.models.Report.create",
"json.dumps",
"time.sleep",
"ichnaea.util.utcnow",
"uuid.uuid1",
"boto3.resource",
"collections.defaultdict",
"sqlalchemy.select",
"markus.get_metrics"
] | [((698, 733), 're.compile', 're.compile', (['"""\\\\s"""'], {'flags': 're.UNICODE'}), "('\\\\s', flags=re.UNICODE)\n", (708, 733), False, 'import re\n'), ((745, 765), 'markus.get_metrics', 'markus.get_metrics', ([], {}), '()\n', (763, 765), False, 'import markus\n'), ((1442, 1459), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1453, 1459), False, 'from collections import defaultdict\n'), ((14322, 14343), 'ichnaea.models.Report.create', 'Report.create', ([], {}), '(**data)\n', (14335, 14343), False, 'from ichnaea.models import ApiKey, BlueObservation, BlueReport, BlueShard, CellObservation, CellReport, CellShard, DataMap, ExportConfig, Report, WifiObservation, WifiReport, WifiShard\n'), ((15980, 15996), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (15991, 15996), False, 'from collections import defaultdict\n'), ((1736, 1761), 'ichnaea.models.ExportConfig.all', 'ExportConfig.all', (['session'], {}), '(session)\n', (1752, 1761), False, 'from ichnaea.models import ApiKey, BlueObservation, BlueReport, BlueShard, CellObservation, CellReport, CellShard, DataMap, ExportConfig, Report, WifiObservation, WifiReport, WifiShard\n'), ((3117, 3148), 'ichnaea.models.ExportConfig.get', 'ExportConfig.get', (['session', 'name'], {}), '(session, name)\n', (3133, 3148), False, 'from ichnaea.models import ApiKey, BlueObservation, BlueReport, BlueShard, CellObservation, CellReport, CellShard, DataMap, ExportConfig, Report, WifiObservation, WifiReport, WifiShard\n'), ((5606, 5631), 'urllib.parse.urlparse', 'urlparse', (['self.config.url'], {}), '(self.config.url)\n', (5614, 5631), False, 'from urllib.parse import urlparse\n'), ((6299, 6319), 'boto3.resource', 'boto3.resource', (['"""s3"""'], {}), "('s3')\n", (6313, 6319), False, 'import boto3\n'), ((13057, 13074), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (13068, 13074), False, 'from collections import defaultdict\n'), ((6113, 6125), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (6123, 6125), False, 'import uuid\n'), ((16079, 16108), 'ichnaea.models.content.encode_datamap_grid', 'encode_datamap_grid', (['lat', 'lon'], {}), '(lat, lon)\n', (16098, 16108), False, 'from ichnaea.models.content import encode_datamap_grid\n'), ((3930, 3973), 'time.sleep', 'time.sleep', (['(self._retry_wait * (i ** 2 + 1))'], {}), '(self._retry_wait * (i ** 2 + 1))\n', (3940, 3973), False, 'import time\n'), ((5808, 5821), 'ichnaea.util.utcnow', 'util.utcnow', ([], {}), '()\n', (5819, 5821), False, 'from ichnaea import util\n'), ((15937, 15960), 'ichnaea.models.DataMap.scale', 'DataMap.scale', (['lat', 'lon'], {}), '(lat, lon)\n', (15950, 15960), False, 'from ichnaea.models import ApiKey, BlueObservation, BlueReport, BlueShard, CellObservation, CellReport, CellShard, DataMap, ExportConfig, Report, WifiObservation, WifiReport, WifiShard\n'), ((6210, 6240), 'json.dumps', 'json.dumps', (["{'items': reports}"], {}), "({'items': reports})\n", (6220, 6240), False, 'import json\n'), ((16047, 16073), 'ichnaea.models.DataMap.shard_id', 'DataMap.shard_id', (['lat', 'lon'], {}), '(lat, lon)\n', (16063, 16073), False, 'from ichnaea.models import ApiKey, BlueObservation, BlueReport, BlueShard, CellObservation, CellReport, CellShard, DataMap, ExportConfig, Report, WifiObservation, WifiReport, WifiShard\n'), ((4870, 4900), 'json.dumps', 'json.dumps', (["{'items': reports}"], {}), "({'items': reports})\n", (4880, 4900), False, 'import json\n'), ((11467, 11494), 'sqlalchemy.select', 'select', (['[columns.valid_key]'], {}), 
'([columns.valid_key])\n', (11473, 11494), False, 'from sqlalchemy import select\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the fseventsd record event formatter."""
from __future__ import unicode_literals
import unittest
from plaso.formatters import fseventsd
from tests.formatters import test_lib
class FseventsdFormatterTest(test_lib.EventFormatterTestCase):
"""Tests for the fseventsd record event formatter."""
def testInitialization(self):
"""Tests the initialization."""
event_formatter = fseventsd.FSEventsdEventFormatter()
self.assertIsNotNone(event_formatter)
def testGetFormatStringAttributeNames(self):
"""Tests the GetFormatStringAttributeNames function."""
event_formatter = fseventsd.FSEventsdEventFormatter()
expected_attribute_names = [
u'event_identifier', u'flag_values', u'hex_flags', u'path']
self._TestGetFormatStringAttributeNames(
event_formatter, expected_attribute_names)
# TODO: add test for GetSources.
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"plaso.formatters.fseventsd.FSEventsdEventFormatter"
] | [((959, 974), 'unittest.main', 'unittest.main', ([], {}), '()\n', (972, 974), False, 'import unittest\n'), ((449, 484), 'plaso.formatters.fseventsd.FSEventsdEventFormatter', 'fseventsd.FSEventsdEventFormatter', ([], {}), '()\n', (482, 484), False, 'from plaso.formatters import fseventsd\n'), ((657, 692), 'plaso.formatters.fseventsd.FSEventsdEventFormatter', 'fseventsd.FSEventsdEventFormatter', ([], {}), '()\n', (690, 692), False, 'from plaso.formatters import fseventsd\n')] |
from keras.callbacks import ModelCheckpoint,Callback,LearningRateScheduler,TensorBoard
from keras.models import load_model
import random
import numpy as np
from scipy import misc
import gc
from keras.optimizers import Adam
from imageio import imread
from datetime import datetime
import os
import json
import models
from utils import DataLoader, LrPolicy
from config import Config
import argparse
def get_parser():
parser = argparse.ArgumentParser('train')
parser.add_argument('--configPath', '-c', required=True)
return parser
def train(args=None):
parser = get_parser()
args = parser.parse_args(args)
conf=Config()
conf.load(args.configPath)
time=datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
trainString="%s_%s_%s_%s" % (conf.model,conf.optimizer,str(conf.lr),time)
os.makedirs(conf.logPath+"/"+trainString)
conf.save(conf.logPath+"/"+trainString+'/config.json')
print('Compiling model...')
model_checkpoint = ModelCheckpoint(conf.logPath+"/"+trainString+'/Checkpoint-{epoch:02d}-{val_loss:.2f}.hdf5', monitor='val_loss', save_best_only=False, save_weights_only=True)
change_lr = LearningRateScheduler(LrPolicy(conf.lr).stepDecay)
tbCallBack=TensorBoard(log_dir=conf.logPath+"/"+trainString+'/logs', histogram_freq=0, write_graph=True, write_images=True)
model=models.modelCreator(conf.model,conf.inputShape,conf.classes,conf.pretrainedModel)
model.compile(optimizer = conf.optimizer, loss = conf.loss)
data = [conf.trainDataPath+"/"+f for f in os.listdir(conf.trainDataPath) if '.jpg' in f]
random.shuffle(data)
thr=int(len(data)*conf.validationSplit)
trainData=data[thr:]
valData=data[:thr]
trainDataLoader=DataLoader(conf.batchSize,conf.inputShape,trainData,conf.guaMaxValue)
validationDataLoader=DataLoader(conf.batchSize,conf.inputShape,valData,conf.guaMaxValue)
print('Fitting model...')
model.fit_generator(generator=trainDataLoader.generator(),
validation_data=validationDataLoader.generator(),
steps_per_epoch=len(trainData)//conf.batchSize,
validation_steps=len(valData)//conf.batchSize,
epochs=conf.epoches,
verbose=1,
initial_epoch=0,
callbacks = [model_checkpoint, change_lr,tbCallBack]
)
if __name__ == "__main__":
train()
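# Illustrative config.json consumed by Config().load() above. The keys are
# inferred from the attribute accesses in train(); all values are placeholders:
# {"model": "unet", "optimizer": "adam", "lr": 0.001, "loss": "mse",
#  "logPath": "logs", "trainDataPath": "data/train", "validationSplit": 0.1,
#  "batchSize": 8, "inputShape": [256, 256, 1], "classes": 1,
#  "pretrainedModel": null, "guaMaxValue": 255, "epoches": 100}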
| [
"os.listdir",
"random.shuffle",
"os.makedirs",
"keras.callbacks.ModelCheckpoint",
"argparse.ArgumentParser",
"config.Config",
"keras.callbacks.TensorBoard",
"models.modelCreator",
"datetime.datetime.now",
"utils.LrPolicy",
"utils.DataLoader"
] | [((434, 466), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""train"""'], {}), "('train')\n", (457, 466), False, 'import argparse\n'), ((639, 647), 'config.Config', 'Config', ([], {}), '()\n', (645, 647), False, 'from config import Config\n'), ((815, 860), 'os.makedirs', 'os.makedirs', (["(conf.logPath + '/' + trainString)"], {}), "(conf.logPath + '/' + trainString)\n", (826, 860), False, 'import os\n'), ((971, 1142), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (["(conf.logPath + '/' + trainString +\n '/Checkpoint-{epoch:02d}-{val_loss:.2f}.hdf5')"], {'monitor': '"""val_loss"""', 'save_best_only': '(False)', 'save_weights_only': '(True)'}), "(conf.logPath + '/' + trainString +\n '/Checkpoint-{epoch:02d}-{val_loss:.2f}.hdf5', monitor='val_loss',\n save_best_only=False, save_weights_only=True)\n", (986, 1142), False, 'from keras.callbacks import ModelCheckpoint, Callback, LearningRateScheduler, TensorBoard\n'), ((1211, 1333), 'keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': "(conf.logPath + '/' + trainString + '/logs')", 'histogram_freq': '(0)', 'write_graph': '(True)', 'write_images': '(True)'}), "(log_dir=conf.logPath + '/' + trainString + '/logs',\n histogram_freq=0, write_graph=True, write_images=True)\n", (1222, 1333), False, 'from keras.callbacks import ModelCheckpoint, Callback, LearningRateScheduler, TensorBoard\n'), ((1335, 1424), 'models.modelCreator', 'models.modelCreator', (['conf.model', 'conf.inputShape', 'conf.classes', 'conf.pretrainedModel'], {}), '(conf.model, conf.inputShape, conf.classes, conf.\n pretrainedModel)\n', (1354, 1424), False, 'import models\n'), ((1578, 1598), 'random.shuffle', 'random.shuffle', (['data'], {}), '(data)\n', (1592, 1598), False, 'import random\n'), ((1711, 1783), 'utils.DataLoader', 'DataLoader', (['conf.batchSize', 'conf.inputShape', 'trainData', 'conf.guaMaxValue'], {}), '(conf.batchSize, conf.inputShape, trainData, conf.guaMaxValue)\n', (1721, 1783), False, 'from utils import DataLoader, LrPolicy\n'), ((1806, 1876), 'utils.DataLoader', 'DataLoader', (['conf.batchSize', 'conf.inputShape', 'valData', 'conf.guaMaxValue'], {}), '(conf.batchSize, conf.inputShape, valData, conf.guaMaxValue)\n', (1816, 1876), False, 'from utils import DataLoader, LrPolicy\n'), ((688, 702), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (700, 702), False, 'from datetime import datetime\n'), ((1167, 1184), 'utils.LrPolicy', 'LrPolicy', (['conf.lr'], {}), '(conf.lr)\n', (1175, 1184), False, 'from utils import DataLoader, LrPolicy\n'), ((1527, 1557), 'os.listdir', 'os.listdir', (['conf.trainDataPath'], {}), '(conf.trainDataPath)\n', (1537, 1557), False, 'import os\n')] |
import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'shape': [(3, 4), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestFlatten(unittest.TestCase):
dtype = numpy.float32
def setUp(self):
self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.g_shape = (numpy.prod((1,) + self.shape),)
self.g = numpy.random.uniform(-1, 1, self.g_shape).astype(self.dtype)
def check_forward(self, x_data):
x = chainer.Variable(x_data)
y = functions.flatten(x)
self.assertEqual(y.shape, self.g_shape)
self.assertEqual(y.dtype, self.dtype)
testing.assert_allclose(self.x.flatten(), y.data)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, g_data):
gradient_check.check_backward(
functions.Flatten(), x_data, g_data, dtype=numpy.float64)
def test_backward_cpu(self):
self.check_backward(self.x, self.g)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.g))
testing.run_module(__name__, __file__)
| [
"numpy.prod",
"chainer.Variable",
"chainer.testing.run_module",
"chainer.testing.product",
"chainer.functions.Flatten",
"chainer.functions.flatten",
"numpy.random.uniform",
"chainer.cuda.to_gpu"
] | [((1406, 1444), 'chainer.testing.run_module', 'testing.run_module', (['__name__', '__file__'], {}), '(__name__, __file__)\n', (1424, 1444), False, 'from chainer import testing\n'), ((678, 702), 'chainer.Variable', 'chainer.Variable', (['x_data'], {}), '(x_data)\n', (694, 702), False, 'import chainer\n'), ((715, 735), 'chainer.functions.flatten', 'functions.flatten', (['x'], {}), '(x)\n', (732, 735), False, 'from chainer import functions\n'), ((222, 323), 'chainer.testing.product', 'testing.product', (["{'shape': [(3, 4), ()], 'dtype': [numpy.float16, numpy.float32, numpy.float64]}"], {}), "({'shape': [(3, 4), ()], 'dtype': [numpy.float16, numpy.\n float32, numpy.float64]})\n", (237, 323), False, 'from chainer import testing\n'), ((518, 547), 'numpy.prod', 'numpy.prod', (['((1,) + self.shape)'], {}), '((1,) + self.shape)\n', (528, 547), False, 'import numpy\n'), ((1031, 1050), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (1042, 1050), False, 'from chainer import cuda\n'), ((1150, 1169), 'chainer.functions.Flatten', 'functions.Flatten', ([], {}), '()\n', (1167, 1169), False, 'from chainer import functions\n'), ((1362, 1381), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (1373, 1381), False, 'from chainer import cuda\n'), ((1383, 1402), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.g'], {}), '(self.g)\n', (1394, 1402), False, 'from chainer import cuda\n'), ((435, 474), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', 'self.shape'], {}), '(-1, 1, self.shape)\n', (455, 474), False, 'import numpy\n'), ((567, 608), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', 'self.g_shape'], {}), '(-1, 1, self.g_shape)\n', (587, 608), False, 'import numpy\n')] |
# Generated by Django 3.0.3 on 2020-03-24 09:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('exercises', '0018_photo_file'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=80)),
('description', models.TextField(blank=True)),
('exercises', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='categories', to='exercises.Exercise')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Photo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('image_url', models.URLField()),
('image_caption', models.CharField(blank=True, max_length=80)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='photos', to='categories.Category')),
],
options={
'abstract': False,
},
),
]
| [
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.models.URLField",
"django.db.models.CharField"
] | [((379, 472), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (395, 472), False, 'from django.db import migrations, models\n'), ((499, 538), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (519, 538), False, 'from django.db import migrations, models\n'), ((569, 604), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (589, 604), False, 'from django.db import migrations, models\n'), ((632, 663), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(80)'}), '(max_length=80)\n', (648, 663), False, 'from django.db import migrations, models\n'), ((698, 726), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (714, 726), False, 'from django.db import migrations, models\n'), ((759, 889), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""categories"""', 'to': '"""exercises.Exercise"""'}), "(blank=True, on_delete=django.db.models.deletion.CASCADE,\n related_name='categories', to='exercises.Exercise')\n", (776, 889), False, 'from django.db import migrations, models\n'), ((1088, 1181), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1104, 1181), False, 'from django.db import migrations, models\n'), ((1208, 1247), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1228, 1247), False, 'from django.db import migrations, models\n'), ((1278, 1313), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (1298, 1313), False, 'from django.db import migrations, models\n'), ((1346, 1363), 'django.db.models.URLField', 'models.URLField', ([], {}), '()\n', (1361, 1363), False, 'from django.db import migrations, models\n'), ((1400, 1443), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(80)'}), '(blank=True, max_length=80)\n', (1416, 1443), False, 'from django.db import migrations, models\n'), ((1475, 1591), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""photos"""', 'to': '"""categories.Category"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='photos', to='categories.Category')\n", (1492, 1591), False, 'from django.db import migrations, models\n')] |
from dagster import check
from .house import Lakehouse
from .table import create_lakehouse_table_def
class SnowflakeLakehouse(Lakehouse):
def __init__(self):
pass
def hydrate(self, _context, _table_type, _table_metadata, table_handle, _dest_metadata):
return None
def materialize(self, context, table_type, table_metadata, value):
return None, None
def snowflake_table(
name=None,
input_tables=None,
other_input_defs=None,
tags=None,
required_resource_keys=None,
description=None,
):
tags = check.opt_dict_param(tags, 'tags')
tags['lakehouse_type'] = 'snowflake_table'
tags['kind'] = 'snowflake'
required_resource_keys = check.opt_set_param(required_resource_keys, 'required_resource_keys')
required_resource_keys.add('snowflake')
if callable(name):
fn = name
return create_lakehouse_table_def(
name=fn.__name__,
lakehouse_fn=fn,
input_tables=[],
required_resource_keys=required_resource_keys,
)
def _wrap(fn):
return create_lakehouse_table_def(
name=name if name is not None else fn.__name__,
lakehouse_fn=fn,
input_tables=input_tables,
other_input_defs=other_input_defs,
tags=tags,
description=description,
required_resource_keys=required_resource_keys,
)
return _wrap
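# Usage sketch for the decorator above (table names, bodies and inputs are
# hypothetical):
# @snowflake_table
# def raw_events(_context):
#     ...
#
# @snowflake_table(name='clean_events', input_tables=[raw_events])
# def clean_events(_context, raw_events):
#     ...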
| [
"dagster.check.opt_dict_param",
"dagster.check.opt_set_param"
] | [((562, 596), 'dagster.check.opt_dict_param', 'check.opt_dict_param', (['tags', '"""tags"""'], {}), "(tags, 'tags')\n", (582, 596), False, 'from dagster import check\n'), ((705, 774), 'dagster.check.opt_set_param', 'check.opt_set_param', (['required_resource_keys', '"""required_resource_keys"""'], {}), "(required_resource_keys, 'required_resource_keys')\n", (724, 774), False, 'from dagster import check\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018 Brno University of Technology FIT
# Author: <NAME> <<EMAIL>>
# All Rights Reserved
import os
import logging
import pickle
import multiprocessing
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from vbdiar.features.segments import get_frames_from_time
from vbdiar.embeddings.embedding import extract_embeddings
from vbdiar.utils import mkdir_p
from vbdiar.utils.utils import Utils
logger = logging.getLogger(__name__)
def process_files(fns, speakers_dict, features_extractor, embedding_extractor,
audio_dir, wav_suffix, in_rttm_dir, rttm_suffix, min_length, n_jobs=1):
"""
Args:
fns (list): paths of the files to process
speakers_dict (dict): dictionary containing all embeddings across speakers
features_extractor (Any): object for feature extraction
embedding_extractor (Any): object for extracting embeddings
audio_dir (string_types): path to audio directory
wav_suffix (string_types): suffix of wav files
in_rttm_dir (string_types): path to directory with rttm files
rttm_suffix (string_types): suffix of rttm files
min_length (float): minimal length for extracting embeddings
n_jobs (int): number of parallel jobs
Returns:
list: updated speakers dictionary for each processed file
"""
kwargs = dict(speakers_dict=speakers_dict, features_extractor=features_extractor,
embedding_extractor=embedding_extractor, audio_dir=audio_dir, wav_suffix=wav_suffix,
in_rttm_dir=in_rttm_dir, rttm_suffix=rttm_suffix, min_length=min_length)
if n_jobs == 1:
ret = _process_files((fns, kwargs))
else:
pool = multiprocessing.Pool(n_jobs)
ret = pool.map(_process_files, ((part, kwargs) for part in Utils.partition(fns, n_jobs)))
return ret
def _process_files(dargs):
"""
Args:
dargs (tuple): pair of (list of file names, dict of keyword arguments for process_file)
Returns:
list: result of process_file for each file name
"""
fns, kwargs = dargs
ret = []
for fn in fns:
ret.append(process_file(file_name=fn, **kwargs))
return ret
def process_file(file_name, speakers_dict, features_extractor, embedding_extractor,
audio_dir, wav_suffix, in_rttm_dir, rttm_suffix, min_length):
""" Extract embeddings for all defined speakers.
Args:
file_name (string_types): path to input audio file
speakers_dict (dict): dictionary containing all embedding across speakers
features_extractor (Any):
embedding_extractor (Any):
audio_dir (string_types):
wav_suffix (string_types):
in_rttm_dir (string_types):
rttm_suffix (string_types):
min_length (float):
Returns:
dict: updated dictionary with speakers
"""
logger.info('Processing file `{}`.'.format(file_name.split()[0]))
# extract features from whole audio
features = features_extractor.audio2features(os.path.join(audio_dir, '{}{}'.format(file_name, wav_suffix)))
# process utterances of the speakers
features_dict = {}
with open(f'{os.path.join(in_rttm_dir, file_name)}{rttm_suffix}') as f:
for line in f:
start_time, dur = int(float(line.split()[3]) * 1000), int(float(line.split()[4]) * 1000)
speaker = line.split()[7]
if dur > min_length:
end_time = start_time + dur
start, end = get_frames_from_time(int(start_time)), get_frames_from_time(int(end_time))
if speaker not in features_dict:
features_dict[speaker] = {}
assert 0 <= start < end, \
f'Incorrect timing for extracting features, start: {start}, size: {features.shape[0]}, end: {end}.'
if end >= features.shape[0]:
end = features.shape[0] - 1
features_dict[speaker][(start_time, end_time)] = features[start:end]
for speaker in features_dict:
embedding_set = extract_embeddings(features_dict[speaker], embedding_extractor)
embeddings_long = embedding_set.get_all_embeddings()
if speaker not in speakers_dict.keys():
speakers_dict[speaker] = embeddings_long
else:
speakers_dict[speaker] = np.concatenate((speakers_dict[speaker], embeddings_long), axis=0)
return speakers_dict
class Normalization(object):
""" Speaker normalization S-Norm. """
embeddings = None
in_emb_dir = None
def __init__(self, norm_list, audio_dir=None, in_rttm_dir=None, in_emb_dir=None,
out_emb_dir=None, min_length=None, features_extractor=None, embedding_extractor=None,
plda=None, wav_suffix='.wav', rttm_suffix='.rttm', n_jobs=1):
""" Initialize normalization object.
Args:
norm_list (string_types): path to normalization list
audio_dir (string_types|None): path to audio directory
in_rttm_dir (string_types|None): path to directory with rttm files
in_emb_dir (str|None): path to directory with i-vectors
out_emb_dir (str|None): path to directory for storing embeddings
min_length (int): minimal length for extracting embeddings
features_extractor (Any): object for feature extraction
embedding_extractor (Any): object for extracting embedding
plda (PLDA|None): plda model object
wav_suffix (string_types): suffix of wav files
rttm_suffix (string_types): suffix of rttm files
"""
if audio_dir:
self.audio_dir = os.path.abspath(audio_dir)
self.norm_list = norm_list
if in_rttm_dir:
self.in_rttm_dir = os.path.abspath(in_rttm_dir)
else:
raise ValueError('It is required to have input rttm files for normalization.')
self.features_extractor = features_extractor
self.embedding_extractor = embedding_extractor
self.plda = plda
self.wav_suffix = wav_suffix
self.rttm_suffix = rttm_suffix
if in_emb_dir:
self.in_emb_dir = os.path.abspath(in_emb_dir)
if out_emb_dir:
self.out_emb_dir = os.path.abspath(out_emb_dir)
self.min_length = min_length
self.n_jobs = n_jobs
if self.in_emb_dir is None:
self.embeddings = self.extract_embeddings()
else:
self.embeddings = self.load_embeddings()
self.mean = np.mean(self.embeddings, axis=0)
def __iter__(self):
current = 0
while current < len(self.embeddings):
yield self.embeddings[current]
current += 1
def __getitem__(self, key):
return self.embeddings[key]
def __setitem__(self, key, value):
self.embeddings[key] = value
def __len__(self):
return len(self.embeddings)
def extract_embeddings(self):
""" Extract normalization embeddings using averaging.
Returns:
np.array: mean normalization embedding per speaker
"""
speakers_dict, fns = {}, []
with open(self.norm_list) as f:
for line in f:
if len(line.split()) > 1: # number of speakers is defined
line = line.split()[0]
else:
line = line.replace(os.linesep, '')
fns.append(line)
speakers_dict = process_files(fns, speakers_dict=speakers_dict, features_extractor=self.features_extractor,
embedding_extractor=self.embedding_extractor, audio_dir=self.audio_dir,
wav_suffix=self.wav_suffix, in_rttm_dir=self.in_rttm_dir,
rttm_suffix=self.rttm_suffix, min_length=self.min_length, n_jobs=self.n_jobs)
assert len(speakers_dict) == len(fns)
# all are the same
merged_speakers_dict = speakers_dict[0]
if self.out_emb_dir:
for speaker in merged_speakers_dict:
out_path = os.path.join(self.out_emb_dir, f'{speaker}.pkl')
mkdir_p(os.path.dirname(out_path))
with open(out_path, 'wb') as f:
pickle.dump(merged_speakers_dict[speaker], f, pickle.HIGHEST_PROTOCOL)
for speaker in merged_speakers_dict:
merged_speakers_dict[speaker] = np.mean(merged_speakers_dict[speaker], axis=0)
return np.array(list(merged_speakers_dict.values()))
def load_embeddings(self):
""" Load normalization embeddings from pickle files.
Returns:
np.array: embeddings per speaker
"""
embeddings, speakers = [], set()
with open(self.norm_list) as f:
for file_name in f:
if len(file_name.split()) > 1: # number of speakers is defined
file_name = file_name.split()[0]
else:
file_name = file_name.replace(os.linesep, '')
with open('{}{}'.format(os.path.join(self.in_rttm_dir, file_name), self.rttm_suffix)) as fp:
for line in fp:
speakers.add(line.split()[7])
logger.info('Loading pickled normalization embeddings from `{}`.'.format(self.in_emb_dir))
for speaker in speakers:
embedding_path = os.path.join(self.in_emb_dir, '{}.pkl'.format(speaker))
if os.path.isfile(embedding_path):
logger.info('Loading normalization pickle file `{}`.'.format(speaker))
with open(embedding_path, 'rb') as f:
# append mean from speaker's embeddings
speaker_embeddings = pickle.load(f)
embeddings.append(np.mean(speaker_embeddings, axis=0))
else:
logger.warning('No pickle file found for `{}` in `{}`.'.format(speaker, self.in_emb_dir))
return np.array(embeddings)
def s_norm(self, test, enroll):
""" Run speaker normalization (S-Norm) on cached embeddings.
Args:
test (np.array): test embedding
enroll (np.array): enroll embedding
Returns:
float: hypothesis
"""
if self.plda:
a = self.plda.score(test, self.embeddings).T
b = self.plda.score(enroll, self.embeddings).T
c = self.plda.score(enroll, test).T
else:
a = cosine_similarity(test, self.embeddings).T
b = cosine_similarity(enroll, self.embeddings).T
c = cosine_similarity(enroll, test).T
scores = []
for ii in range(test.shape[0]):
test_scores = []
for jj in range(enroll.shape[0]):
test_mean, test_std = np.mean(a.T[ii]), np.std(a.T[ii])
enroll_mean, enroll_std = np.mean(b.T[jj]), np.std(b.T[jj])
s = c[ii][jj]
test_scores.append((((s - test_mean) / test_std + (s - enroll_mean) / enroll_std) / 2))
scores.append(test_scores)
return np.array(scores)
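# Reference formula implemented by s_norm() above: for a raw score s = c[ii][jj],
# cohort scores a.T[ii] (test vs. normalization set) and b.T[jj] (enroll vs.
# normalization set), the normalized score is
#   0.5 * ((s - mean(a.T[ii])) / std(a.T[ii]) + (s - mean(b.T[jj])) / std(b.T[jj]))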
| [
"logging.getLogger",
"numpy.mean",
"pickle.dump",
"sklearn.metrics.pairwise.cosine_similarity",
"vbdiar.embeddings.embedding.extract_embeddings",
"numpy.std",
"os.path.join",
"pickle.load",
"os.path.isfile",
"numpy.array",
"os.path.dirname",
"multiprocessing.Pool",
"numpy.concatenate",
"vbdiar.utils.utils.Utils.partition",
"os.path.abspath"
] | [((488, 515), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (505, 515), False, 'import logging\n'), ((1308, 1336), 'multiprocessing.Pool', 'multiprocessing.Pool', (['n_jobs'], {}), '(n_jobs)\n', (1328, 1336), False, 'import multiprocessing\n'), ((3560, 3623), 'vbdiar.embeddings.embedding.extract_embeddings', 'extract_embeddings', (['features_dict[speaker]', 'embedding_extractor'], {}), '(features_dict[speaker], embedding_extractor)\n', (3578, 3623), False, 'from vbdiar.embeddings.embedding import extract_embeddings\n'), ((6040, 6072), 'numpy.mean', 'np.mean', (['self.embeddings'], {'axis': '(0)'}), '(self.embeddings, axis=0)\n', (6047, 6072), True, 'import numpy as np\n'), ((9540, 9560), 'numpy.array', 'np.array', (['embeddings'], {}), '(embeddings)\n', (9548, 9560), True, 'import numpy as np\n'), ((10675, 10691), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (10683, 10691), True, 'import numpy as np\n'), ((3837, 3902), 'numpy.concatenate', 'np.concatenate', (['(speakers_dict[speaker], embeddings_long)'], {'axis': '(0)'}), '((speakers_dict[speaker], embeddings_long), axis=0)\n', (3851, 3902), True, 'import numpy as np\n'), ((5170, 5196), 'os.path.abspath', 'os.path.abspath', (['audio_dir'], {}), '(audio_dir)\n', (5185, 5196), False, 'import os\n'), ((5287, 5315), 'os.path.abspath', 'os.path.abspath', (['in_rttm_dir'], {}), '(in_rttm_dir)\n', (5302, 5315), False, 'import os\n'), ((5683, 5710), 'os.path.abspath', 'os.path.abspath', (['in_emb_dir'], {}), '(in_emb_dir)\n', (5698, 5710), False, 'import os\n'), ((5766, 5794), 'os.path.abspath', 'os.path.abspath', (['out_emb_dir'], {}), '(out_emb_dir)\n', (5781, 5794), False, 'import os\n'), ((7994, 8040), 'numpy.mean', 'np.mean', (['merged_speakers_dict[speaker]'], {'axis': '(0)'}), '(merged_speakers_dict[speaker], axis=0)\n', (8001, 8040), True, 'import numpy as np\n'), ((9037, 9067), 'os.path.isfile', 'os.path.isfile', (['embedding_path'], {}), '(embedding_path)\n', (9051, 9067), False, 'import os\n'), ((7665, 7713), 'os.path.join', 'os.path.join', (['self.out_emb_dir', 'f"""{speaker}.pkl"""'], {}), "(self.out_emb_dir, f'{speaker}.pkl')\n", (7677, 7713), False, 'import os\n'), ((10050, 10090), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['test', 'self.embeddings'], {}), '(test, self.embeddings)\n', (10067, 10090), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((10109, 10151), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['enroll', 'self.embeddings'], {}), '(enroll, self.embeddings)\n', (10126, 10151), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((10170, 10201), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['enroll', 'test'], {}), '(enroll, test)\n', (10187, 10201), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((1404, 1432), 'vbdiar.utils.utils.Utils.partition', 'Utils.partition', (['fns', 'n_jobs'], {}), '(fns, n_jobs)\n', (1419, 1432), False, 'from vbdiar.utils.utils import Utils\n'), ((2645, 2681), 'os.path.join', 'os.path.join', (['in_rttm_dir', 'file_name'], {}), '(in_rttm_dir, file_name)\n', (2657, 2681), False, 'import os\n'), ((7738, 7763), 'os.path.dirname', 'os.path.dirname', (['out_path'], {}), '(out_path)\n', (7753, 7763), False, 'import os\n'), ((7833, 7903), 'pickle.dump', 'pickle.dump', (['merged_speakers_dict[speaker]', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(merged_speakers_dict[speaker], f, pickle.HIGHEST_PROTOCOL)\n', (7844, 7903), False, 
'import pickle\n'), ((9311, 9325), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (9322, 9325), False, 'import pickle\n'), ((10377, 10393), 'numpy.mean', 'np.mean', (['a.T[ii]'], {}), '(a.T[ii])\n', (10384, 10393), True, 'import numpy as np\n'), ((10395, 10410), 'numpy.std', 'np.std', (['a.T[ii]'], {}), '(a.T[ii])\n', (10401, 10410), True, 'import numpy as np\n'), ((10453, 10469), 'numpy.mean', 'np.mean', (['b.T[jj]'], {}), '(b.T[jj])\n', (10460, 10469), True, 'import numpy as np\n'), ((10471, 10486), 'numpy.std', 'np.std', (['b.T[jj]'], {}), '(b.T[jj])\n', (10477, 10486), True, 'import numpy as np\n'), ((9364, 9399), 'numpy.mean', 'np.mean', (['speaker_embeddings'], {'axis': '(0)'}), '(speaker_embeddings, axis=0)\n', (9371, 9399), True, 'import numpy as np\n'), ((8645, 8686), 'os.path.join', 'os.path.join', (['self.in_rttm_dir', 'file_name'], {}), '(self.in_rttm_dir, file_name)\n', (8657, 8686), False, 'import os\n')] |
import datetime
import re
from .exceptions import ObjectIsNotADate
def format_date(value, format="%d %m %Y"):
regex = re.match(r"(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})", value)
if regex is not None:
date = datetime.date(
int(regex.group("year")),
int(regex.group("month")),
int(regex.group("day")))
else:
raise ObjectIsNotADate
return date.strftime(format)
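# Examples (formats follow datetime.date.strftime):
# format_date("2018-09-30", "%d %m %Y")   # -> "30 09 2018"
# format_date("2018-09-30", "%Y/%m/%d")   # -> "2018/09/30"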
| [
"re.match"
] | [((125, 194), 're.match', 're.match', (['"""(?P<year>\\\\d{4})-(?P<month>\\\\d{2})-(?P<day>\\\\d{2})"""', 'value'], {}), "('(?P<year>\\\\d{4})-(?P<month>\\\\d{2})-(?P<day>\\\\d{2})', value)\n", (133, 194), False, 'import re\n')] |
import numpy as np
import os
import logging
from sklearn.model_selection import train_test_split
DATASET_ROOT_FOLDER = os.path.abspath('datasets')
class DataLoader:
train = None
validation = None
test = None
mode = None
partial_dataset = None
@staticmethod
def load(train_path=None, validation_path=None, test_path=None, height=28, length=28, train_validation_split_point=10000):
if train_path is not None:
DataLoader.train = DataLoader.load_image_data_with_label_at_end(
os.path.join(DATASET_ROOT_FOLDER, train_path), height=height, length=length)
if validation_path is not None:
DataLoader.validation = DataLoader.load_image_data_with_label_at_end(
os.path.join(DATASET_ROOT_FOLDER, validation_path), height=height, length=length)
elif train_validation_split_point is not None and train_validation_split_point > 0:
if DataLoader.mode is None or DataLoader.partial_dataset is not None:
train_validation_split_point = int(DataLoader.train['images'].shape[0] * 0.8)
splited_train = {
'images': DataLoader.train['images'][0:train_validation_split_point, :, :, :],
'labels': DataLoader.train['labels'][0:train_validation_split_point]
}
splited_validation = {
'images': DataLoader.train['images'][train_validation_split_point:, :, :, :],
'labels': DataLoader.train['labels'][train_validation_split_point:]
}
DataLoader.train = splited_train
DataLoader.validation = splited_validation
if test_path is not None:
DataLoader.test = DataLoader.load_image_data_with_label_at_end(os.path.join(DATASET_ROOT_FOLDER, test_path), height=height, length=length)
logging.debug('Training data shape:{}'.format(str(DataLoader.train['images'].shape)))
logging.debug('Validation data shape:{}'.format(str(DataLoader.validation['images'].shape)))
logging.debug('Test data shape:{}'.format(str(DataLoader.test['images'].shape)))
return DataLoader
@staticmethod
def get_training_data():
"""
get training data
:return: dict of (images, labels)
:rtype: dict
"""
images = DataLoader.train['images']
labels = DataLoader.train['labels']
return {
'images': images,
'labels': labels
}
@staticmethod
def get_validation_data():
"""
get validation data
:return: dict of (images, labels)
:rtype: dict
"""
images = DataLoader.validation['images']
labels = DataLoader.validation['labels']
return {
'images': images,
'labels': labels
}
@staticmethod
def get_test_data():
"""
get test data
:return: dict of (images, labels)
:rtype: dict
"""
images = DataLoader.test['images']
labels = DataLoader.test['labels']
return {
'images': images,
'labels': labels
}
@staticmethod
def load_image_data_with_label_at_end(path, height, length):
data = np.loadtxt(path)
if DataLoader.mode is None:
data = data[0:1000, :]
elif DataLoader.partial_dataset is not None and DataLoader.partial_dataset > 0 and DataLoader.partial_dataset <1:
# randomly pick partial dataset
cut_point = int(data.shape[0] * DataLoader.partial_dataset)
indices = np.random.permutation(data.shape[0])
training_idx= indices[:cut_point]
data = data[training_idx, :]
images = data[:, 0:-1]
labels = data[:, -1]
images = np.reshape(images, [images.shape[0], height, length, 1], order='F')
return {
'images': images,
'labels': labels
}
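# Shape sketch for load_image_data_with_label_at_end(): each row of the text
# file holds height * length pixel values followed by one label, so a 28x28
# dataset has rows of length 28 * 28 + 1 = 785 and yields images of shape
# (N, 28, 28, 1) plus a label vector of length N.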
| [
"numpy.reshape",
"os.path.join",
"os.path.abspath",
"numpy.loadtxt",
"numpy.random.permutation"
] | [((120, 147), 'os.path.abspath', 'os.path.abspath', (['"""datasets"""'], {}), "('datasets')\n", (135, 147), False, 'import os\n'), ((3261, 3277), 'numpy.loadtxt', 'np.loadtxt', (['path'], {}), '(path)\n', (3271, 3277), True, 'import numpy as np\n'), ((3810, 3877), 'numpy.reshape', 'np.reshape', (['images', '[images.shape[0], height, length, 1]'], {'order': '"""F"""'}), "(images, [images.shape[0], height, length, 1], order='F')\n", (3820, 3877), True, 'import numpy as np\n'), ((540, 585), 'os.path.join', 'os.path.join', (['DATASET_ROOT_FOLDER', 'train_path'], {}), '(DATASET_ROOT_FOLDER, train_path)\n', (552, 585), False, 'import os\n'), ((755, 805), 'os.path.join', 'os.path.join', (['DATASET_ROOT_FOLDER', 'validation_path'], {}), '(DATASET_ROOT_FOLDER, validation_path)\n', (767, 805), False, 'import os\n'), ((1765, 1809), 'os.path.join', 'os.path.join', (['DATASET_ROOT_FOLDER', 'test_path'], {}), '(DATASET_ROOT_FOLDER, test_path)\n', (1777, 1809), False, 'import os\n'), ((3609, 3645), 'numpy.random.permutation', 'np.random.permutation', (['data.shape[0]'], {}), '(data.shape[0])\n', (3630, 3645), True, 'import numpy as np\n')] |
from os.path import join
FAAS_ROOT="/lhome/trulsas/faas-profiler"
WORKLOAD_SPECS=join(FAAS_ROOT, "specs", "workloads")
#FAAS_ROOT="/home/truls/uni/phd/faas-profiler"
WSK_PATH = "wsk"
OPENWHISK_PATH = "/lhome/trulsas/openwhisk"
#: Location of output data
DATA_DIR = join(FAAS_ROOT, "..", "profiler_results")
SYSTEM_CPU_SET = "0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30"
| [
"os.path.join"
] | [((82, 119), 'os.path.join', 'join', (['FAAS_ROOT', '"""specs"""', '"""workloads"""'], {}), "(FAAS_ROOT, 'specs', 'workloads')\n", (86, 119), False, 'from os.path import join\n'), ((267, 308), 'os.path.join', 'join', (['FAAS_ROOT', '""".."""', '"""profiler_results"""'], {}), "(FAAS_ROOT, '..', 'profiler_results')\n", (271, 308), False, 'from os.path import join\n')] |
'''
pymmh3 was written by <NAME> and enhanced by <NAME>, and is placed in the public
domain. The authors hereby disclaim copyright to this source code.
pure python implementation of the murmur3 hash algorithm
https://code.google.com/p/smhasher/wiki/MurmurHash3
This was written for the times when you do not want to compile c-code and install modules,
and you only want a drop-in murmur3 implementation.
As this is purely python it is FAR from performant and if performance is anything that is needed
a proper c-module is suggested!
This module is written to have the same format as mmh3 python package found here for simple conversions:
https://pypi.python.org/pypi/mmh3/2.3.1
'''
import sys as _sys
if (_sys.version_info > (3, 0)):
def xrange( a, b, c ):
return list(range( a, b, c))
def xencode(x):
if isinstance(x, bytes) or isinstance(x, bytearray):
return x
else:
return x.encode()
else:
def xencode(x):
return x
del _sys
def hash( key, seed = 0x0 ):
''' Implements 32bit murmur3 hash. '''
key = bytearray( xencode(key) )
def fmix( h ):
h ^= h >> 16
h = ( h * 0x85ebca6b ) & 0xFFFFFFFF
h ^= h >> 13
h = ( h * 0xc2b2ae35 ) & 0xFFFFFFFF
h ^= h >> 16
return h
length = len( key )
nblocks = int( length / 4 )
h1 = seed
c1 = 0xcc9e2d51
c2 = 0x1b873593
# body
for block_start in range( 0, nblocks * 4, 4 ):
# ??? big endian?
k1 = key[ block_start + 3 ] << 24 | \
key[ block_start + 2 ] << 16 | \
key[ block_start + 1 ] << 8 | \
key[ block_start + 0 ]
k1 = ( c1 * k1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
k1 = ( c2 * k1 ) & 0xFFFFFFFF
h1 ^= k1
h1 = ( h1 << 13 | h1 >> 19 ) & 0xFFFFFFFF # inlined ROTL32
h1 = ( h1 * 5 + 0xe6546b64 ) & 0xFFFFFFFF
# tail
tail_index = nblocks * 4
k1 = 0
tail_size = length & 3
if tail_size >= 3:
k1 ^= key[ tail_index + 2 ] << 16
if tail_size >= 2:
k1 ^= key[ tail_index + 1 ] << 8
if tail_size >= 1:
k1 ^= key[ tail_index + 0 ]
if tail_size > 0:
k1 = ( k1 * c1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
k1 = ( k1 * c2 ) & 0xFFFFFFFF
h1 ^= k1
#finalization
unsigned_val = fmix( h1 ^ length )
if unsigned_val & 0x80000000 == 0:
return unsigned_val
else:
return -( (unsigned_val ^ 0xFFFFFFFF) + 1 )
def hash128( key, seed = 0x0, x64arch = True ):
''' Implements 128bit murmur3 hash. '''
def hash128_x64( key, seed ):
''' Implements 128bit murmur3 hash for x64. '''
def fmix( k ):
k ^= k >> 33
k = ( k * 0xff51afd7ed558ccd ) & 0xFFFFFFFFFFFFFFFF
k ^= k >> 33
k = ( k * 0xc4ceb9fe1a85ec53 ) & 0xFFFFFFFFFFFFFFFF
k ^= k >> 33
return k
length = len( key )
nblocks = int( length / 16 )
h1 = seed
h2 = seed
c1 = 0x87c37b91114253d5
c2 = 0x4cf5ad432745937f
#body
for block_start in range( 0, nblocks * 8, 8 ):
# ??? big endian?
k1 = key[ 2 * block_start + 7 ] << 56 | \
key[ 2 * block_start + 6 ] << 48 | \
key[ 2 * block_start + 5 ] << 40 | \
key[ 2 * block_start + 4 ] << 32 | \
key[ 2 * block_start + 3 ] << 24 | \
key[ 2 * block_start + 2 ] << 16 | \
key[ 2 * block_start + 1 ] << 8 | \
key[ 2 * block_start + 0 ]
k2 = key[ 2 * block_start + 15 ] << 56 | \
key[ 2 * block_start + 14 ] << 48 | \
key[ 2 * block_start + 13 ] << 40 | \
key[ 2 * block_start + 12 ] << 32 | \
key[ 2 * block_start + 11 ] << 24 | \
key[ 2 * block_start + 10 ] << 16 | \
key[ 2 * block_start + 9 ] << 8 | \
key[ 2 * block_start + 8 ]
k1 = ( c1 * k1 ) & 0xFFFFFFFFFFFFFFFF
k1 = ( k1 << 31 | k1 >> 33 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
k1 = ( c2 * k1 ) & 0xFFFFFFFFFFFFFFFF
h1 ^= k1
h1 = ( h1 << 27 | h1 >> 37 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
h1 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
h1 = ( h1 * 5 + 0x52dce729 ) & 0xFFFFFFFFFFFFFFFF
k2 = ( c2 * k2 ) & 0xFFFFFFFFFFFFFFFF
k2 = ( k2 << 33 | k2 >> 31 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
k2 = ( c1 * k2 ) & 0xFFFFFFFFFFFFFFFF
h2 ^= k2
h2 = ( h2 << 31 | h2 >> 33 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
h2 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
h2 = ( h2 * 5 + 0x38495ab5 ) & 0xFFFFFFFFFFFFFFFF
#tail
tail_index = nblocks * 16
k1 = 0
k2 = 0
tail_size = length & 15
if tail_size >= 15:
k2 ^= key[ tail_index + 14 ] << 48
if tail_size >= 14:
k2 ^= key[ tail_index + 13 ] << 40
if tail_size >= 13:
k2 ^= key[ tail_index + 12 ] << 32
if tail_size >= 12:
k2 ^= key[ tail_index + 11 ] << 24
if tail_size >= 11:
k2 ^= key[ tail_index + 10 ] << 16
if tail_size >= 10:
k2 ^= key[ tail_index + 9 ] << 8
if tail_size >= 9:
k2 ^= key[ tail_index + 8 ]
if tail_size > 8:
k2 = ( k2 * c2 ) & 0xFFFFFFFFFFFFFFFF
k2 = ( k2 << 33 | k2 >> 31 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
k2 = ( k2 * c1 ) & 0xFFFFFFFFFFFFFFFF
h2 ^= k2
if tail_size >= 8:
k1 ^= key[ tail_index + 7 ] << 56
if tail_size >= 7:
k1 ^= key[ tail_index + 6 ] << 48
if tail_size >= 6:
k1 ^= key[ tail_index + 5 ] << 40
if tail_size >= 5:
k1 ^= key[ tail_index + 4 ] << 32
if tail_size >= 4:
k1 ^= key[ tail_index + 3 ] << 24
if tail_size >= 3:
k1 ^= key[ tail_index + 2 ] << 16
if tail_size >= 2:
k1 ^= key[ tail_index + 1 ] << 8
if tail_size >= 1:
k1 ^= key[ tail_index + 0 ]
if tail_size > 0:
k1 = ( k1 * c1 ) & 0xFFFFFFFFFFFFFFFF
k1 = ( k1 << 31 | k1 >> 33 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
k1 = ( k1 * c2 ) & 0xFFFFFFFFFFFFFFFF
h1 ^= k1
#finalization
h1 ^= length
h2 ^= length
h1 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
h2 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
h1 = fmix( h1 )
h2 = fmix( h2 )
h1 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
h2 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
return ( h2 << 64 | h1 )
def hash128_x86( key, seed ):
''' Implements 128bit murmur3 hash for x86. '''
def fmix( h ):
h ^= h >> 16
h = ( h * 0x85ebca6b ) & 0xFFFFFFFF
h ^= h >> 13
h = ( h * 0xc2b2ae35 ) & 0xFFFFFFFF
h ^= h >> 16
return h
length = len( key )
nblocks = int( length / 16 )
h1 = seed
h2 = seed
h3 = seed
h4 = seed
c1 = 0x239b961b
c2 = 0xab0e9789
c3 = 0x38b34ae5
c4 = 0xa1e38b93
#body
for block_start in range( 0, nblocks * 16, 16 ):
k1 = key[ block_start + 3 ] << 24 | \
key[ block_start + 2 ] << 16 | \
key[ block_start + 1 ] << 8 | \
key[ block_start + 0 ]
k2 = key[ block_start + 7 ] << 24 | \
key[ block_start + 6 ] << 16 | \
key[ block_start + 5 ] << 8 | \
key[ block_start + 4 ]
k3 = key[ block_start + 11 ] << 24 | \
key[ block_start + 10 ] << 16 | \
key[ block_start + 9 ] << 8 | \
key[ block_start + 8 ]
k4 = key[ block_start + 15 ] << 24 | \
key[ block_start + 14 ] << 16 | \
key[ block_start + 13 ] << 8 | \
key[ block_start + 12 ]
k1 = ( c1 * k1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
k1 = ( c2 * k1 ) & 0xFFFFFFFF
h1 ^= k1
h1 = ( h1 << 19 | h1 >> 13 ) & 0xFFFFFFFF # inlined ROTL32
h1 = ( h1 + h2 ) & 0xFFFFFFFF
h1 = ( h1 * 5 + 0x561ccd1b ) & 0xFFFFFFFF
k2 = ( c2 * k2 ) & 0xFFFFFFFF
k2 = ( k2 << 16 | k2 >> 16 ) & 0xFFFFFFFF # inlined ROTL32
k2 = ( c3 * k2 ) & 0xFFFFFFFF
h2 ^= k2
h2 = ( h2 << 17 | h2 >> 15 ) & 0xFFFFFFFF # inlined ROTL32
h2 = ( h2 + h3 ) & 0xFFFFFFFF
h2 = ( h2 * 5 + 0x0bcaa747 ) & 0xFFFFFFFF
k3 = ( c3 * k3 ) & 0xFFFFFFFF
k3 = ( k3 << 17 | k3 >> 15 ) & 0xFFFFFFFF # inlined ROTL32
k3 = ( c4 * k3 ) & 0xFFFFFFFF
h3 ^= k3
h3 = ( h3 << 15 | h3 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
h3 = ( h3 + h4 ) & 0xFFFFFFFF
h3 = ( h3 * 5 + 0x96cd1c35 ) & 0xFFFFFFFF
k4 = ( c4 * k4 ) & 0xFFFFFFFF
k4 = ( k4 << 18 | k4 >> 14 ) & 0xFFFFFFFF # inlined ROTL32
k4 = ( c1 * k4 ) & 0xFFFFFFFF
h4 ^= k4
h4 = ( h4 << 13 | h4 >> 19 ) & 0xFFFFFFFF # inlined ROTL32
h4 = ( h1 + h4 ) & 0xFFFFFFFF
h4 = ( h4 * 5 + 0x32ac3b17 ) & 0xFFFFFFFF
#tail
tail_index = nblocks * 16
k1 = 0
k2 = 0
k3 = 0
k4 = 0
tail_size = length & 15
if tail_size >= 15:
k4 ^= key[ tail_index + 14 ] << 16
if tail_size >= 14:
k4 ^= key[ tail_index + 13 ] << 8
if tail_size >= 13:
k4 ^= key[ tail_index + 12 ]
if tail_size > 12:
k4 = ( k4 * c4 ) & 0xFFFFFFFF
k4 = ( k4 << 18 | k4 >> 14 ) & 0xFFFFFFFF # inlined ROTL32
k4 = ( k4 * c1 ) & 0xFFFFFFFF
h4 ^= k4
if tail_size >= 12:
k3 ^= key[ tail_index + 11 ] << 24
if tail_size >= 11:
k3 ^= key[ tail_index + 10 ] << 16
if tail_size >= 10:
k3 ^= key[ tail_index + 9 ] << 8
if tail_size >= 9:
k3 ^= key[ tail_index + 8 ]
if tail_size > 8:
k3 = ( k3 * c3 ) & 0xFFFFFFFF
k3 = ( k3 << 17 | k3 >> 15 ) & 0xFFFFFFFF # inlined ROTL32
k3 = ( k3 * c4 ) & 0xFFFFFFFF
h3 ^= k3
if tail_size >= 8:
k2 ^= key[ tail_index + 7 ] << 24
if tail_size >= 7:
k2 ^= key[ tail_index + 6 ] << 16
if tail_size >= 6:
k2 ^= key[ tail_index + 5 ] << 8
if tail_size >= 5:
k2 ^= key[ tail_index + 4 ]
if tail_size > 4:
k2 = ( k2 * c2 ) & 0xFFFFFFFF
k2 = ( k2 << 16 | k2 >> 16 ) & 0xFFFFFFFF # inlined ROTL32
k2 = ( k2 * c3 ) & 0xFFFFFFFF
h2 ^= k2
if tail_size >= 4:
k1 ^= key[ tail_index + 3 ] << 24
if tail_size >= 3:
k1 ^= key[ tail_index + 2 ] << 16
if tail_size >= 2:
k1 ^= key[ tail_index + 1 ] << 8
if tail_size >= 1:
k1 ^= key[ tail_index + 0 ]
if tail_size > 0:
k1 = ( k1 * c1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
k1 = ( k1 * c2 ) & 0xFFFFFFFF
h1 ^= k1
#finalization
h1 ^= length
h2 ^= length
h3 ^= length
h4 ^= length
h1 = ( h1 + h2 ) & 0xFFFFFFFF
h1 = ( h1 + h3 ) & 0xFFFFFFFF
h1 = ( h1 + h4 ) & 0xFFFFFFFF
h2 = ( h1 + h2 ) & 0xFFFFFFFF
h3 = ( h1 + h3 ) & 0xFFFFFFFF
h4 = ( h1 + h4 ) & 0xFFFFFFFF
h1 = fmix( h1 )
h2 = fmix( h2 )
h3 = fmix( h3 )
h4 = fmix( h4 )
h1 = ( h1 + h2 ) & 0xFFFFFFFF
h1 = ( h1 + h3 ) & 0xFFFFFFFF
h1 = ( h1 + h4 ) & 0xFFFFFFFF
h2 = ( h1 + h2 ) & 0xFFFFFFFF
h3 = ( h1 + h3 ) & 0xFFFFFFFF
h4 = ( h1 + h4 ) & 0xFFFFFFFF
return ( h4 << 96 | h3 << 64 | h2 << 32 | h1 )
key = bytearray( xencode(key) )
if x64arch:
return hash128_x64( key, seed )
else:
return hash128_x86( key, seed )
def hash64( key, seed = 0x0, x64arch = True ):
''' Implements 64bit murmur3 hash. Returns a tuple. '''
hash_128 = hash128( key, seed, x64arch )
unsigned_val1 = hash_128 & 0xFFFFFFFFFFFFFFFF
if unsigned_val1 & 0x8000000000000000 == 0:
signed_val1 = unsigned_val1
else:
signed_val1 = -( (unsigned_val1 ^ 0xFFFFFFFFFFFFFFFF) + 1 )
unsigned_val2 = ( hash_128 >> 64 ) & 0xFFFFFFFFFFFFFFFF
if unsigned_val2 & 0x8000000000000000 == 0:
signed_val2 = unsigned_val2
else:
signed_val2 = -( (unsigned_val2 ^ 0xFFFFFFFFFFFFFFFF) + 1 )
return ( int( signed_val1 ), int( signed_val2 ) )
def hash_bytes( key, seed = 0x0, x64arch = True ):
''' Implements 128bit murmur3 hash. Returns a byte string. '''
hash_128 = hash128( key, seed, x64arch )
bytestring = ''
for i in range(0, 16, 1):
lsbyte = hash_128 & 0xFF
bytestring = bytestring + str( chr( lsbyte ) )
hash_128 = hash_128 >> 8
return bytestring
if __name__ == "__main__":
    import argparse
    import sys
parser = argparse.ArgumentParser( 'pymurmur3', 'pymurmur [options] "string to hash"' )
parser.add_argument( '--seed', type = int, default = 0 )
parser.add_argument( 'strings', default = [], nargs='+')
opts = parser.parse_args()
for str_to_hash in opts.strings:
        sys.stdout.write( '"%s" = 0x%08X\n' % ( str_to_hash, hash( str_to_hash, opts.seed ) ) )
| [
"argparse.ArgumentParser"
] | [((13771, 13846), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""pymurmur3"""', '"""pymurmur [options] "string to hash\\""""'], {}), '(\'pymurmur3\', \'pymurmur [options] "string to hash"\')\n', (13794, 13846), False, 'import argparse\n')] |
import asyncio
import discord
from datetime import datetime
from operator import itemgetter
from discord.ext import commands
from Cogs import Nullify
from Cogs import DisplayName
from Cogs import UserTime
from Cogs import Message
def setup(bot):
# Add the bot and deps
settings = bot.get_cog("Settings")
bot.add_cog(ServerStats(bot, settings))
class ServerStats:
def __init__(self, bot, settings):
self.bot = bot
self.settings = settings
async def message(self, message):
# Check the message and see if we should allow it - always yes.
# This module doesn't need to cancel messages.
# Don't count your own, Pooter
if not message.author.id == self.bot.user.id:
server = message.guild
            messages = self.settings.getServerStat(server, "TotalMessages")
            if messages == None:
                messages = 0
            messages = int(messages) + 1
            self.settings.setServerStat(server, "TotalMessages", messages)
return { 'Ignore' : False, 'Delete' : False}
@commands.command(pass_context=True)
async def serverinfo(self, ctx, *, guild_name = None):
"""Lists some info about the current or passed server."""
# Check if we passed another guild
guild = None
if guild_name == None:
guild = ctx.guild
else:
for g in self.bot.guilds:
if g.name.lower() == guild_name.lower():
guild = g
break
if str(g.id) == str(guild_name):
guild = g
break
if guild == None:
# We didn't find it
await ctx.send("I couldn't find that guild...")
return
server_embed = discord.Embed(color=ctx.author.color)
server_embed.title = guild.name
# Get localized user time
local_time = UserTime.getUserTime(ctx.author, self.settings, guild.created_at)
time_str = "{} {}".format(local_time['time'], local_time['zone'])
server_embed.description = "Created at {}".format(time_str)
online_members = 0
bot_member = 0
bot_online = 0
for member in guild.members:
if member.bot:
bot_member += 1
if not member.status == discord.Status.offline:
bot_online += 1
continue
if not member.status == discord.Status.offline:
online_members += 1
# bot_percent = "{:,g}%".format((bot_member/len(guild.members))*100)
user_string = "{:,}/{:,} online ({:,g}%)".format(
online_members,
len(guild.members) - bot_member,
round((online_members/(len(guild.members) - bot_member) * 100), 2)
)
b_string = "bot" if bot_member == 1 else "bots"
user_string += "\n{:,}/{:,} {} online ({:,g}%)".format(
bot_online,
bot_member,
b_string,
round((bot_online/bot_member)*100, 2)
)
#server_embed.add_field(name="Members", value="{:,}/{:,} online ({:.2f}%)\n{:,} {} ({}%)".format(online_members, len(guild.members), bot_percent), inline=True)
server_embed.add_field(name="Members ({:,} total)".format(len(guild.members)), value=user_string, inline=True)
server_embed.add_field(name="Roles", value=str(len(guild.roles)), inline=True)
chandesc = "{:,} text, {:,} voice".format(len(guild.text_channels), len(guild.voice_channels))
server_embed.add_field(name="Channels", value=chandesc, inline=True)
server_embed.add_field(name="Default Role", value=guild.default_role, inline=True)
server_embed.add_field(name="Owner", value=guild.owner.name + "#" + guild.owner.discriminator, inline=True)
server_embed.add_field(name="AFK Channel", value=guild.afk_channel, inline=True)
server_embed.add_field(name="Verification", value=guild.verification_level, inline=True)
server_embed.add_field(name="Voice Region", value=guild.region, inline=True)
server_embed.add_field(name="Considered Large", value=guild.large, inline=True)
# Find out where in our join position this server is
joinedList = []
popList = []
for g in self.bot.guilds:
joinedList.append({ 'ID' : g.id, 'Joined' : g.me.joined_at })
popList.append({ 'ID' : g.id, 'Population' : len(g.members) })
# sort the guilds by join date
joinedList = sorted(joinedList, key=lambda x:x['Joined'])
popList = sorted(popList, key=lambda x:x['Population'], reverse=True)
check_item = { "ID" : guild.id, "Joined" : guild.me.joined_at }
total = len(joinedList)
position = joinedList.index(check_item) + 1
server_embed.add_field(name="Join Position", value="{:,} of {:,}".format(position, total), inline=True)
# Get our population position
check_item = { "ID" : guild.id, "Population" : len(guild.members) }
total = len(popList)
position = popList.index(check_item) + 1
server_embed.add_field(name="Population Rank", value="{:,} of {:,}".format(position, total), inline=True)
emojitext = ""
emojicount = 0
for emoji in guild.emojis:
if emoji.animated:
emojiMention = "<a:"+emoji.name+":"+str(emoji.id)+">"
else:
emojiMention = "<:"+emoji.name+":"+str(emoji.id)+">"
test = emojitext + emojiMention
if len(test) > 1024:
# TOOO BIIIIIIIIG
emojicount += 1
if emojicount == 1:
ename = "Emojis ({:,} total)".format(len(guild.emojis))
else:
ename = "Emojis (Continued)"
server_embed.add_field(name=ename, value=emojitext, inline=True)
emojitext=emojiMention
else:
emojitext = emojitext + emojiMention
if len(emojitext):
if emojicount == 0:
emojiname = "Emojis ({} total)".format(len(guild.emojis))
else:
emojiname = "Emojis (Continued)"
server_embed.add_field(name=emojiname, value=emojitext, inline=True)
if len(guild.icon_url):
server_embed.set_thumbnail(url=guild.icon_url)
else:
# No Icon
server_embed.set_thumbnail(url=ctx.author.default_avatar_url)
server_embed.set_footer(text="Server ID: {}".format(guild.id))
await ctx.channel.send(embed=server_embed)
@commands.command(pass_context=True)
async def sharedservers(self, ctx, *, member = None):
"""Lists how many servers you share with the bot."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if member == None:
member = ctx.author
if type(member) is str:
member_check = DisplayName.memberForName(member, ctx.guild)
if not member_check:
msg = "I couldn't find *{}* on this server...".format(member)
if suppress:
msg = Nullify.clean(msg)
await ctx.send(msg)
return
member = member_check
if member.id == self.bot.user.id:
count = len(self.bot.guilds)
if count == 1:
await ctx.send("I'm on *1* server. :blush:")
else:
await ctx.send("I'm on *{}* servers. :blush:".format(count))
return
count = 0
for guild in self.bot.guilds:
for mem in guild.members:
if mem.id == member.id:
count += 1
if ctx.author.id == member.id:
targ = "You share"
else:
targ = "*{}* shares".format(DisplayName.name(member))
if count == 1:
await ctx.send("{} *1* server with me. :blush:".format(targ))
else:
await ctx.send("{} *{}* servers with me. :blush:".format(targ, count))
@commands.command(pass_context=True)
async def listservers(self, ctx, number : int = 10):
"""Lists the servers I'm connected to - default is 10, max is 50."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if number > 50:
number = 50
if number < 1:
await ctx.channel.send('Oookay - look! No servers! Just like you wanted!')
return
i = 1
msg = '__**Servers I\'m On:**__\n\n'
for server in self.bot.guilds:
if i > number:
break
msg += '{}. *{}*\n'.format(i, server.name)
i += 1
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def topservers(self, ctx, number : int = 10):
"""Lists the top servers I'm connected to ordered by population - default is 10, max is 50."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if number > 50:
number = 50
if number < 1:
await ctx.channel.send('Oookay - look! No servers! Just like you wanted!')
return
serverList = []
for server in self.bot.guilds:
memberCount = 0
for member in server.members:
memberCount += 1
serverList.append({ 'Name' : server.name, 'Users' : memberCount })
# sort the servers by population
serverList = sorted(serverList, key=lambda x:int(x['Users']), reverse=True)
if number > len(serverList):
number = len(serverList)
i = 1
msg = ''
for server in serverList:
if i > number:
break
msg += '{}. *{}* - *{:,}* members\n'.format(i, server['Name'], server['Users'])
i += 1
if number < len(serverList):
msg = '__**Top {} of {} Servers:**__\n\n'.format(number, len(serverList))+msg
else:
msg = '__**Top {} Servers:**__\n\n'.format(len(serverList))+msg
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def bottomservers(self, ctx, number : int = 10):
"""Lists the bottom servers I'm connected to ordered by population - default is 10, max is 50."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if number > 50:
number = 50
if number < 1:
await ctx.channel.send('Oookay - look! No servers! Just like you wanted!')
return
serverList = []
for server in self.bot.guilds:
serverList.append({ 'Name' : server.name, 'Users' : len(server.members) })
# sort the servers by population
serverList = sorted(serverList, key=lambda x:int(x['Users']))
if number > len(serverList):
number = len(serverList)
i = 1
msg = ''
for server in serverList:
if i > number:
break
msg += '{}. *{}* - *{:,}* members\n'.format(i, server['Name'], server['Users'])
i += 1
if number < len(serverList):
msg = '__**Bottom {} of {} Servers:**__\n\n'.format(number, len(serverList))+msg
else:
msg = '__**Bottom {} Servers:**__\n\n'.format(len(serverList))+msg
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def users(self, ctx):
"""Lists the total number of users on all servers I'm connected to."""
message = await Message.EmbedText(title="Counting users...", color=ctx.message.author).send(ctx)
servers = members = membersOnline = bots = botsOnline = 0
counted_users = []
counted_bots = []
for server in self.bot.guilds:
servers += 1
for member in server.members:
if member.bot:
bots += 1
if not member.id in counted_bots:
counted_bots.append(member.id)
if not member.status == discord.Status.offline:
botsOnline += 1
else:
members += 1
if not member.id in counted_users:
counted_users.append(member.id)
if not member.status == discord.Status.offline:
membersOnline += 1
await Message.Embed(
title="Member Stats",
description="Current User Information".format(server.name),
fields=[
{ "name" : "Servers", "value" : "โโ {:,}".format(servers), "inline" : False },
{ "name" : "Users", "value" : "โโ {:,}/{:,} online ({:,g}%) - {:,} unique ({:,g}%)".format(membersOnline, members, round((membersOnline/members)*100, 2), len(counted_users), round((len(counted_users)/members)*100, 2)), "inline" : False},
{ "name" : "Bots", "value" : "โโ {:,}/{:,} online ({:,g}%) - {:,} unique ({:,g}%)".format(botsOnline, bots, round((botsOnline/bots)*100, 2), len(counted_bots), round(len(counted_bots)/bots*100, 2)), "inline" : False},
{ "name" : "Total", "value" : "โโ {:,}/{:,} online ({:,g}%)".format(membersOnline + botsOnline, members+bots, round(((membersOnline + botsOnline)/(members+bots))*100, 2)), "inline" : False}
],
color=ctx.message.author).edit(ctx, message)
'''userCount = 0
serverCount = 0
counted_users = []
message = await ctx.send("Counting users...")
for server in self.bot.guilds:
serverCount += 1
userCount += len(server.members)
for member in server.members:
if not member.id in counted_users:
counted_users.append(member.id)
await message.edit(content='There are *{:,} users* (*{:,}* unique) on the *{:,} servers* I am currently a part of!'.format(userCount, len(counted_users), serverCount))'''
@commands.command(pass_context=True)
async def joinpos(self, ctx, *, member = None):
"""Tells when a user joined compared to other users."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if member == None:
member = ctx.author
if type(member) is str:
member_check = DisplayName.memberForName(member, ctx.guild)
if not member_check:
msg = "I couldn't find *{}* on this server...".format(member)
if suppress:
msg = Nullify.clean(msg)
await ctx.send(msg)
return
member = member_check
joinedList = []
for mem in ctx.message.guild.members:
joinedList.append({ 'ID' : mem.id, 'Joined' : mem.joined_at })
# sort the users by join date
joinedList = sorted(joinedList, key=lambda x:x['Joined'])
check_item = { "ID" : member.id, "Joined" : member.joined_at }
total = len(joinedList)
position = joinedList.index(check_item) + 1
before = ""
after = ""
msg = "*{}'s* join position is **{:,}**.".format(DisplayName.name(member), position, total)
if position-1 == 1:
# We have previous members
before = "**1** user"
elif position-1 > 1:
before = "**{:,}** users".format(position-1)
if total-position == 1:
# There were users after as well
after = "**1** user"
elif total-position > 1:
after = "**{:,}** users".format(total-position)
# Build the string!
if len(before) and len(after):
# Got both
msg += "\n\n{} joined before, and {} after.".format(before, after)
elif len(before):
# Just got before
msg += "\n\n{} joined before.".format(before)
elif len(after):
# Just after
msg += "\n\n{} joined after.".format(after)
await ctx.send(msg)
@commands.command(pass_context=True)
async def firstjoins(self, ctx, number : int = 10):
"""Lists the first users to join - default is 10, max is 25."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if number > 25:
number = 25
if number < 1:
await ctx.channel.send('Oookay - look! No users! Just like you wanted!')
return
joinedList = []
for member in ctx.message.guild.members:
joinedList.append({ 'ID' : member.id, 'Joined' : member.joined_at })
# sort the users by join date
joinedList = sorted(joinedList, key=lambda x:x['Joined'])
i = 1
msg = ''
for member in joinedList:
if i > number:
break
# Get localized user time
local_time = UserTime.getUserTime(ctx.author, self.settings, member['Joined'])
time_str = "{} {}".format(local_time['time'], local_time['zone'])
msg += '{}. *{}* - *{}*\n'.format(i, DisplayName.name(DisplayName.memberForID(member['ID'], ctx.message.guild)), time_str)
i += 1
if number < len(joinedList):
msg = '__**First {} of {} Members to Join:**__\n\n'.format(number, len(joinedList))+msg
else:
msg = '__**First {} Members to Join:**__\n\n'.format(len(joinedList))+msg
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def recentjoins(self, ctx, number : int = 10):
"""Lists the most recent users to join - default is 10, max is 25."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if number > 25:
number = 25
if number < 1:
await ctx.channel.send('Oookay - look! No users! Just like you wanted!')
return
joinedList = []
for member in ctx.message.guild.members:
joinedList.append({ 'ID' : member.id, 'Joined' : member.joined_at })
# sort the users by join date
joinedList = sorted(joinedList, key=lambda x:x['Joined'], reverse=True)
i = 1
msg = ''
for member in joinedList:
if i > number:
break
# Get localized user time
local_time = UserTime.getUserTime(ctx.author, self.settings, member['Joined'])
time_str = "{} {}".format(local_time['time'], local_time['zone'])
msg += '{}. *{}* - *{}*\n'.format(i, DisplayName.name(DisplayName.memberForID(member['ID'], ctx.message.guild)), time_str)
i += 1
if number < len(joinedList):
msg = '__**Last {} of {} Members to Join:**__\n\n'.format(number, len(joinedList))+msg
else:
msg = '__**Last {} Members to Join:**__\n\n'.format(len(joinedList))+msg
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def firstservers(self, ctx, number : int = 10):
"""Lists the first servers I've joined - default is 10, max is 25."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if number > 25:
number = 25
if number < 1:
await ctx.channel.send('Oookay - look! No servers! Just like you wanted!')
return
joinedList = []
for guild in self.bot.guilds:
botmember = DisplayName.memberForID(self.bot.user.id, guild)
joinedList.append({ 'Name' : guild.name, 'Joined' : botmember.joined_at, 'Members': len(guild.members) })
# sort the servers by join date
joinedList = sorted(joinedList, key=lambda x:x['Joined'])
i = 1
msg = ''
for member in joinedList:
if i > number:
break
# Get localized user time
local_time = UserTime.getUserTime(ctx.author, self.settings, member['Joined'])
time_str = "{} {}".format(local_time['time'], local_time['zone'])
if member['Members'] == 1:
msg += '{}. *{}* - *{}* - *(1 member)*\n'.format(i, member['Name'], time_str)
else:
msg += '{}. *{}* - *{}* - *({} members)*\n'.format(i, member['Name'], time_str, member['Members'])
i += 1
if number < len(joinedList):
msg = '__**First {} of {} Servers I Joined:**__\n\n'.format(number, len(joinedList))+msg
else:
msg = '__**First {} Servers I Joined:**__\n\n'.format(len(joinedList))+msg
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def recentservers(self, ctx, number : int = 10):
"""Lists the most recent users to join - default is 10, max is 25."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if number > 25:
number = 25
if number < 1:
await ctx.channel.send('Oookay - look! No servers! Just like you wanted!')
return
joinedList = []
for guild in self.bot.guilds:
botmember = DisplayName.memberForID(self.bot.user.id, guild)
joinedList.append({ 'Name' : guild.name, 'Joined' : botmember.joined_at, 'Members': len(guild.members) })
# sort the servers by join date
joinedList = sorted(joinedList, key=lambda x:x['Joined'], reverse=True)
i = 1
msg = ''
for member in joinedList:
if i > number:
break
# Get localized user time
local_time = UserTime.getUserTime(ctx.author, self.settings, member['Joined'])
time_str = "{} {}".format(local_time['time'], local_time['zone'])
if member['Members'] == 1:
msg += '{}. *{}* - *{}* - *(1 member)*\n'.format(i, member['Name'], time_str)
else:
msg += '{}. *{}* - *{}* - *({} members)*\n'.format(i, member['Name'], time_str, member['Members'])
i += 1
if number < len(joinedList):
msg = '__**Last {} of {} Servers I Joined:**__\n\n'.format(number, len(joinedList))+msg
else:
msg = '__**Last {} Servers I Joined:**__\n\n'.format(len(joinedList))+msg
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def messages(self, ctx):
"""Lists the number of messages I've seen on this sever so far. (only applies after this module's inception, and if I'm online)"""
        messages = self.settings.getServerStat(ctx.message.guild, "TotalMessages")
        if messages == None:
            messages = 0
        else:
            # Exclude the message that invoked this command from the stored count
            messages = int(messages) - 1
        self.settings.setServerStat(ctx.message.guild, "TotalMessages", messages)
if messages == 1:
await ctx.channel.send('So far, I\'ve witnessed *{:,} message!*'.format(messages))
else:
await ctx.channel.send('So far, I\'ve witnessed *{:,} messages!*'.format(messages))
@commands.command(pass_context=True)
async def allmessages(self, ctx):
"""Lists the number of messages I've seen on all severs so far. (only applies after this module's inception, and if I'm online)"""
messages = 0
for guild in self.bot.guilds:
temp = 0 if self.settings.getServerStat(guild, "TotalMessages") is None else self.settings.getServerStat(guild, "TotalMessages")
messages += int(temp)
messages -= 1
if messages == 1:
await ctx.channel.send('So far, I\'ve witnessed *{:,} message across all servers!*'.format(messages))
else:
await ctx.channel.send('So far, I\'ve witnessed *{:,} messages across all servers!*'.format(messages))
        # Set our message count locally -1
        messages = self.settings.getServerStat(ctx.message.guild, "TotalMessages")
        if messages != None:
            self.settings.setServerStat(ctx.message.guild, "TotalMessages", int(messages) - 1)
| [
"Cogs.DisplayName.name",
"Cogs.UserTime.getUserTime",
"Cogs.Nullify.clean",
"Cogs.Message.EmbedText",
"Cogs.DisplayName.memberForName",
"Cogs.DisplayName.memberForID",
"discord.Embed",
"discord.ext.commands.command"
] | [((1159, 1194), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (1175, 1194), False, 'from discord.ext import commands\n'), ((6950, 6985), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (6966, 6985), False, 'from discord.ext import commands\n'), ((8626, 8661), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (8642, 8661), False, 'from discord.ext import commands\n'), ((9570, 9605), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (9586, 9605), False, 'from discord.ext import commands\n'), ((11224, 11259), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (11240, 11259), False, 'from discord.ext import commands\n'), ((12780, 12815), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (12796, 12815), False, 'from discord.ext import commands\n'), ((15470, 15505), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (15486, 15505), False, 'from discord.ext import commands\n'), ((17730, 17765), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (17746, 17765), False, 'from discord.ext import commands\n'), ((19451, 19486), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (19467, 19486), False, 'from discord.ext import commands\n'), ((21199, 21234), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (21215, 21234), False, 'from discord.ext import commands\n'), ((23168, 23203), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (23184, 23203), False, 'from discord.ext import commands\n'), ((25150, 25185), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (25166, 25185), False, 'from discord.ext import commands\n'), ((25856, 25891), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (25872, 25891), False, 'from discord.ext import commands\n'), ((1914, 1951), 'discord.Embed', 'discord.Embed', ([], {'color': 'ctx.author.color'}), '(color=ctx.author.color)\n', (1927, 1951), False, 'import discord\n'), ((2060, 2125), 'Cogs.UserTime.getUserTime', 'UserTime.getUserTime', (['ctx.author', 'self.settings', 'guild.created_at'], {}), '(ctx.author, self.settings, guild.created_at)\n', (2080, 2125), False, 'from Cogs import UserTime\n'), ((7464, 7508), 'Cogs.DisplayName.memberForName', 'DisplayName.memberForName', (['member', 'ctx.guild'], {}), '(member, ctx.guild)\n', (7489, 7508), False, 'from Cogs import DisplayName\n'), ((9506, 9524), 'Cogs.Nullify.clean', 'Nullify.clean', (['msg'], {}), '(msg)\n', (9519, 9524), False, 'from Cogs import Nullify\n'), ((11160, 11178), 'Cogs.Nullify.clean', 'Nullify.clean', (['msg'], {}), '(msg)\n', (11173, 11178), False, 'from Cogs import Nullify\n'), ((12714, 12732), 'Cogs.Nullify.clean', 'Nullify.clean', (['msg'], {}), '(msg)\n', (12727, 12732), False, 'from Cogs import Nullify\n'), ((15979, 16023), 'Cogs.DisplayName.memberForName', 'DisplayName.memberForName', (['member', 'ctx.guild'], {}), 
'(member, ctx.guild)\n', (16004, 16023), False, 'from Cogs import DisplayName\n'), ((16849, 16873), 'Cogs.DisplayName.name', 'DisplayName.name', (['member'], {}), '(member)\n', (16865, 16873), False, 'from Cogs import DisplayName\n'), ((18762, 18827), 'Cogs.UserTime.getUserTime', 'UserTime.getUserTime', (['ctx.author', 'self.settings', "member['Joined']"], {}), "(ctx.author, self.settings, member['Joined'])\n", (18782, 18827), False, 'from Cogs import UserTime\n'), ((19387, 19405), 'Cogs.Nullify.clean', 'Nullify.clean', (['msg'], {}), '(msg)\n', (19400, 19405), False, 'from Cogs import Nullify\n'), ((20504, 20569), 'Cogs.UserTime.getUserTime', 'UserTime.getUserTime', (['ctx.author', 'self.settings', "member['Joined']"], {}), "(ctx.author, self.settings, member['Joined'])\n", (20524, 20569), False, 'from Cogs import UserTime\n'), ((21127, 21145), 'Cogs.Nullify.clean', 'Nullify.clean', (['msg'], {}), '(msg)\n', (21140, 21145), False, 'from Cogs import Nullify\n'), ((21871, 21919), 'Cogs.DisplayName.memberForID', 'DisplayName.memberForID', (['self.bot.user.id', 'guild'], {}), '(self.bot.user.id, guild)\n', (21894, 21919), False, 'from Cogs import DisplayName\n'), ((22343, 22408), 'Cogs.UserTime.getUserTime', 'UserTime.getUserTime', (['ctx.author', 'self.settings', "member['Joined']"], {}), "(ctx.author, self.settings, member['Joined'])\n", (22363, 22408), False, 'from Cogs import UserTime\n'), ((23104, 23122), 'Cogs.Nullify.clean', 'Nullify.clean', (['msg'], {}), '(msg)\n', (23117, 23122), False, 'from Cogs import Nullify\n'), ((23841, 23889), 'Cogs.DisplayName.memberForID', 'DisplayName.memberForID', (['self.bot.user.id', 'guild'], {}), '(self.bot.user.id, guild)\n', (23864, 23889), False, 'from Cogs import DisplayName\n'), ((24327, 24392), 'Cogs.UserTime.getUserTime', 'UserTime.getUserTime', (['ctx.author', 'self.settings', "member['Joined']"], {}), "(ctx.author, self.settings, member['Joined'])\n", (24347, 24392), False, 'from Cogs import UserTime\n'), ((25086, 25104), 'Cogs.Nullify.clean', 'Nullify.clean', (['msg'], {}), '(msg)\n', (25099, 25104), False, 'from Cogs import Nullify\n'), ((8390, 8414), 'Cogs.DisplayName.name', 'DisplayName.name', (['member'], {}), '(member)\n', (8406, 8414), False, 'from Cogs import DisplayName\n'), ((7679, 7697), 'Cogs.Nullify.clean', 'Nullify.clean', (['msg'], {}), '(msg)\n', (7692, 7697), False, 'from Cogs import Nullify\n'), ((12964, 13034), 'Cogs.Message.EmbedText', 'Message.EmbedText', ([], {'title': '"""Counting users..."""', 'color': 'ctx.message.author'}), "(title='Counting users...', color=ctx.message.author)\n", (12981, 13034), False, 'from Cogs import Message\n'), ((16194, 16212), 'Cogs.Nullify.clean', 'Nullify.clean', (['msg'], {}), '(msg)\n', (16207, 16212), False, 'from Cogs import Nullify\n'), ((18974, 19030), 'Cogs.DisplayName.memberForID', 'DisplayName.memberForID', (["member['ID']", 'ctx.message.guild'], {}), "(member['ID'], ctx.message.guild)\n", (18997, 19030), False, 'from Cogs import DisplayName\n'), ((20716, 20772), 'Cogs.DisplayName.memberForID', 'DisplayName.memberForID', (["member['ID']", 'ctx.message.guild'], {}), "(member['ID'], ctx.message.guild)\n", (20739, 20772), False, 'from Cogs import DisplayName\n')] |
import random
import torch.utils.data.sampler
class BalancedBatchSampler(torch.utils.data.sampler.BatchSampler):
def __init__(
self,
dataset_labels,
batch_size=1,
steps=None,
n_classes=0,
n_samples=2
):
""" Create a balanced batch sampler for label based datasets
Args
dataset_labels : Labels of every entry from a dataset (in the same sequence)
batch_size : batch_size no explaination needed
step_size : Number of batches to generate (if None, then dataset_size / batch_size will be used)
n_classes : Number of classes
n_samples : Number of samples per class
*** If batch_size > n_classes * n_samples, rest of batch will be randomly filled
"""
self.batch_size = batch_size
self.steps = len(dataset_labels) // batch_size if steps is None else steps
self.n_classes = n_classes
self.n_samples = n_samples
# Create a label_to_entry_ids table
self.label_to_entry_ids = {}
for entry_id, label in enumerate(dataset_labels):
if label in self.label_to_entry_ids:
self.label_to_entry_ids[label].append(entry_id)
else:
self.label_to_entry_ids[label] = [entry_id]
# Subset the labels with more than n_samples entries
self.labels_subset = [label for (label, entry_ids) in self.label_to_entry_ids.items() if len(entry_ids) >= n_samples]
assert len(self.labels_subset) >= n_classes, 'Too little labels have {} entries, choose a smaller n_classes or n_samples'.format(n_samples)
def _make_batch_ids(self):
batch_ids = []
# Choose classes and entries
labels_choosen = random.sample(self.labels_subset, self.n_classes)
# Randomly sample n_samples entries from choosen labels
for l in labels_choosen:
batch_ids += random.sample(self.label_to_entry_ids[l], self.n_samples)
if len(batch_ids) < self.batch_size:
# Randomly sample remainder
labels_choosen = {l: None for l in labels_choosen}
remaining_entry_ids = []
for label, entry_ids in self.label_to_entry_ids.items():
if label not in labels_choosen:
remaining_entry_ids += entry_ids
batch_ids += random.sample(remaining_entry_ids, self.batch_size - len(batch_ids))
# Randomly shuffle batch ids
batch_ids = random.sample(batch_ids, self.batch_size)
batch_ids = torch.LongTensor(batch_ids)
return batch_ids
def __iter__(self):
self.count = 0
while self.count < self.steps:
self.count += 1
yield self._make_batch_ids()
def __len__(self):
return self.steps
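
# --- Illustrative usage sketch (added; not part of the original module). ---
# The toy labels below are made up; in practice pass your dataset's label list and hand
# the sampler to torch.utils.data.DataLoader via its `batch_sampler=` argument.
if __name__ == '__main__':
    toy_labels = [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]
    sampler = BalancedBatchSampler(toy_labels, batch_size=6, n_classes=2, n_samples=2)
    for batch_ids in sampler:
        # 2 samples from each of 2 chosen classes, plus 2 randomly drawn fill entries
        print(batch_ids)
        break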
| [
"random.sample"
] | [((1796, 1845), 'random.sample', 'random.sample', (['self.labels_subset', 'self.n_classes'], {}), '(self.labels_subset, self.n_classes)\n', (1809, 1845), False, 'import random\n'), ((2535, 2576), 'random.sample', 'random.sample', (['batch_ids', 'self.batch_size'], {}), '(batch_ids, self.batch_size)\n', (2548, 2576), False, 'import random\n'), ((1969, 2026), 'random.sample', 'random.sample', (['self.label_to_entry_ids[l]', 'self.n_samples'], {}), '(self.label_to_entry_ids[l], self.n_samples)\n', (1982, 2026), False, 'import random\n')] |
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import re
__all__ = ["get_bare_principal"]
def get_bare_principal(normalized_principal_name):
"""
  Given a normalized principal name (nimbus/host@REALM) returns just the
primary component (nimbus)
:param normalized_principal_name: a string containing the principal name to process
:return: a string containing the primary component value or None if not valid
"""
bare_principal = None
if normalized_principal_name:
match = re.match(r"([^/@]+)(?:/[^@])?(?:@.*)?", normalized_principal_name)
if match:
bare_principal = match.group(1)
  return bare_principal
 | [
"re.match"
] | [((1272, 1337), 're.match', 're.match', (['"""([^/@]+)(?:/[^@])?(?:@.*)?"""', 'normalized_principal_name'], {}), "('([^/@]+)(?:/[^@])?(?:@.*)?', normalized_principal_name)\n", (1280, 1337), False, 'import re\n')] |
import pytest
from httmock import urlmatch, HTTMock
from util.config import URLSchemeAndHostname
from util.config.validator import ValidatorContext
from util.config.validators import ConfigValidationException
from util.config.validators.validate_bitbucket_trigger import BitbucketTriggerValidator
from test.fixtures import *
@pytest.mark.parametrize(
"unvalidated_config",
[
(ValidatorContext({})),
(ValidatorContext({"BITBUCKET_TRIGGER_CONFIG": {}})),
(ValidatorContext({"BITBUCKET_TRIGGER_CONFIG": {"CONSUMER_KEY": "foo"}})),
(ValidatorContext({"BITBUCKET_TRIGGER_CONFIG": {"CONSUMER_SECRET": "foo"}})),
],
)
def test_validate_invalid_bitbucket_trigger_config(unvalidated_config, app):
validator = BitbucketTriggerValidator()
with pytest.raises(ConfigValidationException):
validator.validate(unvalidated_config)
def test_validate_bitbucket_trigger(app):
url_hit = [False]
@urlmatch(netloc=r"bitbucket.org")
def handler(url, request):
url_hit[0] = True
return {
"status_code": 200,
"content": "oauth_token=foo&oauth_token_secret=bar",
}
with HTTMock(handler):
validator = BitbucketTriggerValidator()
url_scheme_and_hostname = URLSchemeAndHostname("http", "localhost:5000")
unvalidated_config = ValidatorContext(
{
"BITBUCKET_TRIGGER_CONFIG": {
"CONSUMER_KEY": "foo",
"CONSUMER_SECRET": "bar",
},
},
url_scheme_and_hostname=url_scheme_and_hostname,
)
validator.validate(unvalidated_config)
assert url_hit[0]
| [
"util.config.URLSchemeAndHostname",
"httmock.HTTMock",
"util.config.validator.ValidatorContext",
"pytest.raises",
"util.config.validators.validate_bitbucket_trigger.BitbucketTriggerValidator",
"httmock.urlmatch"
] | [((753, 780), 'util.config.validators.validate_bitbucket_trigger.BitbucketTriggerValidator', 'BitbucketTriggerValidator', ([], {}), '()\n', (778, 780), False, 'from util.config.validators.validate_bitbucket_trigger import BitbucketTriggerValidator\n'), ((952, 984), 'httmock.urlmatch', 'urlmatch', ([], {'netloc': '"""bitbucket.org"""'}), "(netloc='bitbucket.org')\n", (960, 984), False, 'from httmock import urlmatch, HTTMock\n'), ((791, 831), 'pytest.raises', 'pytest.raises', (['ConfigValidationException'], {}), '(ConfigValidationException)\n', (804, 831), False, 'import pytest\n'), ((397, 417), 'util.config.validator.ValidatorContext', 'ValidatorContext', (['{}'], {}), '({})\n', (413, 417), False, 'from util.config.validator import ValidatorContext\n'), ((429, 479), 'util.config.validator.ValidatorContext', 'ValidatorContext', (["{'BITBUCKET_TRIGGER_CONFIG': {}}"], {}), "({'BITBUCKET_TRIGGER_CONFIG': {}})\n", (445, 479), False, 'from util.config.validator import ValidatorContext\n'), ((491, 562), 'util.config.validator.ValidatorContext', 'ValidatorContext', (["{'BITBUCKET_TRIGGER_CONFIG': {'CONSUMER_KEY': 'foo'}}"], {}), "({'BITBUCKET_TRIGGER_CONFIG': {'CONSUMER_KEY': 'foo'}})\n", (507, 562), False, 'from util.config.validator import ValidatorContext\n'), ((574, 648), 'util.config.validator.ValidatorContext', 'ValidatorContext', (["{'BITBUCKET_TRIGGER_CONFIG': {'CONSUMER_SECRET': 'foo'}}"], {}), "({'BITBUCKET_TRIGGER_CONFIG': {'CONSUMER_SECRET': 'foo'}})\n", (590, 648), False, 'from util.config.validator import ValidatorContext\n'), ((1177, 1193), 'httmock.HTTMock', 'HTTMock', (['handler'], {}), '(handler)\n', (1184, 1193), False, 'from httmock import urlmatch, HTTMock\n'), ((1215, 1242), 'util.config.validators.validate_bitbucket_trigger.BitbucketTriggerValidator', 'BitbucketTriggerValidator', ([], {}), '()\n', (1240, 1242), False, 'from util.config.validators.validate_bitbucket_trigger import BitbucketTriggerValidator\n'), ((1278, 1324), 'util.config.URLSchemeAndHostname', 'URLSchemeAndHostname', (['"""http"""', '"""localhost:5000"""'], {}), "('http', 'localhost:5000')\n", (1298, 1324), False, 'from util.config import URLSchemeAndHostname\n'), ((1354, 1509), 'util.config.validator.ValidatorContext', 'ValidatorContext', (["{'BITBUCKET_TRIGGER_CONFIG': {'CONSUMER_KEY': 'foo', 'CONSUMER_SECRET': 'bar'}}"], {'url_scheme_and_hostname': 'url_scheme_and_hostname'}), "({'BITBUCKET_TRIGGER_CONFIG': {'CONSUMER_KEY': 'foo',\n 'CONSUMER_SECRET': 'bar'}}, url_scheme_and_hostname=url_scheme_and_hostname\n )\n", (1370, 1509), False, 'from util.config.validator import ValidatorContext\n')] |
""" BigGAN: The Authorized Unofficial PyTorch release
Code by <NAME> and <NAME>
This code is an unofficial reimplementation of
"Large-Scale GAN Training for High Fidelity Natural Image Synthesis,"
by <NAME>, <NAME>, and <NAME> (arXiv 1809.11096).
Let's go.
"""
import datetime
import time
import torch
import dataset
import BigGAN
import train_fns
import utils
from common import *
# IMG_SIZE = 64
# IMG_SIZE_2 = IMG_SIZE * 2
def run(config):
# Update the config dict as necessary
# This is for convenience, to add settings derived from the user-specified
# configuration into the config-dict (e.g. inferring the number of classes
# and size of the images from the dataset, passing in a pytorch object
# for the activation specified as a string)
config['resolution'] = IMG_SIZE
config['n_classes'] = 1
config['G_activation'] = utils.activation_dict[config['G_nl']]
config['D_activation'] = utils.activation_dict[config['D_nl']]
# By default, skip init if resuming training.
if config['resume']:
print('Skipping initialization for training resumption...')
config['skip_init'] = True
config = utils.update_config_roots(config)
device = 'cuda'
# Seed RNG
utils.seed_rng(config['seed'])
# Prepare root folders if necessary
utils.prepare_root(config)
# Setup cudnn.benchmark for free speed
torch.backends.cudnn.benchmark = True
experiment_name = (config['experiment_name'] if config['experiment_name']
else 'generative_dog_images')
print('Experiment name is %s' % experiment_name)
G = BigGAN.Generator(**config).to(device)
D = BigGAN.Discriminator(**config).to(device)
# if config['parallel']:
G = nn.DataParallel(G)
D = nn.DataParallel(D)
# If using EMA, prepare it
if config['ema']:
print('Preparing EMA for G with decay of {}'.format(
config['ema_decay']))
G_ema = BigGAN.Generator(**{**config, 'skip_init': True,
'no_optim': True}).to(device)
G_ema = nn.DataParallel(G_ema)
ema = utils.ema(G, G_ema, config['ema_decay'], config['ema_start'])
else:
G_ema, ema = None, None
GD = BigGAN.G_D(G, D)
print(G)
print(D)
print('Number of params in G: {} D: {}'.format(
*[sum([p.data.nelement() for p in net.parameters()]) for net in [G, D]]))
# Prepare state dict, which holds things like epoch # and itr #
state_dict = {'itr': 0, 'epoch': 0, 'save_num': 0, 'config': config}
# If loading from a pre-trained model, load weights
if config['resume']:
print('Loading weights...')
utils.load_weights(G, D, state_dict,
config['weights_root'], experiment_name,
config['load_weights'] if config['load_weights'] else None,
G_ema if config['ema'] else None)
# Prepare data; the Discriminator's batch size is all that needs to be passed
# to the dataloader, as G doesn't require dataloading.
# Note that at every loader iteration we pass in enough data to complete
# a full D iteration (regardless of number of D steps and accumulations)
D_batch_size = (config['batch_size'] *
config['num_D_steps'] * config['num_D_accumulations'])
loaders = dataset.get_data_loaders(
data_root=config['data_root'],
label_root=config['label_root'],
batch_size=D_batch_size,
num_workers=config['num_workers'],
shuffle=config['shuffle'],
pin_memory=config['pin_memory'],
drop_last=True,
load_in_mem=config['load_in_mem'],
mask_out=config['mask_out']
)
# Prepare noise and randomly sampled label arrays
# Allow for different batch sizes in G
G_batch_size = max(config['G_batch_size'], config['batch_size'])
num_samples = config['num_fixed_samples']
z_, y_ = utils.prepare_z_y(
num_samples, G.module.dim_z, config['n_classes'], device=device, fp16=config['G_fp16'])
# Prepare a fixed z & y to see individual sample evolution throghout training
fixed_z, fixed_y = utils.prepare_z_y(
num_samples, G.module.dim_z, config['n_classes'], device=device, fp16=config['G_fp16'])
fixed_z.sample_()
fixed_y.sample_()
# Loaders are loaded, prepare the training function
train = train_fns.create_train_fn(
G, D, GD, z_, y_, ema, state_dict, config)
print('Beginning training at epoch %d...' % state_dict['epoch'])
start_time = time.perf_counter()
loader = loaders[0]
total_iters = config['num_epochs'] * len(loader)
# Train for specified number of epochs, although we mostly track G iterations.
pbar = tqdm(total=total_iters)
for _ in range(state_dict['itr']):
pbar.update()
timer = mmcv.Timer()
timer.start()
start_itr = state_dict['itr']
for epoch in range(state_dict['epoch'], config['num_epochs']):
for i, data in enumerate(loader):
x, y = data['img'], data['label']
# Increment the iteration counter
state_dict['itr'] += 1
# Make sure G and D are in training mode, just in case they got set to eval
# For D, which typically doesn't have BN, this shouldn't matter much.
G.train()
D.train()
if config['ema']:
G_ema.train()
x, y = x.to(device), y.to(device)
metrics = train(x, y)
if not (state_dict['itr'] % config['log_interval']):
curr_time = timer.since_start()
curr_time_str = datetime.datetime.fromtimestamp(
curr_time).strftime('%H:%M:%S')
                    # ETA = remaining iterations / (iterations completed / time elapsed)
eta = (
total_iters - state_dict['itr']) // ((state_dict['itr']-start_itr) / (curr_time+1))
eta_str = datetime.datetime.fromtimestamp(
eta).strftime('%H:%M:%S')
log = "[{}] [{}] [{} / {}] Ep {}, ".format(
curr_time_str, eta_str, state_dict['itr'], total_iters, epoch)
log += ', '.join(['%s : %+4.3f' % (key, metrics[key])
for key in metrics])
pbar.set_description(log)
# print(log)
# Save weights and copies as configured at specified interval
if not (state_dict['itr'] % config['sample_every']):
if config['G_eval_mode']:
# print('Switching G to eval mode...')
G.eval()
train_fns.save_and_sample(G, D, G_ema, z_, y_, fixed_z, fixed_y,
state_dict, config, experiment_name, save_weight=False)
if not (state_dict['itr'] % config['save_every']):
if config['G_eval_mode']:
# print('Switching G to eval mode...')
G.eval()
train_fns.save_and_sample(G, D, G_ema, z_, y_, fixed_z, fixed_y,
state_dict, config, experiment_name, save_weight=True)
pbar.update()
# Increment epoch counter at end of epoch
state_dict['epoch'] += 1
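
# --- Conceptual sketch (added): what an EMA helper such as `utils.ema` typically does. ---
# `utils.ema` itself is not shown in this file, so the update rule below is an assumption
# about the usual exponential moving average of generator weights, not the project's code.
def _ema_update_sketch(G, G_ema, decay=0.9999):
    with torch.no_grad():
        for p, p_ema in zip(G.parameters(), G_ema.parameters()):
            p_ema.copy_(decay * p_ema + (1.0 - decay) * p)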
def main():
# parse command line and run
parser = utils.prepare_parser()
config = vars(parser.parse_args())
print(config)
run(config)
if __name__ == '__main__':
main()
| [
"datetime.datetime.fromtimestamp",
"utils.prepare_root",
"dataset.get_data_loaders",
"utils.prepare_z_y",
"train_fns.save_and_sample",
"BigGAN.Generator",
"time.perf_counter",
"utils.update_config_roots",
"train_fns.create_train_fn",
"utils.ema",
"BigGAN.G_D",
"BigGAN.Discriminator",
"utils.prepare_parser",
"utils.load_weights",
"utils.seed_rng"
] | [((1176, 1209), 'utils.update_config_roots', 'utils.update_config_roots', (['config'], {}), '(config)\n', (1201, 1209), False, 'import utils\n'), ((1249, 1279), 'utils.seed_rng', 'utils.seed_rng', (["config['seed']"], {}), "(config['seed'])\n", (1263, 1279), False, 'import utils\n'), ((1324, 1350), 'utils.prepare_root', 'utils.prepare_root', (['config'], {}), '(config)\n', (1342, 1350), False, 'import utils\n'), ((2253, 2269), 'BigGAN.G_D', 'BigGAN.G_D', (['G', 'D'], {}), '(G, D)\n', (2263, 2269), False, 'import BigGAN\n'), ((3378, 3685), 'dataset.get_data_loaders', 'dataset.get_data_loaders', ([], {'data_root': "config['data_root']", 'label_root': "config['label_root']", 'batch_size': 'D_batch_size', 'num_workers': "config['num_workers']", 'shuffle': "config['shuffle']", 'pin_memory': "config['pin_memory']", 'drop_last': '(True)', 'load_in_mem': "config['load_in_mem']", 'mask_out': "config['mask_out']"}), "(data_root=config['data_root'], label_root=config[\n 'label_root'], batch_size=D_batch_size, num_workers=config[\n 'num_workers'], shuffle=config['shuffle'], pin_memory=config[\n 'pin_memory'], drop_last=True, load_in_mem=config['load_in_mem'],\n mask_out=config['mask_out'])\n", (3402, 3685), False, 'import dataset\n'), ((3971, 4081), 'utils.prepare_z_y', 'utils.prepare_z_y', (['num_samples', 'G.module.dim_z', "config['n_classes']"], {'device': 'device', 'fp16': "config['G_fp16']"}), "(num_samples, G.module.dim_z, config['n_classes'], device=\n device, fp16=config['G_fp16'])\n", (3988, 4081), False, 'import utils\n'), ((4191, 4301), 'utils.prepare_z_y', 'utils.prepare_z_y', (['num_samples', 'G.module.dim_z', "config['n_classes']"], {'device': 'device', 'fp16': "config['G_fp16']"}), "(num_samples, G.module.dim_z, config['n_classes'], device=\n device, fp16=config['G_fp16'])\n", (4208, 4301), False, 'import utils\n'), ((4419, 4487), 'train_fns.create_train_fn', 'train_fns.create_train_fn', (['G', 'D', 'GD', 'z_', 'y_', 'ema', 'state_dict', 'config'], {}), '(G, D, GD, z_, y_, ema, state_dict, config)\n', (4444, 4487), False, 'import train_fns\n'), ((4584, 4603), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (4601, 4603), False, 'import time\n'), ((7420, 7442), 'utils.prepare_parser', 'utils.prepare_parser', ([], {}), '()\n', (7440, 7442), False, 'import utils\n'), ((2139, 2200), 'utils.ema', 'utils.ema', (['G', 'G_ema', "config['ema_decay']", "config['ema_start']"], {}), "(G, G_ema, config['ema_decay'], config['ema_start'])\n", (2148, 2200), False, 'import utils\n'), ((2697, 2876), 'utils.load_weights', 'utils.load_weights', (['G', 'D', 'state_dict', "config['weights_root']", 'experiment_name', "(config['load_weights'] if config['load_weights'] else None)", "(G_ema if config['ema'] else None)"], {}), "(G, D, state_dict, config['weights_root'],\n experiment_name, config['load_weights'] if config['load_weights'] else\n None, G_ema if config['ema'] else None)\n", (2715, 2876), False, 'import utils\n'), ((1630, 1656), 'BigGAN.Generator', 'BigGAN.Generator', ([], {}), '(**config)\n', (1646, 1656), False, 'import BigGAN\n'), ((1681, 1711), 'BigGAN.Discriminator', 'BigGAN.Discriminator', ([], {}), '(**config)\n', (1701, 1711), False, 'import BigGAN\n'), ((1971, 2038), 'BigGAN.Generator', 'BigGAN.Generator', ([], {}), "(**{**config, 'skip_init': True, 'no_optim': True})\n", (1987, 2038), False, 'import BigGAN\n'), ((6714, 6838), 'train_fns.save_and_sample', 'train_fns.save_and_sample', (['G', 'D', 'G_ema', 'z_', 'y_', 'fixed_z', 'fixed_y', 'state_dict', 'config', 
'experiment_name'], {'save_weight': '(False)'}), '(G, D, G_ema, z_, y_, fixed_z, fixed_y, state_dict,\n config, experiment_name, save_weight=False)\n', (6739, 6838), False, 'import train_fns\n'), ((7088, 7211), 'train_fns.save_and_sample', 'train_fns.save_and_sample', (['G', 'D', 'G_ema', 'z_', 'y_', 'fixed_z', 'fixed_y', 'state_dict', 'config', 'experiment_name'], {'save_weight': '(True)'}), '(G, D, G_ema, z_, y_, fixed_z, fixed_y, state_dict,\n config, experiment_name, save_weight=True)\n', (7113, 7211), False, 'import train_fns\n'), ((5673, 5715), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['curr_time'], {}), '(curr_time)\n', (5704, 5715), False, 'import datetime\n'), ((6008, 6044), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['eta'], {}), '(eta)\n', (6039, 6044), False, 'import datetime\n')] |
# This file contains (at least) five errors.
# Instructions:
# - test until you reach 100% coverage;
# - fix the bugs;
# - send the diff or the git repository by email.
import hypothesis
from hypothesis import given, settings
from hypothesis.strategies import integers, lists
class BinHeap:
    # binary heap structure for integers
    def __init__(self):
        # initialize an integer binary heap containing a single element 0
        self.heapList = [0]
        self.currentSize = 1  # size of heapList (invariant)
def percUp(self,i):
#upward percolation until 0 reached or father is bigger
while i // 2 > 0 and self.heapList[i] < self.heapList[i // 2]:
tmp = self.heapList[i // 2]
self.heapList[i // 2] = self.heapList[i]
self.heapList[i] = tmp
i //= 2
def insert(self,k):
#inserting a new value into the heap
self.heapList.append(k)
self.percUp(self.currentSize)
self.currentSize = self.currentSize + 1
def percDown(self,i):
while (i * 2) < self.currentSize:#while I have a child
mc = self.minChild(i)#mc is the index of the smallest
if self.heapList[i] > self.heapList[mc]:
tmp = self.heapList[i]
self.heapList[i] = self.heapList[mc]
self.heapList[mc] = tmp
i = mc
def minChild(self,i):
if i * 2 >= self.currentSize or i == 0:
print("No Child. None is returned.")
return
if i * 2 + 1 >= self.currentSize:
return i * 2
else:
if self.heapList[i*2] < self.heapList[i*2+1]:
return i * 2
else:
return i * 2 + 1
def delMin(self):
try:
rval = self.heapList[1]
except IndexError:
print("Empty heap. Nothing is changed. None is returned.")
return
self.currentSize = self.currentSize - 1
self.heapList[1] = self.heapList[self.currentSize]
self.heapList.pop()
self.percDown(1)
return rval
def buildHeap(self,alist):
#creates a whole heap from a list, by percolating all its elements
i = 1
self.currentSize = len(alist) + 1# + 1
        self.heapList = [0] + alist  # remove the [:]
while (i < self.currentSize):
self.percUp(i)
i += 1
def assert_isheaplist(x,val,lon,HL):
assert ((x * 2 + 1 > lon) or (x * 2 + 1 == lon and HL[2*x] >= val) or (HL[2*x] >= val and HL[2*x+1] >= val))
def assert_goodheap(tau,lon):
for x in range(1,lon):
assert_isheaplist(x,tau.heapList[x],tau.currentSize,tau.heapList)
def test_init():
tau = BinHeap()
assert tau.heapList == [0]
assert tau.currentSize == 1
@given(integers())
@settings(max_examples=100)
def test_percup(integer):
gamma = [0,1,3,2,9,99,2,3,10,9,103,102,3,2,3,3]
tau = BinHeap()
tau.currentsize = 16
tau.heapList = gamma[:]
tau.percUp(15)
assert tau.heapList == gamma[:]
tau.heapList[15] = 2
tau.percUp(15)
print(tau.heapList)
assert tau.heapList == [0,1,3,2,9,99,2,2,10,9,103,102,3,2,3,3]
assert tau.currentsize == 16
tau.heapList.append(8)
tau.currentsize = 17
tau.percUp(16)
assert tau.heapList == [0,1,3,2,8,99,2,2,9,9,103,102,3,2,3,3,10]
tau.heapList.append(integer)
tau.currentsize = 18
tau.percUp(17)
assert tau.heapList[17] >= tau.heapList[8]
assert tau.heapList[8] >= tau.heapList[4]
@given(lists(elements=integers()))
@settings(max_examples=1000)
def test_build(L):
tau = BinHeap()
tau.buildHeap(L)
assert tau.currentSize == len(L) + 1
assert sorted(tau.heapList) == sorted(L+[0])
assert_goodheap(tau,len(L)+1)
#for x in range(1,len(L) + 1):
# assert_isheaplist(x,tau.heapList[x],tau.currentSize,tau.heapList)
@given(lists(elements=integers()),integers())
@settings(max_examples=1000)
def test_insert(L,i):
tau = BinHeap()
tau.buildHeap(L)
tau.insert(i)
assert_goodheap(tau,len(L)+1)
@given(lists(elements=integers()),integers())
@settings(max_examples=100)
def test_percDown(L,i):
tau = BinHeap()
L += [10]
tau.buildHeap(L)
tau.heapList[1] = i
tau.percDown(1)
for x in range(1,len(L) + 1):
for _ in range(len(L)):
tau.percDown(x)
#then we test that we got a well-ordered heap
assert_goodheap(tau,len(L)+1)
@given(lists(elements=integers()))
@settings(max_examples=400,deadline=None)
def test_delmin(L):
L += [10]
tau = BinHeap()
assert tau.delMin() is None
tau.buildHeap(L)
#print(L)
#print("sorted",sorted(L),"\n")
#print("TAU ", tau.heapList,"\n")
assert tau.delMin() == min(L)
@given(lists(elements=integers()),integers())
@settings(max_examples=400)
def test_minChild(L,i):
tau = BinHeap()
assert tau.minChild(abs(i)) is None
tau.buildHeap(2*L+[0,1])
assert tau.minChild(len(L)+1) is not None
@given(lists(elements=integers()),lists(elements=integers()))
@settings(max_examples=400,deadline=None)
def test_general(L,K):
tau = BinHeap()
    tau.buildHeap(L)  # heap built from L
    for k in K: tau.insert(k)  # add the elements of K
assert_goodheap(tau,tau.currentSize)
x = []
    while tau.currentSize > 1: x.append(tau.delMin())  # pop all the elements
    assert x == sorted(L + K)  # check that delMin always returned the minimum
assert tau.delMin() is None
x = []
tau.buildHeap(K)
    for l in L:  # test that a sequence of insertions/deletions preserves the heap structure
tau.delMin()
tau.insert(l)
assert_goodheap(tau,tau.currentSize) | [
"hypothesis.settings",
"hypothesis.strategies.integers"
] | [((2854, 2880), 'hypothesis.settings', 'settings', ([], {'max_examples': '(100)'}), '(max_examples=100)\n', (2862, 2880), False, 'from hypothesis import given, settings\n'), ((3612, 3639), 'hypothesis.settings', 'settings', ([], {'max_examples': '(1000)'}), '(max_examples=1000)\n', (3620, 3639), False, 'from hypothesis import given, settings\n'), ((3986, 4013), 'hypothesis.settings', 'settings', ([], {'max_examples': '(1000)'}), '(max_examples=1000)\n', (3994, 4013), False, 'from hypothesis import given, settings\n'), ((4181, 4207), 'hypothesis.settings', 'settings', ([], {'max_examples': '(100)'}), '(max_examples=100)\n', (4189, 4207), False, 'from hypothesis import given, settings\n'), ((4551, 4592), 'hypothesis.settings', 'settings', ([], {'max_examples': '(400)', 'deadline': 'None'}), '(max_examples=400, deadline=None)\n', (4559, 4592), False, 'from hypothesis import given, settings\n'), ((4873, 4899), 'hypothesis.settings', 'settings', ([], {'max_examples': '(400)'}), '(max_examples=400)\n', (4881, 4899), False, 'from hypothesis import given, settings\n'), ((5123, 5164), 'hypothesis.settings', 'settings', ([], {'max_examples': '(400)', 'deadline': 'None'}), '(max_examples=400, deadline=None)\n', (5131, 5164), False, 'from hypothesis import given, settings\n'), ((2841, 2851), 'hypothesis.strategies.integers', 'integers', ([], {}), '()\n', (2849, 2851), False, 'from hypothesis.strategies import integers, lists\n'), ((3973, 3983), 'hypothesis.strategies.integers', 'integers', ([], {}), '()\n', (3981, 3983), False, 'from hypothesis.strategies import integers, lists\n'), ((4168, 4178), 'hypothesis.strategies.integers', 'integers', ([], {}), '()\n', (4176, 4178), False, 'from hypothesis.strategies import integers, lists\n'), ((4860, 4870), 'hypothesis.strategies.integers', 'integers', ([], {}), '()\n', (4868, 4870), False, 'from hypothesis.strategies import integers, lists\n'), ((3598, 3608), 'hypothesis.strategies.integers', 'integers', ([], {}), '()\n', (3606, 3608), False, 'from hypothesis.strategies import integers, lists\n'), ((3961, 3971), 'hypothesis.strategies.integers', 'integers', ([], {}), '()\n', (3969, 3971), False, 'from hypothesis.strategies import integers, lists\n'), ((4156, 4166), 'hypothesis.strategies.integers', 'integers', ([], {}), '()\n', (4164, 4166), False, 'from hypothesis.strategies import integers, lists\n'), ((4537, 4547), 'hypothesis.strategies.integers', 'integers', ([], {}), '()\n', (4545, 4547), False, 'from hypothesis.strategies import integers, lists\n'), ((4848, 4858), 'hypothesis.strategies.integers', 'integers', ([], {}), '()\n', (4856, 4858), False, 'from hypothesis.strategies import integers, lists\n'), ((5082, 5092), 'hypothesis.strategies.integers', 'integers', ([], {}), '()\n', (5090, 5092), False, 'from hypothesis.strategies import integers, lists\n'), ((5109, 5119), 'hypothesis.strategies.integers', 'integers', ([], {}), '()\n', (5117, 5119), False, 'from hypothesis.strategies import integers, lists\n')] |
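For reference, a small usage sketch of the BinHeap class from the row above. It assumes the deliberate bugs announced in the file header have already been fixed, so the names come from the code but the expected results only hold for a correct min-heap.
heap = BinHeap()
heap.buildHeap([5, 9, 11, 14, 18, 19, 21, 33, 17, 27])
assert heap.delMin() == 5      # the smallest element comes out first
heap.insert(1)                 # 1 percolates up to the root
assert heap.delMin() == 1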
from typing import List, Tuple
from omegaconf import DictConfig
import torch
import torch.nn as nn
import torch.nn.functional as F
from rlcycle.common.abstract.loss import Loss
class DQNLoss(Loss):
"""Compute double DQN loss"""
def __init__(self, hyper_params: DictConfig, use_cuda: bool):
Loss.__init__(self, hyper_params, use_cuda)
def __call__(
self, networks: Tuple[nn.Module, ...], data: Tuple[torch.Tensor, ...]
) -> Tuple[torch.Tensor, ...]:
network, target_network = networks
states, actions, rewards, next_states, dones = data
q_value = network.forward(states).gather(1, actions)
with torch.no_grad():
next_q = torch.max(target_network.forward(next_states), 1)[0].unsqueeze(1)
n_step_gamma = self.hyper_params.gamma ** self.hyper_params.n_step
target_q = rewards + (1 - dones) * n_step_gamma * next_q
element_wise_loss = F.smooth_l1_loss(
q_value, target_q.detach(), reduction="none"
)
return element_wise_loss
class QRLoss(Loss):
"""Compute quantile regression loss"""
def __init__(self, hyper_params: DictConfig, use_cuda: bool):
Loss.__init__(self, hyper_params, use_cuda)
def __call__(
self, networks: Tuple[nn.Module, ...], data: Tuple[torch.Tensor, ...],
) -> Tuple[torch.Tensor, ...]:
network, target_network = networks
states, actions, rewards, next_states, dones = data
z_dists = network.forward(states)
z_dists = z_dists[list(range(states.size(0))), actions.view(-1)]
with torch.no_grad():
next_z = target_network.forward(next_states)
next_actions = torch.max(next_z.mean(2), dim=1)[1]
next_z = next_z[list(range(states.size(0))), next_actions]
n_step_gamma = self.hyper_params.gamma ** self.hyper_params.n_step
target_z = rewards + (1 - dones) * n_step_gamma * next_z
distance = target_z - z_dists
quantile_huber_loss = (
network.tau - (distance.detach() < 0).float()
).abs() * self.huber_loss(distance)
element_wise_loss = torch.mean(quantile_huber_loss, dim=1, keepdim=True)
return element_wise_loss
@staticmethod
def huber_loss(x: List[torch.Tensor], k: float = 1.0):
return torch.where(x.abs() <= k, 0.5 * x.pow(2), k * (x.abs() - 0.5 * k))
class CategoricalLoss(Loss):
"""Compute C51 loss"""
def __init__(self, hyper_params: DictConfig, use_cuda: bool):
Loss.__init__(self, hyper_params, use_cuda)
def __call__(
self, networks: Tuple[nn.Module, ...], data: Tuple[torch.Tensor, ...]
) -> Tuple[torch.Tensor, ...]:
network, target_network = networks
states, actions, rewards, next_states, dones = data
batch_size = states.size(0)
offset = (
torch.linspace(0, (batch_size - 1) * network.num_atoms, batch_size)
.long()
.unsqueeze(1)
.expand(batch_size, network.num_atoms)
)
if self.use_cuda:
offset = offset.cuda()
z_dists = network.forward(states)
z_dists = z_dists[list(range(states.size(0))), actions.view(-1)]
with torch.no_grad():
next_z = target_network.forward(next_states)
next_actions = torch.max(next_z.mean(2), dim=1)[1]
next_z = next_z[list(range(states.size(0))), next_actions]
n_step_gamma = self.hyper_params.gamma ** self.hyper_params.n_step
target_z = rewards + (1 - dones) * n_step_gamma * network.support
target_z = torch.clamp(target_z, min=network.v_min, max=network.v_max)
target_proj = self.dist_projection(network, next_z, target_z, offset)
log_dist = torch.log(z_dists)
element_wise_loss = -(target_proj * log_dist).sum(1)
return element_wise_loss
def dist_projection(
self,
network: nn.Module,
next_z: torch.Tensor,
target_z: torch.Tensor,
offset: torch.Tensor,
) -> torch.Tensor:
b = (target_z - network.v_min) / network.delta_z
lb = b.floor().long()
ub = b.ceil().long()
proj_dist = torch.zeros(next_z.size())
if self.use_cuda:
proj_dist = proj_dist.cuda()
proj_dist.view(-1).index_add_(
0, (lb + offset).view(-1), (next_z * (ub.float() - b)).view(-1)
)
proj_dist.view(-1).index_add_(
0, (ub + offset).view(-1), (next_z * (b - lb.float())).view(-1)
)
return proj_dist
| [
"torch.log",
"torch.mean",
"torch.linspace",
"torch.no_grad",
"rlcycle.common.abstract.loss.Loss.__init__",
"torch.clamp"
] | [((311, 354), 'rlcycle.common.abstract.loss.Loss.__init__', 'Loss.__init__', (['self', 'hyper_params', 'use_cuda'], {}), '(self, hyper_params, use_cuda)\n', (324, 354), False, 'from rlcycle.common.abstract.loss import Loss\n'), ((1206, 1249), 'rlcycle.common.abstract.loss.Loss.__init__', 'Loss.__init__', (['self', 'hyper_params', 'use_cuda'], {}), '(self, hyper_params, use_cuda)\n', (1219, 1249), False, 'from rlcycle.common.abstract.loss import Loss\n'), ((2173, 2225), 'torch.mean', 'torch.mean', (['quantile_huber_loss'], {'dim': '(1)', 'keepdim': '(True)'}), '(quantile_huber_loss, dim=1, keepdim=True)\n', (2183, 2225), False, 'import torch\n'), ((2553, 2596), 'rlcycle.common.abstract.loss.Loss.__init__', 'Loss.__init__', (['self', 'hyper_params', 'use_cuda'], {}), '(self, hyper_params, use_cuda)\n', (2566, 2596), False, 'from rlcycle.common.abstract.loss import Loss\n'), ((3816, 3834), 'torch.log', 'torch.log', (['z_dists'], {}), '(z_dists)\n', (3825, 3834), False, 'import torch\n'), ((666, 681), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (679, 681), False, 'import torch\n'), ((1616, 1631), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1629, 1631), False, 'import torch\n'), ((3265, 3280), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3278, 3280), False, 'import torch\n'), ((3654, 3713), 'torch.clamp', 'torch.clamp', (['target_z'], {'min': 'network.v_min', 'max': 'network.v_max'}), '(target_z, min=network.v_min, max=network.v_max)\n', (3665, 3713), False, 'import torch\n'), ((2899, 2966), 'torch.linspace', 'torch.linspace', (['(0)', '((batch_size - 1) * network.num_atoms)', 'batch_size'], {}), '(0, (batch_size - 1) * network.num_atoms, batch_size)\n', (2913, 2966), False, 'import torch\n')] |
# -*- coding: utf8 -*-
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import os
import platform
import time
import pytest
import zmq
from zmq.tests import BaseZMQTestCase, skip_pypy
class TestDraftSockets(BaseZMQTestCase):
def setUp(self):
if not zmq.DRAFT_API:
raise pytest.skip("draft api unavailable")
super(TestDraftSockets, self).setUp()
def test_client_server(self):
client, server = self.create_bound_pair(zmq.CLIENT, zmq.SERVER)
client.send(b'request')
msg = self.recv(server, copy=False)
assert msg.routing_id is not None
server.send(b'reply', routing_id=msg.routing_id)
reply = self.recv(client)
assert reply == b'reply'
def test_radio_dish(self):
dish, radio = self.create_bound_pair(zmq.DISH, zmq.RADIO)
dish.rcvtimeo = 250
group = 'mygroup'
dish.join(group)
received_count = 0
received = set()
sent = set()
for i in range(10):
msg = str(i).encode('ascii')
sent.add(msg)
radio.send(msg, group=group)
try:
recvd = dish.recv()
except zmq.Again:
time.sleep(0.1)
else:
received.add(recvd)
received_count += 1
# assert that we got *something*
assert len(received.intersection(sent)) >= 5
| [
"pytest.skip",
"time.sleep"
] | [((341, 377), 'pytest.skip', 'pytest.skip', (['"""draft api unavailable"""'], {}), "('draft api unavailable')\n", (352, 377), False, 'import pytest\n'), ((1258, 1273), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1268, 1273), False, 'import time\n')] |
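Outside the test harness above, the CLIENT/SERVER pattern boils down to a few calls. The sketch below is illustrative only: it assumes a pyzmq build with the draft API enabled, and the localhost endpoint is an arbitrary placeholder.
import zmq

ctx = zmq.Context.instance()
server = ctx.socket(zmq.SERVER)
server.bind('tcp://127.0.0.1:5555')
client = ctx.socket(zmq.CLIENT)
client.connect('tcp://127.0.0.1:5555')

client.send(b'request')
frame = server.recv(copy=False)                      # Frame carries the peer's routing_id
server.send(b'reply', routing_id=frame.routing_id)  # answer that specific client
assert client.recv() == b'reply'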
# -*- coding:utf-8 -*-
# Author: RubanSeven
# import cv2
import numpy as np
# from transform import get_perspective_transform, warp_perspective
from warp_mls import WarpMLS
def distort(src, segment):
img_h, img_w = src.shape[:2]
cut = img_w // segment
thresh = cut // 3
# thresh = img_h // segment // 3
# thresh = img_h // 5
src_pts = list()
dst_pts = list()
src_pts.append([0, 0])
src_pts.append([img_w, 0])
src_pts.append([img_w, img_h])
src_pts.append([0, img_h])
dst_pts.append([np.random.randint(thresh), np.random.randint(thresh)])
dst_pts.append([img_w - np.random.randint(thresh), np.random.randint(thresh)])
dst_pts.append([img_w - np.random.randint(thresh), img_h - np.random.randint(thresh)])
dst_pts.append([np.random.randint(thresh), img_h - np.random.randint(thresh)])
half_thresh = thresh * 0.5
for cut_idx in np.arange(1, segment, 1):
src_pts.append([cut * cut_idx, 0])
src_pts.append([cut * cut_idx, img_h])
dst_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
np.random.randint(thresh) - half_thresh])
dst_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
img_h + np.random.randint(thresh) - half_thresh])
trans = WarpMLS(src, src_pts, dst_pts, img_w, img_h)
dst = trans.generate()
return dst
def stretch(src, segment):
img_h, img_w = src.shape[:2]
cut = img_w // segment
thresh = cut * 4 // 5
# thresh = img_h // segment // 3
# thresh = img_h // 5
src_pts = list()
dst_pts = list()
src_pts.append([0, 0])
src_pts.append([img_w, 0])
src_pts.append([img_w, img_h])
src_pts.append([0, img_h])
dst_pts.append([0, 0])
dst_pts.append([img_w, 0])
dst_pts.append([img_w, img_h])
dst_pts.append([0, img_h])
half_thresh = thresh * 0.5
for cut_idx in np.arange(1, segment, 1):
move = np.random.randint(thresh) - half_thresh
src_pts.append([cut * cut_idx, 0])
src_pts.append([cut * cut_idx, img_h])
dst_pts.append([cut * cut_idx + move, 0])
dst_pts.append([cut * cut_idx + move, img_h])
trans = WarpMLS(src, src_pts, dst_pts, img_w, img_h)
dst = trans.generate()
return dst
def perspective(src):
img_h, img_w = src.shape[:2]
thresh = img_h // 2
src_pts = list()
dst_pts = list()
src_pts.append([0, 0])
src_pts.append([img_w, 0])
src_pts.append([img_w, img_h])
src_pts.append([0, img_h])
dst_pts.append([0, np.random.randint(thresh)])
dst_pts.append([img_w, np.random.randint(thresh)])
dst_pts.append([img_w, img_h - np.random.randint(thresh)])
dst_pts.append([0, img_h - np.random.randint(thresh)])
trans = WarpMLS(src, src_pts, dst_pts, img_w, img_h)
dst = trans.generate()
return dst
# def distort(src, segment):
# img_h, img_w = src.shape[:2]
# dst = np.zeros_like(src, dtype=np.uint8)
#
# cut = img_w // segment
# thresh = img_h // 8
#
# src_pts = list()
# # dst_pts = list()
#
# src_pts.append([-np.random.randint(thresh), -np.random.randint(thresh)])
# src_pts.append([-np.random.randint(thresh), img_h + np.random.randint(thresh)])
#
# # dst_pts.append([0, 0])
# # dst_pts.append([0, img_h])
# dst_box = np.array([[0, 0], [0, img_h], [cut, 0], [cut, img_h]], dtype=np.float32)
#
# half_thresh = thresh * 0.5
#
# for cut_idx in np.arange(1, segment, 1):
# src_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
# np.random.randint(thresh) - half_thresh])
# src_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
# img_h + np.random.randint(thresh) - half_thresh])
#
# # dst_pts.append([cut * i, 0])
# # dst_pts.append([cut * i, img_h])
#
# src_box = np.array(src_pts[-4:-2] + src_pts[-2:-1] + src_pts[-1:], dtype=np.float32)
#
# # mat = cv2.getPerspectiveTransform(src_box, dst_box)
# # print(mat)
# # dst[:, cut * (cut_idx - 1):cut * cut_idx] = cv2.warpPerspective(src, mat, (cut, img_h))
#
# mat = get_perspective_transform(dst_box, src_box)
# dst[:, cut * (cut_idx - 1):cut * cut_idx] = warp_perspective(src, mat, (cut, img_h))
# # print(mat)
#
# src_pts.append([img_w + np.random.randint(thresh) - half_thresh,
# np.random.randint(thresh) - half_thresh])
# src_pts.append([img_w + np.random.randint(thresh) - half_thresh,
# img_h + np.random.randint(thresh) - half_thresh])
# src_box = np.array(src_pts[-4:-2] + src_pts[-2:-1] + src_pts[-1:], dtype=np.float32)
#
# # mat = cv2.getPerspectiveTransform(src_box, dst_box)
# # dst[:, cut * (segment - 1):] = cv2.warpPerspective(src, mat, (img_w - cut * (segment - 1), img_h))
# mat = get_perspective_transform(dst_box, src_box)
# dst[:, cut * (segment - 1):] = warp_perspective(src, mat, (img_w - cut * (segment - 1), img_h))
#
# return dst
| [
"warp_mls.WarpMLS",
"numpy.random.randint",
"numpy.arange"
] | [((934, 958), 'numpy.arange', 'np.arange', (['(1)', 'segment', '(1)'], {}), '(1, segment, 1)\n', (943, 958), True, 'import numpy as np\n'), ((1373, 1417), 'warp_mls.WarpMLS', 'WarpMLS', (['src', 'src_pts', 'dst_pts', 'img_w', 'img_h'], {}), '(src, src_pts, dst_pts, img_w, img_h)\n', (1380, 1417), False, 'from warp_mls import WarpMLS\n'), ((2014, 2038), 'numpy.arange', 'np.arange', (['(1)', 'segment', '(1)'], {}), '(1, segment, 1)\n', (2023, 2038), True, 'import numpy as np\n'), ((2309, 2353), 'warp_mls.WarpMLS', 'WarpMLS', (['src', 'src_pts', 'dst_pts', 'img_w', 'img_h'], {}), '(src, src_pts, dst_pts, img_w, img_h)\n', (2316, 2353), False, 'from warp_mls import WarpMLS\n'), ((2913, 2957), 'warp_mls.WarpMLS', 'WarpMLS', (['src', 'src_pts', 'dst_pts', 'img_w', 'img_h'], {}), '(src, src_pts, dst_pts, img_w, img_h)\n', (2920, 2957), False, 'from warp_mls import WarpMLS\n'), ((563, 588), 'numpy.random.randint', 'np.random.randint', (['thresh'], {}), '(thresh)\n', (580, 588), True, 'import numpy as np\n'), ((590, 615), 'numpy.random.randint', 'np.random.randint', (['thresh'], {}), '(thresh)\n', (607, 615), True, 'import numpy as np\n'), ((674, 699), 'numpy.random.randint', 'np.random.randint', (['thresh'], {}), '(thresh)\n', (691, 699), True, 'import numpy as np\n'), ((815, 840), 'numpy.random.randint', 'np.random.randint', (['thresh'], {}), '(thresh)\n', (832, 840), True, 'import numpy as np\n'), ((2056, 2081), 'numpy.random.randint', 'np.random.randint', (['thresh'], {}), '(thresh)\n', (2073, 2081), True, 'import numpy as np\n'), ((2690, 2715), 'numpy.random.randint', 'np.random.randint', (['thresh'], {}), '(thresh)\n', (2707, 2715), True, 'import numpy as np\n'), ((2746, 2771), 'numpy.random.randint', 'np.random.randint', (['thresh'], {}), '(thresh)\n', (2763, 2771), True, 'import numpy as np\n'), ((647, 672), 'numpy.random.randint', 'np.random.randint', (['thresh'], {}), '(thresh)\n', (664, 672), True, 'import numpy as np\n'), ((731, 756), 'numpy.random.randint', 'np.random.randint', (['thresh'], {}), '(thresh)\n', (748, 756), True, 'import numpy as np\n'), ((766, 791), 'numpy.random.randint', 'np.random.randint', (['thresh'], {}), '(thresh)\n', (783, 791), True, 'import numpy as np\n'), ((850, 875), 'numpy.random.randint', 'np.random.randint', (['thresh'], {}), '(thresh)\n', (867, 875), True, 'import numpy as np\n'), ((2810, 2835), 'numpy.random.randint', 'np.random.randint', (['thresh'], {}), '(thresh)\n', (2827, 2835), True, 'import numpy as np\n'), ((2870, 2895), 'numpy.random.randint', 'np.random.randint', (['thresh'], {}), '(thresh)\n', (2887, 2895), True, 'import numpy as np\n'), ((1159, 1184), 'numpy.random.randint', 'np.random.randint', (['thresh'], {}), '(thresh)\n', (1176, 1184), True, 'import numpy as np\n'), ((1093, 1118), 'numpy.random.randint', 'np.random.randint', (['thresh'], {}), '(thresh)\n', (1110, 1118), True, 'import numpy as np\n'), ((1242, 1267), 'numpy.random.randint', 'np.random.randint', (['thresh'], {}), '(thresh)\n', (1259, 1267), True, 'import numpy as np\n'), ((1316, 1341), 'numpy.random.randint', 'np.random.randint', (['thresh'], {}), '(thresh)\n', (1333, 1341), True, 'import numpy as np\n')] |
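A minimal sketch of how the three augmentations above are typically applied. It assumes OpenCV is installed (the file itself only hints at it via the commented-out import), and 'sample.jpg' is a placeholder path.
import cv2

img = cv2.imread('sample.jpg')          # hypothetical input image (H x W x 3)
warped = distort(img, 4)                 # piecewise random warp over 4 segments
jittered = stretch(img, 4)                # horizontal jitter of the vertical cuts
tilted = perspective(img)                 # random perspective-style warp
cv2.imwrite('augmented.jpg', tilted)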
from collections import defaultdict
class Graph:
def __init__(self):
self.graph = defaultdict(list)
def addEdge(self, starting_vertex, end_vertex):
self.graph[starting_vertex].append(end_vertex)
def printAllPaths(self, starting_vertex, target_vertex):
visitedVertices = defaultdict(bool)
self.resultPaths = []
self.dfsUtil(starting_vertex, visitedVertices, target_vertex, "")
return self.resultPaths
def dfsUtil(self, current_vertex, visitedVertices, target_vertex, output_string):
visitedVertices[current_vertex] = True
if output_string == "":
output_string = current_vertex
else:
output_string = output_string + "->" + current_vertex
if current_vertex == target_vertex:
self.resultPaths.append(output_string)
return
for vertex in self.graph[current_vertex]:
if visitedVertices[vertex] == False:
self.dfsUtil(vertex, visitedVertices, target_vertex, output_string)
visitedVertices[vertex] = False
if __name__ == "__main__":
g = Graph()
g.addEdge("A", "B")
g.addEdge("B", "D")
g.addEdge("A", "D")
g.addEdge("C", "A")
g.addEdge("C", "B")
g.addEdge("A", "C")
paths = g.printAllPaths("A", "B")
print(paths)
| [
"collections.defaultdict"
] | [((96, 113), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (107, 113), False, 'from collections import defaultdict\n'), ((310, 327), 'collections.defaultdict', 'defaultdict', (['bool'], {}), '(bool)\n', (321, 327), False, 'from collections import defaultdict\n')] |
from devito.ir import Call
from devito.passes.iet.definitions import DataManager
from devito.passes.iet.langbase import LangBB
__all__ = ['CBB', 'CDataManager']
class CBB(LangBB):
mapper = {
'aligned': lambda i:
'__attribute__((aligned(%d)))' % i,
'host-alloc': lambda i, j, k:
Call('posix_memalign', (i, j, k)),
'host-free': lambda i:
Call('free', (i,)),
}
class CDataManager(DataManager):
lang = CBB
| [
"devito.ir.Call"
] | [((326, 359), 'devito.ir.Call', 'Call', (['"""posix_memalign"""', '(i, j, k)'], {}), "('posix_memalign', (i, j, k))\n", (330, 359), False, 'from devito.ir import Call\n'), ((404, 422), 'devito.ir.Call', 'Call', (['"""free"""', '(i,)'], {}), "('free', (i,))\n", (408, 422), False, 'from devito.ir import Call\n')] |
# PyTorch
import torch
from torch.utils.data import IterableDataset, DataLoader
from donkeycar.utils import train_test_split
from donkeycar.parts.tub_v2 import Tub
from torchvision import transforms
from typing import List, Any
from donkeycar.pipeline.types import TubRecord, TubDataset
from donkeycar.pipeline.sequence import TubSequence
import pytorch_lightning as pl
def get_default_transform(for_video=False, for_inference=False, resize=True):
"""
Creates a default transform to work with torchvision models
Video transform:
All pre-trained models expect input images normalized in the same way,
i.e. mini-batches of 3-channel RGB videos of shape (3 x T x H x W),
where H and W are expected to be 112, and T is a number of video frames
in a clip. The images have to be loaded in to a range of [0, 1] and
then normalized using mean = [0.43216, 0.394666, 0.37645] and
std = [0.22803, 0.22145, 0.216989].
"""
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
input_size = (224, 224)
if for_video:
mean = [0.43216, 0.394666, 0.37645]
std = [0.22803, 0.22145, 0.216989]
input_size = (112, 112)
transform_items = [
transforms.ToTensor(),
transforms.Normalize(mean=mean, std=std)
]
if resize:
transform_items.insert(0, transforms.Resize(input_size))
return transforms.Compose(transform_items)
class TorchTubDataset(IterableDataset):
'''
Loads the dataset, and creates a train/test split.
'''
def __init__(self, config, records: List[TubRecord], transform=None):
"""Create a PyTorch Tub Dataset
Args:
config (object): the configuration information
records (List[TubRecord]): a list of tub records
transform (function, optional): a transform to apply to the data
"""
self.config = config
# Handle the transforms
if transform:
self.transform = transform
else:
self.transform = get_default_transform()
self.sequence = TubSequence(records)
self.pipeline = self._create_pipeline()
self.len = len(records)
def _create_pipeline(self):
""" This can be overridden if more complicated pipelines are
required """
def y_transform(record: TubRecord):
angle: float = record.underlying['user/angle']
throttle: float = record.underlying['user/throttle']
predictions = torch.tensor([angle, throttle], dtype=torch.float)
# Normalize to be between [0, 1]
# angle and throttle are originally between [-1, 1]
predictions = (predictions + 1) / 2
return predictions
def x_transform(record: TubRecord):
# Loads the result of Image.open()
img_arr = record.image(cached=True, as_nparray=False)
return self.transform(img_arr)
# Build pipeline using the transformations
pipeline = self.sequence.build_pipeline(x_transform=x_transform,
y_transform=y_transform)
return pipeline
def __len__(self):
return len(self.sequence)
def __iter__(self):
return iter(self.pipeline)
class TorchTubDataModule(pl.LightningDataModule):
def __init__(self, config: Any, tub_paths: List[str], transform=None):
"""Create a PyTorch Lightning Data Module to contain all data loading logic
Args:
config (object): the configuration information
tub_paths (List[str]): a list of paths to the tubs to use (minimum size of 1).
Each tub path corresponds to another training run.
transform (function, optional): a transform to apply to the data
"""
super().__init__()
self.config = config
self.tub_paths = tub_paths
# Handle the transforms
if transform:
self.transform = transform
else:
self.transform = get_default_transform()
self.tubs: List[Tub] = [Tub(tub_path, read_only=True)
for tub_path in self.tub_paths]
self.records: List[TubRecord] = []
def setup(self, stage=None):
"""Load all the tub data and set up the datasets.
Args:
stage ([string], optional): setup expects a string arg stage.
It is used to separate setup logic for trainer.fit
and trainer.test. Defaults to None.
"""
# Loop through all the different tubs and load all the records for each of them
for tub in self.tubs:
for underlying in tub:
record = TubRecord(self.config, tub.base_path,
underlying=underlying)
self.records.append(record)
train_records, val_records = train_test_split(
self.records, test_size=(1. - self.config.TRAIN_TEST_SPLIT))
assert len(val_records) > 0, "Not enough validation data. Add more data"
self.train_dataset = TorchTubDataset(
self.config, train_records, transform=self.transform)
self.val_dataset = TorchTubDataset(
self.config, val_records, transform=self.transform)
def train_dataloader(self):
# The number of workers are set to 0 to avoid errors on Macs and Windows
# See: https://github.com/rusty1s/pytorch_geometric/issues/366#issuecomment-498022534
return DataLoader(self.train_dataset, batch_size=self.config.BATCH_SIZE, num_workers=0)
def val_dataloader(self):
# The number of workers are set to 0 to avoid errors on Macs and Windows
# See: https://github.com/rusty1s/pytorch_geometric/issues/366#issuecomment-498022534
return DataLoader(self.val_dataset, batch_size=self.config.BATCH_SIZE, num_workers=0)
| [
"donkeycar.pipeline.sequence.TubSequence",
"donkeycar.utils.train_test_split",
"torch.tensor",
"donkeycar.parts.tub_v2.Tub",
"torchvision.transforms.Normalize",
"torch.utils.data.DataLoader",
"torchvision.transforms.Resize",
"torchvision.transforms.ToTensor",
"donkeycar.pipeline.types.TubRecord",
"torchvision.transforms.Compose"
] | [((1394, 1429), 'torchvision.transforms.Compose', 'transforms.Compose', (['transform_items'], {}), '(transform_items)\n', (1412, 1429), False, 'from torchvision import transforms\n'), ((1223, 1244), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1242, 1244), False, 'from torchvision import transforms\n'), ((1254, 1294), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': 'mean', 'std': 'std'}), '(mean=mean, std=std)\n', (1274, 1294), False, 'from torchvision import transforms\n'), ((2097, 2117), 'donkeycar.pipeline.sequence.TubSequence', 'TubSequence', (['records'], {}), '(records)\n', (2108, 2117), False, 'from donkeycar.pipeline.sequence import TubSequence\n'), ((4993, 5069), 'donkeycar.utils.train_test_split', 'train_test_split', (['self.records'], {'test_size': '(1.0 - self.config.TRAIN_TEST_SPLIT)'}), '(self.records, test_size=1.0 - self.config.TRAIN_TEST_SPLIT)\n', (5009, 5069), False, 'from donkeycar.utils import train_test_split\n'), ((5610, 5695), 'torch.utils.data.DataLoader', 'DataLoader', (['self.train_dataset'], {'batch_size': 'self.config.BATCH_SIZE', 'num_workers': '(0)'}), '(self.train_dataset, batch_size=self.config.BATCH_SIZE, num_workers=0\n )\n', (5620, 5695), False, 'from torch.utils.data import IterableDataset, DataLoader\n'), ((5912, 5990), 'torch.utils.data.DataLoader', 'DataLoader', (['self.val_dataset'], {'batch_size': 'self.config.BATCH_SIZE', 'num_workers': '(0)'}), '(self.val_dataset, batch_size=self.config.BATCH_SIZE, num_workers=0)\n', (5922, 5990), False, 'from torch.utils.data import IterableDataset, DataLoader\n'), ((1351, 1380), 'torchvision.transforms.Resize', 'transforms.Resize', (['input_size'], {}), '(input_size)\n', (1368, 1380), False, 'from torchvision import transforms\n'), ((2520, 2570), 'torch.tensor', 'torch.tensor', (['[angle, throttle]'], {'dtype': 'torch.float'}), '([angle, throttle], dtype=torch.float)\n', (2532, 2570), False, 'import torch\n'), ((4138, 4167), 'donkeycar.parts.tub_v2.Tub', 'Tub', (['tub_path'], {'read_only': '(True)'}), '(tub_path, read_only=True)\n', (4141, 4167), False, 'from donkeycar.parts.tub_v2 import Tub\n'), ((4815, 4875), 'donkeycar.pipeline.types.TubRecord', 'TubRecord', (['self.config', 'tub.base_path'], {'underlying': 'underlying'}), '(self.config, tub.base_path, underlying=underlying)\n', (4824, 4875), False, 'from donkeycar.pipeline.types import TubRecord, TubDataset\n')] |
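A rough sketch of how the data module above can be exercised on its own; cfg stands for any donkeycar config object exposing TRAIN_TEST_SPLIT and BATCH_SIZE, and the tub path is a placeholder.
data_module = TorchTubDataModule(cfg, ['data/tub_1'])  # cfg and path are placeholders
data_module.setup()
batch_x, batch_y = next(iter(data_module.train_dataloader()))
print(batch_x.shape)   # (BATCH_SIZE, 3, 224, 224) image tensors
print(batch_y.shape)   # (BATCH_SIZE, 2) normalized [angle, throttle] targets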
import os
from tornado.template import Template
__SNIPPET__ = os.path.join(os.path.dirname(os.path.abspath(__file__)), '_snippet')
def T(name, **kw):
t = Template(open(os.path.join(__SNIPPET__, name + '.html'), 'rb').read())
return t.generate(**dict([('template_file', name)] + globals().items() + kw.items()))
| [
"os.path.abspath",
"os.path.join"
] | [((92, 117), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (107, 117), False, 'import os\n'), ((172, 213), 'os.path.join', 'os.path.join', (['__SNIPPET__', "(name + '.html')"], {}), "(__SNIPPET__, name + '.html')\n", (184, 213), False, 'import os\n')] |
"""
Support for getting the disk temperature of a host.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.hddtemp/
"""
import logging
from datetime import timedelta
from telnetlib import Telnet
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME, CONF_HOST, CONF_PORT, TEMP_CELSIUS, TEMP_FAHRENHEIT, CONF_DISKS)
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
ATTR_DEVICE = 'device'
ATTR_MODEL = 'model'
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 7634
DEFAULT_NAME = 'HD Temperature'
DEFAULT_TIMEOUT = 5
SCAN_INTERVAL = timedelta(minutes=1)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_DISKS, default=[]): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the HDDTemp sensor."""
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
disks = config.get(CONF_DISKS)
hddtemp = HddTempData(host, port)
hddtemp.update()
if hddtemp.data is None:
return False
if not disks:
disks = [next(iter(hddtemp.data)).split('|')[0]]
dev = []
for disk in disks:
if disk in hddtemp.data:
dev.append(HddTempSensor(name, disk, hddtemp))
add_devices(dev, True)
class HddTempSensor(Entity):
"""Representation of a HDDTemp sensor."""
def __init__(self, name, disk, hddtemp):
"""Initialize a HDDTemp sensor."""
self.hddtemp = hddtemp
self.disk = disk
self._name = '{} {}'.format(name, disk)
self._state = None
self._details = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
if self._details[3] == 'C':
return TEMP_CELSIUS
return TEMP_FAHRENHEIT
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
return {
ATTR_DEVICE: self._details[0],
ATTR_MODEL: self._details[1],
}
def update(self):
"""Get the latest data from HDDTemp daemon and updates the state."""
self.hddtemp.update()
if self.hddtemp.data and self.disk in self.hddtemp.data:
self._details = self.hddtemp.data[self.disk].split('|')
self._state = self._details[2]
else:
self._state = None
class HddTempData(object):
"""Get the latest data from HDDTemp and update the states."""
def __init__(self, host, port):
"""Initialize the data object."""
self.host = host
self.port = port
self.data = None
def update(self):
"""Get the latest data from HDDTemp running as daemon."""
try:
connection = Telnet(
host=self.host, port=self.port, timeout=DEFAULT_TIMEOUT)
data = connection.read_all().decode(
'ascii').lstrip('|').rstrip('|').split('||')
self.data = {data[i].split('|')[0]: data[i]
for i in range(0, len(data), 1)}
except ConnectionRefusedError:
_LOGGER.error(
"HDDTemp is not available at %s:%s", self.host, self.port)
self.data = None
| [
"logging.getLogger",
"datetime.timedelta",
"voluptuous.Optional",
"voluptuous.All",
"telnetlib.Telnet"
] | [((577, 604), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (594, 604), False, 'import logging\n'), ((767, 787), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (776, 787), False, 'from datetime import timedelta\n'), ((836, 872), 'voluptuous.Optional', 'vol.Optional', (['CONF_DISKS'], {'default': '[]'}), '(CONF_DISKS, default=[])\n', (848, 872), True, 'import voluptuous as vol\n'), ((916, 961), 'voluptuous.Optional', 'vol.Optional', (['CONF_HOST'], {'default': 'DEFAULT_HOST'}), '(CONF_HOST, default=DEFAULT_HOST)\n', (928, 961), True, 'import voluptuous as vol\n'), ((978, 1023), 'voluptuous.Optional', 'vol.Optional', (['CONF_PORT'], {'default': 'DEFAULT_PORT'}), '(CONF_PORT, default=DEFAULT_PORT)\n', (990, 1023), True, 'import voluptuous as vol\n'), ((1038, 1083), 'voluptuous.Optional', 'vol.Optional', (['CONF_NAME'], {'default': 'DEFAULT_NAME'}), '(CONF_NAME, default=DEFAULT_NAME)\n', (1050, 1083), True, 'import voluptuous as vol\n'), ((874, 910), 'voluptuous.All', 'vol.All', (['cv.ensure_list', '[cv.string]'], {}), '(cv.ensure_list, [cv.string])\n', (881, 910), True, 'import voluptuous as vol\n'), ((3382, 3445), 'telnetlib.Telnet', 'Telnet', ([], {'host': 'self.host', 'port': 'self.port', 'timeout': 'DEFAULT_TIMEOUT'}), '(host=self.host, port=self.port, timeout=DEFAULT_TIMEOUT)\n', (3388, 3445), False, 'from telnetlib import Telnet\n')] |
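For reference, a standalone sketch of the HddTempData helper above, used outside Home Assistant; it assumes a hddtemp daemon is listening on the default host and port.
collector = HddTempData('localhost', 7634)
collector.update()
if collector.data:
    for disk, raw in collector.data.items():
        device, model, temp, unit = raw.split('|')
        print('{} ({}): {} {}'.format(device, model, temp, unit))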
import telegram
from django.conf import settings
from django.shortcuts import redirect
from django.utils.decorators import method_decorator
from django.views.generic import View
from django.views.decorators.csrf import csrf_exempt
from braces.views import CsrfExemptMixin
from rest_framework.authentication import BasicAuthentication
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.permissions import AllowAny
from .bots import TelegramBot
from .models import TelegramUser as User
@method_decorator(csrf_exempt, name='dispatch')
class TelegramBotView(APIView):
permission_classes = (AllowAny, )
def post(self, request, *args, **kwargs):
context = request.data
bot = TelegramBot(context)
user, _ = User.objects.get_or_create(
id=bot.sender['id'],
defaults={
'first_name': bot.sender['first_name'],
'last_name': bot.sender.get('last_name', ''),
'username': bot.sender.get('username', ''),
'is_bot': bot.sender.get('is_bot', False)
}
)
user.access_count += 1
user.save()
bot.process(user)
return Response(status=status.HTTP_200_OK)
| [
"rest_framework.response.Response",
"django.utils.decorators.method_decorator"
] | [((580, 626), 'django.utils.decorators.method_decorator', 'method_decorator', (['csrf_exempt'], {'name': '"""dispatch"""'}), "(csrf_exempt, name='dispatch')\n", (596, 626), False, 'from django.utils.decorators import method_decorator\n'), ((1264, 1299), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_200_OK'}), '(status=status.HTTP_200_OK)\n', (1272, 1299), False, 'from rest_framework.response import Response\n')] |
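A hedged sketch of how such a webhook view is usually routed in a Django project; the URL path, module layout, and route name below are illustrative and not taken from this repository.
# urls.py (illustrative)
from django.urls import path
from .views import TelegramBotView

urlpatterns = [
    path('telegram/webhook/', TelegramBotView.as_view(), name='telegram-webhook'),
]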
import os
from datetime import datetime
from os.path import join
import pathlib
from tqdm import tqdm
import argparse
import torch
from torch import nn, optim
from torch.autograd import Variable
import torchvision
from torchvision.transforms import Pad
from torchvision.utils import make_grid
import repackage
repackage.up()
from imagenet.models import CGN
from imagenet.config import get_cfg_defaults
from shared.losses import *
from utils import Optimizers
from inception_score import *
def save_sample_sheet(cgn, u_fixed, sample_path, ep_str):
cgn.eval()
dev = u_fixed.to(cgn.get_device())
ys = [15, 251, 330, 382, 385, 483, 559, 751, 938, 947, 999]
to_save = []
with torch.no_grad():
for y in ys:
# generate
y_vec = cgn.get_class_vec(y, sz=1)
inp = (u_fixed.to(dev), y_vec.to(dev), cgn.truncation)
x_gt, mask, premask, foreground, background, bg_mask = cgn(inp)
x_gen = mask * foreground + (1 - mask) * background
# build class grid
to_plot = [premask, foreground, background, x_gen, x_gt]
grid = make_grid(torch.cat(to_plot).detach().cpu(),
nrow=len(to_plot), padding=2, normalize=True)
# add unnormalized mask
mask = Pad(2)(mask[0].repeat(3, 1, 1)).detach().cpu()
grid = torch.cat([mask, grid], 2)
# save to disk
to_save.append(grid)
del to_plot, mask, premask, foreground, background, x_gen, x_gt
# save the image
path = join(sample_path, f'cls_sheet_' + ep_str + '.png')
torchvision.utils.save_image(torch.cat(to_save, 1), path)
cgn.train()
def save_sample_single(cgn, u_fixed, sample_path, ep_str):
cgn.eval()
dev = u_fixed.to(cgn.get_device())
ys = [15, 251, 330, 382, 385, 483, 559, 751, 938, 947, 999]
with torch.no_grad():
for y in ys:
# generate
y_vec = cgn.get_class_vec(y, sz=1)
inp = (u_fixed.to(dev), y_vec.to(dev), cgn.truncation)
_, mask, premask, foreground, background, _ = cgn(inp)
x_gen = mask * foreground + (1 - mask) * background
# save_images
path = join(sample_path, f'{y}_1_premask_' + ep_str + '.png')
torchvision.utils.save_image(premask, path, normalize=True)
path = join(sample_path, f'{y}_2_mask_' + ep_str + '.png')
torchvision.utils.save_image(mask, path, normalize=True)
path = join(sample_path, f'{y}_3_texture_' + ep_str + '.png')
torchvision.utils.save_image(foreground, path, normalize=True)
path = join(sample_path, f'{y}_4_bgs_' + ep_str + '.png')
torchvision.utils.save_image(background, path, normalize=True)
path = join(sample_path, f'{y}_5_gen_ims_' + ep_str + '.png')
torchvision.utils.save_image(x_gen, path, normalize=True)
cgn.train()
def fit(cfg, cgn, opts, losses):
inception_score_val = list()
# total number of episodes, accounted for batch accumulation
episodes = cfg.TRAIN.EPISODES
episodes *= cfg.TRAIN.BATCH_ACC
# directories for experiments
time_str = datetime.now().strftime("%Y_%m_%d_%H_%M")
if cfg.WEIGHTS_PATH:
weights_path = str(pathlib.Path(cfg.WEIGHTS_PATH).parent)
start_ep = int(pathlib.Path(cfg.WEIGHTS_PATH).stem[3:])
sample_path = weights_path.replace('weights', 'samples')
ep_range = (start_ep, start_ep + episodes)
else:
model_path = join('imagenet', 'experiments',
f'cgn_{time_str}_{cfg.MODEL_NAME}')
weights_path = join(model_path, 'weights')
sample_path = join(model_path, 'samples')
pathlib.Path(weights_path).mkdir(parents=True, exist_ok=True)
pathlib.Path(sample_path).mkdir(parents=True, exist_ok=True)
ep_range = (0, episodes)
# fixed noise sample
u_fixed_path = join('imagenet', 'experiments', 'u_fixed.pt')
if not os.path.isfile(u_fixed_path) or cfg.LOG.SAMPLED_FIXED_NOISE:
u_fixed = cgn.get_noise_vec()
torch.save(u_fixed, u_fixed_path)
else:
u_fixed = torch.load(u_fixed_path)
# Training Loop
cgn.train()
L_l1, L_perc, L_binary, L_mask, L_text, L_bg = losses
save_samples = save_sample_single if cfg.LOG.SAVE_SINGLES else save_sample_sheet
pbar = tqdm(range(*ep_range))
for i, ep in enumerate(pbar):
x_gt, mask, premask, foreground, background, background_mask = cgn()
x_gen = mask * foreground + (1 - mask) * background
# Losses
losses_g = {}
losses_g['l1'] = L_l1(x_gen, x_gt)
losses_g['perc'] = L_perc(x_gen, x_gt)
losses_g['binary'] = L_binary(mask)
losses_g['mask'] = L_mask(mask)
losses_g['perc_text'] = L_text(x_gt, mask, foreground)
losses_g['bg'] = L_bg(background_mask)
# backprop
losses_g = {k: v.mean() for k, v in losses_g.items()}
g_loss = sum(losses_g.values())
g_loss.backward()
if (i+1) % cfg.TRAIN.BATCH_ACC == 0:
opts.step(['shape', 'bg', 'texture'])
# Saving
if not i % cfg.LOG.SAVE_ITER:
ep_str = f'ep_{ep:07}'
save_samples(cgn, u_fixed, sample_path, ep_str)
torch.save(cgn.state_dict(), join(weights_path, ep_str + '.pth'))
# Logging
if cfg.LOG.LOSSES:
msg = ''.join([f"[{k}: {v:.3f}]" for k, v in losses_g.items()])
pbar.set_description(msg)
# Calculate Inception SCore
if cfg.LOG.INCEPTION_SCORE:
score, score_std = inception_score(x_gen)
inception_score_val.append(score)
def main(cfg):
# model init
cgn = CGN(
batch_sz=cfg.TRAIN.BATCH_SZ,
truncation=cfg.MODEL.TRUNCATION,
pretrained=True,
)
print("------CGN-------")
print(cgn)
if cfg.WEIGHTS_PATH:
weights = torch.load(cfg.WEIGHTS_PATH)
weights = {k.replace('module.', ''): v for k, v in weights.items()}
cgn.load_state_dict(weights)
# optimizers
opts = Optimizers()
opts.set('shape', cgn.f_shape, cfg.LR.SHAPE)
opts.set('texture', cgn.f_text, cfg.LR.TEXTURE)
opts.set('bg', cgn.f_bg, cfg.LR.BG)
# losses
L_l1 = ReconstructionLoss(mode='l1', loss_weight=cfg.LAMBDA.L1)
L_perc = PerceptualLoss(style_wgts=cfg.LAMBDA.PERC)
L_binary = BinaryLoss(loss_weight=cfg.LAMBDA.BINARY)
L_mask = MaskLoss(loss_weight=cfg.LAMBDA.MASK)
L_text = PercLossText(style_wgts=cfg.LAMBDA.TEXT)
L_bg = BackgroundLoss(loss_weight=cfg.LAMBDA.BG)
losses = (L_l1, L_perc, L_binary, L_mask, L_text, L_bg)
# push to device and train
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
cgn = cgn.to(device)
losses = (l.to(device) for l in losses)
fit(cfg, cgn, opts, losses)
def merge_args_and_cfg(args, cfg):
cfg.MODEL_NAME = args.model_name
cfg.WEIGHTS_PATH = args.weights_path
cfg.LOG.SAMPLED_FIXED_NOISE = args.sampled_fixed_noise
cfg.LOG.SAVE_SINGLES = args.save_singles
cfg.LOG.SAVE_ITER = args.save_iter
cfg.LOG.LOSSES = args.log_losses
cfg.LOG.INCEPTION_SCORE = True
cfg.TRAIN.EPISODES = args.episodes
cfg.TRAIN.BATCH_SZ = args.batch_sz
cfg.TRAIN.BATCH_ACC = args.batch_acc
cfg.MODEL.TRUNCATION = args.truncation
return cfg
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--model_name', default='tmp',
help='Weights and samples will be saved under experiments/model_name')
parser.add_argument('--weights_path', default='',
help='provide path to continue training')
parser.add_argument('--sampled_fixed_noise', default=False, action='store_true',
help='If you want a different noise vector than provided in the repo')
parser.add_argument('--save_singles', default=False, action='store_true',
help='Save single images instead of sheets')
parser.add_argument('--truncation', type=float, default=1.0,
help='Truncation value for noise sampling')
parser.add_argument('--episodes', type=int, default=300,
help="We don't do dataloading, hence, one episode = one gradient update.")
parser.add_argument('--batch_sz', type=int, default=1,
                        help='Batch size, use in conjunction with batch_acc')
parser.add_argument('--batch_acc', type=int, default=4000,
help='pseudo_batch_size = batch_acc*batch size')
parser.add_argument('--save_iter', type=int, default=4000,
help='Save samples/weights every n iter')
parser.add_argument('--log_losses', default=False, action='store_true',
help='Print out losses')
args = parser.parse_args()
cfg = get_cfg_defaults()
cfg = merge_args_and_cfg(args, cfg)
print(cfg)
main(cfg)
| [
"utils.Optimizers",
"argparse.ArgumentParser",
"repackage.up",
"imagenet.config.get_cfg_defaults",
"pathlib.Path",
"torch.load",
"os.path.join",
"imagenet.models.CGN",
"os.path.isfile",
"datetime.datetime.now",
"torch.cuda.is_available",
"torch.save",
"torch.no_grad",
"torchvision.transforms.Pad",
"torchvision.utils.save_image",
"torch.cat"
] | [((311, 325), 'repackage.up', 'repackage.up', ([], {}), '()\n', (323, 325), False, 'import repackage\n'), ((1569, 1619), 'os.path.join', 'join', (['sample_path', "(f'cls_sheet_' + ep_str + '.png')"], {}), "(sample_path, f'cls_sheet_' + ep_str + '.png')\n", (1573, 1619), False, 'from os.path import join\n'), ((3970, 4015), 'os.path.join', 'join', (['"""imagenet"""', '"""experiments"""', '"""u_fixed.pt"""'], {}), "('imagenet', 'experiments', 'u_fixed.pt')\n", (3974, 4015), False, 'from os.path import join\n'), ((5780, 5866), 'imagenet.models.CGN', 'CGN', ([], {'batch_sz': 'cfg.TRAIN.BATCH_SZ', 'truncation': 'cfg.MODEL.TRUNCATION', 'pretrained': '(True)'}), '(batch_sz=cfg.TRAIN.BATCH_SZ, truncation=cfg.MODEL.TRUNCATION,\n pretrained=True)\n', (5783, 5866), False, 'from imagenet.models import CGN\n'), ((6153, 6165), 'utils.Optimizers', 'Optimizers', ([], {}), '()\n', (6163, 6165), False, 'from utils import Optimizers\n'), ((7479, 7504), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7502, 7504), False, 'import argparse\n'), ((8964, 8982), 'imagenet.config.get_cfg_defaults', 'get_cfg_defaults', ([], {}), '()\n', (8980, 8982), False, 'from imagenet.config import get_cfg_defaults\n'), ((695, 710), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (708, 710), False, 'import torch\n'), ((1653, 1674), 'torch.cat', 'torch.cat', (['to_save', '(1)'], {}), '(to_save, 1)\n', (1662, 1674), False, 'import torch\n'), ((1887, 1902), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1900, 1902), False, 'import torch\n'), ((3558, 3625), 'os.path.join', 'join', (['"""imagenet"""', '"""experiments"""', 'f"""cgn_{time_str}_{cfg.MODEL_NAME}"""'], {}), "('imagenet', 'experiments', f'cgn_{time_str}_{cfg.MODEL_NAME}')\n", (3562, 3625), False, 'from os.path import join\n'), ((3675, 3702), 'os.path.join', 'join', (['model_path', '"""weights"""'], {}), "(model_path, 'weights')\n", (3679, 3702), False, 'from os.path import join\n'), ((3725, 3752), 'os.path.join', 'join', (['model_path', '"""samples"""'], {}), "(model_path, 'samples')\n", (3729, 3752), False, 'from os.path import join\n'), ((4134, 4167), 'torch.save', 'torch.save', (['u_fixed', 'u_fixed_path'], {}), '(u_fixed, u_fixed_path)\n', (4144, 4167), False, 'import torch\n'), ((4196, 4220), 'torch.load', 'torch.load', (['u_fixed_path'], {}), '(u_fixed_path)\n', (4206, 4220), False, 'import torch\n'), ((5982, 6010), 'torch.load', 'torch.load', (['cfg.WEIGHTS_PATH'], {}), '(cfg.WEIGHTS_PATH)\n', (5992, 6010), False, 'import torch\n'), ((1372, 1398), 'torch.cat', 'torch.cat', (['[mask, grid]', '(2)'], {}), '([mask, grid], 2)\n', (1381, 1398), False, 'import torch\n'), ((2239, 2293), 'os.path.join', 'join', (['sample_path', "(f'{y}_1_premask_' + ep_str + '.png')"], {}), "(sample_path, f'{y}_1_premask_' + ep_str + '.png')\n", (2243, 2293), False, 'from os.path import join\n'), ((2306, 2365), 'torchvision.utils.save_image', 'torchvision.utils.save_image', (['premask', 'path'], {'normalize': '(True)'}), '(premask, path, normalize=True)\n', (2334, 2365), False, 'import torchvision\n'), ((2385, 2436), 'os.path.join', 'join', (['sample_path', "(f'{y}_2_mask_' + ep_str + '.png')"], {}), "(sample_path, f'{y}_2_mask_' + ep_str + '.png')\n", (2389, 2436), False, 'from os.path import join\n'), ((2449, 2505), 'torchvision.utils.save_image', 'torchvision.utils.save_image', (['mask', 'path'], {'normalize': '(True)'}), '(mask, path, normalize=True)\n', (2477, 2505), False, 'import torchvision\n'), ((2525, 2579), 'os.path.join', 'join', 
(['sample_path', "(f'{y}_3_texture_' + ep_str + '.png')"], {}), "(sample_path, f'{y}_3_texture_' + ep_str + '.png')\n", (2529, 2579), False, 'from os.path import join\n'), ((2592, 2654), 'torchvision.utils.save_image', 'torchvision.utils.save_image', (['foreground', 'path'], {'normalize': '(True)'}), '(foreground, path, normalize=True)\n', (2620, 2654), False, 'import torchvision\n'), ((2674, 2724), 'os.path.join', 'join', (['sample_path', "(f'{y}_4_bgs_' + ep_str + '.png')"], {}), "(sample_path, f'{y}_4_bgs_' + ep_str + '.png')\n", (2678, 2724), False, 'from os.path import join\n'), ((2737, 2799), 'torchvision.utils.save_image', 'torchvision.utils.save_image', (['background', 'path'], {'normalize': '(True)'}), '(background, path, normalize=True)\n', (2765, 2799), False, 'import torchvision\n'), ((2819, 2873), 'os.path.join', 'join', (['sample_path', "(f'{y}_5_gen_ims_' + ep_str + '.png')"], {}), "(sample_path, f'{y}_5_gen_ims_' + ep_str + '.png')\n", (2823, 2873), False, 'from os.path import join\n'), ((2886, 2943), 'torchvision.utils.save_image', 'torchvision.utils.save_image', (['x_gen', 'path'], {'normalize': '(True)'}), '(x_gen, path, normalize=True)\n', (2914, 2943), False, 'import torchvision\n'), ((3214, 3228), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3226, 3228), False, 'from datetime import datetime\n'), ((4027, 4055), 'os.path.isfile', 'os.path.isfile', (['u_fixed_path'], {}), '(u_fixed_path)\n', (4041, 4055), False, 'import os\n'), ((6788, 6813), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6811, 6813), False, 'import torch\n'), ((3308, 3338), 'pathlib.Path', 'pathlib.Path', (['cfg.WEIGHTS_PATH'], {}), '(cfg.WEIGHTS_PATH)\n', (3320, 3338), False, 'import pathlib\n'), ((3761, 3787), 'pathlib.Path', 'pathlib.Path', (['weights_path'], {}), '(weights_path)\n', (3773, 3787), False, 'import pathlib\n'), ((3831, 3856), 'pathlib.Path', 'pathlib.Path', (['sample_path'], {}), '(sample_path)\n', (3843, 3856), False, 'import pathlib\n'), ((5367, 5402), 'os.path.join', 'join', (['weights_path', "(ep_str + '.pth')"], {}), "(weights_path, ep_str + '.pth')\n", (5371, 5402), False, 'from os.path import join\n'), ((3370, 3400), 'pathlib.Path', 'pathlib.Path', (['cfg.WEIGHTS_PATH'], {}), '(cfg.WEIGHTS_PATH)\n', (3382, 3400), False, 'import pathlib\n'), ((1140, 1158), 'torch.cat', 'torch.cat', (['to_plot'], {}), '(to_plot)\n', (1149, 1158), False, 'import torch\n'), ((1306, 1312), 'torchvision.transforms.Pad', 'Pad', (['(2)'], {}), '(2)\n', (1309, 1312), False, 'from torchvision.transforms import Pad\n')] |
import logging
from typing import Dict, List, Optional
import numpy as np
import qiskit
from qiskit.circuit import Barrier, Delay, Reset
from qiskit.circuit.library import (CRXGate, CRYGate, CRZGate, CZGate,
PhaseGate, RXGate, RYGate, RZGate, U1Gate,
U2Gate, U3Gate, UGate)
from qiskit.circuit.library.standard_gates import (CU1Gate, RZZGate, SdgGate,
SGate, TdgGate, TGate,
ZGate)
from qiskit.circuit.quantumcircuit import QuantumCircuit
from qiskit.converters.circuit_to_dag import circuit_to_dag
from qiskit.dagcircuit import DAGCircuit
from qiskit.transpiler.basepasses import TransformationPass
logger = logging.getLogger(__name__)
class RemoveSmallRotations(TransformationPass):
"""Return a circuit with small rotation gates removed."""
def __init__(self, epsilon: float = 0, modulo2pi=False):
"""Remove all small rotations from a circuit
Args:
epsilon: Threshold for rotation angle to be removed
modulo2pi: If True, then rotations multiples of 2pi are removed as well
"""
super().__init__()
self.epsilon = epsilon
self._empty_dag1 = qiskit.converters.circuit_to_dag(QuantumCircuit(1))
self._empty_dag2 = qiskit.converters.circuit_to_dag(QuantumCircuit(2))
self.mod2pi = modulo2pi
def run(self, dag: DAGCircuit) -> DAGCircuit:
"""Run the pass on `dag`.
Args:
dag: input dag.
Returns:
Output dag with small rotations removed
"""
def modulo_2pi(x):
x = float(x)
return np.mod(x + np.pi, 2 * np.pi) - np.pi
for node in dag.op_nodes():
if isinstance(node.op, (PhaseGate, RXGate, RYGate, RZGate)):
if node.op.is_parameterized():
# for parameterized gates we do not optimize
pass
else:
phi = float(node.op.params[0])
if self.mod2pi:
phi = modulo_2pi(phi)
if np.abs(phi) <= self.epsilon:
dag.substitute_node_with_dag(node, self._empty_dag1)
elif isinstance(node.op, (CRXGate, CRYGate, CRZGate)):
if node.op.is_parameterized():
# for parameterized gates we do not optimize
pass
else:
phi = float(node.op.params[0])
if self.mod2pi:
phi = modulo_2pi(phi)
if np.abs(phi) <= self.epsilon:
dag.substitute_node_with_dag(node, self._empty_dag2)
return dag
class RemoveDiagonalGatesAfterInput(TransformationPass):
"""Remove diagonal gates (including diagonal 2Q gates) at the start of a circuit.
Transpiler pass to remove diagonal gates (like RZ, T, Z, etc) at the start of a circuit.
Including diagonal 2Q gates. Nodes after a reset are also included.
"""
def run(self, dag):
"""Run the RemoveDiagonalGatesBeforeMeasure pass on `dag`.
Args:
dag (DAGCircuit): the DAG to be optimized.
Returns:
DAGCircuit: the optimized DAG.
"""
diagonal_1q_gates = (RZGate, ZGate, TGate, SGate, TdgGate, SdgGate, U1Gate)
diagonal_2q_gates = (CZGate, CRZGate, CU1Gate, RZZGate)
nodes_to_remove = set()
for input_node in (dag.input_map.values()):
try:
successor = next(dag.quantum_successors(input_node))
except StopIteration:
continue
if successor.type == "op" and isinstance(successor.op, diagonal_1q_gates):
nodes_to_remove.add(successor)
def valid_predecessor(s):
""" Return True of node is valid predecessor for removal """
if s.type == 'in':
return True
if s.type == "op" and isinstance(s.op, Reset):
return True
return False
if successor.type == "op" and isinstance(successor.op, diagonal_2q_gates):
predecessors = dag.quantum_predecessors(successor)
if all(valid_predecessor(s) for s in predecessors):
nodes_to_remove.add(successor)
for node_to_remove in nodes_to_remove:
dag.remove_op_node(node_to_remove)
return dag
class DecomposeU(TransformationPass):
""" Decompose U gates into elementary rotations Rx, Ry, Rz
The U gates are decomposed using McKay decomposition.
"""
def __init__(self, verbose=0):
"""
Args:
"""
super().__init__()
self._subdags = []
self.verbose = verbose
self.initial_layout = None
def ugate_replacement_circuit(self, ugate):
qc = QuantumCircuit(1)
if isinstance(ugate, (U3Gate, UGate)):
theta, phi, lam = ugate.params
if theta == np.pi/2:
# a u2 gate
qc.rz(lam - np.pi / 2, 0)
qc.rx(np.pi / 2, 0)
qc.rz(phi + np.pi / 2, 0)
else:
# from https://arxiv.org/pdf/1707.03429.pdf
qc.rz(lam, 0)
qc.rx(np.pi / 2, 0)
qc.rz(theta + np.pi, 0)
qc.rx(np.pi / 2, 0)
qc.rz(phi + np.pi, 0)
elif isinstance(ugate, U2Gate):
phi, lam = ugate.params
qc.rz(lam - np.pi / 2, 0)
qc.rx(np.pi / 2, 0)
qc.rz(phi + np.pi / 2, 0)
elif isinstance(ugate, (U1Gate, PhaseGate)):
lam, = ugate.params
qc.rz(lam, 0)
else:
raise Exception(f'unknown gate type {ugate}')
return qc
def run(self, dag: DAGCircuit) -> DAGCircuit:
"""Run the Decompose pass on `dag`.
Args:
dag: input DAG.
Returns:
Output DAG where ``U`` gates have been decomposed.
"""
# Walk through the DAG and expand each node if required
for node in dag.op_nodes():
if isinstance(node.op, (PhaseGate, U1Gate, U2Gate, U3Gate, UGate)):
subdag = circuit_to_dag(self.ugate_replacement_circuit(node.op))
dag.substitute_node_with_dag(node, subdag)
return dag
class DecomposeCX(TransformationPass):
""" Decompose CX into CZ and single qubit rotations
"""
def __init__(self, mode: str = 'ry'):
"""
Args:
"""
super().__init__()
self._subdags: List = []
self.initial_layout = None
self.gate = qiskit.circuit.library.CXGate
self.decomposition = QuantumCircuit(2)
if mode == 'ry':
self.decomposition.ry(-np.pi / 2, 1)
self.decomposition.cz(0, 1)
self.decomposition.ry(np.pi / 2, 1)
else:
self.decomposition.h(1)
self.decomposition.cz(0, 1)
self.decomposition.h(1)
self._dag = circuit_to_dag(self.decomposition)
def run(self, dag: DAGCircuit) -> DAGCircuit:
"""Run the Decompose pass on `dag`.
Args:
dag: input dag.
Returns:
output dag where ``CX`` was expanded.
"""
# Walk through the DAG and expand each non-basis node
for node in dag.op_nodes(self.gate):
dag.substitute_node_with_dag(node, self._dag)
return dag
class SequentialPass(TransformationPass):
"""Adds barriers between gates to make the circuit sequential."""
def run(self, dag):
new_dag = DAGCircuit()
for qreg in dag.qregs.values():
new_dag.add_qreg(qreg)
for creg in dag.cregs.values():
new_dag.add_creg(creg)
for node in dag.op_nodes():
new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
            logger.info(f'SequentialPass: adding node {node.name}')
if node.name in ['barrier', 'measure']:
continue
new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), [])
return new_dag
class LinearTopologyParallelPass(TransformationPass):
"""Adds barriers to enforce a linear topology
The barrier are placed between gates such that no two qubit gates are executed
at the same time and only single qubit gates on non-neighboring qubits can
be executed in parallel. It assumes a linear topology."""
def run(self, dag):
new_dag = DAGCircuit()
for qreg in dag.qregs.values():
new_dag.add_qreg(qreg)
for creg in dag.cregs.values():
new_dag.add_creg(creg)
for ii, layer in enumerate(dag.layers()):
gates_1q = []
gates_2q = []
other_gates = []
for node in layer['graph'].op_nodes():
if len(node.qargs) == 2:
gates_2q.append(node)
elif len(node.qargs) == 1:
gates_1q.append(node)
else:
logging.info(f'layer {ii}: other type of node {node}')
other_gates.append(node)
even = []
odd = []
for node in gates_1q:
if node.qargs[0].index % 2 == 0:
even.append(node)
else:
odd.append(node)
logging.info(
f'layer {ii}: 2q gates {len(gates_2q)}, even {len(even)} odd {len(odd)}, other {len(other_gates)}')
if len(even) > 0:
for node in even:
new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
if not isinstance(node.op, Barrier):
new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), [])
if len(odd) > 0:
for node in odd:
new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
if not isinstance(node.op, Barrier):
new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), [])
for node in gates_2q:
new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
if not isinstance(node.op, Barrier):
new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), [])
for node in other_gates:
new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
if not isinstance(node.op, Barrier):
new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), [])
return new_dag
class DelayPass(TransformationPass):
"""Adds delay gates when the qubits are idle.
For every layer of the circuit it finds the gate that
lasts the longest and applies appropriate delays on the
other qubits.
"""
def __init__(self, gate_durations: Dict[str, float], delay_quantum: Optional[float] = None):
"""
Args:
gate_durations: Gate durations in the units of dt
"""
super().__init__()
self.gate_durations = gate_durations
self.delay_quantum = delay_quantum
def add_delay_to_dag(self, duration, dag, qargs, cargs):
if self.delay_quantum:
number_of_delays = int(duration/self.delay_quantum)
for ii in range(number_of_delays):
dag.apply_operation_back(Delay(self.delay_quantum), qargs, cargs)
else:
dag.apply_operation_back(Delay(duration), qargs, cargs)
@staticmethod
def _determine_delay_target_qubits(dag, layer):
""" Determine qubits in specified layer which require a delay gate """
partition = layer['partition']
lst = list(dag.qubits)
for el in partition:
for q in el:
if q in lst:
lst.remove(q)
return lst
def run(self, dag):
new_dag = DAGCircuit()
for qreg in dag.qregs.values():
new_dag.add_qreg(qreg)
for creg in dag.cregs.values():
new_dag.add_creg(creg)
for layer_idx, layer in enumerate(dag.layers()):
max_duration = 0
durations = {}
for node in layer['graph'].op_nodes():
if node.name in self.gate_durations:
max_duration = max(max_duration, self.gate_durations[node.name])
for q in node.qargs:
durations[q] = self.gate_durations[node.name]
else:
                    logger.info(f'layer {layer_idx}, could not find duration for node {node.name}')
new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
partition = layer['partition']
if len(partition) == 0:
continue
lst = DelayPass._determine_delay_target_qubits(dag, layer)
logger.info(f'layer: {layer_idx}: lst {lst}, durations {durations}')
for el in lst:
logger.info(f'apply_operation_back: {[el]}')
self.add_delay_to_dag(max_duration, new_dag, [el], [])
for q in durations:
if max_duration - durations[q] > 0:
self.add_delay_to_dag(max_duration - durations[q], new_dag, [q], [])
return new_dag
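# Usage sketch: run one of the passes above through a PassManager. A minimal
# illustration only, assuming a Qiskit version whose PassManager.run accepts a
# QuantumCircuit directly; the circuit and the epsilon value are arbitrary choices.
if __name__ == "__main__":
    from qiskit.transpiler import PassManager
    qc = QuantumCircuit(1)
    qc.rz(1e-12, 0)  # negligible rotation, expected to be stripped
    qc.rx(0.5, 0)
    pm = PassManager([RemoveSmallRotations(epsilon=1e-9)])
    print(pm.run(qc))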
| [
"logging.getLogger",
"qiskit.circuit.quantumcircuit.QuantumCircuit",
"qiskit.circuit.Delay",
"numpy.abs",
"logging.info",
"qiskit.dagcircuit.DAGCircuit",
"qiskit.converters.circuit_to_dag.circuit_to_dag",
"numpy.mod"
] | [((785, 812), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (802, 812), False, 'import logging\n'), ((5004, 5021), 'qiskit.circuit.quantumcircuit.QuantumCircuit', 'QuantumCircuit', (['(1)'], {}), '(1)\n', (5018, 5021), False, 'from qiskit.circuit.quantumcircuit import QuantumCircuit\n'), ((6869, 6886), 'qiskit.circuit.quantumcircuit.QuantumCircuit', 'QuantumCircuit', (['(2)'], {}), '(2)\n', (6883, 6886), False, 'from qiskit.circuit.quantumcircuit import QuantumCircuit\n'), ((7196, 7230), 'qiskit.converters.circuit_to_dag.circuit_to_dag', 'circuit_to_dag', (['self.decomposition'], {}), '(self.decomposition)\n', (7210, 7230), False, 'from qiskit.converters.circuit_to_dag import circuit_to_dag\n'), ((7790, 7802), 'qiskit.dagcircuit.DAGCircuit', 'DAGCircuit', ([], {}), '()\n', (7800, 7802), False, 'from qiskit.dagcircuit import DAGCircuit\n'), ((8704, 8716), 'qiskit.dagcircuit.DAGCircuit', 'DAGCircuit', ([], {}), '()\n', (8714, 8716), False, 'from qiskit.dagcircuit import DAGCircuit\n'), ((12222, 12234), 'qiskit.dagcircuit.DAGCircuit', 'DAGCircuit', ([], {}), '()\n', (12232, 12234), False, 'from qiskit.dagcircuit import DAGCircuit\n'), ((1333, 1350), 'qiskit.circuit.quantumcircuit.QuantumCircuit', 'QuantumCircuit', (['(1)'], {}), '(1)\n', (1347, 1350), False, 'from qiskit.circuit.quantumcircuit import QuantumCircuit\n'), ((1412, 1429), 'qiskit.circuit.quantumcircuit.QuantumCircuit', 'QuantumCircuit', (['(2)'], {}), '(2)\n', (1426, 1429), False, 'from qiskit.circuit.quantumcircuit import QuantumCircuit\n'), ((1742, 1770), 'numpy.mod', 'np.mod', (['(x + np.pi)', '(2 * np.pi)'], {}), '(x + np.pi, 2 * np.pi)\n', (1748, 1770), True, 'import numpy as np\n'), ((11792, 11807), 'qiskit.circuit.Delay', 'Delay', (['duration'], {}), '(duration)\n', (11797, 11807), False, 'from qiskit.circuit import Barrier, Delay, Reset\n'), ((11700, 11725), 'qiskit.circuit.Delay', 'Delay', (['self.delay_quantum'], {}), '(self.delay_quantum)\n', (11705, 11725), False, 'from qiskit.circuit import Barrier, Delay, Reset\n'), ((2203, 2214), 'numpy.abs', 'np.abs', (['phi'], {}), '(phi)\n', (2209, 2214), True, 'import numpy as np\n'), ((9261, 9315), 'logging.info', 'logging.info', (['f"""layer {ii}: other type of node {node}"""'], {}), "(f'layer {ii}: other type of node {node}')\n", (9273, 9315), False, 'import logging\n'), ((2691, 2702), 'numpy.abs', 'np.abs', (['phi'], {}), '(phi)\n', (2697, 2702), True, 'import numpy as np\n')] |
def help():
return '''
Isotropic-Anisotropic Filtering Norm Nesterov Algorithm
Solves the filtering norm minimization + quadratic term problem
Nesterov algorithm, with continuation:
argmin_x || iaFN(x) ||_1/2 subjected to ||b - Ax||_2^2 < delta
If no filter is provided, solves the L1.
Continuation is performed by sequentially applying Nesterov's algorithm
with a decreasing sequence of values of mu0 >= mu >= muf
The observation matrix A must be a projector (non projector not implemented yet)
Inputs:
IAFNNESTA(b, #Observed data, a m x 1 array
A=identity,At=identity, # measurement matrix and adjoint (either a matrix, function handles)
muf=0.0001, #final mu value, smaller leads to higher accuracy
delta, #l2 error bound. This enforces how close the variable
#must fit the observations b, i.e. || y - Ax ||_2 <= delta
#If delta = 0, enforces y = Ax
#delta = sqrt(m + 2*sqrt(2*m))*sigma, where sigma=std(noise).
L1w=1,L2w=0, #weights of L1 (anisotropic) and L2(isotropic) norms
verbose=0, #whether to print internal steps
maxit=1000, #maximum iterations at the inner loop
x0=[], #initial solution, if not provided, will be At(b)
U=identity,Ut=identity, #Analysis/Synthesis operators
stopTest=1, #stopTest == 1 : stop when the relative change in the objective
function is less than TolVar
stopTest == 2 : stop with the l_infinity norm of difference in
the xk variable is less than TolVar
TolVar = 1e-5, #tolerance for the stopping criteria
AAtinv=[], #not implemented
normU=1, #if U is provided, this should be norm(U)
H=[],Ht=[]): #filter operations in sparse matrix form
#also accepts the string 'tv' as input,
#in that case, calculates the tv norm
Outputs:
return xk, #estimated x reconstructed signal
niter, #number of iterations
residuals #first column is the residual at every step,
#second column is the value of f_mu at every step
'''
import IAFNNesterov
import numpy as np
from scipy import sparse
import fil2mat
def identity(x):
return x
def IAFNNESTA(b,sig_size=0,A=identity,At=identity,muf=0.0001,delta=0,L1w=1,L2w=0,verbose=0,MaxIntIter=5,maxit=1000,x0=[],U=identity,Ut=identity,stopTest=1,TolVar = 1e-5,AAtinv=[],normU=1,H=[]):
if delta<0:
raise Exception('Delta must not be negative')
    if not callable(A):  # A given as a matrix: wrap it and its adjoint as functions
        A_mat = A
        A = lambda x: np.matmul(A_mat, x)
        At = lambda x: np.matmul(np.transpose(A_mat), x)
b=b.reshape((-1,1))
Atb=At(b)
if sig_size==0:
sig_size=Atb.shape
if callable(AAtinv):
AtAAtb = At( AAtinv(b) )
else:
if len(AAtinv)>0:
            AAtinv_mat = AAtinv
            AAtinv = lambda x: np.matmul(AAtinv_mat, x)
            AtAAtb = At(AAtinv(b))
else: #default
AtAAtb = Atb
AAtinv=identity
if len(x0)==0:
x0 = AtAAtb
if len(H)==0:
Hf=identity
Hft=identity
else:
if not sparse.issparse(H):
if isinstance(H, str):
if H=='tv':
hs=[]
hs.append(np.array([[1,-1]]))
hs.append(np.array([[1],[-1]]))
H,_,_,_=fil2mat.fil2mat(hs,sig_size)
else:
print('H not recognized. Must be a sparse matrix, a list of filters or the string tv')
else:
#list of filters:
H,_,_,_=fil2mat.fil2mat(H,sig_size)
#print(H.shape)
#print(H)
#print(type(H))
Ht=H.transpose()
Hf=lambda x: H@x
Hft=lambda x: Ht@x
HU=lambda x: Hf(U(x))
UtHt=lambda x: Ut(Hft(x))
typemin=''
if L1w>0:
typemin+="iso"
if L2w>0:
typemin+="aniso"
typemin+='tropic '
if callable(H):
typemin+='filtering norm '
mu0=0
if L1w>0:
mu0+=L1w*0.9*np.max(np.linalg.norm(HU(x0),1))
if L2w>0:
mu0+=L2w*0.9*np.max(np.linalg.norm(HU(x0),2))
niter = 0
Gamma = np.power(muf/mu0,1/MaxIntIter)
mu = mu0
Gammat= np.power(TolVar/0.1,1/MaxIntIter)
TolVar = 0.1
for i in range(MaxIntIter):
mu = mu*Gamma
        TolVar = TolVar*Gammat
if verbose>0:
#if k%verbose==0:
print("\tBeginning %s Minimization; mu = %g\n" %(typemin,mu))
xk,niter_int,res = IAFNNesterov.IAFNNesterov(b,A=A,At=At,mu=mu,delta=delta,L1w=L1w,L2w=L2w,verbose=verbose,maxit=maxit,x0=x0,U=U,Ut=Ut,stopTest=stopTest,TolVar = TolVar,AAtinv=AAtinv,normU=normU,H=Hf,Ht=Hft)
xplug = xk
niter = niter_int + niter
if i==0:
residuals=res
else:
residuals = np.vstack((residuals, res))
return xk.reshape(sig_size)
if __name__ == "__main__":
print(help())
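# Usage sketch (assumes the local IAFNNesterov and fil2mat modules are importable;
# the data below is arbitrary): pure denoising, A = identity, with the TV filtering
# norm and the L1 weight only.
#   import numpy as np
#   b = np.random.randn(64 * 64, 1)            # noisy observations, one column
#   x_hat = IAFNNESTA(b, sig_size=(64, 64), delta=0.1, L1w=1, L2w=0, H='tv')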
| [
"IAFNNesterov.IAFNNesterov",
"fil2mat.fil2mat",
"numpy.power",
"scipy.sparse.issparse",
"numpy.array",
"numpy.matmul",
"numpy.vstack",
"numpy.transpose"
] | [((4800, 4835), 'numpy.power', 'np.power', (['(muf / mu0)', '(1 / MaxIntIter)'], {}), '(muf / mu0, 1 / MaxIntIter)\n', (4808, 4835), True, 'import numpy as np\n'), ((4856, 4894), 'numpy.power', 'np.power', (['(TolVar / 0.1)', '(1 / MaxIntIter)'], {}), '(TolVar / 0.1, 1 / MaxIntIter)\n', (4864, 4894), True, 'import numpy as np\n'), ((5171, 5383), 'IAFNNesterov.IAFNNesterov', 'IAFNNesterov.IAFNNesterov', (['b'], {'A': 'A', 'At': 'At', 'mu': 'mu', 'delta': 'delta', 'L1w': 'L1w', 'L2w': 'L2w', 'verbose': 'verbose', 'maxit': 'maxit', 'x0': 'x0', 'U': 'U', 'Ut': 'Ut', 'stopTest': 'stopTest', 'TolVar': 'TolVar', 'AAtinv': 'AAtinv', 'normU': 'normU', 'H': 'Hf', 'Ht': 'Hft'}), '(b, A=A, At=At, mu=mu, delta=delta, L1w=L1w, L2w=\n L2w, verbose=verbose, maxit=maxit, x0=x0, U=U, Ut=Ut, stopTest=stopTest,\n TolVar=TolVar, AAtinv=AAtinv, normU=normU, H=Hf, Ht=Hft)\n', (5196, 5383), False, 'import IAFNNesterov\n'), ((3145, 3160), 'numpy.matmul', 'np.matmul', (['A', 'x'], {}), '(A, x)\n', (3154, 3160), True, 'import numpy as np\n'), ((3706, 3724), 'scipy.sparse.issparse', 'sparse.issparse', (['H'], {}), '(H)\n', (3721, 3724), False, 'from scipy import sparse\n'), ((5503, 5530), 'numpy.vstack', 'np.vstack', (['(residuals, res)'], {}), '((residuals, res))\n', (5512, 5530), True, 'import numpy as np\n'), ((3190, 3205), 'numpy.transpose', 'np.transpose', (['A'], {}), '(A)\n', (3202, 3205), True, 'import numpy as np\n'), ((3431, 3451), 'numpy.matmul', 'np.matmul', (['AAtinv', 'x'], {}), '(AAtinv, x)\n', (3440, 3451), True, 'import numpy as np\n'), ((4199, 4227), 'fil2mat.fil2mat', 'fil2mat.fil2mat', (['H', 'sig_size'], {}), '(H, sig_size)\n', (4214, 4227), False, 'import fil2mat\n'), ((3945, 3974), 'fil2mat.fil2mat', 'fil2mat.fil2mat', (['hs', 'sig_size'], {}), '(hs, sig_size)\n', (3960, 3974), False, 'import fil2mat\n'), ((3845, 3864), 'numpy.array', 'np.array', (['[[1, -1]]'], {}), '([[1, -1]])\n', (3853, 3864), True, 'import numpy as np\n'), ((3895, 3916), 'numpy.array', 'np.array', (['[[1], [-1]]'], {}), '([[1], [-1]])\n', (3903, 3916), True, 'import numpy as np\n')] |
import asyncio
import discord
# Just with a function to add to the bot.
async def on_message(message):
if not message.author.bot:
await message.channel.send(f"{message.author.mention} a envoyรฉ un message!")
# A Listener already created with the function
from discordEasy.objects import Listener
async def on_message(message):
if not message.author.bot:
await message.channel.send(f"{message.author.mention} a envoyรฉ un message!")
listener_on_message = Listener(on_message) | [
"discordEasy.objects.Listener"
] | [((463, 483), 'discordEasy.objects.Listener', 'Listener', (['on_message'], {}), '(on_message)\n', (471, 483), False, 'from discordEasy.objects import Listener\n')] |
import os
import random
from flask import Flask, request, send_from_directory
from werkzeug.utils import secure_filename
from pianonet.core.pianoroll import Pianoroll
from pianonet.model_inspection.performance_from_pianoroll import get_performance_from_pianoroll
app = Flask(__name__)
base_path = "/app/"
# base_path = "/Users/angsten/PycharmProjects/pianonet"
performances_path = os.path.join(base_path, 'data', 'performances')
def get_random_midi_file_name():
"""
Get a random midi file name that will not ever collide.
"""
return str(random.randint(0, 10000000000000000000)) + ".midi"
def get_performance_path(midi_file_name):
"""
Returns full path to performaqnce midi file given a file name.
"""
return os.path.join(performances_path, midi_file_name)
@app.route('/')
def alive():
return 'OK'
@app.route('/performances/', methods=['GET'])
def get_performance():
"""
Returns the requested performance as midi file.
Expected query string is 'midi_file_name', such as 1234.midi
"""
performance_midi_file_name = request.args.get('midi_file_name')
    if performance_midi_file_name is None:
        return {"http_code": 400, "code": "BadRequest", "message": "midi_file_name not found in request."}
    performance_midi_file_name = secure_filename(performance_midi_file_name)
    print(performance_midi_file_name)
midi_file_path = get_performance_path(performance_midi_file_name)
if not os.path.exists(midi_file_path):
return {
"http_code": 404,
"code": "Not Found",
"message": "midi_file " + performance_midi_file_name + " not found."
}
    return send_from_directory(performances_path, performance_midi_file_name)
@app.route('/create-performance', methods=['POST'])
def performance():
"""
Expects post form data as follows:
seed_midi_file_data: Midi file that forms the seed for a performance as string encoding like "8,2,3,4,5..."
seconds_to_generate: Number of seconds of new notes to generate
model_complexity: Quality of model to use, one of ['low', 'medium', 'high', 'highest']
"""
seed_midi_file_data = request.form.get('seed_midi_file_data')
if seed_midi_file_data == None:
return {"http_code": 400, "code": "BadRequest", "message": "seed_midi_file_data not found in request."}
else:
seed_midi_file_int_array = [int(x) for x in seed_midi_file_data.split(',')]
frame = bytearray()
for i in seed_midi_file_int_array:
frame.append(i)
saved_seed_midi_file_path = os.path.join(base_path, 'data', 'seeds', get_random_midi_file_name())
with open(saved_seed_midi_file_path, 'wb') as midi_file:
midi_file.write(frame)
seconds_to_generate = request.form.get('seconds_to_generate')
if seconds_to_generate == None:
return {"http_code": 400, "code": "BadRequest", "message": "seconds_to_generate not found in request."}
else:
seconds_to_generate = float(seconds_to_generate)
model_complexity = request.form.get('model_complexity', 'low')
if model_complexity == 'low':
model_name = "micro_1"
else:
model_name = "r9p0_3500kparams_approx_9_blocks_model"
model_path = os.path.join(base_path, 'models', model_name)
input_pianoroll = Pianoroll(saved_seed_midi_file_path, use_custom_multitrack=True)
input_pianoroll.trim_silence_off_ends()
final_pianoroll = get_performance_from_pianoroll(
pianoroll_seed=input_pianoroll,
num_time_steps=int(48 * seconds_to_generate),
model_path=model_path,
)
midi_file_name = get_random_midi_file_name()
midi_file_path = get_performance_path(midi_file_name)
final_pianoroll.save_to_midi_file(midi_file_path)
return {"http_code": 200, "code": "Success", "message": "", "midi_file_name": midi_file_name}
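# Hypothetical client call for the endpoint above (host, port and file name are
# assumptions; the seed bytes are sent as the comma-separated integers that
# performance() parses back into a bytearray):
#   import requests
#   with open('seed.midi', 'rb') as f:
#       seed = ','.join(str(b) for b in f.read())
#   r = requests.post('http://localhost:5000/create-performance',
#                     data={'seed_midi_file_data': seed,
#                           'seconds_to_generate': 10,
#                           'model_complexity': 'low'})
#   print(r.json()['midi_file_name'])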
if __name__ == '__main__':
app.run(host='0.0.0.0')
| [
"flask.request.args.get",
"os.path.exists",
"flask.send_from_directory",
"flask.Flask",
"os.path.join",
"flask.request.form.get",
"pianonet.core.pianoroll.Pianoroll",
"werkzeug.utils.secure_filename",
"random.randint"
] | [((272, 287), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (277, 287), False, 'from flask import Flask, request, send_from_directory\n'), ((387, 434), 'os.path.join', 'os.path.join', (['base_path', '"""data"""', '"""performances"""'], {}), "(base_path, 'data', 'performances')\n", (399, 434), False, 'import os\n'), ((752, 799), 'os.path.join', 'os.path.join', (['performances_path', 'midi_file_name'], {}), '(performances_path, midi_file_name)\n', (764, 799), False, 'import os\n'), ((1085, 1119), 'flask.request.args.get', 'request.args.get', (['"""midi_file_name"""'], {}), "('midi_file_name')\n", (1101, 1119), False, 'from flask import Flask, request, send_from_directory\n'), ((1153, 1196), 'werkzeug.utils.secure_filename', 'secure_filename', (['performance_midi_file_name'], {}), '(performance_midi_file_name)\n', (1168, 1196), False, 'from werkzeug.utils import secure_filename\n'), ((2243, 2282), 'flask.request.form.get', 'request.form.get', (['"""seed_midi_file_data"""'], {}), "('seed_midi_file_data')\n", (2259, 2282), False, 'from flask import Flask, request, send_from_directory\n'), ((2861, 2900), 'flask.request.form.get', 'request.form.get', (['"""seconds_to_generate"""'], {}), "('seconds_to_generate')\n", (2877, 2900), False, 'from flask import Flask, request, send_from_directory\n'), ((3141, 3184), 'flask.request.form.get', 'request.form.get', (['"""model_complexity"""', '"""low"""'], {}), "('model_complexity', 'low')\n", (3157, 3184), False, 'from flask import Flask, request, send_from_directory\n'), ((3341, 3386), 'os.path.join', 'os.path.join', (['base_path', '"""models"""', 'model_name'], {}), "(base_path, 'models', model_name)\n", (3353, 3386), False, 'import os\n'), ((3410, 3474), 'pianonet.core.pianoroll.Pianoroll', 'Pianoroll', (['saved_seed_midi_file_path'], {'use_custom_multitrack': '(True)'}), '(saved_seed_midi_file_path, use_custom_multitrack=True)\n', (3419, 3474), False, 'from pianonet.core.pianoroll import Pianoroll\n'), ((1469, 1499), 'os.path.exists', 'os.path.exists', (['midi_file_path'], {}), '(midi_file_path)\n', (1483, 1499), False, 'import os\n'), ((1738, 1804), 'flask.send_from_directory', 'send_from_directory', (['performances_path', 'performance_midi_file_name'], {}), '(performances_path, performance_midi_file_name)\n', (1757, 1804), False, 'from flask import Flask, request, send_from_directory\n'), ((562, 601), 'random.randint', 'random.randint', (['(0)', '(10000000000000000000)'], {}), '(0, 10000000000000000000)\n', (576, 601), False, 'import random\n')] |
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import logging
import json
import os
import pandas as pd
from datetime import datetime
from datetime import timedelta
from urllib import parse
import requests
logger = logging.getLogger(__name__)
external_stylesheets = [dbc.themes.DARKLY]
is_cf_instance = os.environ.get('CF_INSTANCE_GUID', '') != ''
port = int(os.environ.get('PORT', 8050))
host = os.environ.get('CF_INSTANCE_INTERNAL_IP', '127.0.0.1')
wml_api_key = os.environ['WML_API_KEY']
wml_scoring_url = os.environ['WML_SCORING_URL']
url = parse.urlparse(wml_scoring_url)
wml_base_url = url._replace(path='').geturl()
wml_instance_id = url.path.split('/')[3]
logger.setLevel(logging.INFO if is_cf_instance else logging.DEBUG)
logger.info('Starting %s server: %s:%d', 'CF' if is_cf_instance else 'local', host, port)
logger.info('WML URL: %s', wml_base_url)
logger.info('WML instance ID: %s', wml_instance_id)
wml_credentials = {
"apikey": wml_api_key,
"instance_id": wml_instance_id,
"url": wml_base_url,
}
iam_token_endpoint = 'https://iam.cloud.ibm.com/identity/token'
def _get_token():
data = {
'grant_type': 'urn:ibm:params:oauth:grant-type:apikey',
'apikey': wml_credentials['apikey']
}
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
response = requests.post(iam_token_endpoint, data=data, headers=headers)
return response.json()['access_token']
def score(token, algorithm, start_date, country, predict_range, s, i, r):
headers = {'Authorization': 'Bearer ' + token}
payload = {
"fields": ["algorithm", "start_date", "country", "predict_range", "S0", "I0", "R0"],
"values": [[algorithm, start_date.strftime('%-m/%-d/%y'), country, predict_range, s, i, r]]
}
logger.info('Scoring with payload: %s', json.dumps(payload))
response = requests.post(wml_scoring_url, json=payload, headers=headers)
if response.status_code == 200:
result = response.json()
else:
raise Exception('Scoring error [{}]: {}'.format(response.status_code, response.text))
n_days = len(result['values'])
index = [(start_date + timedelta(days=i)).strftime('%d/%m/%y') for i in range(n_days)]
return pd.DataFrame(result['values'], columns=result['fields'], index=index)
def serve_layout():
token = _get_token()
# predict_range = 14
# sir_result = score(token, 'SIR', datetime(2020, 3, 3), 'Poland', predict_range, 10_000, 20, 10)
# logistic_result = score(token, 'LOGISTIC', datetime(2020, 3, 3), 'Poland', predict_range, 10_000, 20, 10)
calibration_result = score(token, 'CALIBRATION', datetime(2020, 1, 22), 'Poland', 40, 10_000, 20, 10)
# days = list(sir_result.index)
days = list(calibration_result.index)
calibration_result['ActualChange'] = calibration_result['Actual'] - calibration_result['Actual'].shift(1, fill_value=0)
calibration_result['PredictedChange'] = calibration_result['Predicted'] - calibration_result['Predicted'].shift(1, fill_value=0)
fig = make_subplots(specs=[[{"secondary_y": True}]])
fig.add_trace(
go.Bar(x=days, y=calibration_result['PredictedChange'], name='Predicted Change', opacity=0.5),
secondary_y=True,
)
fig.add_trace(
go.Bar(x=days, y=calibration_result['ActualChange'], name='Actual Change', opacity=0.5),
secondary_y=True,
)
fig.add_trace(
go.Scatter(x=days, y=calibration_result['Predicted'], name='Calibration'),
secondary_y=False,
)
fig.add_trace(
go.Scatter(x=days, y=calibration_result['Actual'], name='Actual', mode="markers", marker=dict(size=8)),
secondary_y=False,
)
fig.update_layout(
title="Prediction of confirmed cases for Poland",
template="plotly_dark",
height=900
)
fig.update_xaxes(title_text="Date")
fig.update_yaxes(title_text="Total confirmed cases", secondary_y=False, range=[0, 6000])
fig.update_yaxes(title_text="New cases per day", secondary_y=True, range=[0, 1000])
# fig = go.Figure(
# data=[
# go.Scatter(x=days, y=sir_result['I'], name='SIR'),
# go.Scatter(x=days, y=logistic_result['I'], name='Logistic'),
# ],
# layout=go.Layout(
# title="COVID19 infected prediction in Poland",
# template="plotly_dark",
# height=600
# )
# )
return html.Div(children=[
html.H1(children='COVID-19 Predictions with Watson Machine Learning'),
dcc.Graph(
id='example-graph',
figure=fig
)
])
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = serve_layout
if __name__ == '__main__':
app.run_server(debug=(not is_cf_instance), port=port, host=host)
| [
"logging.getLogger",
"datetime.datetime",
"plotly.graph_objects.Bar",
"requests.post",
"plotly.subplots.make_subplots",
"urllib.parse.urlparse",
"json.dumps",
"os.environ.get",
"plotly.graph_objects.Scatter",
"dash_html_components.H1",
"pandas.DataFrame",
"datetime.timedelta",
"dash.Dash",
"dash_core_components.Graph"
] | [((369, 396), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (386, 396), False, 'import logging\n'), ((552, 606), 'os.environ.get', 'os.environ.get', (['"""CF_INSTANCE_INTERNAL_IP"""', '"""127.0.0.1"""'], {}), "('CF_INSTANCE_INTERNAL_IP', '127.0.0.1')\n", (566, 606), False, 'import os\n'), ((702, 733), 'urllib.parse.urlparse', 'parse.urlparse', (['wml_scoring_url'], {}), '(wml_scoring_url)\n', (716, 733), False, 'from urllib import parse\n'), ((4778, 4840), 'dash.Dash', 'dash.Dash', (['__name__'], {'external_stylesheets': 'external_stylesheets'}), '(__name__, external_stylesheets=external_stylesheets)\n', (4787, 4840), False, 'import dash\n'), ((459, 497), 'os.environ.get', 'os.environ.get', (['"""CF_INSTANCE_GUID"""', '""""""'], {}), "('CF_INSTANCE_GUID', '')\n", (473, 497), False, 'import os\n'), ((515, 543), 'os.environ.get', 'os.environ.get', (['"""PORT"""', '(8050)'], {}), "('PORT', 8050)\n", (529, 543), False, 'import os\n'), ((1480, 1541), 'requests.post', 'requests.post', (['iam_token_endpoint'], {'data': 'data', 'headers': 'headers'}), '(iam_token_endpoint, data=data, headers=headers)\n', (1493, 1541), False, 'import requests\n'), ((2007, 2068), 'requests.post', 'requests.post', (['wml_scoring_url'], {'json': 'payload', 'headers': 'headers'}), '(wml_scoring_url, json=payload, headers=headers)\n', (2020, 2068), False, 'import requests\n'), ((2379, 2448), 'pandas.DataFrame', 'pd.DataFrame', (["result['values']"], {'columns': "result['fields']", 'index': 'index'}), "(result['values'], columns=result['fields'], index=index)\n", (2391, 2448), True, 'import pandas as pd\n'), ((3189, 3235), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'specs': "[[{'secondary_y': True}]]"}), "(specs=[[{'secondary_y': True}]])\n", (3202, 3235), False, 'from plotly.subplots import make_subplots\n'), ((1971, 1990), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (1981, 1990), False, 'import json\n'), ((2788, 2809), 'datetime.datetime', 'datetime', (['(2020)', '(1)', '(22)'], {}), '(2020, 1, 22)\n', (2796, 2809), False, 'from datetime import datetime\n'), ((3264, 3362), 'plotly.graph_objects.Bar', 'go.Bar', ([], {'x': 'days', 'y': "calibration_result['PredictedChange']", 'name': '"""Predicted Change"""', 'opacity': '(0.5)'}), "(x=days, y=calibration_result['PredictedChange'], name=\n 'Predicted Change', opacity=0.5)\n", (3270, 3362), True, 'import plotly.graph_objects as go\n'), ((3418, 3509), 'plotly.graph_objects.Bar', 'go.Bar', ([], {'x': 'days', 'y': "calibration_result['ActualChange']", 'name': '"""Actual Change"""', 'opacity': '(0.5)'}), "(x=days, y=calibration_result['ActualChange'], name='Actual Change',\n opacity=0.5)\n", (3424, 3509), True, 'import plotly.graph_objects as go\n'), ((3566, 3639), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'days', 'y': "calibration_result['Predicted']", 'name': '"""Calibration"""'}), "(x=days, y=calibration_result['Predicted'], name='Calibration')\n", (3576, 3639), True, 'import plotly.graph_objects as go\n'), ((4606, 4675), 'dash_html_components.H1', 'html.H1', ([], {'children': '"""COVID-19 Predictions with Watson Machine Learning"""'}), "(children='COVID-19 Predictions with Watson Machine Learning')\n", (4613, 4675), True, 'import dash_html_components as html\n'), ((4687, 4728), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""example-graph"""', 'figure': 'fig'}), "(id='example-graph', figure=fig)\n", (4696, 4728), True, 'import dash_core_components as dcc\n'), ((2304, 2321), 
'datetime.timedelta', 'timedelta', ([], {'days': 'i'}), '(days=i)\n', (2313, 2321), False, 'from datetime import timedelta\n')] |
from robot import __version__ as ROBOT_VERSION
import sys
import tempfile
import textwrap
import unittest
import shutil
import subprocess
class PabotOrderingGroupTest(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def _run_tests_with(self, testfile, orderfile):
robot_file = open("{}/test.robot".format(self.tmpdir), "w")
robot_file.write(textwrap.dedent(testfile))
robot_file.close()
with open("{}/order.dat".format(self.tmpdir), "w") as f:
f.write(textwrap.dedent(orderfile))
process = subprocess.Popen(
[
                sys.executable,
                "-m", "pabot.pabot",
"--testlevelsplit",
"--ordering",
"{}/order.dat".format(self.tmpdir),
"{}/test.robot".format(self.tmpdir),
],
cwd=self.tmpdir,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
return process.communicate()
def test_orders(self):
stdout, stderr = self._run_tests_with(
"""
*** Variables ***
${SCALAR} Hello, globe!
*** Test Cases ***
First Test
Set Suite Variable ${SCALAR} Hello, world!
Second Test
Should Be Equal ${SCALAR} Hello, world!
Third Test
Should Be Equal ${SCALAR} Hello, globe!
""",
"""
{
--test Test.First Test
--test Test.Second Test
}
--test Test.Third Test
""",
)
if sys.version_info < (3, 0):
self.assertIn("PASSED", stdout, stderr)
self.assertNotIn("FAILED", stdout, stderr)
self.assertEqual(stdout.count("PASSED"), 2)
else:
self.assertIn(b"PASSED", stdout, stderr)
self.assertNotIn(b"FAILED", stdout, stderr)
self.assertEqual(stdout.count(b"PASSED"), 2)
def test_two_orders(self):
stdout, stderr = self._run_tests_with(
"""
*** Variables ***
${SCALAR} Hello, globe!
*** Test Cases ***
First Test
Set Suite Variable ${SCALAR} Hello, world!
Second Test
Should Be Equal ${SCALAR} Hello, world!
Second And Quarter
Should Be Equal ${SCALAR} Hello, globe!
Second And Half
Should Be Equal ${SCALAR} Hello, globe!
Third Test
Should Be Equal ${SCALAR} Hello, globe!
""",
"""
{
--test Test.First Test
--test Test.Second Test
}
{
--test Test.Second And Quarter
--test Test.Second And Half
}
--test Test.Third Test
""",
)
if sys.version_info < (3, 0):
self.assertIn("PASSED", stdout, stderr)
self.assertNotIn("FAILED", stdout, stderr)
if ROBOT_VERSION < "4.0":
expected_write = "5 critical tests, 5 passed, 0 failed"
else:
expected_write = "5 tests, 5 passed, 0 failed, 0 skipped."
self.assertIn(expected_write, stdout, stderr)
self.assertEqual(stdout.count("PASSED"), 3)
else:
self.assertIn(b"PASSED", stdout, stderr)
self.assertNotIn(b"FAILED", stdout, stderr)
if ROBOT_VERSION < "4.0":
expected_write = b"5 critical tests, 5 passed, 0 failed"
else:
expected_write = b"5 tests, 5 passed, 0 failed, 0 skipped."
self.assertIn(expected_write, stdout, stderr)
self.assertEqual(stdout.count(b"PASSED"), 3)
def test_too_big_testname(self):
stdout, stderr = self._run_tests_with(
"""
*** Test Cases ***
Test Lorem ipsum dolor sit amet, consectetur adipiscing elit. Mauris eu velit nunc. Duis eget purus eget orci porta blandit sed ut tortor. Nunc vel nulla bibendum, auctor sem ac, molestie risus. Sed eu metus volutpat, hendrerit nibh in, auctor urna. Nunc a sodales.
Log Test
""",
"""
--test Invalid
""",
)
if sys.version_info < (3, 0):
self.assertIn("PASSED", stdout, stderr)
self.assertNotIn("FAILED", stdout, stderr)
self.assertEqual(stdout.count("PASSED"), 1)
else:
self.assertIn(b"PASSED", stdout, stderr)
self.assertNotIn(b"FAILED", stdout, stderr)
self.assertEqual(stdout.count(b"PASSED"), 1)
def test_longnames_in_tests(self):
stdout, stderr = self._run_tests_with(
"""
*** Settings ***
Test Template Test1
*** Test Cases ***
The Somewhat Long Name Of The Test S1Test 01 1
The Somewhat Long Name Of The Test S1Test 02 1
The Somewhat Long Name Of The Test S1Test 03 1
The Somewhat Long Name Of The Test S1Test 04 1
The Somewhat Long Name Of The Test S1Test 05 1
The Somewhat Long Name Of The Test S1Test 06 1
The Somewhat Long Name Of The Test S1Test 07 1
The Somewhat Long Name Of The Test S1Test 08 1
The Somewhat Long Name Of The Test S1Test 09 1
The Somewhat Long Name Of The Test S1Test 10 1
The Somewhat Long Name Of The Test S1Test 11 1
The Somewhat Long Name Of The Test S1Test 12 1
*** Keywords ***
Test1
[Arguments] ${arg}
Log Test
""",
"""
{
--test Test.The Somewhat Long Name Of The Test S1Test 01
--test Test.The Somewhat Long Name Of The Test S1Test 02
--test Test.The Somewhat Long Name Of The Test S1Test 03
--test Test.The Somewhat Long Name Of The Test S1Test 04
--test Test.The Somewhat Long Name Of The Test S1Test 05
--test Test.The Somewhat Long Name Of The Test S1Test 06
}
{
--test Test.The Somewhat Long Name Of The Test S1Test 07
--test Test.The Somewhat Long Name Of The Test S1Test 08
--test Test.The Somewhat Long Name Of The Test S1Test 09
--test Test.The Somewhat Long Name Of The Test S1Test 10
--test Test.The Somewhat Long Name Of The Test S1Test 11
--test Test.The Somewhat Long Name Of The Test S1Test 12
}
""",
)
if sys.version_info < (3, 0):
self.assertIn("PASSED", stdout, stderr)
self.assertNotIn("FAILED", stdout, stderr)
self.assertEqual(stdout.count("PASSED"), 2)
else:
self.assertIn(b"PASSED", stdout, stderr)
self.assertNotIn(b"FAILED", stdout, stderr)
self.assertEqual(stdout.count(b"PASSED"), 2)
| [
"textwrap.dedent",
"tempfile.mkdtemp",
"shutil.rmtree"
] | [((232, 250), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (248, 250), False, 'import tempfile\n'), ((284, 310), 'shutil.rmtree', 'shutil.rmtree', (['self.tmpdir'], {}), '(self.tmpdir)\n', (297, 310), False, 'import shutil\n'), ((457, 482), 'textwrap.dedent', 'textwrap.dedent', (['testfile'], {}), '(testfile)\n', (472, 482), False, 'import textwrap\n'), ((596, 622), 'textwrap.dedent', 'textwrap.dedent', (['orderfile'], {}), '(orderfile)\n', (611, 622), False, 'import textwrap\n')] |
import torch
ckp_path = './checkpoints/fashion_PATN/latest_net_netG.pth'
save_path = './checkpoints/fashion_PATN_v1.0/latest_net_netG.pth'
states_dict = torch.load(ckp_path)
states_dict_new = states_dict.copy()
for key in states_dict.keys():
if "running_var" in key or "running_mean" in key:
del states_dict_new[key]
torch.save(states_dict_new, save_path) | [
"torch.load",
"torch.save"
] | [((154, 174), 'torch.load', 'torch.load', (['ckp_path'], {}), '(ckp_path)\n', (164, 174), False, 'import torch\n'), ((322, 360), 'torch.save', 'torch.save', (['states_dict_new', 'save_path'], {}), '(states_dict_new, save_path)\n', (332, 360), False, 'import torch\n')] |
import multiprocessing
# ========== #Python3 - concurrent
from math import floor, sqrt
numbers = [
112272537195293,
112582718962171,
112272537095293,
115280098190773,
115797840077099,
1099726829285419]
# numbers = [33, 44, 55, 275]
def lowest_factor(n, _start=3):
if n % 2 == 0:
return 2
search_max = int(floor(sqrt(n))) + 1
for i in range(_start, search_max, 2):
if n % i == 0:
return i
return n
def prime_factors(n, lowest):
pf = []
while n > 1:
pf.append(lowest)
n //= lowest
lowest = lowest_factor(n, max(lowest, 3))
return pf
# ========== #Python3 - concurrent
def prime_factors_of_number_with_lowest_prime_factor(numbers):
pool = multiprocessing.Pool(processes=5)
factors = pool.map(lowest_factor,numbers)
low_factor,number = max((l,f) for l,f in zip(factors,numbers))
all_factors = prime_factors(number,low_factor)
return number,all_factors
if __name__ == '__main__':
print('For these numbers:')
print('\n '.join(str(p) for p in numbers))
number, all_factors = prime_factors_of_number_with_lowest_prime_factor(numbers)
print(' The one with the largest minimum prime factor is {}:'.format(number))
print(' All its prime factors in order are: {}'.format(all_factors))
| [
"math.sqrt",
"multiprocessing.Pool"
] | [((749, 782), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': '(5)'}), '(processes=5)\n', (769, 782), False, 'import multiprocessing\n'), ((354, 361), 'math.sqrt', 'sqrt', (['n'], {}), '(n)\n', (358, 361), False, 'from math import floor, sqrt\n')] |
from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
description='Project: Nowcasting the air pollution using online search log',
author='<NAME>(IR Lab)',
license='MIT',
)
| [
"setuptools.find_packages"
] | [((81, 96), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (94, 96), False, 'from setuptools import find_packages, setup\n')] |
import ast
from json_codegen.generators.python3_marshmallow.utils import Annotations, class_name
class ObjectGenerator(object):
@staticmethod
def _get_property_name(node_assign):
name = node_assign.targets[0]
return name.id
@staticmethod
def _nesting_class(node_assign):
for node in ast.walk(node_assign):
if isinstance(node, ast.Call):
if node.func.attr == "Nested":
return class_name(node.args[0].id)
@staticmethod
def _non_primitive_nested_list(node_assign):
if node_assign.value.func.attr == "List":
return (
len(node_assign.value.args) > 0 and node_assign.value.args[0].func.attr == "Nested"
)
else:
return False
@staticmethod
def _init_non_primitive_nested_class(node_assign, object_, prop):
"""
If the nested list is non-primitive, initialise sub-classes in a list comp
If the nest is primitive, we can simply get it
Marshmallow will do the type marshalling
"""
return ast.ListComp(
elt=ast.Call(
func=ast.Name(id=ObjectGenerator._nesting_class(node_assign)),
args=[ast.Name(id="el")],
keywords=[],
),
generators=[
ast.comprehension(
target=ast.Name(id="el"),
iter=ast.Call(
func=ast.Attribute(value=ast.Name(id=object_), attr="get"),
args=[ast.Str(s=prop), ast.Dict(keys=[], values=[])],
keywords=[],
),
ifs=[],
is_async=0,
)
],
)
@staticmethod
def _get_key_from_object(object_, prop):
return ast.Call(
func=ast.Attribute(value=ast.Name(id=object_), attr="get"),
args=[ast.Str(s=prop)],
keywords=[],
)
@staticmethod
def _hint_required_property(node_assign, value, object_, prop):
for node in ast.walk(node_assign):
if isinstance(node, ast.keyword):
if "required" in node.arg:
value = ast.Subscript(
value=ast.Name(id=object_), slice=ast.Index(value=ast.Str(s=prop))
)
return value
@staticmethod
def _get_default_for_property(node_assign, value, object_, prop):
for node in ast.walk(node_assign):
if isinstance(node, ast.keyword) and node.arg == "required":
return value
for node in ast.walk(node_assign):
if isinstance(node, ast.keyword) and node.arg == "default":
default_value = [
keyword.value
for keyword in node_assign.value.keywords
if keyword.arg == "default"
][0]
value.args.append(default_value)
return value
else:
return value
@staticmethod
def assign_property(node_assign, object_):
"""
Required property -> self.prop = parent_dict["prop"]
Optional property -> self.prop = parent_dict.get("prop")
Primative nested list -> self.prop = parent_dict.get("prop")
Non-primative nested list -> self.props = [PropertyClass(el) for el in parent_dict.get('props', {})]
"""
prop = ObjectGenerator._get_property_name(node_assign)
if ObjectGenerator._non_primitive_nested_list(node_assign):
value = ObjectGenerator._init_non_primitive_nested_class(node_assign, object_, prop)
else:
# Assign the property as self.prop = table.get("prop")
value = ObjectGenerator._get_key_from_object(object_, prop)
# If the property is required, assign as self.prop = table["prop"]
value = ObjectGenerator._hint_required_property(node_assign, value, object_, prop)
value = ObjectGenerator._get_default_for_property(node_assign, value, object_, prop)
return ast.AnnAssign(
target=ast.Attribute(value=ast.Name(id="self"), attr=prop),
value=value,
simple=0,
annotation=Annotations(node_assign).type,
)
@staticmethod
def construct_class(schema):
name = class_name(schema.name)
name_lower = name.lower()
# Bundle function arguments and keywords
fn_arguments = ast.arguments(
args=[
ast.arg(arg="self", annotation=None),
ast.arg(arg=name_lower, annotation=ast.Name(id="dict")),
],
vararg=None,
kwarg=None,
kwonlyargs=[],
kw_defaults=[],
defaults=[],
)
fn_body = [
ObjectGenerator.assign_property(node, name_lower)
for node in schema.body
if isinstance(node, ast.Assign)
]
# pass if no Assign nodes
if len(fn_body) == 0:
fn_body = [ast.Pass()]
# Generate class constructor
class_body = [
ast.FunctionDef(
name="__init__", args=fn_arguments, body=fn_body, decorator_list=[], returns=None
),
ObjectGenerator._construct_to_("json")(schema),
ObjectGenerator._construct_to_("dict")(schema),
ObjectGenerator.construct_from_json(schema),
]
return ast.ClassDef(name=name, bases=[], body=class_body, decorator_list=[], keywords=[])
@staticmethod
def _construct_to_(output):
if output == "json":
method = "dumps"
elif output == "dict":
method = "dump"
else:
raise NotImplementedError("Only deserialisation to json or dict supported")
def _construct_to_helper(schema):
fn_args = ast.arguments(
args=[ast.arg(arg="self", annotation=None)],
vararg=None,
kwonlyargs=[],
kw_defaults=[],
kwarg=None,
defaults=[],
)
fn_body = [
ast.Return(
value=ast.Attribute(
value=ast.Call(
func=ast.Attribute(
value=ast.Call(
func=ast.Name(id=schema.name),
args=[],
keywords=[
ast.keyword(
arg="strict", value=ast.NameConstant(value=True)
)
],
),
attr=method,
),
args=[ast.Name(id="self")],
keywords=[],
),
attr="data",
)
)
]
return ast.FunctionDef(
name=f"to_{output}", args=fn_args, body=fn_body, decorator_list=[], returns=None
)
return _construct_to_helper
@staticmethod
def construct_from_json(schema):
fn_args = ast.arguments(
args=[
ast.arg(arg="json", annotation=ast.Name(id="str")),
ast.arg(arg="only", annotation=None),
],
vararg=None,
kwonlyargs=[],
kw_defaults=[],
kwarg=None,
defaults=[ast.NameConstant(value=None)],
)
fn_body = [
ast.Return(
ast.Attribute(
value=ast.Call(
func=ast.Attribute(
value=ast.Call(
func=ast.Name(id=schema.name),
args=[],
keywords=[
ast.keyword(arg="strict", value=ast.NameConstant(value=True)),
ast.keyword(arg="only", value=ast.Name(id="only")),
],
),
attr="loads",
),
args=[ast.Name(id="json")],
keywords=[],
),
attr="data",
)
)
]
return ast.FunctionDef(
name="from_json",
args=fn_args,
body=fn_body,
decorator_list=[ast.Name(id="staticmethod")],
returns=None,
)
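# For orientation, the __init__ emitted by construct_class for a schema with one
# required scalar and one non-primitive nested list roughly unparses to the code
# below (class and field names here are hypothetical):
#   class Person:
#       def __init__(self, person: dict):
#           self.id: int = person["id"]
#           self.pets: List[Pet] = [Pet(el) for el in person.get("pets", {})]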
| [
"json_codegen.generators.python3_marshmallow.utils.class_name",
"ast.walk",
"ast.Str",
"ast.ClassDef",
"ast.arg",
"ast.Dict",
"ast.Name",
"json_codegen.generators.python3_marshmallow.utils.Annotations",
"ast.FunctionDef",
"ast.NameConstant",
"ast.Pass"
] | [((327, 348), 'ast.walk', 'ast.walk', (['node_assign'], {}), '(node_assign)\n', (335, 348), False, 'import ast\n'), ((2112, 2133), 'ast.walk', 'ast.walk', (['node_assign'], {}), '(node_assign)\n', (2120, 2133), False, 'import ast\n'), ((2511, 2532), 'ast.walk', 'ast.walk', (['node_assign'], {}), '(node_assign)\n', (2519, 2532), False, 'import ast\n'), ((2657, 2678), 'ast.walk', 'ast.walk', (['node_assign'], {}), '(node_assign)\n', (2665, 2678), False, 'import ast\n'), ((4421, 4444), 'json_codegen.generators.python3_marshmallow.utils.class_name', 'class_name', (['schema.name'], {}), '(schema.name)\n', (4431, 4444), False, 'from json_codegen.generators.python3_marshmallow.utils import Annotations, class_name\n'), ((5546, 5632), 'ast.ClassDef', 'ast.ClassDef', ([], {'name': 'name', 'bases': '[]', 'body': 'class_body', 'decorator_list': '[]', 'keywords': '[]'}), '(name=name, bases=[], body=class_body, decorator_list=[],\n keywords=[])\n', (5558, 5632), False, 'import ast\n'), ((5213, 5315), 'ast.FunctionDef', 'ast.FunctionDef', ([], {'name': '"""__init__"""', 'args': 'fn_arguments', 'body': 'fn_body', 'decorator_list': '[]', 'returns': 'None'}), "(name='__init__', args=fn_arguments, body=fn_body,\n decorator_list=[], returns=None)\n", (5228, 5315), False, 'import ast\n'), ((7167, 7268), 'ast.FunctionDef', 'ast.FunctionDef', ([], {'name': 'f"""to_{output}"""', 'args': 'fn_args', 'body': 'fn_body', 'decorator_list': '[]', 'returns': 'None'}), "(name=f'to_{output}', args=fn_args, body=fn_body,\n decorator_list=[], returns=None)\n", (7182, 7268), False, 'import ast\n'), ((5128, 5138), 'ast.Pass', 'ast.Pass', ([], {}), '()\n', (5136, 5138), False, 'import ast\n'), ((467, 494), 'json_codegen.generators.python3_marshmallow.utils.class_name', 'class_name', (['node.args[0].id'], {}), '(node.args[0].id)\n', (477, 494), False, 'from json_codegen.generators.python3_marshmallow.utils import Annotations, class_name\n'), ((1952, 1967), 'ast.Str', 'ast.Str', ([], {'s': 'prop'}), '(s=prop)\n', (1959, 1967), False, 'import ast\n'), ((4313, 4337), 'json_codegen.generators.python3_marshmallow.utils.Annotations', 'Annotations', (['node_assign'], {}), '(node_assign)\n', (4324, 4337), False, 'from json_codegen.generators.python3_marshmallow.utils import Annotations, class_name\n'), ((4602, 4638), 'ast.arg', 'ast.arg', ([], {'arg': '"""self"""', 'annotation': 'None'}), "(arg='self', annotation=None)\n", (4609, 4638), False, 'import ast\n'), ((7525, 7561), 'ast.arg', 'ast.arg', ([], {'arg': '"""only"""', 'annotation': 'None'}), "(arg='only', annotation=None)\n", (7532, 7561), False, 'import ast\n'), ((7704, 7732), 'ast.NameConstant', 'ast.NameConstant', ([], {'value': 'None'}), '(value=None)\n', (7720, 7732), False, 'import ast\n'), ((8744, 8771), 'ast.Name', 'ast.Name', ([], {'id': '"""staticmethod"""'}), "(id='staticmethod')\n", (8752, 8771), False, 'import ast\n'), ((1899, 1919), 'ast.Name', 'ast.Name', ([], {'id': 'object_'}), '(id=object_)\n', (1907, 1919), False, 'import ast\n'), ((4210, 4229), 'ast.Name', 'ast.Name', ([], {'id': '"""self"""'}), "(id='self')\n", (4218, 4229), False, 'import ast\n'), ((6003, 6039), 'ast.arg', 'ast.arg', ([], {'arg': '"""self"""', 'annotation': 'None'}), "(arg='self', annotation=None)\n", (6010, 6039), False, 'import ast\n'), ((1243, 1260), 'ast.Name', 'ast.Name', ([], {'id': '"""el"""'}), "(id='el')\n", (1251, 1260), False, 'import ast\n'), ((1394, 1411), 'ast.Name', 'ast.Name', ([], {'id': '"""el"""'}), "(id='el')\n", (1402, 1411), False, 'import ast\n'), ((2297, 2317), 
'ast.Name', 'ast.Name', ([], {'id': 'object_'}), '(id=object_)\n', (2305, 2317), False, 'import ast\n'), ((4691, 4710), 'ast.Name', 'ast.Name', ([], {'id': '"""dict"""'}), "(id='dict')\n", (4699, 4710), False, 'import ast\n'), ((7488, 7506), 'ast.Name', 'ast.Name', ([], {'id': '"""str"""'}), "(id='str')\n", (7496, 7506), False, 'import ast\n'), ((2341, 2356), 'ast.Str', 'ast.Str', ([], {'s': 'prop'}), '(s=prop)\n', (2348, 2356), False, 'import ast\n'), ((8444, 8463), 'ast.Name', 'ast.Name', ([], {'id': '"""json"""'}), "(id='json')\n", (8452, 8463), False, 'import ast\n'), ((1562, 1577), 'ast.Str', 'ast.Str', ([], {'s': 'prop'}), '(s=prop)\n', (1569, 1577), False, 'import ast\n'), ((1579, 1607), 'ast.Dict', 'ast.Dict', ([], {'keys': '[]', 'values': '[]'}), '(keys=[], values=[])\n', (1587, 1607), False, 'import ast\n'), ((1497, 1517), 'ast.Name', 'ast.Name', ([], {'id': 'object_'}), '(id=object_)\n', (1505, 1517), False, 'import ast\n'), ((6966, 6985), 'ast.Name', 'ast.Name', ([], {'id': '"""self"""'}), "(id='self')\n", (6974, 6985), False, 'import ast\n'), ((7982, 8006), 'ast.Name', 'ast.Name', ([], {'id': 'schema.name'}), '(id=schema.name)\n', (7990, 8006), False, 'import ast\n'), ((6476, 6500), 'ast.Name', 'ast.Name', ([], {'id': 'schema.name'}), '(id=schema.name)\n', (6484, 6500), False, 'import ast\n'), ((8160, 8188), 'ast.NameConstant', 'ast.NameConstant', ([], {'value': '(True)'}), '(value=True)\n', (8176, 8188), False, 'import ast\n'), ((8257, 8276), 'ast.Name', 'ast.Name', ([], {'id': '"""only"""'}), "(id='only')\n", (8265, 8276), False, 'import ast\n'), ((6711, 6739), 'ast.NameConstant', 'ast.NameConstant', ([], {'value': '(True)'}), '(value=True)\n', (6727, 6739), False, 'import ast\n')] |
import BboxToolkit as bt
import pickle
import copy
import numpy as np
path1="/home/hnu1/GGM/OBBDetection/work_dir/oriented_obb_contrast_catbalance/dets.pkl"
path2="/home/hnu1/GGM/OBBDetection/data/FaIR1M/test/annfiles/ori_annfile.pkl"#
with open(path2,'rb') as f: #/home/disk/FAIR1M_1000_split/val/annfiles/ori_annfile.pkl
data2 = pickle.load(f)
with open(path1,'rb') as f:
obbdets = pickle.load(f)
polydets=copy.deepcopy(obbdets)
for i in range(len(obbdets)):
for j in range(len(obbdets[0][1])):
data=obbdets[i][1][j]
if data.size!= 0:
polys=[]
for k in range(len(data)):
poly = bt.obb2poly(data[k][0:5])
poly=np.append(poly,data[k][5])
polys.append(poly)
else:
polys=[]
polydets[i][1][j]=polys
savepath="/home/hnu1/GGM/OBBDetection/work_dir/oriented_obb_contrast_catbalance/result_txt/"
for i in range(len(polydets)):
txtfile=savepath+polydets[i][0]+".txt"
f = open(txtfile, "w")
for j in range(len(polydets[0][1])):
if polydets[i][1][j]!=[]:
for k in range(len(polydets[i][1][j])):
f.write(str(polydets[i][1][j][k][0])+" "+
str(polydets[i][1][j][k][1])+" "+
str(polydets[i][1][j][k][2])+" "+
str(polydets[i][1][j][k][3])+" "+
str(polydets[i][1][j][k][4])+" "+
str(polydets[i][1][j][k][5])+" "+
str(polydets[i][1][j][k][6])+" "+
str(polydets[i][1][j][k][7])+" "+
str(data2["cls"][j])+" "+
str(polydets[i][1][j][k][8])+"\n")
f.close() | [
"numpy.append",
"BboxToolkit.obb2poly",
"pickle.load",
"copy.deepcopy"
] | [((344, 358), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (355, 358), False, 'import pickle\n'), ((402, 416), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (413, 416), False, 'import pickle\n'), ((430, 452), 'copy.deepcopy', 'copy.deepcopy', (['obbdets'], {}), '(obbdets)\n', (443, 452), False, 'import copy\n'), ((662, 687), 'BboxToolkit.obb2poly', 'bt.obb2poly', (['data[k][0:5]'], {}), '(data[k][0:5])\n', (673, 687), True, 'import BboxToolkit as bt\n'), ((709, 736), 'numpy.append', 'np.append', (['poly', 'data[k][5]'], {}), '(poly, data[k][5])\n', (718, 736), True, 'import numpy as np\n')] |
import taichi as ti
import utils
from apic_extension import *
@ti.data_oriented
class Initializer3D: # tmp initializer
def __init__(self, res, x0, y0, z0, x1, y1, z1):
self.res = res
self.x0 = int(res * x0)
self.y0 = int(res * y0)
self.z0 = int(res * z0)
self.x1 = int(res * x1)
self.y1 = int(res * y1)
self.z1 = int(res * z1)
@ti.kernel
def init_kernel(self, cell_type : ti.template()):
for i, j, k in cell_type:
if i >= self.x0 and i <= self.x1 and \
j >= self.y0 and j <= self.y1 and \
k >= self.z0 and k <= self.z1:
cell_type[i, j, k] = utils.FLUID
def init_scene(self, simulator):
self.init_kernel(simulator.cell_type)
dx = simulator.dx
simulator.level_set.initialize_with_aabb((self.x0 * dx, self.y0 * dx, self.z0 * dx), (self.x1 * dx, self.y1 * dx, self.z1 * dx))
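# Illustrative use (sketch only): the `simulator` object, providing cell_type, dx and
# level_set, is assumed to come from the surrounding APIC solver and is not defined here.
#   init = Initializer3D(res=64, x0=0.3, y0=0.3, z0=0.3, x1=0.7, y1=0.7, z1=0.7)
#   init.init_scene(simulator)  # marks the box [0.3, 0.7]^3 as FLUID cells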
| [
"taichi.template"
] | [((443, 456), 'taichi.template', 'ti.template', ([], {}), '()\n', (454, 456), True, 'import taichi as ti\n')] |
# (C) Copyright 2016-2017 Hewlett Packard Enterprise Development LP
# Copyright 2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
influxdb_opts = [
cfg.StrOpt('database_name',
help='database name where metrics are stored',
default='mon'),
cfg.HostAddressOpt('ip_address',
help='Valid IP address or hostname '
'to InfluxDB instance'),
cfg.PortOpt('port',
help='port to influxdb',
default=8086),
cfg.StrOpt('user',
help='influxdb user ',
default='mon_persister'),
cfg.StrOpt('password',
secret=True,
help='influxdb password')]
influxdb_group = cfg.OptGroup(name='influxdb',
title='influxdb')
def register_opts(conf):
conf.register_group(influxdb_group)
conf.register_opts(influxdb_opts, influxdb_group)
def list_opts():
return influxdb_group, influxdb_opts
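# Minimal usage sketch (standard oslo.config flow; the values checked are just the
# defaults declared above):
#   conf = cfg.ConfigOpts()
#   register_opts(conf)
#   conf(args=[])  # parse an empty command line / default config files
#   assert conf.influxdb.database_name == 'mon'
#   assert conf.influxdb.port == 8086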
| [
"oslo_config.cfg.PortOpt",
"oslo_config.cfg.OptGroup",
"oslo_config.cfg.StrOpt",
"oslo_config.cfg.HostAddressOpt"
] | [((1281, 1328), 'oslo_config.cfg.OptGroup', 'cfg.OptGroup', ([], {'name': '"""influxdb"""', 'title': '"""influxdb"""'}), "(name='influxdb', title='influxdb')\n", (1293, 1328), False, 'from oslo_config import cfg\n'), ((697, 790), 'oslo_config.cfg.StrOpt', 'cfg.StrOpt', (['"""database_name"""'], {'help': '"""database name where metrics are stored"""', 'default': '"""mon"""'}), "('database_name', help='database name where metrics are stored',\n default='mon')\n", (707, 790), False, 'from oslo_config import cfg\n'), ((822, 917), 'oslo_config.cfg.HostAddressOpt', 'cfg.HostAddressOpt', (['"""ip_address"""'], {'help': '"""Valid IP address or hostname to InfluxDB instance"""'}), "('ip_address', help=\n 'Valid IP address or hostname to InfluxDB instance')\n", (840, 917), False, 'from oslo_config import cfg\n'), ((972, 1030), 'oslo_config.cfg.PortOpt', 'cfg.PortOpt', (['"""port"""'], {'help': '"""port to influxdb"""', 'default': '(8086)'}), "('port', help='port to influxdb', default=8086)\n", (983, 1030), False, 'from oslo_config import cfg\n'), ((1068, 1134), 'oslo_config.cfg.StrOpt', 'cfg.StrOpt', (['"""user"""'], {'help': '"""influxdb user """', 'default': '"""mon_persister"""'}), "('user', help='influxdb user ', default='mon_persister')\n", (1078, 1134), False, 'from oslo_config import cfg\n'), ((1170, 1231), 'oslo_config.cfg.StrOpt', 'cfg.StrOpt', (['"""password"""'], {'secret': '(True)', 'help': '"""influxdb password"""'}), "('password', secret=True, help='influxdb password')\n", (1180, 1231), False, 'from oslo_config import cfg\n')] |
import json
your_json = '["foo", {"bar":["baz", null, 1.0, 2]}]'
parsed = json.loads(your_json)
print(type(your_json))
print(type(parsed))
#print(json.dumps(parsed, indent=4, sort_keys=True)) | [
"json.loads"
] | [((75, 96), 'json.loads', 'json.loads', (['your_json'], {}), '(your_json)\n', (85, 96), False, 'import json\n')] |
import matplotlib
matplotlib.use('Agg')
import numpy as np
from astropy.tests.helper import pytest
from .. import FITSFigure
def test_grid_addremove():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.add_grid()
f.remove_grid()
f.add_grid()
f.close()
def test_grid_showhide():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.add_grid()
f.grid.hide()
f.grid.show()
f.close()
def test_grid_spacing():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.add_grid()
f.grid.set_xspacing(1.)
f.grid.set_xspacing('tick')
with pytest.raises(ValueError):
f.grid.set_xspacing('auto')
f.grid.set_yspacing(2.)
f.grid.set_yspacing('tick')
with pytest.raises(ValueError):
f.grid.set_yspacing('auto')
f.close()
def test_grid_color():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.add_grid()
f.grid.set_color('black')
f.grid.set_color('#003344')
f.grid.set_color((1.0, 0.4, 0.3))
f.close()
def test_grid_alpha():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.add_grid()
f.grid.set_alpha(0.0)
f.grid.set_alpha(0.3)
f.grid.set_alpha(1.0)
f.close()
def test_grid_linestyle():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.add_grid()
f.grid.set_linestyle('solid')
f.grid.set_linestyle('dashed')
f.grid.set_linestyle('dotted')
f.close()
def test_grid_linewidth():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.add_grid()
f.grid.set_linewidth(0)
f.grid.set_linewidth(2)
f.grid.set_linewidth(5)
f.close()
| [
"matplotlib.use",
"numpy.zeros",
"astropy.tests.helper.pytest.raises"
] | [((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), False, 'import matplotlib\n'), ((167, 185), 'numpy.zeros', 'np.zeros', (['(16, 16)'], {}), '((16, 16))\n', (175, 185), True, 'import numpy as np\n'), ((318, 336), 'numpy.zeros', 'np.zeros', (['(16, 16)'], {}), '((16, 16))\n', (326, 336), True, 'import numpy as np\n'), ((467, 485), 'numpy.zeros', 'np.zeros', (['(16, 16)'], {}), '((16, 16))\n', (475, 485), True, 'import numpy as np\n'), ((842, 860), 'numpy.zeros', 'np.zeros', (['(16, 16)'], {}), '((16, 16))\n', (850, 860), True, 'import numpy as np\n'), ((1053, 1071), 'numpy.zeros', 'np.zeros', (['(16, 16)'], {}), '((16, 16))\n', (1061, 1071), True, 'import numpy as np\n'), ((1246, 1264), 'numpy.zeros', 'np.zeros', (['(16, 16)'], {}), '((16, 16))\n', (1254, 1264), True, 'import numpy as np\n'), ((1465, 1483), 'numpy.zeros', 'np.zeros', (['(16, 16)'], {}), '((16, 16))\n', (1473, 1483), True, 'import numpy as np\n'), ((597, 622), 'astropy.tests.helper.pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (610, 622), False, 'from astropy.tests.helper import pytest\n'), ((729, 754), 'astropy.tests.helper.pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (742, 754), False, 'from astropy.tests.helper import pytest\n')] |
import os, sys
class Object:
## @name constructor
def __init__(self, V):
self.value = V
self.nest = []
def box(self, that):
if isinstance(that, Object): return that
if isinstance(that, str): return S(that)
raise TypeError(['box', type(that), that])
## @name dump / string
def test(self): return self.dump(test=True)
def __repr__(self): return self.dump(test=False)
def dump(self, cycle=[], depth=0, prefix='', test=False):
# head
def pad(depth): return '\n' + '\t' * depth
ret = pad(depth) + self.head(prefix, test)
# subtree
return ret
def head(self, prefix='', test=False):
gid = '' if test else f' @{id(self):x}'
return f'{prefix}<{self.tag()}:{self.val()}>{gid}'
def __format__(self, spec=''):
if not spec: return self.val()
raise TypeError(['__format__', spec])
def tag(self): return self.__class__.__name__.lower()
def val(self): return f'{self.value}'
## @name operator
def __iter__(self):
return iter(self.nest)
def __floordiv__(self, that):
self.nest.append(self.box(that)); return self
class Primitive(Object):
pass
class S(Primitive):
def __init__(self, V=None, end=None, pfx=None, sfx=None):
super().__init__(V)
self.end = end; self.pfx = pfx; self.sfx = sfx
def gen(self, to, depth=0):
ret = ''
if self.pfx is not None:
ret += f'{to.tab*depth}{self.pfx}\n'
if self.value is not None:
ret += f'{to.tab*depth}{self.value}\n'
for i in self:
ret += i.gen(to, depth + 1)
if self.end is not None:
ret += f'{to.tab*depth}{self.end}\n'
if self.sfx is not None:
ret += f'{to.tab*depth}{self.sfx}\n'
return ret
class Sec(S):
def gen(self, to, depth=0):
ret = ''
if self.pfx is not None:
ret += f'{to.tab*depth}{self.pfx}\n' if self.pfx else '\n'
if self.nest and self.value is not None:
ret += f'{to.tab*depth}{to.comment} \\ {self}\n'
for i in self:
ret += i.gen(to, depth + 0)
if self.nest and self.value is not None:
ret += f'{to.tab*depth}{to.comment} / {self}\n'
if self.sfx is not None:
ret += f'{to.tab*depth}{self.sfx}\n' if self.pfx else '\n'
return ret
class IO(Object):
def __init__(self, V):
super().__init__(V)
self.path = V
class Dir(IO):
def __floordiv__(self, that):
assert isinstance(that, IO)
that.path = f'{self.path}/{that.path}'
return super().__floordiv__(that)
def sync(self):
try: os.mkdir(self.path)
except FileExistsError: pass
for i in self: i.sync()
class File(IO):
def __init__(self, V, ext='', tab=' ' * 4, comment='#'):
super().__init__(V + ext)
self.top = Sec(); self.bot = Sec()
self.tab = tab; self.comment = comment
def sync(self):
with open(self.path, 'w') as F:
F.write(self.top.gen(self))
for i in self: F.write(i.gen(self))
F.write(self.bot.gen(self))
class giti(File):
def __init__(self, V='.gitignore'):
super().__init__(V)
self.bot // f'!{self}'
class Makefile(File):
def __init__(self, V='Makefile'):
super().__init__(V, tab='\t')
class pyFile(File):
def __init__(self, V, ext='.py'):
super().__init__(V, ext)
class jsonFile(File):
def __init__(self, V, ext='.json', comment='//'):
super().__init__(V, ext, comment=comment)
class Meta(Object): pass
class Class(Meta):
def __init__(self, C, sup=[]):
assert callable(C)
super().__init__(C.__name__)
self.clazz = C; self.sup = sup
def gen(self, to, depth=0):
ret = S(f'class {self}:', pfx='') // 'pass'
return ret.gen(to, depth)
class Project(Meta):
def __init__(self, V=None, title='', about=''):
if not V: V = os.getcwd().split('/')[-1]
super().__init__(V)
#
self.TITLE = title if title else f'{self}'
self.ABOUT = about
self.AUTHOR = '<NAME>'
self.EMAIL = '<EMAIL>'
self.GITHUB = 'https://github.com/ponyatov'
self.YEAR = 2020
self.LICENSE = 'All rights reserved'
self.COPYRIGHT = f'(c) {self.AUTHOR} <{self.EMAIL}> {self.YEAR} {self.LICENSE}'
#
self.dirs()
self.mk()
self.src()
self.vscode()
self.apt()
def apt(self):
self.apt = File('apt', '.txt'); self.d // self.apt
self.apt \
// 'git make curl' // 'code meld' \
// 'python3 python3-venv' \
// 'build-essential g++'
def vscode(self):
self.vscode = Dir('.vscode'); self.d // self.vscode
self.settings()
self.tasks()
def settings(self):
self.settings = jsonFile('settings'); self.vscode // self.settings
#
def multi(key, cmd):
return (S('{', '},')
// f'"command": "multiCommand.{key}",'
// (S('"sequence": [', ']')
// '"workbench.action.files.saveAll",'
// (S('{"command": "workbench.action.terminal.sendSequence",')
// f'"args": {{"text": "\\u000D {cmd} \\u000D"}}}}'
)))
self.multi = \
(Sec('multi')
// (S('"multiCommand.commands": [', '],')
// multi('f11', 'make meta')
// multi('f12', 'make all')
))
#
self.files = (Sec()
// f'"{self}/**":true,'
)
self.exclude = \
(Sec()
// (S('"files.exclude": {', '},') // self.files))
self.watcher = \
(Sec()
// (S('"files.watcherExclude": {', '},') // self.files))
self.assoc = \
(Sec()
// (S('"files.associations": {', '},')))
self.files = (Sec('files', pfx='')
// self.exclude
// self.watcher
// self.assoc)
#
self.editor = (Sec('editor', pfx='')
// '"editor.tabSize": 4,'
// '"editor.rulers": [80],'
// '"workbench.tree.indent": 32,'
)
#
self.settings \
// (S('{', '}')
// self.multi
// self.files
// self.editor)
def tasks(self):
self.tasks = jsonFile('tasks'); self.vscode // self.tasks
def task(clazz, cmd):
return (S('{', '},')
// f'"label": "{clazz}: {cmd}",'
// f'"type": "shell",'
// f'"command": "make {cmd}",'
// f'"problemMatcher": []'
)
self.tasks \
// (S('{', '}')
// '"version": "2.0.0",'
// (S('"tasks": [', ']')
// task('project', 'install')
// task('project', 'update')
// task('git', 'dev')
// task('git', 'shadow')
))
def src(self):
self.py()
self.test()
self.config()
def config(self):
self.config = pyFile('config'); self.d // self.config
self.config \
// f"{'SECURE_KEY':<11} = {os.urandom(0x22)}" \
// f"{'HOST':<11} = '127..0.0.1'" \
// f"{'PORT':<11} = 12345"
def py(self):
self.py = pyFile(f'{self}'); self.d // self.py
self.py \
// 'import os, sys'
for i in [Object, S, Sec, IO, Dir, File, Meta, Class, Project]:
self.py // Class(i)
self.py // Class(Primitive, [Object])
self.py \
// S('Project().sync()', pfx='')
def test(self):
self.test = pyFile(f'test_{self}'); self.d // self.test
self.test \
// 'import pytest' \
// f'from {self} import *' \
// 'def test_any(): assert True'
def dirs(self):
self.d = Dir(f'{self}'); self.giti = giti(); self.d // self.giti
self.giti.top // '*~' // '*.swp' // '*.log'; self.giti.top.sfx = ''
self.giti // f'/{self}/' // '/__pycache__/'
self.giti.bot.pfx = ''
#
self.bin = Dir('bin'); self.d // self.bin
def mk(self):
self.mk = Makefile(); self.d // self.mk
#
self.mk.var = Sec('var', pfx=''); self.mk // self.mk.var
self.mk.var \
// f'{"MODULE":<11} = $(notdir $(CURDIR))' \
// f'{"OS":<11} = $(shell uname -s)' \
// f'{"CORES":<11} = $(shell grep processor /proc/cpuinfo | wc -l)'
#
self.mk.dir = Sec('dir', pfx=''); self.mk // self.mk.dir
self.mk.dir \
// f'{"CWD":<11} = $(CURDIR)' \
// f'{"BIN":<11} = $(CWD)/bin' \
// f'{"DOC":<11} = $(CWD)/doc' \
// f'{"LIB":<11} = $(CWD)/lib' \
// f'{"SRC":<11} = $(CWD)/src' \
// f'{"TMP":<11} = $(CWD)/tmp'
#
self.mk.tool = Sec('tool', pfx=''); self.mk // self.mk.tool
self.mk.tool \
// f'CURL = curl -L -o' \
// f'PY = $(shell which python3)' \
// f'PYT = $(shell which pytest)' \
// f'PEP = $(shell which autopep8)'
#
self.mk.package = Sec('package', pfx=''); self.mk // self.mk.package
self.mk.package \
// f'SYSLINUX_VER = 6.0.3'
#
self.mk.src = Sec('src', pfx=''); self.mk // self.mk.src
self.mk.src \
// f'Y += $(MODULE).py test_$(MODULE).py' \
// f'P += config.py' \
// f'S += $(Y)'
#
self.mk.cfg = Sec('cfg', pfx=''); self.mk // self.mk.cfg
self.mk.cfg \
// f'PEPS = E26,E302,E305,E401,E402,E701,E702'
#
self.mk.all = Sec('all', pfx=''); self.mk // self.mk.all
self.mk.all \
// (S('meta: $(Y)', pfx='.PHONY: meta')
// '$(MAKE) test'
// '$(PY) $(MODULE).py'
// '$(PEP) --ignore=$(PEPS) --in-place $?')
self.mk.all \
// (S('test: $(Y)', pfx='\n.PHONY: test')
// '$(PYT) test_$(MODULE).py')
#
self.mk.rule = Sec('rule', pfx=''); self.mk // self.mk.rule
#
self.mk.doc = Sec('doc', pfx=''); self.mk // self.mk.doc
self.mk.doc \
// S('doc: doc/pyMorphic.pdf', pfx='.PHONY: doc')
self.mk.doc \
// (S('doc/pyMorphic.pdf:')
// '$(CURL) $@ http://www.diva-portal.org/smash/get/diva2:22296/FULLTEXT01.pdf')
#
self.mk.install = Sec('install', pfx=''); self.mk // self.mk.install
self.mk.install // '.PHONY: install update'
self.mk.install \
// (S('install: $(OS)_install doc')
// '$(MAKE) test'
)
self.mk.install \
// (S('update: $(OS)_update doc')
// '$(MAKE) test'
)
self.mk.install \
// (S('Linux_install Linux_update:',
pfx='.PHONY: Linux_install Linux_update')
// 'sudo apt update'
// 'sudo apt install -u `cat apt.txt`')
#
self.mk.merge = Sec('merge', pfx=''); self.mk // self.mk.merge
self.mk.merge \
// 'SHADOW ?= ponymuck'
self.mk.merge \
// 'MERGE = Makefile .gitignore README.md apt.txt $(S)' \
// 'MERGE += .vscode bin doc lib src tmp'
self.mk.merge \
// (S('dev:', pfx='\n.PHONY: dev')
// 'git push -v'
// 'git checkout $@'
// 'git checkout $(SHADOW) -- $(MERGE)'
)
self.mk.merge \
// (S('shadow:', pfx='\n.PHONY: shadow')
// 'git push -v'
// 'git checkout $(SHADOW)'
)
self.mk.merge \
// (S('release:', pfx='\n.PHONY: release')
)
self.mk.merge \
// (S('zip:', pfx='\n.PHONY: zip')
)
def sync(self):
self.readme()
self.d.sync()
def readme(self):
self.readme = File('README', '.md'); self.d // self.readme
self.readme \
// f'#  `{self}`' // f'## {self.TITLE}'
self.readme \
// '' // self.COPYRIGHT // '' // f'github: {self.GITHUB}/{self}'
self.readme // self.ABOUT
Project(
title='ViZual language environment',
about='''
* object (hyper)graph interpreter
'''
).sync()
| [
"os.urandom",
"os.mkdir",
"os.getcwd"
] | [((2736, 2755), 'os.mkdir', 'os.mkdir', (['self.path'], {}), '(self.path)\n', (2744, 2755), False, 'import os, sys\n'), ((4051, 4062), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4060, 4062), False, 'import os, sys\n'), ((7598, 7612), 'os.urandom', 'os.urandom', (['(34)'], {}), '(34)\n', (7608, 7612), False, 'import os, sys\n')] |
from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau, OneCycleLR
def step_lr(optimizer, step_size, gamma=0.1, last_epoch=-1):
"""Create LR step scheduler.
Args:
optimizer (torch.optim): Model optimizer.
step_size (int): Frequency for changing learning rate.
gamma (float): Factor for changing learning rate. (default: 0.1)
last_epoch (int): The index of last epoch. (default: -1)
Returns:
StepLR: Learning rate scheduler.
"""
return StepLR(optimizer, step_size=step_size, gamma=gamma, last_epoch=last_epoch)
def reduce_lr_on_plateau(optimizer, factor=0.1, patience=10, verbose=False, min_lr=0):
"""Create LR plateau reduction scheduler.
Args:
optimizer (torch.optim): Model optimizer.
factor (float, optional): Factor by which the learning rate will be reduced.
(default: 0.1)
        patience (int, optional): Number of epochs with no improvement after which the
            learning rate will be reduced. (default: 10)
verbose (bool, optional): If True, prints a message to stdout for each update.
(default: False)
min_lr (float, optional): A scalar or a list of scalars. A lower bound on the
learning rate of all param groups or each group respectively. (default: 0)
Returns:
ReduceLROnPlateau instance.
"""
return ReduceLROnPlateau(
optimizer, factor=factor, patience=patience, verbose=verbose, min_lr=min_lr
)
def one_cycle_lr(
optimizer, max_lr, epochs, steps_per_epoch, pct_start=0.5, div_factor=10.0, final_div_factor=10000
):
"""Create One Cycle Policy for Learning Rate.
Args:
optimizer (torch.optim): Model optimizer.
max_lr (float): Upper learning rate boundary in the cycle.
epochs (int): The number of epochs to train for. This is used along with
steps_per_epoch in order to infer the total number of steps in the cycle.
steps_per_epoch (int): The number of steps per epoch to train for. This is
used along with epochs in order to infer the total number of steps in the cycle.
pct_start (float, optional): The percentage of the cycle (in number of steps)
spent increasing the learning rate. (default: 0.5)
div_factor (float, optional): Determines the initial learning rate via
initial_lr = max_lr / div_factor. (default: 10.0)
final_div_factor (float, optional): Determines the minimum learning rate via
min_lr = initial_lr / final_div_factor. (default: 1e4)
Returns:
OneCycleLR instance.
"""
return OneCycleLR(
optimizer, max_lr, epochs=epochs, steps_per_epoch=steps_per_epoch,
pct_start=pct_start, div_factor=div_factor, final_div_factor=final_div_factor
)
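# Usage sketch (hypothetical names; assumes an optimizer and a data loader already exist):
#   scheduler = one_cycle_lr(optimizer, max_lr=0.1, epochs=10, steps_per_epoch=len(loader))
#   for _ in range(10):
#       for batch in loader:
#           ...  # forward / backward / optimizer.step()
#           scheduler.step()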
| [
"torch.optim.lr_scheduler.OneCycleLR",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.optim.lr_scheduler.StepLR"
] | [((512, 586), 'torch.optim.lr_scheduler.StepLR', 'StepLR', (['optimizer'], {'step_size': 'step_size', 'gamma': 'gamma', 'last_epoch': 'last_epoch'}), '(optimizer, step_size=step_size, gamma=gamma, last_epoch=last_epoch)\n', (518, 586), False, 'from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau, OneCycleLR\n'), ((1405, 1504), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'ReduceLROnPlateau', (['optimizer'], {'factor': 'factor', 'patience': 'patience', 'verbose': 'verbose', 'min_lr': 'min_lr'}), '(optimizer, factor=factor, patience=patience, verbose=\n verbose, min_lr=min_lr)\n', (1422, 1504), False, 'from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau, OneCycleLR\n'), ((2670, 2835), 'torch.optim.lr_scheduler.OneCycleLR', 'OneCycleLR', (['optimizer', 'max_lr'], {'epochs': 'epochs', 'steps_per_epoch': 'steps_per_epoch', 'pct_start': 'pct_start', 'div_factor': 'div_factor', 'final_div_factor': 'final_div_factor'}), '(optimizer, max_lr, epochs=epochs, steps_per_epoch=\n steps_per_epoch, pct_start=pct_start, div_factor=div_factor,\n final_div_factor=final_div_factor)\n', (2680, 2835), False, 'from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau, OneCycleLR\n')] |
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for Zones"""
import copy
import unittest
import armi
from armi import settings
from armi.reactor import assemblies
from armi.reactor import blueprints
from armi.reactor import geometry
from armi.reactor import grids
from armi.reactor import reactors
from armi.reactor import zones
from armi.reactor.flags import Flags
from armi.reactor.tests import test_reactors
from armi.utils import pathTools
from armi.settings.fwSettings import globalSettings
THIS_DIR = pathTools.armiAbsDirFromName(__name__)
class Zone_TestCase(unittest.TestCase):
def setUp(self):
bp = blueprints.Blueprints()
geom = geometry.SystemLayoutInput()
geom.symmetry = "third core periodic"
r = reactors.Reactor(settings.getMasterCs(), bp)
r.add(reactors.Core("Core", settings.getMasterCs(), geom))
r.core.spatialGrid = grids.hexGridFromPitch(1.0)
aList = []
for ring in range(10):
a = assemblies.HexAssembly("fuel")
a.spatialLocator = r.core.spatialGrid[ring, 1, 0]
a.parent = r.core
aList.append(a)
self.aList = aList
def test_addAssemblyLocations(self):
zone = zones.Zone("TestZone")
zone.addAssemblyLocations(self.aList)
for a in self.aList:
self.assertIn(a.getLocation(), zone)
self.assertRaises(RuntimeError, zone.addAssemblyLocations, self.aList)
def test_iteration(self):
locs = [a.getLocation() for a in self.aList]
zone = zones.Zone("TestZone")
zone.addAssemblyLocations(self.aList)
for aLoc in zone:
self.assertIn(aLoc, locs)
# loop twice to make sure it iterates nicely.
for aLoc in zone:
self.assertIn(aLoc, locs)
def test_addRing(self):
zone = zones.Zone("TestZone")
zone.addRing(5)
self.assertIn("A5003", zone)
self.assertNotIn("A6002", zone)
zone.addRing(6, 3, 9)
self.assertIn("A6003", zone)
self.assertIn("A6009", zone)
self.assertNotIn("A6002", zone)
self.assertNotIn("A6010", zone)
class Zones_InReactor(unittest.TestCase):
def setUp(self):
self.o, self.r = test_reactors.loadTestReactor()
def test_buildRingZones(self):
o, r = self.o, self.r
cs = o.cs
cs[globalSettings.CONF_ZONING_STRATEGY] = "byRingZone"
cs["ringZones"] = []
zonez = zones.buildZones(r.core, cs)
self.assertEqual(len(list(zonez)), 1)
self.assertEqual(9, r.core.numRings)
cs["ringZones"] = [5, 8]
zonez = zones.buildZones(r.core, cs)
self.assertEqual(len(list(zonez)), 2)
zone = zonez["ring-1"]
self.assertEqual(len(zone), (5 * (5 - 1) + 1))
zone = zonez["ring-2"]
# Note that the actual number of rings in the reactor model is 9. Even though we
        # asked for the last zone to go to 8, the zone engine should bump it out. Not
# sure if this is behavior that we want to preserve, but at least it's being
# tested properly now.
self.assertEqual(len(zone), (9 * (9 - 1) + 1) - (5 * (5 - 1) + 1))
cs["ringZones"] = [5, 7, 8]
zonez = zones.buildZones(r.core, cs)
self.assertEqual(len(list(zonez)), 3)
zone = zonez["ring-3"]
self.assertEqual(len(zone), 30) # rings 8 and 9. See above comment
def test_removeZone(self):
o, r = self.o, self.r
cs = o.cs
cs[globalSettings.CONF_ZONING_STRATEGY] = "byRingZone"
cs["ringZones"] = [5, 8]
# produce 2 zones, with the names ringzone0 and ringzone1
daZones = zones.buildZones(r.core, cs)
daZones.removeZone("ring-1")
# The names list should only house the only other remaining zone now
self.assertEqual(["ring-2"], daZones.names)
# if indexed like a dict, the zones object should give a key error from the removed zone
with self.assertRaises(KeyError):
daZones["ring-1"]
# Ensure we can still iterate through our zones object
for name in daZones.names:
aZone = daZones[name]
def test_findZoneAssemblyIsIn(self):
cs = self.o.cs
cs["ringZones"] = [5, 7, 8]
daZones = zones.buildZones(self.r.core, cs)
for zone in daZones:
a = self.r.core.getAssemblyWithStringLocation(zone.locList[0])
aZone = daZones.findZoneAssemblyIsIn(a)
self.assertEqual(aZone, zone)
        # Let's test that we get None and a warning if the assembly does not exist in a zone
a = self.r.core.getAssemblyWithStringLocation(
daZones[daZones.names[0]].locList[0]
) # get assem from first zone
daZones.removeZone(
daZones.names[0]
) # remove a zone to ensure that our assem does not have a zone anymore
self.assertEqual(daZones.findZoneAssemblyIsIn(a), None)
class Zones_InRZReactor(unittest.TestCase):
def test_splitZones(self):
# Test to make sure that we can split a zone containing control and fuel assemblies.
# Also test that we can separate out assemblies with differing numbers of blocks.
o, r = test_reactors.loadTestReactor(inputFileName="partisnTestReactor.yaml")
cs = o.cs
cs["splitZones"] = False
cs[globalSettings.CONF_ZONING_STRATEGY] = "byRingZone"
cs["ringZones"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
diverseZone = "ring-4"
r.core.buildZones(cs)
daZones = r.core.zones
# lets make one of the assemblies have an extra block
zoneLocations = daZones.getZoneLocations(diverseZone)
originalAssemblies = r.core.getLocationContents(
zoneLocations, assemblyLevel=True
)
fuel = [a for a in originalAssemblies if a.hasFlags(Flags.FUEL)][0]
newBlock = copy.deepcopy(fuel[-1])
fuel.add(newBlock)
# should contain a zone for every ring zone
# we only want one ring zone for this test, containing assemblies of different types.
zoneTup = tuple(daZones.names)
for zoneName in zoneTup:
if zoneName != diverseZone:
daZones.removeZone(zoneName)
# this should split diverseZone into multiple zones by nodalization type.
cs["splitZones"] = True
zones.splitZones(r.core, cs, daZones)
# test to make sure that we split the ring zone correctly
self.assertEqual(len(daZones["ring-4-primary-control-5"]), 2)
self.assertEqual(len(daZones["ring-4-middle-fuel-5"]), 3)
self.assertEqual(len(daZones["ring-4-middle-fuel-6"]), 1)
def test_createHotZones(self):
# Test to make sure createHotZones identifies the highest p/f location in a zone
# Test to make sure createHotZones can remove the peak assembly from that zone and place it in a new zone
# Test that the power in the old zone and the new zone is conserved.
        # Test that a hot zone cannot be created from a single-assembly zone.
o, r = test_reactors.loadTestReactor(inputFileName="partisnTestReactor.yaml")
cs = o.cs
cs["splitZones"] = False
cs[globalSettings.CONF_ZONING_STRATEGY] = "byRingZone"
cs["ringZones"] = [9] # build one giant zone
r.core.buildZones(cs)
daZones = r.core.zones
originalassemblies = []
originalPower = 0.0
peakZonePFRatios = []
# Create a single assembly zone to verify that it will not create a hot zone
single = zones.Zone("single")
daZones.add(single)
aLoc = r.core.getFirstAssembly(Flags.FUEL).getLocation()
single.append(aLoc)
# Set power and flow.
# Also gather channel peak P/F ratios, assemblies and power.
for zone in daZones:
powerToFlow = []
zoneLocations = daZones.getZoneLocations(zone.name)
assems = r.core.getLocationContents(zoneLocations, assemblyLevel=True)
power = 300.0
flow = 300.0
for a in assems:
a.getFirstBlock().p.power = power
assemblyPower = a.calcTotalParam("power")
a[-1].p.THmassFlowRate = flow
powerToFlow.append(assemblyPower / a[-1].p.THmassFlowRate)
originalPower += assemblyPower
originalassemblies.append(a)
power += 1
flow -= 1
peakZonePFRatios.append(max(powerToFlow))
daZones = zones.createHotZones(r.core, daZones)
# Test that the hot zones have the peak P/F from the host channels
i = 0
for zone in daZones:
if zone.hotZone:
hotAssemLocation = daZones.getZoneLocations(zone.name)
hotAssem = r.core.getLocationContents(
hotAssemLocation, assemblyLevel=True
)[0]
self.assertEqual(
peakZonePFRatios[i],
hotAssem.calcTotalParam("power") / hotAssem[-1].p.THmassFlowRate,
)
i += 1
powerAfterHotZoning = 0.0
assembliesAfterHotZoning = []
# Check that power is conserved and that we did not lose any assemblies
for zone in daZones:
locs = daZones.getZoneLocations(zone.name)
assems = r.core.getLocationContents(locs, assemblyLevel=True)
for a in assems:
assembliesAfterHotZoning.append(a)
powerAfterHotZoning += a.calcTotalParam("power")
self.assertEqual(powerAfterHotZoning, originalPower)
self.assertEqual(len(assembliesAfterHotZoning), len(originalassemblies))
# check that the original zone with 1 channel has False for hotzone
self.assertEqual(single.hotZone, False)
# check that we have the correct number of hot and normal zones.
hotCount = 0
normalCount = 0
for zone in daZones:
if zone.hotZone:
hotCount += 1
else:
normalCount += 1
self.assertEqual(hotCount, 1)
self.assertEqual(normalCount, 2)
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Zones_InReactor.test_buildRingZones']
unittest.main()
| [
"armi.reactor.grids.hexGridFromPitch",
"armi.reactor.geometry.SystemLayoutInput",
"armi.reactor.blueprints.Blueprints",
"armi.reactor.zones.Zone",
"armi.reactor.zones.createHotZones",
"armi.utils.pathTools.armiAbsDirFromName",
"armi.reactor.zones.splitZones",
"armi.settings.getMasterCs",
"copy.deepcopy",
"unittest.main",
"armi.reactor.assemblies.HexAssembly",
"armi.reactor.tests.test_reactors.loadTestReactor",
"armi.reactor.zones.buildZones"
] | [((1049, 1087), 'armi.utils.pathTools.armiAbsDirFromName', 'pathTools.armiAbsDirFromName', (['__name__'], {}), '(__name__)\n', (1077, 1087), False, 'from armi.utils import pathTools\n'), ((10873, 10888), 'unittest.main', 'unittest.main', ([], {}), '()\n', (10886, 10888), False, 'import unittest\n'), ((1164, 1187), 'armi.reactor.blueprints.Blueprints', 'blueprints.Blueprints', ([], {}), '()\n', (1185, 1187), False, 'from armi.reactor import blueprints\n'), ((1203, 1231), 'armi.reactor.geometry.SystemLayoutInput', 'geometry.SystemLayoutInput', ([], {}), '()\n', (1229, 1231), False, 'from armi.reactor import geometry\n'), ((1431, 1458), 'armi.reactor.grids.hexGridFromPitch', 'grids.hexGridFromPitch', (['(1.0)'], {}), '(1.0)\n', (1453, 1458), False, 'from armi.reactor import grids\n'), ((1760, 1782), 'armi.reactor.zones.Zone', 'zones.Zone', (['"""TestZone"""'], {}), "('TestZone')\n", (1770, 1782), False, 'from armi.reactor import zones\n'), ((2086, 2108), 'armi.reactor.zones.Zone', 'zones.Zone', (['"""TestZone"""'], {}), "('TestZone')\n", (2096, 2108), False, 'from armi.reactor import zones\n'), ((2382, 2404), 'armi.reactor.zones.Zone', 'zones.Zone', (['"""TestZone"""'], {}), "('TestZone')\n", (2392, 2404), False, 'from armi.reactor import zones\n'), ((2781, 2812), 'armi.reactor.tests.test_reactors.loadTestReactor', 'test_reactors.loadTestReactor', ([], {}), '()\n', (2810, 2812), False, 'from armi.reactor.tests import test_reactors\n'), ((3005, 3033), 'armi.reactor.zones.buildZones', 'zones.buildZones', (['r.core', 'cs'], {}), '(r.core, cs)\n', (3021, 3033), False, 'from armi.reactor import zones\n'), ((3175, 3203), 'armi.reactor.zones.buildZones', 'zones.buildZones', (['r.core', 'cs'], {}), '(r.core, cs)\n', (3191, 3203), False, 'from armi.reactor import zones\n'), ((3786, 3814), 'armi.reactor.zones.buildZones', 'zones.buildZones', (['r.core', 'cs'], {}), '(r.core, cs)\n', (3802, 3814), False, 'from armi.reactor import zones\n'), ((4228, 4256), 'armi.reactor.zones.buildZones', 'zones.buildZones', (['r.core', 'cs'], {}), '(r.core, cs)\n', (4244, 4256), False, 'from armi.reactor import zones\n'), ((4845, 4878), 'armi.reactor.zones.buildZones', 'zones.buildZones', (['self.r.core', 'cs'], {}), '(self.r.core, cs)\n', (4861, 4878), False, 'from armi.reactor import zones\n'), ((5790, 5860), 'armi.reactor.tests.test_reactors.loadTestReactor', 'test_reactors.loadTestReactor', ([], {'inputFileName': '"""partisnTestReactor.yaml"""'}), "(inputFileName='partisnTestReactor.yaml')\n", (5819, 5860), False, 'from armi.reactor.tests import test_reactors\n'), ((6453, 6476), 'copy.deepcopy', 'copy.deepcopy', (['fuel[-1]'], {}), '(fuel[-1])\n', (6466, 6476), False, 'import copy\n'), ((6930, 6967), 'armi.reactor.zones.splitZones', 'zones.splitZones', (['r.core', 'cs', 'daZones'], {}), '(r.core, cs, daZones)\n', (6946, 6967), False, 'from armi.reactor import zones\n'), ((7649, 7719), 'armi.reactor.tests.test_reactors.loadTestReactor', 'test_reactors.loadTestReactor', ([], {'inputFileName': '"""partisnTestReactor.yaml"""'}), "(inputFileName='partisnTestReactor.yaml')\n", (7678, 7719), False, 'from armi.reactor.tests import test_reactors\n'), ((8143, 8163), 'armi.reactor.zones.Zone', 'zones.Zone', (['"""single"""'], {}), "('single')\n", (8153, 8163), False, 'from armi.reactor import zones\n'), ((9117, 9154), 'armi.reactor.zones.createHotZones', 'zones.createHotZones', (['r.core', 'daZones'], {}), '(r.core, daZones)\n', (9137, 9154), False, 'from armi.reactor import zones\n'), ((1307, 1329), 
'armi.settings.getMasterCs', 'settings.getMasterCs', ([], {}), '()\n', (1327, 1329), False, 'from armi import settings\n'), ((1525, 1555), 'armi.reactor.assemblies.HexAssembly', 'assemblies.HexAssembly', (['"""fuel"""'], {}), "('fuel')\n", (1547, 1555), False, 'from armi.reactor import assemblies\n'), ((1371, 1393), 'armi.settings.getMasterCs', 'settings.getMasterCs', ([], {}), '()\n', (1391, 1393), False, 'from armi import settings\n')] |
"""empty message
Revision ID: 2018_04_20_data_src_refactor
Revises: 2018_04_11_add_sandbox_topic
Create Date: 2018-04-20 13:03:32.478880
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
from sqlalchemy.dialects.postgresql import ARRAY
revision = '2018_04_20_data_src_refactor'
down_revision = '2018_04_11_add_sandbox_topic'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
type_of_data_types = sa.Enum('ADMINISTRATIVE', 'SURVEY', name='type_of_data_types')
op.add_column('page', sa.Column('secondary_source_1_type_of_data', ARRAY(type_of_data_types), nullable=True))
op.add_column('page', sa.Column('suppression_and_disclosure', sa.TEXT(), nullable=True))
op.add_column('page', sa.Column('note_on_corrections_or_updates', sa.TEXT(), nullable=True))
op.add_column('page', sa.Column('secondary_source_1_note_on_corrections_or_updates', sa.TEXT(), nullable=True))
op.add_column('page', sa.Column('secondary_source_1_data_source_purpose', sa.TEXT(), nullable=True))
op.get_bind()
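    # Fold the legacy suppression_rules and disclosure_control values into the new suppression_and_disclosure column.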
op.execute('''
UPDATE page SET suppression_and_disclosure = suppression_rules
WHERE disclosure_control is null;
''')
op.execute('''
UPDATE page SET suppression_and_disclosure = disclosure_control
WHERE suppression_rules is null;
''')
op.execute('''
UPDATE page SET suppression_and_disclosure = trim(suppression_rules || ' ' || disclosure_control)
WHERE suppression_rules is not null
AND disclosure_control is not null;
''')
op.drop_constraint('organisation_secondary_source_2_fkey', 'page', type_='foreignkey')
op.drop_constraint('frequency_secondary_source_2_fkey', 'page', type_='foreignkey')
op.drop_constraint('secondary_source_2_type_of_statistic_fkey', 'page', type_='foreignkey')
op.drop_column('page', 'secondary_source_1_date_next_update')
op.drop_column('page', 'secondary_source_1_date_updated')
op.drop_column('page', 'secondary_source_1_suppression_rules')
op.drop_column('page', 'secondary_source_1_disclosure_control')
op.drop_column('page', 'secondary_source_2_frequency')
op.drop_column('page', 'secondary_source_2_contact_2_name')
op.drop_column('page', 'secondary_source_2_contact_2_phone')
op.drop_column('page', 'secondary_source_2_url')
op.drop_column('page', 'secondary_source_2_date_next_update')
op.drop_column('page', 'secondary_source_2_contact_1_name')
op.drop_column('page', 'last_update_date')
op.drop_column('page', 'secondary_source_2_contact_1_phone')
op.drop_column('page', 'secondary_source_2_publisher_text')
op.drop_column('page', 'secondary_source_2_disclosure_control')
op.drop_column('page', 'secondary_source_2_type_of_statistic_id')
op.drop_column('page', 'secondary_source_2_suppression_rules')
op.drop_column('page', 'secondary_source_2_frequency_other')
op.drop_column('page', 'secondary_source_2_publisher_id')
op.drop_column('page', 'secondary_source_2_title')
op.drop_column('page', 'secondary_source_2_date')
op.drop_column('page', 'next_update_date')
op.drop_column('page', 'secondary_source_2_date_updated')
op.drop_column('page', 'secondary_source_2_statistic_type')
op.drop_column('page', 'secondary_source_2_frequency_id')
op.drop_column('page', 'secondary_source_2_contact_2_email')
op.drop_column('page', 'secondary_source_2_contact_1_email')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('page', sa.Column('secondary_source_2_contact_1_email', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_contact_2_email', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_frequency_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_statistic_type', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_date_updated', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_date', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_title', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_publisher_id', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_frequency_other', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_suppression_rules', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_type_of_statistic_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_disclosure_control', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_publisher_text', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_contact_1_phone', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_contact_1_name', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_date_next_update', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_url', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_contact_2_phone', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_contact_2_name', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_2_frequency', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('last_update_date', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('next_update_date', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_1_date_next_update', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_1_date_updated', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_1_disclosure_control', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('page', sa.Column('secondary_source_1_suppression_rules', sa.TEXT(), autoincrement=False, nullable=True))
op.create_foreign_key('secondary_source_2_type_of_statistic_fkey', 'page', 'type_of_statistic', ['secondary_source_2_type_of_statistic_id'], ['id'])
op.create_foreign_key('frequency_secondary_source_2_fkey', 'page', 'frequency_of_release', ['secondary_source_2_frequency_id'], ['id'])
op.create_foreign_key('organisation_secondary_source_2_fkey', 'page', 'organisation', ['secondary_source_2_publisher_id'], ['id'])
op.drop_column('page', 'secondary_source_1_type_of_data')
op.drop_column('page', 'suppression_and_disclosure')
op.drop_column('page', 'note_on_corrections_or_updates')
op.drop_column('page', 'secondary_source_1_note_on_corrections_or_updates')
op.drop_column('page', 'secondary_source_1_data_source_purpose')
# ### end Alembic commands ###
| [
"alembic.op.get_bind",
"alembic.op.create_foreign_key",
"alembic.op.drop_constraint",
"sqlalchemy.VARCHAR",
"alembic.op.drop_column",
"sqlalchemy.TEXT",
"alembic.op.execute",
"sqlalchemy.INTEGER",
"sqlalchemy.Enum",
"sqlalchemy.dialects.postgresql.ARRAY"
] | [((520, 582), 'sqlalchemy.Enum', 'sa.Enum', (['"""ADMINISTRATIVE"""', '"""SURVEY"""'], {'name': '"""type_of_data_types"""'}), "('ADMINISTRATIVE', 'SURVEY', name='type_of_data_types')\n", (527, 582), True, 'import sqlalchemy as sa\n'), ((1113, 1126), 'alembic.op.get_bind', 'op.get_bind', ([], {}), '()\n', (1124, 1126), False, 'from alembic import op\n'), ((1132, 1279), 'alembic.op.execute', 'op.execute', (['"""\n UPDATE page SET suppression_and_disclosure = suppression_rules \n WHERE disclosure_control is null;\n """'], {}), '(\n """\n UPDATE page SET suppression_and_disclosure = suppression_rules \n WHERE disclosure_control is null;\n """\n )\n', (1142, 1279), False, 'from alembic import op\n'), ((1275, 1431), 'alembic.op.execute', 'op.execute', (['"""\n UPDATE page SET suppression_and_disclosure = disclosure_control \n WHERE suppression_rules is null;\n """'], {}), '(\n """\n UPDATE page SET suppression_and_disclosure = disclosure_control \n WHERE suppression_rules is null;\n """\n )\n', (1285, 1431), False, 'from alembic import op\n'), ((1427, 1658), 'alembic.op.execute', 'op.execute', (['"""\n UPDATE page SET suppression_and_disclosure = trim(suppression_rules || \' \' || disclosure_control)\n WHERE suppression_rules is not null\n AND disclosure_control is not null;\n """'], {}), '(\n """\n UPDATE page SET suppression_and_disclosure = trim(suppression_rules || \' \' || disclosure_control)\n WHERE suppression_rules is not null\n AND disclosure_control is not null;\n """\n )\n', (1437, 1658), False, 'from alembic import op\n'), ((1654, 1745), 'alembic.op.drop_constraint', 'op.drop_constraint', (['"""organisation_secondary_source_2_fkey"""', '"""page"""'], {'type_': '"""foreignkey"""'}), "('organisation_secondary_source_2_fkey', 'page', type_=\n 'foreignkey')\n", (1672, 1745), False, 'from alembic import op\n'), ((1745, 1833), 'alembic.op.drop_constraint', 'op.drop_constraint', (['"""frequency_secondary_source_2_fkey"""', '"""page"""'], {'type_': '"""foreignkey"""'}), "('frequency_secondary_source_2_fkey', 'page', type_=\n 'foreignkey')\n", (1763, 1833), False, 'from alembic import op\n'), ((1833, 1928), 'alembic.op.drop_constraint', 'op.drop_constraint', (['"""secondary_source_2_type_of_statistic_fkey"""', '"""page"""'], {'type_': '"""foreignkey"""'}), "('secondary_source_2_type_of_statistic_fkey', 'page',\n type_='foreignkey')\n", (1851, 1928), False, 'from alembic import op\n'), ((1929, 1990), 'alembic.op.drop_column', 'op.drop_column', (['"""page"""', '"""secondary_source_1_date_next_update"""'], {}), "('page', 'secondary_source_1_date_next_update')\n", (1943, 1990), False, 'from alembic import op\n'), ((1995, 2052), 'alembic.op.drop_column', 'op.drop_column', (['"""page"""', '"""secondary_source_1_date_updated"""'], {}), "('page', 'secondary_source_1_date_updated')\n", (2009, 2052), False, 'from alembic import op\n'), ((2057, 2119), 'alembic.op.drop_column', 'op.drop_column', (['"""page"""', '"""secondary_source_1_suppression_rules"""'], {}), "('page', 'secondary_source_1_suppression_rules')\n", (2071, 2119), False, 'from alembic import op\n'), ((2124, 2187), 'alembic.op.drop_column', 'op.drop_column', (['"""page"""', '"""secondary_source_1_disclosure_control"""'], {}), "('page', 'secondary_source_1_disclosure_control')\n", (2138, 2187), False, 'from alembic import op\n'), ((2192, 2246), 'alembic.op.drop_column', 'op.drop_column', (['"""page"""', '"""secondary_source_2_frequency"""'], {}), "('page', 'secondary_source_2_frequency')\n", (2206, 2246), False, 'from alembic import 
op\n'), ((2251, 2310), 'alembic.op.drop_column', 'op.drop_column', (['"""page"""', '"""secondary_source_2_contact_2_name"""'], {}), "('page', 'secondary_source_2_contact_2_name')\n", (2265, 2310), False, 'from alembic import op\n'), ((2315, 2375), 'alembic.op.drop_column', 'op.drop_column', (['"""page"""', '"""secondary_source_2_contact_2_phone"""'], {}), "('page', 'secondary_source_2_contact_2_phone')\n", (2329, 2375), False, 'from alembic import op\n'), ((2380, 2428), 'alembic.op.drop_column', 'op.drop_column', (['"""page"""', '"""secondary_source_2_url"""'], {}), "('page', 'secondary_source_2_url')\n", (2394, 2428), False, 'from alembic import op\n'), ((2433, 2494), 'alembic.op.drop_column', 'op.drop_column', (['"""page"""', '"""secondary_source_2_date_next_update"""'], {}), "('page', 'secondary_source_2_date_next_update')\n", (2447, 2494), False, 'from alembic import op\n'), ((2499, 2558), 'alembic.op.drop_column', 'op.drop_column', (['"""page"""', '"""secondary_source_2_contact_1_name"""'], {}), "('page', 'secondary_source_2_contact_1_name')\n", (2513, 2558), False, 'from alembic import op\n'), ((2563, 2605), 'alembic.op.drop_column', 'op.drop_column', (['"""page"""', '"""last_update_date"""'], {}), "('page', 'last_update_date')\n", (2577, 2605), False, 'from alembic import op\n'), ((2610, 2670), 'alembic.op.drop_column', 'op.drop_column', (['"""page"""', '"""secondary_source_2_contact_1_phone"""'], {}), "('page', 'secondary_source_2_contact_1_phone')\n", (2624, 2670), False, 'from alembic import op\n'), ((2675, 2734), 'alembic.op.drop_column', 'op.drop_column', (['"""page"""', '"""secondary_source_2_publisher_text"""'], {}), "('page', 'secondary_source_2_publisher_text')\n", (2689, 2734), False, 'from alembic import op\n'), ((2739, 2802), 'alembic.op.drop_column', 'op.drop_column', (['"""page"""', '"""secondary_source_2_disclosure_control"""'], {}), "('page', 'secondary_source_2_disclosure_control')\n", (2753, 2802), False, 'from alembic import op\n'), ((2807, 2872), 'alembic.op.drop_column', 'op.drop_column', (['"""page"""', '"""secondary_source_2_type_of_statistic_id"""'], {}), "('page', 'secondary_source_2_type_of_statistic_id')\n", (2821, 2872), False, 'from alembic import op\n'), ((2877, 2939), 'alembic.op.drop_column', 'op.drop_column', (['"""page"""', '"""secondary_source_2_suppression_rules"""'], {}), "('page', 'secondary_source_2_suppression_rules')\n", (2891, 2939), False, 'from alembic import op\n'), ((2944, 3004), 'alembic.op.drop_column', 'op.drop_column', (['"""page"""', '"""secondary_source_2_frequency_other"""'], {}), "('page', 'secondary_source_2_frequency_other')\n", (2958, 3004), False, 'from alembic import op\n'), ((3009, 3066), 'alembic.op.drop_column', 'op.drop_column', (['"""page"""', '"""secondary_source_2_publisher_id"""'], {}), "('page', 'secondary_source_2_publisher_id')\n", (3023, 3066), False, 'from alembic import op\n'), ((3071, 3121), 'alembic.op.drop_column', 'op.drop_column', (['"""page"""', '"""secondary_source_2_title"""'], {}), "('page', 'secondary_source_2_title')\n", (3085, 3121), False, 'from alembic import op\n'), ((3126, 3175), 'alembic.op.drop_column', 'op.drop_column', (['"""page"""', '"""secondary_source_2_date"""'], {}), "('page', 'secondary_source_2_date')\n", (3140, 3175), False, 'from alembic import op\n'), ((3180, 3222), 'alembic.op.drop_column', 'op.drop_column', (['"""page"""', '"""next_update_date"""'], {}), "('page', 'next_update_date')\n", (3194, 3222), False, 'from alembic import op\n'), ((3227, 3284), 'alembic.op.drop_column', 
'op.drop_column', (['"""page"""', '"""secondary_source_2_date_updated"""'], {}), "('page', 'secondary_source_2_date_updated')\n", (3241, 3284), False, 'from alembic import op\n'), ((3289, 3348), 'alembic.op.drop_column', 'op.drop_column', (['"""page"""', '"""secondary_source_2_statistic_type"""'], {}), "('page', 'secondary_source_2_statistic_type')\n", (3303, 3348), False, 'from alembic import op\n'), ((3353, 3410), 'alembic.op.drop_column', 'op.drop_column', (['"""page"""', '"""secondary_source_2_frequency_id"""'], {}), "('page', 'secondary_source_2_frequency_id')\n", (3367, 3410), False, 'from alembic import op\n'), ((3415, 3475), 'alembic.op.drop_column', 'op.drop_column', (['"""page"""', '"""secondary_source_2_contact_2_email"""'], {}), "('page', 'secondary_source_2_contact_2_email')\n", (3429, 3475), False, 'from alembic import op\n'), ((3480, 3540), 'alembic.op.drop_column', 'op.drop_column', (['"""page"""', '"""secondary_source_2_contact_1_email"""'], {}), "('page', 'secondary_source_2_contact_1_email')\n", (3494, 3540), False, 'from alembic import op\n'), ((6822, 6974), 'alembic.op.create_foreign_key', 'op.create_foreign_key', (['"""secondary_source_2_type_of_statistic_fkey"""', '"""page"""', '"""type_of_statistic"""', "['secondary_source_2_type_of_statistic_id']", "['id']"], {}), "('secondary_source_2_type_of_statistic_fkey', 'page',\n 'type_of_statistic', ['secondary_source_2_type_of_statistic_id'], ['id'])\n", (6843, 6974), False, 'from alembic import op\n'), ((6975, 7114), 'alembic.op.create_foreign_key', 'op.create_foreign_key', (['"""frequency_secondary_source_2_fkey"""', '"""page"""', '"""frequency_of_release"""', "['secondary_source_2_frequency_id']", "['id']"], {}), "('frequency_secondary_source_2_fkey', 'page',\n 'frequency_of_release', ['secondary_source_2_frequency_id'], ['id'])\n", (6996, 7114), False, 'from alembic import op\n'), ((7115, 7249), 'alembic.op.create_foreign_key', 'op.create_foreign_key', (['"""organisation_secondary_source_2_fkey"""', '"""page"""', '"""organisation"""', "['secondary_source_2_publisher_id']", "['id']"], {}), "('organisation_secondary_source_2_fkey', 'page',\n 'organisation', ['secondary_source_2_publisher_id'], ['id'])\n", (7136, 7249), False, 'from alembic import op\n'), ((7251, 7308), 'alembic.op.drop_column', 'op.drop_column', (['"""page"""', '"""secondary_source_1_type_of_data"""'], {}), "('page', 'secondary_source_1_type_of_data')\n", (7265, 7308), False, 'from alembic import op\n'), ((7313, 7365), 'alembic.op.drop_column', 'op.drop_column', (['"""page"""', '"""suppression_and_disclosure"""'], {}), "('page', 'suppression_and_disclosure')\n", (7327, 7365), False, 'from alembic import op\n'), ((7370, 7426), 'alembic.op.drop_column', 'op.drop_column', (['"""page"""', '"""note_on_corrections_or_updates"""'], {}), "('page', 'note_on_corrections_or_updates')\n", (7384, 7426), False, 'from alembic import op\n'), ((7431, 7506), 'alembic.op.drop_column', 'op.drop_column', (['"""page"""', '"""secondary_source_1_note_on_corrections_or_updates"""'], {}), "('page', 'secondary_source_1_note_on_corrections_or_updates')\n", (7445, 7506), False, 'from alembic import op\n'), ((7511, 7575), 'alembic.op.drop_column', 'op.drop_column', (['"""page"""', '"""secondary_source_1_data_source_purpose"""'], {}), "('page', 'secondary_source_1_data_source_purpose')\n", (7525, 7575), False, 'from alembic import op\n'), ((654, 679), 'sqlalchemy.dialects.postgresql.ARRAY', 'ARRAY', (['type_of_data_types'], {}), '(type_of_data_types)\n', (659, 679), False, 'from 
sqlalchemy.dialects.postgresql import ARRAY\n'), ((763, 772), 'sqlalchemy.TEXT', 'sa.TEXT', ([], {}), '()\n', (770, 772), True, 'import sqlalchemy as sa\n'), ((860, 869), 'sqlalchemy.TEXT', 'sa.TEXT', ([], {}), '()\n', (867, 869), True, 'import sqlalchemy as sa\n'), ((976, 985), 'sqlalchemy.TEXT', 'sa.TEXT', ([], {}), '()\n', (983, 985), True, 'import sqlalchemy as sa\n'), ((1081, 1090), 'sqlalchemy.TEXT', 'sa.TEXT', ([], {}), '()\n', (1088, 1090), True, 'import sqlalchemy as sa\n'), ((3735, 3744), 'sqlalchemy.TEXT', 'sa.TEXT', ([], {}), '()\n', (3742, 3744), True, 'import sqlalchemy as sa\n'), ((3857, 3866), 'sqlalchemy.TEXT', 'sa.TEXT', ([], {}), '()\n', (3864, 3866), True, 'import sqlalchemy as sa\n'), ((3976, 3988), 'sqlalchemy.INTEGER', 'sa.INTEGER', ([], {}), '()\n', (3986, 3988), True, 'import sqlalchemy as sa\n'), ((4100, 4109), 'sqlalchemy.TEXT', 'sa.TEXT', ([], {}), '()\n', (4107, 4109), True, 'import sqlalchemy as sa\n'), ((4219, 4228), 'sqlalchemy.TEXT', 'sa.TEXT', ([], {}), '()\n', (4226, 4228), True, 'import sqlalchemy as sa\n'), ((4330, 4339), 'sqlalchemy.TEXT', 'sa.TEXT', ([], {}), '()\n', (4337, 4339), True, 'import sqlalchemy as sa\n'), ((4442, 4451), 'sqlalchemy.TEXT', 'sa.TEXT', ([], {}), '()\n', (4449, 4451), True, 'import sqlalchemy as sa\n'), ((4561, 4583), 'sqlalchemy.VARCHAR', 'sa.VARCHAR', ([], {'length': '(255)'}), '(length=255)\n', (4571, 4583), True, 'import sqlalchemy as sa\n'), ((4696, 4718), 'sqlalchemy.VARCHAR', 'sa.VARCHAR', ([], {'length': '(255)'}), '(length=255)\n', (4706, 4718), True, 'import sqlalchemy as sa\n'), ((4833, 4842), 'sqlalchemy.TEXT', 'sa.TEXT', ([], {}), '()\n', (4840, 4842), True, 'import sqlalchemy as sa\n'), ((4960, 4972), 'sqlalchemy.INTEGER', 'sa.INTEGER', ([], {}), '()\n', (4970, 4972), True, 'import sqlalchemy as sa\n'), ((5088, 5097), 'sqlalchemy.TEXT', 'sa.TEXT', ([], {}), '()\n', (5095, 5097), True, 'import sqlalchemy as sa\n'), ((5209, 5218), 'sqlalchemy.TEXT', 'sa.TEXT', ([], {}), '()\n', (5216, 5218), True, 'import sqlalchemy as sa\n'), ((5331, 5340), 'sqlalchemy.TEXT', 'sa.TEXT', ([], {}), '()\n', (5338, 5340), True, 'import sqlalchemy as sa\n'), ((5452, 5461), 'sqlalchemy.TEXT', 'sa.TEXT', ([], {}), '()\n', (5459, 5461), True, 'import sqlalchemy as sa\n'), ((5575, 5584), 'sqlalchemy.TEXT', 'sa.TEXT', ([], {}), '()\n', (5582, 5584), True, 'import sqlalchemy as sa\n'), ((5685, 5694), 'sqlalchemy.TEXT', 'sa.TEXT', ([], {}), '()\n', (5692, 5694), True, 'import sqlalchemy as sa\n'), ((5807, 5816), 'sqlalchemy.TEXT', 'sa.TEXT', ([], {}), '()\n', (5814, 5816), True, 'import sqlalchemy as sa\n'), ((5928, 5937), 'sqlalchemy.TEXT', 'sa.TEXT', ([], {}), '()\n', (5935, 5937), True, 'import sqlalchemy as sa\n'), ((6044, 6053), 'sqlalchemy.TEXT', 'sa.TEXT', ([], {}), '()\n', (6051, 6053), True, 'import sqlalchemy as sa\n'), ((6148, 6170), 'sqlalchemy.VARCHAR', 'sa.VARCHAR', ([], {'length': '(255)'}), '(length=255)\n', (6158, 6170), True, 'import sqlalchemy as sa\n'), ((6265, 6287), 'sqlalchemy.VARCHAR', 'sa.VARCHAR', ([], {'length': '(255)'}), '(length=255)\n', (6275, 6287), True, 'import sqlalchemy as sa\n'), ((6401, 6410), 'sqlalchemy.TEXT', 'sa.TEXT', ([], {}), '()\n', (6408, 6410), True, 'import sqlalchemy as sa\n'), ((6520, 6529), 'sqlalchemy.TEXT', 'sa.TEXT', ([], {}), '()\n', (6527, 6529), True, 'import sqlalchemy as sa\n'), ((6645, 6654), 'sqlalchemy.TEXT', 'sa.TEXT', ([], {}), '()\n', (6652, 6654), True, 'import sqlalchemy as sa\n'), ((6769, 6778), 'sqlalchemy.TEXT', 'sa.TEXT', ([], {}), '()\n', (6776, 6778), True, 'import 
sqlalchemy as sa\n')] |
# Copyright 2013 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
# Copyright 2015 - Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mistral import exceptions as exc
from mistral.tests.unit import base
from mistral.utils import ssh_utils
from mistral_lib import utils
class UtilsTest(base.BaseTest):
def test_itersubclasses(self):
class A(object):
pass
class B(A):
pass
class C(A):
pass
class D(C):
pass
self.assertEqual([B, C, D], list(utils.iter_subclasses(A)))
def test_paramiko_to_private_key(self):
self.assertRaises(
exc.DataAccessException,
ssh_utils._to_paramiko_private_key,
"../dir"
)
self.assertRaises(
exc.DataAccessException,
ssh_utils._to_paramiko_private_key,
"..\\dir"
)
self.assertIsNone(
ssh_utils._to_paramiko_private_key(private_key_filename=None,
password='<PASSWORD>')
)
| [
"mistral.utils.ssh_utils._to_paramiko_private_key",
"mistral_lib.utils.iter_subclasses"
] | [((1498, 1587), 'mistral.utils.ssh_utils._to_paramiko_private_key', 'ssh_utils._to_paramiko_private_key', ([], {'private_key_filename': 'None', 'password': '"""<PASSWORD>"""'}), "(private_key_filename=None, password=\n '<PASSWORD>')\n", (1532, 1587), False, 'from mistral.utils import ssh_utils\n'), ((1099, 1123), 'mistral_lib.utils.iter_subclasses', 'utils.iter_subclasses', (['A'], {}), '(A)\n', (1120, 1123), False, 'from mistral_lib import utils\n')] |
import xmltodict
import json
from .models import Tunein
from .utils import _init_session
from .Exceptions import APIException
base_url = 'http://api.shoutcast.com'
tunein_url = 'http://yp.shoutcast.com/{base}?id={id}'
tuneins = [Tunein('/sbin/tunein-station.pls'), Tunein('/sbin/tunein-station.m3u'), Tunein('/sbin/tunein-station.xspf')]
def call_api_xml(endpoint, params=None, session=None):
session = _init_session(session)
request_url = "{}{}".format(base_url, endpoint)
response = session.get(request_url, params=params)
if response.status_code == 200:
response_as_dict = xmltodict.parse(response.content)
api_response = response_as_dict.get('response')
if api_response:
api_status_code = int(api_response.get('statusCode'))
message = "statusText:{}, statusDetailText:{}".format(
api_response.get('statusText'), api_response.get('statusDetailText')
)
raise APIException(message, code=api_status_code)
return response_as_dict
raise APIException(response.content, code=response.status_code)
def call_api_json(endpoint, params=None, session=None):
session = _init_session(session)
request_url = "{}{}".format(base_url, endpoint)
response = session.get(request_url, params=params)
if response.status_code == 200:
json_response = json.loads(response.content.decode('utf-8'))
api_response = json_response.get('response')
api_status_code = int(api_response.get('statusCode'))
if api_status_code != 200:
message = "statusText:{}, statusDetailText:{}".format(
api_response.get('statusText'), api_response.get('statusDetailText', '')
)
raise APIException(message, code=api_status_code)
return json_response.get('response')['data']
raise APIException(response.reason, code=response.status_code)
def call_api_tunein(station_id: int, session=None):
session = _init_session(session)
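    # tuneins[2] is the XSPF playlist base; XSPF is XML, so it can be parsed with xmltodict below.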
url = tunein_url.format(base=tuneins[2], id=station_id)
response = session.get(url)
if response.status_code == 200:
api_response = xmltodict.parse(response.content.decode('utf-8'))
return api_response
raise APIException(response.reason, code=response.status_code)
def call_api_tunein_any(base: Tunein, station_id: int, session=None):
session = _init_session(session)
url = tunein_url.format(base=base, id=station_id)
response = session.get(url)
if response.status_code == 200:
return response.content.decode('utf-8')
raise APIException(response.reason, code=response.status_code)
| [
"xmltodict.parse"
] | [((604, 637), 'xmltodict.parse', 'xmltodict.parse', (['response.content'], {}), '(response.content)\n', (619, 637), False, 'import xmltodict\n')] |
from django.core.management.base import BaseCommand, no_translations
from django.contrib.auth.models import Group
from django.conf import settings
import sys
class Command(BaseCommand):
def handle(self, *args, **options):
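        # Ensure one auth Group exists for each access-controlled app listed in settings.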
sys.stdout.write("\nResolving app groups")
app_list = [app_name.lower() for app_name in settings.ACCESS_CONTROLLED_INSTALLED_APPS]
for app_name in app_list:
            group, created = Group.objects.get_or_create(name=app_name)
sys.stdout.write(f"\n{app_name}, new={created}")
sys.stdout.write("\n") | [
"django.contrib.auth.models.Group.objects.get_or_create",
"sys.stdout.write"
] | [((243, 288), 'sys.stdout.write', 'sys.stdout.write', (['"""\nResolving app groups"""'], {}), '("""\nResolving app groups""")\n', (259, 288), False, 'import sys\n'), ((559, 581), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (575, 581), False, 'import sys\n'), ((438, 480), 'django.contrib.auth.models.Group.objects.get_or_create', 'Group.objects.get_or_create', ([], {'name': 'app_name'}), '(name=app_name)\n', (465, 480), False, 'from django.contrib.auth.models import Group\n'), ((493, 544), 'sys.stdout.write', 'sys.stdout.write', (['f"""\n{app_name}, new={created}"""'], {}), '(f"""\n{app_name}, new={created}""")\n', (509, 544), False, 'import sys\n')] |
"""
Provides usable args and kwargs from inspect.getcallargs.
For Python 3.3 and above, this module is unnecessary and can be achieved using
features from PEP 362:
http://www.python.org/dev/peps/pep-0362/
For example, to override a parameter of some function:
>>> import inspect
>>> def func(a, b=1, c=2, d=3):
... return a, b, c, d
...
>>> def override_c(*args, **kwargs):
...     sig = inspect.signature(func)
...     ba = sig.bind(*args, **kwargs)
...     ba.arguments['c'] = 10
...     return func(*ba.args, **ba.kwargs)
...
>>> override_c(0, c=3)
(0, 1, 10, 3)
Also useful:
http://www.python.org/dev/peps/pep-3102/
"""
import sys
import inspect
from inspect import getcallargs
try:
from inspect import getfullargspec
except ImportError:
# Python 2.X
from collections import namedtuple
from inspect import getargspec
FullArgSpec = namedtuple('FullArgSpec',
'args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations')
def getfullargspec(f):
args, varargs, varkw, defaults = getargspec(f)
kwonlyargs = []
kwonlydefaults = None
annotations = getattr(f, '__annotations__', {})
return FullArgSpec(args, varargs, varkw, defaults,
kwonlyargs, kwonlydefaults, annotations)
def bindcallargs_leq32(_fUnCtIoN_, *args, **kwargs):
"""Binds arguments and keyword arguments to a function or method.
Returns a tuple (bargs, bkwargs) suitable for manipulation and passing
to the specified function.
`bargs` consists of the bound args, varargs, and kwonlyargs from
getfullargspec. `bkwargs` consists of the bound varkw from getfullargspec.
Both can be used in a call to the specified function. Any default
parameter values are included in the output.
Examples
--------
>>> def func(a, b=3, *args, **kwargs):
... pass
>>> bindcallargs(func, 5)
((5, 3), {})
>>> bindcallargs(func, 5, 4, 3, 2, 1, hello='there')
((5, 4, 3, 2, 1), {'hello': 'there'})
>>> args, kwargs = bindcallargs(func, 5)
>>> kwargs['b'] = 5 # overwrite default value for b
>>> func(*args, **kwargs)
"""
# It is necessary to choose an unlikely variable name for the function.
# The reason is that any kwarg by the same name will cause a TypeError
# due to multiple values being passed for that argument name.
func = _fUnCtIoN_
callargs = getcallargs(func, *args, **kwargs)
spec = getfullargspec(func)
# Construct all args and varargs and use them in bargs
bargs = [callargs[arg] for arg in spec.args]
if spec.varargs is not None:
bargs.extend(callargs[spec.varargs])
bargs = tuple(bargs)
# Start with kwonlyargs.
bkwargs = {kwonlyarg: callargs[kwonlyarg] for kwonlyarg in spec.kwonlyargs}
# Add in kwonlydefaults for unspecified kwonlyargs only.
    # Since keyword-only arguments aren't allowed in Python 2, and we
# don't support python 3.0, 3.1, 3.2, this should never be executed:
if spec.kwonlydefaults is not None: # pragma: no cover
bkwargs.update({k: v for k, v in spec.kwonlydefaults.items()
if k not in bkwargs})
# Add in varkw.
if spec.varkw is not None:
bkwargs.update(callargs[spec.varkw])
return bargs, bkwargs
def bindcallargs_geq33(_fUnCtIoN_, *args, **kwargs):
# Should match functionality of bindcallargs_32 for Python > 3.3.
sig = inspect.signature(_fUnCtIoN_)
ba = sig.bind(*args, **kwargs)
# Add in all default values
for param in sig.parameters.values():
if param.name not in ba.arguments:
ba.arguments[param.name] = param.default
return ba.args, ba.kwargs
if sys.version_info[0:2] < (3,3):
bindcallargs = bindcallargs_leq32
else:
bindcallargs = bindcallargs_geq33
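# Minimal usage sketch (not part of the original module; added for illustration).
# It mirrors the override example from the module docstring, but goes through
# bindcallargs so it exercises whichever implementation was selected above.
if __name__ == '__main__':  # pragma: no cover
    def _demo(a, b=1, c=2, d=3):
        return a, b, c, d

    demo_args, demo_kwargs = bindcallargs(_demo, 0, c=3)
    print(demo_args, demo_kwargs)            # (0, 1, 3, 3) {}
    print(_demo(*demo_args, **demo_kwargs))  # (0, 1, 3, 3)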
| [
"collections.namedtuple",
"inspect.signature",
"inspect.getfullargspec",
"inspect.getargspec",
"inspect.getcallargs"
] | [((2471, 2505), 'inspect.getcallargs', 'getcallargs', (['func', '*args'], {}), '(func, *args, **kwargs)\n', (2482, 2505), False, 'from inspect import getcallargs\n'), ((2517, 2537), 'inspect.getfullargspec', 'getfullargspec', (['func'], {}), '(func)\n', (2531, 2537), False, 'from inspect import getfullargspec\n'), ((3502, 3531), 'inspect.signature', 'inspect.signature', (['_fUnCtIoN_'], {}), '(_fUnCtIoN_)\n', (3519, 3531), False, 'import inspect\n'), ((915, 1019), 'collections.namedtuple', 'namedtuple', (['"""FullArgSpec"""', '"""args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations"""'], {}), "('FullArgSpec',\n 'args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations')\n", (925, 1019), False, 'from collections import namedtuple\n'), ((1089, 1102), 'inspect.getargspec', 'getargspec', (['f'], {}), '(f)\n', (1099, 1102), False, 'from inspect import getargspec\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-10 21:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('vendors', '0089_auto_20160602_2123'),
]
operations = [
migrations.AlterField(
model_name='vendor',
name='email',
field=models.EmailField(blank=True, max_length=254, verbose_name='Email'),
),
]
| [
"django.db.models.EmailField"
] | [((399, 466), 'django.db.models.EmailField', 'models.EmailField', ([], {'blank': '(True)', 'max_length': '(254)', 'verbose_name': '"""Email"""'}), "(blank=True, max_length=254, verbose_name='Email')\n", (416, 466), False, 'from django.db import migrations, models\n')] |
import pymongo
from conf import Configuracoes
class Mongo_Database:
""" Singleton com a conexao com o MongoDB """
_instancia = None
def __new__(cls, *args, **kwargs):
if not(cls._instancia):
cls._instancia = super(Mongo_Database, cls).__new__(cls, *args, **kwargs)
return cls._instancia
def __init__(self,):
        # read the connection string from the configuration file
string_conexao = Configuracoes().get_config("database", "string_connection")
assert (string_conexao != ""), "String de conexao indefinida"
try:
self.mongo_client = pymongo.MongoClient(string_conexao)
self.collection_filmes = self.mongo_client["popcorn_time"]["filmes"]
self.collection_tweets = self.mongo_client["twitter_log"]["tweets"]
except:
raise Exception("Nao foi possivel se conectar ao B.D.")
print("Conectado a", string_conexao)
def grava_filmes(self, lista_filmes):
        # check whether the movie already exists;
        # if it does not, store it and add it to the list of new movies
novos = []
try:
for filme in lista_filmes:
if (self.collection_filmes.count_documents({"_id": filme["_id"]}) == 0):
self.collection_filmes.insert_one(filme)
novos.append(filme)
finally:
return novos
def grava_tweet(self, tweet_info):
        # store the tweet payload returned by the API
self.collection_tweets.insert_one(tweet_info)
| [
"pymongo.MongoClient",
"conf.Configuracoes"
] | [((615, 650), 'pymongo.MongoClient', 'pymongo.MongoClient', (['string_conexao'], {}), '(string_conexao)\n', (634, 650), False, 'import pymongo\n'), ((440, 455), 'conf.Configuracoes', 'Configuracoes', ([], {}), '()\n', (453, 455), False, 'from conf import Configuracoes\n')] |
# Generated by Django 3.1.7 on 2021-03-27 18:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('HackBitApp', '0002_company_photo'),
]
operations = [
migrations.CreateModel(
name='Roadmap',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('company_name', models.CharField(db_index=True, max_length=200, unique=True)),
('photo1', models.ImageField(upload_to='photos/company/roadmap')),
('photo2', models.ImageField(blank=True, upload_to='photos/company/roadmap')),
('photo3', models.ImageField(blank=True, upload_to='photos/company/roadmap')),
],
options={
'verbose_name': 'roadmap',
'verbose_name_plural': 'roadmaps',
'ordering': ('company_name',),
},
),
]
| [
"django.db.models.ImageField",
"django.db.models.AutoField",
"django.db.models.CharField"
] | [((329, 422), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (345, 422), False, 'from django.db import migrations, models\n'), ((454, 514), 'django.db.models.CharField', 'models.CharField', ([], {'db_index': '(True)', 'max_length': '(200)', 'unique': '(True)'}), '(db_index=True, max_length=200, unique=True)\n', (470, 514), False, 'from django.db import migrations, models\n'), ((544, 597), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""photos/company/roadmap"""'}), "(upload_to='photos/company/roadmap')\n", (561, 597), False, 'from django.db import migrations, models\n'), ((627, 692), 'django.db.models.ImageField', 'models.ImageField', ([], {'blank': '(True)', 'upload_to': '"""photos/company/roadmap"""'}), "(blank=True, upload_to='photos/company/roadmap')\n", (644, 692), False, 'from django.db import migrations, models\n'), ((722, 787), 'django.db.models.ImageField', 'models.ImageField', ([], {'blank': '(True)', 'upload_to': '"""photos/company/roadmap"""'}), "(blank=True, upload_to='photos/company/roadmap')\n", (739, 787), False, 'from django.db import migrations, models\n')] |
import json
from wptserve.utils import isomorphic_decode
def main(request, response):
origin = request.GET.first(b"origin", request.headers.get(b'origin') or b'none')
if b"check" in request.GET:
token = request.GET.first(b"token")
value = request.server.stash.take(token)
if value is not None:
if request.GET.first(b"check", None) == b"keep":
request.server.stash.put(token, value)
body = u"1"
else:
body = u"0"
return [(b"Content-Type", b"text/plain")], body
if origin != b'none':
response.headers.set(b"Access-Control-Allow-Origin", origin)
if b'origin2' in request.GET:
response.headers.append(b"Access-Control-Allow-Origin", request.GET.first(b'origin2'))
#Preflight
if b'headers' in request.GET:
response.headers.set(b"Access-Control-Allow-Headers", request.GET.first(b'headers'))
if b'credentials' in request.GET:
response.headers.set(b"Access-Control-Allow-Credentials", request.GET.first(b'credentials'))
if b'methods' in request.GET:
response.headers.set(b"Access-Control-Allow-Methods", request.GET.first(b'methods'))
code_raw = request.GET.first(b'code', None)
if code_raw:
code = int(code_raw)
else:
code = None
if request.method == u'OPTIONS':
#Override the response code if we're in a preflight and it's asked
if b'preflight' in request.GET:
code = int(request.GET.first(b'preflight'))
#Log that the preflight actually happened if we have an ident
if b'token' in request.GET:
request.server.stash.put(request.GET[b'token'], True)
if b'location' in request.GET:
if code is None:
code = 302
if code >= 300 and code < 400:
response.headers.set(b"Location", request.GET.first(b'location'))
headers = {}
for name, values in request.headers.items():
if len(values) == 1:
headers[isomorphic_decode(name)] = isomorphic_decode(values[0])
else:
#I have no idea, really
headers[name] = values
headers[u'get_value'] = isomorphic_decode(request.GET.first(b'get_value', b''))
body = json.dumps(headers)
if code:
return (code, b"StatusText"), [], body
else:
return body
| [
"json.dumps",
"wptserve.utils.isomorphic_decode"
] | [((2260, 2279), 'json.dumps', 'json.dumps', (['headers'], {}), '(headers)\n', (2270, 2279), False, 'import json\n'), ((2049, 2077), 'wptserve.utils.isomorphic_decode', 'isomorphic_decode', (['values[0]'], {}), '(values[0])\n', (2066, 2077), False, 'from wptserve.utils import isomorphic_decode\n'), ((2022, 2045), 'wptserve.utils.isomorphic_decode', 'isomorphic_decode', (['name'], {}), '(name)\n', (2039, 2045), False, 'from wptserve.utils import isomorphic_decode\n')] |
#!/usr/bin/env python
import argparse
DELIMITER = "\t"
def merge(genotypes_filename, gq_filename, merged_filename):
with open(genotypes_filename, "r") as genotypes, open(gq_filename, "r") as gq, open(merged_filename, "w") as merged:
# Integrity check: do the files have same columns?
genotypes_header = genotypes.readline().rstrip().split(DELIMITER)
gq_header = gq.readline().rstrip().split(DELIMITER)
if not genotypes_header == gq_header:
raise ValueError("The files do not have same number/order of columns")
n_cols = len(gq_header)
for genotypes_line, gq_line in zip(genotypes, gq):
x = genotypes_line.rstrip().split(DELIMITER)
y = gq_line.rstrip().split(DELIMITER)
# Check if lines in the files are in the correct order.
if not x[0:4] == y[0:4]:
raise ValueError(f"The lines in the files are not in the same order; "
f"expected the following lines to match.\n{x[0:4]}\n{y[0:4]}")
h = DELIMITER.join(x[0:4])
for i in range(4, n_cols):
merged.write(DELIMITER.join([h, gq_header[i], x[i], y[i]]) + "\n")
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('genotypes')
parser.add_argument('GQ')
parser.add_argument('fout')
args = parser.parse_args()
merge(args.genotypes, args.GQ, args.fout)
| [
"argparse.ArgumentParser"
] | [((1257, 1360), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), '(description=__doc__, formatter_class=argparse.\n RawDescriptionHelpFormatter)\n', (1280, 1360), False, 'import argparse\n')] |
import tweepy
import traceback
import time
import pymongo
from tweepy import OAuthHandler
from pymongo import MongoClient
from pymongo.cursor import CursorType
twitter_consumer_key = ""
twitter_consumer_secret = ""
twitter_access_token = ""
twitter_access_secret = ""
auth = OAuthHandler(twitter_consumer_key, twitter_consumer_secret)
auth.set_access_token(twitter_access_token, twitter_access_secret)
api = tweepy.API(auth)
def crawllTwit(snsname, findtag):
account = snsname
tweets = api.user_timeline(screen_name=account, count=100, include_rts=False, exclude_replies=True, tweet_mode='extended')
snsList = []
snsTime = []
url = []
pic = []
i = 0
for tweet in tweets:
flag = tweet.full_text.find(findtag)
if flag >= 0:
ttp = tweet.full_text.split("https://")
gong = ""
count = 0
for slist in ttp:
if count == (len(ttp) - 1):
break
gong = gong + slist
count += 1
snsList.append(gong)
snsTime.append(tweet.created_at)
tmp = f"https://twitter.com/{tweet.user.screen_name}/status/{tweet.id}"
url.append(tmp)
i += 1
media = tweet.entities.get('media', [])
if (len(media) > 0):
pic.append(media[0]['media_url'])
else:
pic.append("")
j = 0
while j < len(snsList):
if j == 10:
break
snsList[j] = snsList[j].replace('<', '<')
snsList[j] = snsList[j].replace('>', '>')
snsList[j] = snsList[j].replace('โถ๏ธ', ' โ ')
j += 1
mydb = my_client['TwoRolless']
mycol = mydb['sns']
for k in range(0, len(snsList)):
if k == 15:
break
x = mycol.insert_one(
{
"tag": findtag,
"time": snsTime[k],
"text": snsList[k],
"img": pic[k],
"url": url[k]
}
)
conn_str = ""
my_client = pymongo.MongoClient(conn_str)
if __name__ == '__main__':
while True:
print("cycles start")
mydb = my_client['TwoRolless']
mycol = mydb['sns']
mycol.remove({})
crawllTwit("@m_thelastman", "๋๋ผ์คํธ๋งจ")
crawllTwit("@Musical_NarGold", "๋๋ฅด์น์ค์_๊ณจ๋๋ฌธํธ")
crawllTwit("@rndworks", "๋๋ฐ๋น")
crawllTwit("@ninestory9", "์๋ฆฌํํธ์ก")
crawllTwit("@companyrang", "์ฟ ๋ก์ด์ ํ์๋๊ฐ์ด๊ณ ์์๊น")
crawllTwit("@companyrang", "๋์์ด๋ค")
crawllTwit("@page1company", "๊ณคํฌ๋ชจ๋ก์ฐ")
crawllTwit("@HONGcompany", "๋๋ชจ๋จผํธ")
crawllTwit("@orchardmusical", "์น ์น ")
crawllTwit("@livecorp2011", "ํฌ๋ ํฐ")
crawllTwit("@shownote", "์ ํ๋งจ์ค๊ฐ์ด๋")
crawllTwit("@od_musical", "์งํฌ์คํ์ด๋")
crawllTwit("@kontentz", "์๋ฉ๋
ธํธ")
crawllTwit("@i_seensee", "๋น๋ฆฌ")
crawllTwit("@doublek_ent", "์ํ์ฒ ๋์")
crawllTwit("@Insight_Since96", "๋ฑํ์ด์ด์๋")
print("cycle end")
print("sleep 30 seconds")
time.sleep(30)
print("sleep end")
| [
"pymongo.MongoClient",
"tweepy.API",
"time.sleep",
"tweepy.OAuthHandler"
] | [((277, 336), 'tweepy.OAuthHandler', 'OAuthHandler', (['twitter_consumer_key', 'twitter_consumer_secret'], {}), '(twitter_consumer_key, twitter_consumer_secret)\n', (289, 336), False, 'from tweepy import OAuthHandler\n'), ((410, 426), 'tweepy.API', 'tweepy.API', (['auth'], {}), '(auth)\n', (420, 426), False, 'import tweepy\n'), ((2075, 2104), 'pymongo.MongoClient', 'pymongo.MongoClient', (['conn_str'], {}), '(conn_str)\n', (2094, 2104), False, 'import pymongo\n'), ((3049, 3063), 'time.sleep', 'time.sleep', (['(30)'], {}), '(30)\n', (3059, 3063), False, 'import time\n')] |
#!/usr/bin/env python
"""Handles Earth Engine service account configuration."""
import ee
# The service account email address authorized by your Google contact.
# Set up a service account as described in the README.
EE_ACCOUNT = '<EMAIL>'
# The private key associated with your service account in Privacy Enhanced
# Email format (.pem suffix). To convert a private key from the RSA format
# (.p12 suffix) to .pem, run the openssl command like this:
# openssl pkcs12 -in downloaded-privatekey.p12 -nodes -nocerts > privatekey.pem
EE_PRIVATE_KEY_FILE = 'privatekey.pem'
EE_CREDENTIALS = ee.ServiceAccountCredentials(EE_ACCOUNT, EE_PRIVATE_KEY_FILE)
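# Illustrative usage (not part of the original file): other modules typically
# import EE_CREDENTIALS and start an Earth Engine session with it. Guarded so
# that importing this config module does not trigger a network call.
if __name__ == '__main__':
    ee.Initialize(EE_CREDENTIALS)
    print('Earth Engine initialized for', EE_ACCOUNT)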
| [
"ee.ServiceAccountCredentials"
] | [((590, 651), 'ee.ServiceAccountCredentials', 'ee.ServiceAccountCredentials', (['EE_ACCOUNT', 'EE_PRIVATE_KEY_FILE'], {}), '(EE_ACCOUNT, EE_PRIVATE_KEY_FILE)\n', (618, 651), False, 'import ee\n')] |
import os
from graphene_sqlalchemy import SQLAlchemyObjectType
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
POSTGRES_CONNECTION_STRING = (
os.environ.get("POSTGRES_CONNECTION_STRING")
or "postgres://postgres:password@localhost:6432/postgres"
)
engine = create_engine(POSTGRES_CONNECTION_STRING, convert_unicode=True)
db_session = scoped_session(
sessionmaker(autocommit=False, autoflush=False, bind=engine)
)
Base = declarative_base()
Base.query = db_session.query_property()
class UserModel(Base):
__tablename__ = "users"
id = Column(Integer, primary_key=True)
name = Column(String)
balance = Column(Integer)
class MinAmountModel(Base):
__tablename__ = "min_amount"
amount = Column(Integer, primary_key=True)
class User(SQLAlchemyObjectType):
class Meta:
model = UserModel
class MinAmount(SQLAlchemyObjectType):
class Meta:
model = MinAmountModel
| [
"sqlalchemy.orm.sessionmaker",
"sqlalchemy.create_engine",
"os.environ.get",
"sqlalchemy.ext.declarative.declarative_base",
"sqlalchemy.Column"
] | [((392, 455), 'sqlalchemy.create_engine', 'create_engine', (['POSTGRES_CONNECTION_STRING'], {'convert_unicode': '(True)'}), '(POSTGRES_CONNECTION_STRING, convert_unicode=True)\n', (405, 455), False, 'from sqlalchemy import Column, Integer, String, create_engine\n'), ((559, 577), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (575, 577), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((273, 317), 'os.environ.get', 'os.environ.get', (['"""POSTGRES_CONNECTION_STRING"""'], {}), "('POSTGRES_CONNECTION_STRING')\n", (287, 317), False, 'import os\n'), ((489, 549), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {'autocommit': '(False)', 'autoflush': '(False)', 'bind': 'engine'}), '(autocommit=False, autoflush=False, bind=engine)\n', (501, 549), False, 'from sqlalchemy.orm import scoped_session, sessionmaker\n'), ((681, 714), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (687, 714), False, 'from sqlalchemy import Column, Integer, String, create_engine\n'), ((726, 740), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (732, 740), False, 'from sqlalchemy import Column, Integer, String, create_engine\n'), ((755, 770), 'sqlalchemy.Column', 'Column', (['Integer'], {}), '(Integer)\n', (761, 770), False, 'from sqlalchemy import Column, Integer, String, create_engine\n'), ((847, 880), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (853, 880), False, 'from sqlalchemy import Column, Integer, String, create_engine\n')] |
# microsig
"""
Author: <NAME>
More detail about the MicroSIG can be found at:
Website:
https://gitlab.com/defocustracking/microsig-python
Publication:
Rossi M, Synthetic image generator for defocusing and astigmatic PIV/PTV, Meas. Sci. Technol., 31, 017003 (2020)
DOI:10.1088/1361-6501/ab42bb.
"""
import numpy as np
import imageio
import tkinter as tk
import os
from os import listdir
from os.path import isfile, basename, join, isdir
import sys
import glob
# import time as tm
from tkinter import filedialog
# ----- code adapted by <NAME> ------
# 2.0 define class
class CurlypivMicrosigCollection(object):
def __init__(self, testSetup, synCol, use_gui=False,
use_internal_setting=False, setting_file=None,
use_internal_data=False, data_files=None,
to_internal_sequence=False, destination_folder=None,
output_dtype='np.uint16'):
if not isinstance(testSetup, object):
raise ValueError("{} must be a CurlypivTestSetup class object".format(testSetup))
if not isinstance(synCol, object):
raise ValueError("{} must be a CurlypivSyntheticCollection class object".format(synCol))
valid_output_dtype = ['np.uint16', 'np.uint8']
if output_dtype not in valid_output_dtype:
raise ValueError("{} must be one of {}".format(output_dtype, valid_output_dtype))
self.testSetup = testSetup
self.synCol = synCol
self.use_gui = use_gui
self.output_dtype = output_dtype
if self.use_gui:
run()
else:
if use_internal_setting:
self.setting_file = self.synCol.microsigSetup
else:
if not isinstance(setting_file, str):
raise ValueError("{} must be a filepath to microsig settings text file".format(setting_file))
self.setting_file = os.path.abspath(setting_file)
if use_internal_data:
raise ValueError("script to use internal data still in development")
else:
if not isinstance(data_files, str):
raise ValueError("{} must be a filepath to particle location text files".format(data_files))
all_files = glob.glob(data_files + '/*.txt')
save_files = []
for ff in [f for f in all_files if f.endswith('.txt')]:
save_files.append(ff)
save_files.sort()
self.data_files = save_files
if to_internal_sequence:
raise ValueError("script to use internal data still in development")
else:
if not isinstance(destination_folder, str):
raise ValueError("{} must be a filepath to write output images".format(destination_folder))
self.destination_folder = os.path.abspath(destination_folder)
self.generate()
def generate(self):
# %%
mic = {}
f = open(self.setting_file)
for x in f:
words = x.split()
mic[words[0]] = float(words[2])
mic['pixel_dim_x'] = int(mic['pixel_dim_x'])
mic['pixel_dim_y'] = int(mic['pixel_dim_y'])
mic['n_rays'] = int(mic['n_rays'])
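        # The settings file is parsed as one "name = value" entry per line; the
        # keys referenced throughout this module include, for example (values
        # below are purely illustrative):
        #   magnification = 10
        #   numerical_aperture = 0.3
        #   pixel_size = 6.5
        #   pixel_dim_x = 512
        #   pixel_dim_y = 512
        #   n_rays = 100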
# %%
ii = 0;
ii_tot = len(self.data_files)
for data in self.data_files:
ii = ii + 1
print('creating image {0} of {1} ...'.format(ii, ii_tot))
P = np.genfromtxt(data)
if len(P.shape) == 1:
P = np.array([P])
head, tail = os.path.split(data)
I = take_image(mic, P)
if self.output_dtype == 'np.uint16':
imageio.imwrite(os.path.join(self.destination_folder, (tail[:-3] + 'tif')),
np.uint16(I))
elif self.output_dtype == 'np.uint8':
imageio.imwrite(os.path.join(self.destination_folder, (tail[:-3] + 'tif')),
np.uint8(I))
print('done!')
# %%
def sorter(f):
sorting = int(f[:-4])
return sorting
def run():
# %%
root = tk.Tk()
root.attributes('-topmost', True)
root.withdraw()
setting_file = filedialog.askopenfilenames(
title="Select settings file", parent=root,
filetypes=(("txt files", "*.txt"), ("all files", "*.*")))
if not setting_file:
sys.exit('input file not valid')
data_files = filedialog.askopenfilenames(
title="Select data file(s)", parent=root,
filetypes=(("txt files", "*.txt"), ("all files", "*.*")))
if not setting_file:
sys.exit('input file not valid')
destination_folder = filedialog.askdirectory(
title="Select destination file", parent=root)
if not setting_file:
sys.exit('input file not valid')
# %%
mic = {}
f = open(setting_file[0])
for x in f:
words = x.split()
mic[words[0]] = float(words[2])
mic['pixel_dim_x'] = int(mic['pixel_dim_x'])
mic['pixel_dim_y'] = int(mic['pixel_dim_y'])
mic['n_rays'] = int(mic['n_rays'])
# %%
ii = 0;
ii_tot = len(data_files)
for data in data_files:
ii = ii + 1
print('creating image {0} of {1} ...'.format(ii, ii_tot))
P = np.genfromtxt(data)
if len(P.shape) == 1:
P = np.array([P])
head, tail = os.path.split(data)
I = take_image(mic, P)
print('done!')
# %%
def take_image(mic, P):
    # NOTE: x and xp represent here light fields and should not be confused
# with particle image coordinates which are represented by P
I = np.zeros((mic['pixel_dim_y'], mic['pixel_dim_x']));
dp_s = np.unique(P[:, 3])
if P.shape[1] == 5 or P.shape[1] == 8:
k_id = P[:, -1]
else:
k_id = np.ones(P.shape[0])
if P.shape[1] <= 5 and dp_s.size == 1:
n_points = int(np.round(mic['points_per_pixel'] * 2 * np.pi *
(dp_s * mic['magnification'] / mic['pixel_size']) ** 2))
xp = create_particle(dp_s, n_points, mic['n_rays'])
for ii in range(0, P.shape[0]):
Id = image_spherical(mic, xp, P[ii, 0:3])
I = I + Id * k_id[ii]
elif P.shape[1] <= 5 and dp_s.size != 1:
for ii in range(0, P.shape[0]):
n_points = int(np.round(mic['points_per_pixel'] * 2 * np.pi *
(P[ii, 3] * mic['magnification'] / mic['pixel_size']) ** 2))
xp = create_particle(P[ii, 3], n_points, mic['n_rays'])
Id = image_spherical(mic, xp, P[ii, 0:3])
I = I + Id * k_id[ii]
elif P.shape[1] >= 7:
for ii in range(0, P.shape[0]):
n_points = int(np.round(mic['points_per_pixel'] * 2 * np.pi *
(P[ii, 3] * mic['magnification'] / mic['pixel_size']) ** 2))
ecc = P[ii, 4]
if ecc > 1:
# area elipsoid/area sphere
fact = 1 / 2 * (1 + ecc / np.sqrt(1 - 1 / ecc ** 2)
* np.arcsin(np.sqrt(1 - 1 / ecc ** 2)))
n_points = int(np.round(fact * n_points))
elif ecc < 1:
# area elipsoid/area sphere
fact = 1 / 2 * (1 + ecc ** 2 / np.sqrt(1 - ecc ** 2)
* np.arctan(np.sqrt(1 - ecc ** 2)))
n_points = int(np.round(fact * n_points))
xp = create_ellipsoid(P[ii, 3:7], n_points, mic['n_rays'])
Id = image_spherical(mic, xp, P[ii, 0:3]);
I = I + Id * k_id[ii]
I = I * mic['gain']
if mic['background_mean'] != 0:
I = I + mic['background_mean']
if mic['background_noise'] != 0:
Irand = np.random.normal(0, mic['background_noise'],
(mic['pixel_dim_y'], mic['pixel_dim_x']))
I = I + np.round(Irand)
# I = np.round(I+random('norm',0,mic.background_noise,...
# mic.pixel_dim_y,mic.pixel_dim_x));
return I
# %%
def image_spherical(mic, xp, P1):
# take image of a particle with a spherical lens
    # NOTE: x and xp represent here light fields and should not be confused
# with particle image coordinates which are represented by P1
lens_radius = (np.tan(np.arcsin(mic['numerical_aperture']))
* (1 + 1 / mic['magnification']) * mic['focal_length'])
# distance lens-ccd
dCCD = -mic['focal_length'] * (mic['magnification'] + 1);
# distance particle-lens
dPART = P1[2] + mic['focal_length'] * (1 / mic['magnification'] + 1);
# linear transformation from the object plane to the lens plane
T2 = np.array([[1, 0, dPART, 0],
[0, 1, 0, dPART],
[0, 0, 1, 0],
[0, 0, 0, 1]])
# light field right before the lens
x = np.linalg.inv(T2) @ xp
# remove rays outside of the lens aperture
ind = x[0, :] ** 2 + x[1, :] ** 2 <= lens_radius ** 2
x = x[:, ind]
# transformation of the light field with spherical lens
a = x[0, :];
b = x[1, :]
c = x[2, :];
d = x[3, :]
# radius of curvature of the lens
rk = mic['focal_length'] * (mic['ri_lens'] / mic['ri_medium'] - 1) * 2
dum = a * 0
# refraction medium-lens
# ray-vector befor lens
Vr = np.vstack((1 + dum, c, d))
Vr = (Vr / np.tile(np.sqrt(sum(Vr ** 2)), (3, 1)))
# normal-vector to the lens surface
Vl = np.vstack((rk + dum, a, b))
Vl = (Vl / np.tile(np.sqrt(sum(Vl ** 2)), (3, 1)))
# tangent-vector to the lens surface
Vrot = np.cross(Vr, Vl, axisa=0, axisb=0)
Vrot = np.cross(Vrot, Vl, axisa=1, axisb=0).transpose()
Vrot = Vrot / np.tile(np.sqrt(sum(Vrot ** 2)), (3, 1))
# angle after snell-law correction
vx = np.sum(Vr * Vl, axis=0) # dot product!
vy = np.sum(Vr * Vrot, axis=0) # dot product!
th11 = np.arcsin(mic['ri_medium'] / mic['ri_lens'] *
np.sin(np.arctan(vy / vx)))
# new ray-vector inside the lens
Vr11 = (Vl * np.tile(np.cos(th11), (3, 1)) +
Vrot * np.tile(np.sin(th11), (3, 1)))
Vr = Vr11 / np.tile(Vr11[0, :], (3, 1))
# refraction lens-medium
# normal-vector to the lens surface
Vl2 = np.vstack((Vl[0, :], -Vl[1:, :]))
# tangent-vector to the lens surface
Vrot = np.cross(Vr, Vl2, axisa=0, axisb=0)
Vrot = np.cross(Vrot, Vl2, axisa=1, axisb=0).transpose()
Vrot = Vrot / np.tile(np.sqrt(sum(Vrot ** 2)), (3, 1))
# angle after snell-law correction
vx = np.sum(Vr * Vl2, axis=0) # dot product!
vy = np.sum(Vr * Vrot, axis=0) # dot product!
th11 = np.arcsin(mic['ri_lens'] / mic['ri_medium'] *
np.sin(np.arctan(vy / vx)))
# new ray-vector outside the lens
Vr11 = (Vl2 * np.tile(np.cos(th11), (3, 1)) +
Vrot * np.tile(np.sin(th11), (3, 1)))
Vr = Vr11 / np.tile(Vr11[0, :], (3, 1))
# light field after the spherical lens
x[2, :] = Vr[1, :]
x[3, :] = Vr[2, :]
if mic['cyl_focal_length'] == 0:
# linear transformation from the lens plane to the ccd plane
T1 = np.array([[1, 0, -dCCD, 0],
[0, 1, 0, -dCCD],
[0, 0, 1, 0],
[0, 0, 0, 1]])
# light field at the ccd plane
xs = np.linalg.inv(T1) @ x
else:
# # linear transformation from the lens plane to the cyl_lens plane
T1c = np.array([[1, 0, -dCCD * 1 / 3, 0],
[0, 1, 0, -dCCD * 1 / 3],
[0, 0, 1, 0],
[0, 0, 0, 1]])
# # light field at the cylindrical lens plane
xc = np.linalg.inv(T1c) @ x
# # light field after the cylindrical lens plane
Tc = np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[-1 / mic['cyl_focal_length'], 0, 1, 0],
[0, 0, 0, 1]])
xc_a = np.linalg.inv(Tc) @ xc
# # light field at the ccd plane
T1 = np.array([[1, 0, -dCCD * 2 / 3, 0],
[0, 1, 0, -dCCD * 2 / 3],
[0, 0, 1, 0],
[0, 0, 0, 1]]);
# # light field at the ccd plane
xs = np.linalg.inv(T1) @ xc_a
# transform the position in pixel units
X = np.round(xs[0, :] / mic['pixel_size'] + P1[0])
Y = np.round(xs[1, :] / mic['pixel_size'] + P1[1])
# remove rays outside the CCD
ind = np.all([X > 0, X <= mic['pixel_dim_x'], Y > 0, Y <= mic['pixel_dim_y'],
X.imag == 0, Y.imag == 0], axis=0)
# count number of rays in each pixel
countXY = np.sort(Y[ind] + (X[ind] - 1) * mic['pixel_dim_y'])
indi, ia = np.unique(countXY, return_index=True)
nCounts = np.hstack((ia[1:], countXY.size + 1)) - ia
# prepare image
I = np.zeros((mic['pixel_dim_y'], mic['pixel_dim_x']))
Ifr = I.flatten('F')
Ifr[indi.astype(int) - 1] = nCounts
I = Ifr.reshape(mic['pixel_dim_y'], mic['pixel_dim_x'], order='F')
return I
# %%
def create_particle(D, Ns, Nr):
R = D / 2
V = spiral_sphere(Ns)
V[0:2, V[0, :] > 0] = -V[0:2, V[0, :] > 0]
x = R * V[0, :]
y = R * V[1, :]
z = R * V[2, :]
V0 = spiral_sphere(Nr + 2)
V0 = V0[:, 1:-1]
u = np.tile(x, (Nr, 1))
v = np.tile(y, (Nr, 1))
s = u * 0
t = u * 0
phs = np.random.uniform(-np.pi, np.pi, z.size)
cs = np.cos(phs)
sn = np.sin(phs)
for k in range(0, Ns):
Rot = np.array([[cs[k], -sn[k], 0],
[sn[k], cs[k], 0], [0, 0, 1]])
Vr = Rot @ V0
Vr[0, :] = -abs(Vr[0, :])
s[:, k] = Vr[1, :] / Vr[0, :]
t[:, k] = Vr[2, :] / Vr[0, :]
u[:, k] = y[k] - s[:, k] * x[k]
v[:, k] = z[k] - t[:, k] * x[k]
xp = np.vstack((u.flatten('F'), v.flatten('F'),
s.flatten('F'), t.flatten('F')))
return xp
# %%
def create_ellipsoid(Deab, Ns, Nr):
D = Deab[0];
ecc = Deab[1]
alpha = Deab[2];
beta = Deab[3]
R = D / 2
V = spiral_sphere(Ns)
V = R * V
V[2, :] = V[2, :] * ecc
R_beta = np.array([[np.cos(beta), 0, np.sin(beta)],
[0, 1, 0],
[-np.sin(beta), 0, np.cos(beta)]])
R_alpha = np.array([[np.cos(alpha), -np.sin(alpha), 0],
[np.sin(alpha), np.cos(alpha), 0],
[0, 0, 1]])
Vf = R_alpha @ (R_beta @ V)
ii1 = (Vf[1, :] == np.min(Vf[1, :])).nonzero()[0][0]
ii2 = (Vf[1, :] == np.max(Vf[1, :])).nonzero()[0][0]
ii3 = (Vf[2, :] == np.min(Vf[2, :])).nonzero()[0][0]
ii4 = (Vf[2, :] == np.max(Vf[2, :])).nonzero()[0][0]
Vdum = Vf[:, [ii1, ii2, ii3, ii4]]
A = np.c_[Vdum[1, :], Vdum[2, :], np.ones(Vdum.shape[1])]
C, _, _, _ = np.linalg.lstsq(A, Vdum[0, :], rcond=None)
V1dum = C[0] * Vf[1, :] + C[1] * Vf[2, :] + C[2]
ind = (Vf[0, :] - V1dum) < 0
x = Vf[0, ind]
y = Vf[1, ind]
z = Vf[2, ind]
Ns = z.size
V0 = spiral_sphere(Nr + 2)
V0 = V0[:, 1:-1]
u = np.tile(x, (Nr, 1))
v = np.tile(y, (Nr, 1))
s = u * 0
t = u * 0
phs = np.random.uniform(-np.pi, np.pi, z.size)
cs = np.cos(phs)
sn = np.sin(phs)
for k in range(0, Ns):
Rot = np.array([[cs[k], -sn[k], 0],
[sn[k], cs[k], 0], [0, 0, 1]])
Vr = Rot @ V0
Vr[0, :] = -abs(Vr[0, :])
s[:, k] = Vr[1, :] / Vr[0, :]
t[:, k] = Vr[2, :] / Vr[0, :]
u[:, k] = y[k] - s[:, k] * x[k]
v[:, k] = z[k] - t[:, k] * x[k]
xp = np.vstack((u.flatten('F'), v.flatten('F'),
s.flatten('F'), t.flatten('F')))
return xp
# %%
def spiral_sphere(N):
gr = (1 + np.sqrt(5)) / 2 # golden ratio
ga = 2 * np.pi * (1 - 1 / gr) # golden angle
ind_p = np.arange(0, N) # particle (i.e., point sample) index
lat = np.arccos(1 - 2 * ind_p / (
N - 1)) # latitude is defined so that particle index is proportional to surface area between 0 and lat
lon = ind_p * ga # position particles at even intervals along longitude
# Convert from spherical to Cartesian co-ordinates
x = np.sin(lat) * np.cos(lon)
y = np.sin(lat) * np.sin(lon)
z = np.cos(lat)
V = np.vstack((x, y, z))
return V
# %%
if __name__ == '__main__':
run()
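# Scripted alternative to the GUI (illustrative parameter values only, not
# taken from the publication): build the settings dict by hand and image a
# single in-focus particle with take_image() directly.
#
#   mic = dict(magnification=10., numerical_aperture=0.3, focal_length=350.,
#              ri_medium=1.33, ri_lens=1.5, pixel_size=6.5, pixel_dim_x=128,
#              pixel_dim_y=128, background_mean=100., background_noise=10.,
#              points_per_pixel=20, n_rays=100, gain=1., cyl_focal_length=0.)
#   P = np.array([[64.0, 64.0, 0.0, 2.0]])   # x [px], y [px], z, diameter
#   I = take_image(mic, P)
#   imageio.imwrite('single_particle.tif', np.uint16(I))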
| [
"numpy.uint8",
"tkinter.filedialog.askdirectory",
"numpy.arccos",
"numpy.sqrt",
"numpy.hstack",
"numpy.array",
"sys.exit",
"numpy.sin",
"numpy.genfromtxt",
"numpy.arange",
"numpy.cross",
"numpy.sort",
"os.path.split",
"numpy.max",
"tkinter.filedialog.askopenfilenames",
"numpy.vstack",
"numpy.linalg.lstsq",
"numpy.min",
"numpy.round",
"glob.glob",
"numpy.random.normal",
"numpy.tile",
"numpy.arctan",
"numpy.uint16",
"numpy.ones",
"numpy.cos",
"numpy.unique",
"numpy.arcsin",
"os.path.join",
"numpy.sum",
"numpy.zeros",
"tkinter.Tk",
"numpy.linalg.inv",
"numpy.random.uniform",
"os.path.abspath",
"numpy.all"
] | [((4177, 4184), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (4182, 4184), True, 'import tkinter as tk\n'), ((4263, 4395), 'tkinter.filedialog.askopenfilenames', 'filedialog.askopenfilenames', ([], {'title': '"""Select settings file"""', 'parent': 'root', 'filetypes': "(('txt files', '*.txt'), ('all files', '*.*'))"}), "(title='Select settings file', parent=root,\n filetypes=(('txt files', '*.txt'), ('all files', '*.*')))\n", (4290, 4395), False, 'from tkinter import filedialog\n'), ((4494, 4625), 'tkinter.filedialog.askopenfilenames', 'filedialog.askopenfilenames', ([], {'title': '"""Select data file(s)"""', 'parent': 'root', 'filetypes': "(('txt files', '*.txt'), ('all files', '*.*'))"}), "(title='Select data file(s)', parent=root,\n filetypes=(('txt files', '*.txt'), ('all files', '*.*')))\n", (4521, 4625), False, 'from tkinter import filedialog\n'), ((4732, 4801), 'tkinter.filedialog.askdirectory', 'filedialog.askdirectory', ([], {'title': '"""Select destination file"""', 'parent': 'root'}), "(title='Select destination file', parent=root)\n", (4755, 4801), False, 'from tkinter import filedialog\n'), ((5683, 5733), 'numpy.zeros', 'np.zeros', (["(mic['pixel_dim_y'], mic['pixel_dim_x'])"], {}), "((mic['pixel_dim_y'], mic['pixel_dim_x']))\n", (5691, 5733), True, 'import numpy as np\n'), ((5747, 5765), 'numpy.unique', 'np.unique', (['P[:, 3]'], {}), '(P[:, 3])\n', (5756, 5765), True, 'import numpy as np\n'), ((8732, 8806), 'numpy.array', 'np.array', (['[[1, 0, dPART, 0], [0, 1, 0, dPART], [0, 0, 1, 0], [0, 0, 0, 1]]'], {}), '([[1, 0, dPART, 0], [0, 1, 0, dPART], [0, 0, 1, 0], [0, 0, 0, 1]])\n', (8740, 8806), True, 'import numpy as np\n'), ((9382, 9408), 'numpy.vstack', 'np.vstack', (['(1 + dum, c, d)'], {}), '((1 + dum, c, d))\n', (9391, 9408), True, 'import numpy as np\n'), ((9513, 9540), 'numpy.vstack', 'np.vstack', (['(rk + dum, a, b)'], {}), '((rk + dum, a, b))\n', (9522, 9540), True, 'import numpy as np\n'), ((9648, 9682), 'numpy.cross', 'np.cross', (['Vr', 'Vl'], {'axisa': '(0)', 'axisb': '(0)'}), '(Vr, Vl, axisa=0, axisb=0)\n', (9656, 9682), True, 'import numpy as np\n'), ((9850, 9873), 'numpy.sum', 'np.sum', (['(Vr * Vl)'], {'axis': '(0)'}), '(Vr * Vl, axis=0)\n', (9856, 9873), True, 'import numpy as np\n'), ((9899, 9924), 'numpy.sum', 'np.sum', (['(Vr * Vrot)'], {'axis': '(0)'}), '(Vr * Vrot, axis=0)\n', (9905, 9924), True, 'import numpy as np\n'), ((10306, 10339), 'numpy.vstack', 'np.vstack', (['(Vl[0, :], -Vl[1:, :])'], {}), '((Vl[0, :], -Vl[1:, :]))\n', (10315, 10339), True, 'import numpy as np\n'), ((10392, 10427), 'numpy.cross', 'np.cross', (['Vr', 'Vl2'], {'axisa': '(0)', 'axisb': '(0)'}), '(Vr, Vl2, axisa=0, axisb=0)\n', (10400, 10427), True, 'import numpy as np\n'), ((10596, 10620), 'numpy.sum', 'np.sum', (['(Vr * Vl2)'], {'axis': '(0)'}), '(Vr * Vl2, axis=0)\n', (10602, 10620), True, 'import numpy as np\n'), ((10646, 10671), 'numpy.sum', 'np.sum', (['(Vr * Vrot)'], {'axis': '(0)'}), '(Vr * Vrot, axis=0)\n', (10652, 10671), True, 'import numpy as np\n'), ((12418, 12464), 'numpy.round', 'np.round', (["(xs[0, :] / mic['pixel_size'] + P1[0])"], {}), "(xs[0, :] / mic['pixel_size'] + P1[0])\n", (12426, 12464), True, 'import numpy as np\n'), ((12473, 12519), 'numpy.round', 'np.round', (["(xs[1, :] / mic['pixel_size'] + P1[1])"], {}), "(xs[1, :] / mic['pixel_size'] + P1[1])\n", (12481, 12519), True, 'import numpy as np\n'), ((12565, 12676), 'numpy.all', 'np.all', (["[X > 0, X <= mic['pixel_dim_x'], Y > 0, Y <= mic['pixel_dim_y'], X.imag == \n 0, Y.imag == 0]"], {'axis': '(0)'}), 
"([X > 0, X <= mic['pixel_dim_x'], Y > 0, Y <= mic['pixel_dim_y'], X.\n imag == 0, Y.imag == 0], axis=0)\n", (12571, 12676), True, 'import numpy as np\n'), ((12746, 12797), 'numpy.sort', 'np.sort', (["(Y[ind] + (X[ind] - 1) * mic['pixel_dim_y'])"], {}), "(Y[ind] + (X[ind] - 1) * mic['pixel_dim_y'])\n", (12753, 12797), True, 'import numpy as np\n'), ((12813, 12850), 'numpy.unique', 'np.unique', (['countXY'], {'return_index': '(True)'}), '(countXY, return_index=True)\n', (12822, 12850), True, 'import numpy as np\n'), ((12937, 12987), 'numpy.zeros', 'np.zeros', (["(mic['pixel_dim_y'], mic['pixel_dim_x'])"], {}), "((mic['pixel_dim_y'], mic['pixel_dim_x']))\n", (12945, 12987), True, 'import numpy as np\n'), ((13386, 13405), 'numpy.tile', 'np.tile', (['x', '(Nr, 1)'], {}), '(x, (Nr, 1))\n', (13393, 13405), True, 'import numpy as np\n'), ((13414, 13433), 'numpy.tile', 'np.tile', (['y', '(Nr, 1)'], {}), '(y, (Nr, 1))\n', (13421, 13433), True, 'import numpy as np\n'), ((13473, 13513), 'numpy.random.uniform', 'np.random.uniform', (['(-np.pi)', 'np.pi', 'z.size'], {}), '(-np.pi, np.pi, z.size)\n', (13490, 13513), True, 'import numpy as np\n'), ((13523, 13534), 'numpy.cos', 'np.cos', (['phs'], {}), '(phs)\n', (13529, 13534), True, 'import numpy as np\n'), ((13544, 13555), 'numpy.sin', 'np.sin', (['phs'], {}), '(phs)\n', (13550, 13555), True, 'import numpy as np\n'), ((14902, 14944), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['A', 'Vdum[0, :]'], {'rcond': 'None'}), '(A, Vdum[0, :], rcond=None)\n', (14917, 14944), True, 'import numpy as np\n'), ((15165, 15184), 'numpy.tile', 'np.tile', (['x', '(Nr, 1)'], {}), '(x, (Nr, 1))\n', (15172, 15184), True, 'import numpy as np\n'), ((15193, 15212), 'numpy.tile', 'np.tile', (['y', '(Nr, 1)'], {}), '(y, (Nr, 1))\n', (15200, 15212), True, 'import numpy as np\n'), ((15252, 15292), 'numpy.random.uniform', 'np.random.uniform', (['(-np.pi)', 'np.pi', 'z.size'], {}), '(-np.pi, np.pi, z.size)\n', (15269, 15292), True, 'import numpy as np\n'), ((15302, 15313), 'numpy.cos', 'np.cos', (['phs'], {}), '(phs)\n', (15308, 15313), True, 'import numpy as np\n'), ((15323, 15334), 'numpy.sin', 'np.sin', (['phs'], {}), '(phs)\n', (15329, 15334), True, 'import numpy as np\n'), ((15932, 15947), 'numpy.arange', 'np.arange', (['(0)', 'N'], {}), '(0, N)\n', (15941, 15947), True, 'import numpy as np\n'), ((15997, 16031), 'numpy.arccos', 'np.arccos', (['(1 - 2 * ind_p / (N - 1))'], {}), '(1 - 2 * ind_p / (N - 1))\n', (16006, 16031), True, 'import numpy as np\n'), ((16354, 16365), 'numpy.cos', 'np.cos', (['lat'], {}), '(lat)\n', (16360, 16365), True, 'import numpy as np\n'), ((16374, 16394), 'numpy.vstack', 'np.vstack', (['(x, y, z)'], {}), '((x, y, z))\n', (16383, 16394), True, 'import numpy as np\n'), ((4443, 4475), 'sys.exit', 'sys.exit', (['"""input file not valid"""'], {}), "('input file not valid')\n", (4451, 4475), False, 'import sys\n'), ((4673, 4705), 'sys.exit', 'sys.exit', (['"""input file not valid"""'], {}), "('input file not valid')\n", (4681, 4705), False, 'import sys\n'), ((4845, 4877), 'sys.exit', 'sys.exit', (['"""input file not valid"""'], {}), "('input file not valid')\n", (4853, 4877), False, 'import sys\n'), ((5327, 5346), 'numpy.genfromtxt', 'np.genfromtxt', (['data'], {}), '(data)\n', (5340, 5346), True, 'import numpy as np\n'), ((5429, 5448), 'os.path.split', 'os.path.split', (['data'], {}), '(data)\n', (5442, 5448), False, 'import os\n'), ((5858, 5877), 'numpy.ones', 'np.ones', (['P.shape[0]'], {}), '(P.shape[0])\n', (5865, 5877), True, 'import numpy as np\n'), 
((7801, 7892), 'numpy.random.normal', 'np.random.normal', (['(0)', "mic['background_noise']", "(mic['pixel_dim_y'], mic['pixel_dim_x'])"], {}), "(0, mic['background_noise'], (mic['pixel_dim_y'], mic[\n 'pixel_dim_x']))\n", (7817, 7892), True, 'import numpy as np\n'), ((8913, 8930), 'numpy.linalg.inv', 'np.linalg.inv', (['T2'], {}), '(T2)\n', (8926, 8930), True, 'import numpy as np\n'), ((10199, 10226), 'numpy.tile', 'np.tile', (['Vr11[0, :]', '(3, 1)'], {}), '(Vr11[0, :], (3, 1))\n', (10206, 10226), True, 'import numpy as np\n'), ((10949, 10976), 'numpy.tile', 'np.tile', (['Vr11[0, :]', '(3, 1)'], {}), '(Vr11[0, :], (3, 1))\n', (10956, 10976), True, 'import numpy as np\n'), ((11190, 11264), 'numpy.array', 'np.array', (['[[1, 0, -dCCD, 0], [0, 1, 0, -dCCD], [0, 0, 1, 0], [0, 0, 0, 1]]'], {}), '([[1, 0, -dCCD, 0], [0, 1, 0, -dCCD], [0, 0, 1, 0], [0, 0, 0, 1]])\n', (11198, 11264), True, 'import numpy as np\n'), ((11519, 11613), 'numpy.array', 'np.array', (['[[1, 0, -dCCD * 1 / 3, 0], [0, 1, 0, -dCCD * 1 / 3], [0, 0, 1, 0], [0, 0, 0, 1]\n ]'], {}), '([[1, 0, -dCCD * 1 / 3, 0], [0, 1, 0, -dCCD * 1 / 3], [0, 0, 1, 0],\n [0, 0, 0, 1]])\n', (11527, 11613), True, 'import numpy as np\n'), ((11856, 11954), 'numpy.array', 'np.array', (["[[1, 0, 0, 0], [0, 1, 0, 0], [-1 / mic['cyl_focal_length'], 0, 1, 0], [0, 0,\n 0, 1]]"], {}), "([[1, 0, 0, 0], [0, 1, 0, 0], [-1 / mic['cyl_focal_length'], 0, 1, \n 0], [0, 0, 0, 1]])\n", (11864, 11954), True, 'import numpy as np\n'), ((12118, 12212), 'numpy.array', 'np.array', (['[[1, 0, -dCCD * 2 / 3, 0], [0, 1, 0, -dCCD * 2 / 3], [0, 0, 1, 0], [0, 0, 0, 1]\n ]'], {}), '([[1, 0, -dCCD * 2 / 3, 0], [0, 1, 0, -dCCD * 2 / 3], [0, 0, 1, 0],\n [0, 0, 0, 1]])\n', (12126, 12212), True, 'import numpy as np\n'), ((12865, 12902), 'numpy.hstack', 'np.hstack', (['(ia[1:], countXY.size + 1)'], {}), '((ia[1:], countXY.size + 1))\n', (12874, 12902), True, 'import numpy as np\n'), ((13597, 13657), 'numpy.array', 'np.array', (['[[cs[k], -sn[k], 0], [sn[k], cs[k], 0], [0, 0, 1]]'], {}), '([[cs[k], -sn[k], 0], [sn[k], cs[k], 0], [0, 0, 1]])\n', (13605, 13657), True, 'import numpy as np\n'), ((15376, 15436), 'numpy.array', 'np.array', (['[[cs[k], -sn[k], 0], [sn[k], cs[k], 0], [0, 0, 1]]'], {}), '([[cs[k], -sn[k], 0], [sn[k], cs[k], 0], [0, 0, 1]])\n', (15384, 15436), True, 'import numpy as np\n'), ((16286, 16297), 'numpy.sin', 'np.sin', (['lat'], {}), '(lat)\n', (16292, 16297), True, 'import numpy as np\n'), ((16300, 16311), 'numpy.cos', 'np.cos', (['lon'], {}), '(lon)\n', (16306, 16311), True, 'import numpy as np\n'), ((16320, 16331), 'numpy.sin', 'np.sin', (['lat'], {}), '(lat)\n', (16326, 16331), True, 'import numpy as np\n'), ((16334, 16345), 'numpy.sin', 'np.sin', (['lon'], {}), '(lon)\n', (16340, 16345), True, 'import numpy as np\n'), ((3513, 3532), 'numpy.genfromtxt', 'np.genfromtxt', (['data'], {}), '(data)\n', (3526, 3532), True, 'import numpy as np\n'), ((3627, 3646), 'os.path.split', 'os.path.split', (['data'], {}), '(data)\n', (3640, 3646), False, 'import os\n'), ((5393, 5406), 'numpy.array', 'np.array', (['[P]'], {}), '([P])\n', (5401, 5406), True, 'import numpy as np\n'), ((5946, 6052), 'numpy.round', 'np.round', (["(mic['points_per_pixel'] * 2 * np.pi * (dp_s * mic['magnification'] / mic[\n 'pixel_size']) ** 2)"], {}), "(mic['points_per_pixel'] * 2 * np.pi * (dp_s * mic['magnification'] /\n mic['pixel_size']) ** 2)\n", (5954, 6052), True, 'import numpy as np\n'), ((7937, 7952), 'numpy.round', 'np.round', (['Irand'], {}), '(Irand)\n', (7945, 7952), True, 'import numpy as 
np\n'), ((9694, 9730), 'numpy.cross', 'np.cross', (['Vrot', 'Vl'], {'axisa': '(1)', 'axisb': '(0)'}), '(Vrot, Vl, axisa=1, axisb=0)\n', (9702, 9730), True, 'import numpy as np\n'), ((10439, 10476), 'numpy.cross', 'np.cross', (['Vrot', 'Vl2'], {'axisa': '(1)', 'axisb': '(0)'}), '(Vrot, Vl2, axisa=1, axisb=0)\n', (10447, 10476), True, 'import numpy as np\n'), ((11390, 11407), 'numpy.linalg.inv', 'np.linalg.inv', (['T1'], {}), '(T1)\n', (11403, 11407), True, 'import numpy as np\n'), ((11756, 11774), 'numpy.linalg.inv', 'np.linalg.inv', (['T1c'], {}), '(T1c)\n', (11769, 11774), True, 'import numpy as np\n'), ((12034, 12051), 'numpy.linalg.inv', 'np.linalg.inv', (['Tc'], {}), '(Tc)\n', (12047, 12051), True, 'import numpy as np\n'), ((12340, 12357), 'numpy.linalg.inv', 'np.linalg.inv', (['T1'], {}), '(T1)\n', (12353, 12357), True, 'import numpy as np\n'), ((14861, 14883), 'numpy.ones', 'np.ones', (['Vdum.shape[1]'], {}), '(Vdum.shape[1])\n', (14868, 14883), True, 'import numpy as np\n'), ((15837, 15847), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (15844, 15847), True, 'import numpy as np\n'), ((1924, 1953), 'os.path.abspath', 'os.path.abspath', (['setting_file'], {}), '(setting_file)\n', (1939, 1953), False, 'import os\n'), ((2286, 2318), 'glob.glob', 'glob.glob', (["(data_files + '/*.txt')"], {}), "(data_files + '/*.txt')\n", (2295, 2318), False, 'import glob\n'), ((2899, 2934), 'os.path.abspath', 'os.path.abspath', (['destination_folder'], {}), '(destination_folder)\n', (2914, 2934), False, 'import os\n'), ((3587, 3600), 'numpy.array', 'np.array', (['[P]'], {}), '([P])\n', (3595, 3600), True, 'import numpy as np\n'), ((8352, 8388), 'numpy.arcsin', 'np.arcsin', (["mic['numerical_aperture']"], {}), "(mic['numerical_aperture'])\n", (8361, 8388), True, 'import numpy as np\n'), ((10026, 10044), 'numpy.arctan', 'np.arctan', (['(vy / vx)'], {}), '(vy / vx)\n', (10035, 10044), True, 'import numpy as np\n'), ((10109, 10121), 'numpy.cos', 'np.cos', (['th11'], {}), '(th11)\n', (10115, 10121), True, 'import numpy as np\n'), ((10160, 10172), 'numpy.sin', 'np.sin', (['th11'], {}), '(th11)\n', (10166, 10172), True, 'import numpy as np\n'), ((10774, 10792), 'numpy.arctan', 'np.arctan', (['(vy / vx)'], {}), '(vy / vx)\n', (10783, 10792), True, 'import numpy as np\n'), ((10859, 10871), 'numpy.cos', 'np.cos', (['th11'], {}), '(th11)\n', (10865, 10871), True, 'import numpy as np\n'), ((10910, 10922), 'numpy.sin', 'np.sin', (['th11'], {}), '(th11)\n', (10916, 10922), True, 'import numpy as np\n'), ((14242, 14254), 'numpy.cos', 'np.cos', (['beta'], {}), '(beta)\n', (14248, 14254), True, 'import numpy as np\n'), ((14259, 14271), 'numpy.sin', 'np.sin', (['beta'], {}), '(beta)\n', (14265, 14271), True, 'import numpy as np\n'), ((14350, 14362), 'numpy.cos', 'np.cos', (['beta'], {}), '(beta)\n', (14356, 14362), True, 'import numpy as np\n'), ((14391, 14404), 'numpy.cos', 'np.cos', (['alpha'], {}), '(alpha)\n', (14397, 14404), True, 'import numpy as np\n'), ((14451, 14464), 'numpy.sin', 'np.sin', (['alpha'], {}), '(alpha)\n', (14457, 14464), True, 'import numpy as np\n'), ((14466, 14479), 'numpy.cos', 'np.cos', (['alpha'], {}), '(alpha)\n', (14472, 14479), True, 'import numpy as np\n'), ((3764, 3820), 'os.path.join', 'os.path.join', (['self.destination_folder', "(tail[:-3] + 'tif')"], {}), "(self.destination_folder, tail[:-3] + 'tif')\n", (3776, 3820), False, 'import os\n'), ((3852, 3864), 'numpy.uint16', 'np.uint16', (['I'], {}), '(I)\n', (3861, 3864), True, 'import numpy as np\n'), ((6385, 6496), 'numpy.round', 
'np.round', (["(mic['points_per_pixel'] * 2 * np.pi * (P[ii, 3] * mic['magnification'] /\n mic['pixel_size']) ** 2)"], {}), "(mic['points_per_pixel'] * 2 * np.pi * (P[ii, 3] * mic[\n 'magnification'] / mic['pixel_size']) ** 2)\n", (6393, 6496), True, 'import numpy as np\n'), ((14333, 14345), 'numpy.sin', 'np.sin', (['beta'], {}), '(beta)\n', (14339, 14345), True, 'import numpy as np\n'), ((14407, 14420), 'numpy.sin', 'np.sin', (['alpha'], {}), '(alpha)\n', (14413, 14420), True, 'import numpy as np\n'), ((3948, 4004), 'os.path.join', 'os.path.join', (['self.destination_folder', "(tail[:-3] + 'tif')"], {}), "(self.destination_folder, tail[:-3] + 'tif')\n", (3960, 4004), False, 'import os\n'), ((4036, 4047), 'numpy.uint8', 'np.uint8', (['I'], {}), '(I)\n', (4044, 4047), True, 'import numpy as np\n'), ((6782, 6893), 'numpy.round', 'np.round', (["(mic['points_per_pixel'] * 2 * np.pi * (P[ii, 3] * mic['magnification'] /\n mic['pixel_size']) ** 2)"], {}), "(mic['points_per_pixel'] * 2 * np.pi * (P[ii, 3] * mic[\n 'magnification'] / mic['pixel_size']) ** 2)\n", (6790, 6893), True, 'import numpy as np\n'), ((14578, 14594), 'numpy.min', 'np.min', (['Vf[1, :]'], {}), '(Vf[1, :])\n', (14584, 14594), True, 'import numpy as np\n'), ((14635, 14651), 'numpy.max', 'np.max', (['Vf[1, :]'], {}), '(Vf[1, :])\n', (14641, 14651), True, 'import numpy as np\n'), ((14692, 14708), 'numpy.min', 'np.min', (['Vf[2, :]'], {}), '(Vf[2, :])\n', (14698, 14708), True, 'import numpy as np\n'), ((14749, 14765), 'numpy.max', 'np.max', (['Vf[2, :]'], {}), '(Vf[2, :])\n', (14755, 14765), True, 'import numpy as np\n'), ((7193, 7218), 'numpy.round', 'np.round', (['(fact * n_points)'], {}), '(fact * n_points)\n', (7201, 7218), True, 'import numpy as np\n'), ((7458, 7483), 'numpy.round', 'np.round', (['(fact * n_points)'], {}), '(fact * n_points)\n', (7466, 7483), True, 'import numpy as np\n'), ((7064, 7089), 'numpy.sqrt', 'np.sqrt', (['(1 - 1 / ecc ** 2)'], {}), '(1 - 1 / ecc ** 2)\n', (7071, 7089), True, 'import numpy as np\n'), ((7134, 7159), 'numpy.sqrt', 'np.sqrt', (['(1 - 1 / ecc ** 2)'], {}), '(1 - 1 / ecc ** 2)\n', (7141, 7159), True, 'import numpy as np\n'), ((7337, 7358), 'numpy.sqrt', 'np.sqrt', (['(1 - ecc ** 2)'], {}), '(1 - ecc ** 2)\n', (7344, 7358), True, 'import numpy as np\n'), ((7403, 7424), 'numpy.sqrt', 'np.sqrt', (['(1 - ecc ** 2)'], {}), '(1 - ecc ** 2)\n', (7410, 7424), True, 'import numpy as np\n')] |
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django.contrib.auth.models import User
from django import forms
class UploadFileForm(forms.Form):
title = forms.CharField(max_length=50)
file = forms.FileField()
# If you don't do this you cannot use Bootstrap CSS
class LoginForm(AuthenticationForm):
username = forms.CharField(label="Username", max_length=16,
widget=forms.TextInput(attrs={'class': 'form-control', 'name': 'username'}))
password = forms.CharField(label="Password", max_length=16,
widget=forms.PasswordInput(attrs={'class': 'form-control', 'name': 'password'}))
class SignUpForm(UserCreationForm):
full_name = forms.CharField(label="Full Name", max_length=50,
widget=forms.TextInput(attrs={'class': 'form-control', 'name': 'full_name'}))
    email = forms.EmailField(label="Email", max_length=50, widget=forms.EmailInput(attrs={'class': 'form-control', 'name': 'email'}))
class Meta:
model = User
fields = ("email", "full_name", "username", "<PASSWORD>", "<PASSWORD>")
def save(self, commit=True):
user = super(SignUpForm, self).save(commit=False)
user.full_name = self.cleaned_data["full_name"]
user.email = self.cleaned_data["email"]
if commit:
user.save()
return user
class EmailSignupForm(UserCreationForm):
full_name = forms.CharField(label="Full Name", max_length=50,
widget=forms.TextInput(attrs={'class': 'form-control', 'name': 'full_name'}))
class Meta:
model = User
fields = ("full_name", "username", "<PASSWORD>", "<PASSWORD>")
def save(self, commit=True):
user = super(EmailSignupForm, self).save(commit=False)
user.full_name = self.cleaned_data["full_name"]
if commit:
user.save()
return user
class ChangePasswordForm(forms.Form):
security_code = forms.CharField(label="Security Code", max_length=50,
widget=forms.TextInput(attrs={'class': 'form-control', 'name': 'security_code'}))
password1 = forms.CharField(label="New Password", max_length=16,
widget=forms.PasswordInput(attrs={'class': 'form-control', 'name': 'password1'}))
password2 = forms.CharField(label="Re-enter New Password", max_length=16,
widget=forms.PasswordInput(attrs={'class': 'form-control', 'name': 'password2'}))
class Meta:
fields = ("security_code", "password1", "password2") | [
"django.forms.CharField",
"django.forms.PasswordInput",
"django.forms.EmailInput",
"django.forms.TextInput",
"django.forms.FileField"
] | [((192, 222), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (207, 222), False, 'from django import forms\n'), ((234, 251), 'django.forms.FileField', 'forms.FileField', ([], {}), '()\n', (249, 251), False, 'from django import forms\n'), ((445, 513), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control', 'name': 'username'}"}), "(attrs={'class': 'form-control', 'name': 'username'})\n", (460, 513), False, 'from django import forms\n'), ((617, 689), 'django.forms.PasswordInput', 'forms.PasswordInput', ([], {'attrs': "{'class': 'form-control', 'name': 'password'}"}), "(attrs={'class': 'form-control', 'name': 'password'})\n", (636, 689), False, 'from django import forms\n'), ((835, 904), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control', 'name': 'full_name'}"}), "(attrs={'class': 'form-control', 'name': 'full_name'})\n", (850, 904), False, 'from django import forms\n'), ((976, 1042), 'django.forms.EmailInput', 'forms.EmailInput', ([], {'attrs': "{'class': 'form-control', 'name': 'email'}"}), "(attrs={'class': 'form-control', 'name': 'email'})\n", (992, 1042), False, 'from django import forms\n'), ((1570, 1639), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control', 'name': 'full_name'}"}), "(attrs={'class': 'form-control', 'name': 'full_name'})\n", (1585, 1639), False, 'from django import forms\n'), ((2119, 2192), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control', 'name': 'security_code'}"}), "(attrs={'class': 'form-control', 'name': 'security_code'})\n", (2134, 2192), False, 'from django import forms\n'), ((2302, 2375), 'django.forms.PasswordInput', 'forms.PasswordInput', ([], {'attrs': "{'class': 'form-control', 'name': 'password1'}"}), "(attrs={'class': 'form-control', 'name': 'password1'})\n", (2321, 2375), False, 'from django import forms\n'), ((2493, 2566), 'django.forms.PasswordInput', 'forms.PasswordInput', ([], {'attrs': "{'class': 'form-control', 'name': 'password2'}"}), "(attrs={'class': 'form-control', 'name': 'password2'})\n", (2512, 2566), False, 'from django import forms\n')] |
import pandas as pd
from pandas import DataFrame
df = pd.read_csv('sp500_ohlc.csv', index_col = 'Date', parse_dates=True)
df['H-L'] = df.High - df.Low
# Giving us count (rows), mean (avg), std (standard deviation for the entire
# set), minimum for the set, maximum for the set, and some %s in that range.
print( df.describe())
x = input('enter to cont')
# gives us correlation data. Remember the 3d chart we plotted?
# now you can see if correlation of H-L and Volume also is correlated
# with price swings. Correlations for your correlations
print( df.corr())
x = input('enter to cont')
# covariance... now plenty of people know what correlation is, but what in the
# heck is covariance.
# Let's define the two.
# covariance is the measure of how two variables change together.
# correlation is the measure of how two variables move in relation to each other.
# so covariance is a more direct assessment of the relationship between two variables.
# Maybe a better way to put it is that covariance is the measure of the strength of correlation.
print( df.cov())
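# Quick numeric check (added for illustration, not in the original script):
# correlation is just covariance rescaled by the two standard deviations,
# so these two printed values should match.
print( df['H-L'].cov(df['Volume']) / (df['H-L'].std() * df['Volume'].std()) )
print( df['H-L'].corr(df['Volume']) )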
x = input('enter to cont')
print( df[['Volume','H-L']].corr())
x = input('enter to cont')
# see how it makes a table?
# so now, we can actually perform a service that some people actually pay for
# I once had a short freelance gig doing this
# so a popular form of analysis, especially within forex, is to compare correlations between
# the currencies. The idea here is that you pair one currency with another.
#
import datetime
import pandas.io.data
C = pd.io.data.get_data_yahoo('C',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
AAPL = pd.io.data.get_data_yahoo('AAPL',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
MSFT = pd.io.data.get_data_yahoo('MSFT',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
TSLA = pd.io.data.get_data_yahoo('TSLA',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
print( C.head())
x = input('enter to cont')
del C['Open']
# , 'high', 'low', 'close', 'volume'
del C['High']
del C['Low']
del C['Close']
del C['Volume']
corComp = C
corComp.rename(columns={'Adj Close': 'C'}, inplace=True)
corComp['AAPL'] = AAPL['Adj Close']
corComp['MSFT'] = MSFT['Adj Close']
corComp['TSLA'] = TSLA['Adj Close']
print( corComp.head())
x = input('enter to cont')
print( corComp.corr())
x = input('enter to cont')
C = pd.io.data.get_data_yahoo('C',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
AAPL = pd.io.data.get_data_yahoo('AAPL',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
MSFT = pd.io.data.get_data_yahoo('MSFT',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
TSLA = pd.io.data.get_data_yahoo('TSLA',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
BAC = pd.io.data.get_data_yahoo('BAC',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
BBRY = pd.io.data.get_data_yahoo('BBRY',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
CMG = pd.io.data.get_data_yahoo('CMG',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
EBAY = pd.io.data.get_data_yahoo('EBAY',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
JPM = pd.io.data.get_data_yahoo('JPM',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
SBUX = pd.io.data.get_data_yahoo('SBUX',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
TGT = pd.io.data.get_data_yahoo('TGT',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
WFC = pd.io.data.get_data_yahoo('WFC',
start=datetime.datetime(2011, 10, 1),
end=datetime.datetime(2014, 1, 1))
x = input('enter to cont')
print( C.head())
del C['Open']
# , 'high', 'low', 'close', 'volume'
del C['High']
del C['Low']
del C['Close']
del C['Volume']
corComp = C
corComp.rename(columns={'Adj Close': 'C'}, inplace=True)
corComp['BAC'] = BAC['Adj Close']
corComp['MSFT'] = MSFT['Adj Close']
corComp['TSLA'] = TSLA['Adj Close']
corComp['AAPL'] = AAPL['Adj Close']
corComp['BBRY'] = BBRY['Adj Close']
corComp['CMG'] = CMG['Adj Close']
corComp['EBAY'] = EBAY['Adj Close']
corComp['JPM'] = JPM['Adj Close']
corComp['SBUX'] = SBUX['Adj Close']
corComp['TGT'] = TGT['Adj Close']
corComp['WFC'] = WFC['Adj Close']
print( corComp.head())
x = input('enter to cont')
print( corComp.corr())
x = input('enter to cont')
fancy = corComp.corr()
fancy.to_csv('bigmoney.csv')
| [
"datetime.datetime",
"pandas.read_csv"
] | [((55, 120), 'pandas.read_csv', 'pd.read_csv', (['"""sp500_ohlc.csv"""'], {'index_col': '"""Date"""', 'parse_dates': '(True)'}), "('sp500_ohlc.csv', index_col='Date', parse_dates=True)\n", (66, 120), True, 'import pandas as pd\n'), ((1602, 1632), 'datetime.datetime', 'datetime.datetime', (['(2011)', '(10)', '(1)'], {}), '(2011, 10, 1)\n', (1619, 1632), False, 'import datetime\n'), ((1672, 1701), 'datetime.datetime', 'datetime.datetime', (['(2014)', '(1)', '(1)'], {}), '(2014, 1, 1)\n', (1689, 1701), False, 'import datetime\n'), ((1784, 1814), 'datetime.datetime', 'datetime.datetime', (['(2011)', '(10)', '(1)'], {}), '(2011, 10, 1)\n', (1801, 1814), False, 'import datetime\n'), ((1854, 1883), 'datetime.datetime', 'datetime.datetime', (['(2014)', '(1)', '(1)'], {}), '(2014, 1, 1)\n', (1871, 1883), False, 'import datetime\n'), ((1966, 1996), 'datetime.datetime', 'datetime.datetime', (['(2011)', '(10)', '(1)'], {}), '(2011, 10, 1)\n', (1983, 1996), False, 'import datetime\n'), ((2036, 2065), 'datetime.datetime', 'datetime.datetime', (['(2014)', '(1)', '(1)'], {}), '(2014, 1, 1)\n', (2053, 2065), False, 'import datetime\n'), ((2148, 2178), 'datetime.datetime', 'datetime.datetime', (['(2011)', '(10)', '(1)'], {}), '(2011, 10, 1)\n', (2165, 2178), False, 'import datetime\n'), ((2218, 2247), 'datetime.datetime', 'datetime.datetime', (['(2014)', '(1)', '(1)'], {}), '(2014, 1, 1)\n', (2235, 2247), False, 'import datetime\n'), ((2768, 2798), 'datetime.datetime', 'datetime.datetime', (['(2011)', '(10)', '(1)'], {}), '(2011, 10, 1)\n', (2785, 2798), False, 'import datetime\n'), ((2838, 2867), 'datetime.datetime', 'datetime.datetime', (['(2014)', '(1)', '(1)'], {}), '(2014, 1, 1)\n', (2855, 2867), False, 'import datetime\n'), ((2950, 2980), 'datetime.datetime', 'datetime.datetime', (['(2011)', '(10)', '(1)'], {}), '(2011, 10, 1)\n', (2967, 2980), False, 'import datetime\n'), ((3020, 3049), 'datetime.datetime', 'datetime.datetime', (['(2014)', '(1)', '(1)'], {}), '(2014, 1, 1)\n', (3037, 3049), False, 'import datetime\n'), ((3132, 3162), 'datetime.datetime', 'datetime.datetime', (['(2011)', '(10)', '(1)'], {}), '(2011, 10, 1)\n', (3149, 3162), False, 'import datetime\n'), ((3202, 3231), 'datetime.datetime', 'datetime.datetime', (['(2014)', '(1)', '(1)'], {}), '(2014, 1, 1)\n', (3219, 3231), False, 'import datetime\n'), ((3314, 3344), 'datetime.datetime', 'datetime.datetime', (['(2011)', '(10)', '(1)'], {}), '(2011, 10, 1)\n', (3331, 3344), False, 'import datetime\n'), ((3384, 3413), 'datetime.datetime', 'datetime.datetime', (['(2014)', '(1)', '(1)'], {}), '(2014, 1, 1)\n', (3401, 3413), False, 'import datetime\n'), ((3494, 3524), 'datetime.datetime', 'datetime.datetime', (['(2011)', '(10)', '(1)'], {}), '(2011, 10, 1)\n', (3511, 3524), False, 'import datetime\n'), ((3564, 3593), 'datetime.datetime', 'datetime.datetime', (['(2014)', '(1)', '(1)'], {}), '(2014, 1, 1)\n', (3581, 3593), False, 'import datetime\n'), ((3676, 3706), 'datetime.datetime', 'datetime.datetime', (['(2011)', '(10)', '(1)'], {}), '(2011, 10, 1)\n', (3693, 3706), False, 'import datetime\n'), ((3746, 3775), 'datetime.datetime', 'datetime.datetime', (['(2014)', '(1)', '(1)'], {}), '(2014, 1, 1)\n', (3763, 3775), False, 'import datetime\n'), ((3856, 3886), 'datetime.datetime', 'datetime.datetime', (['(2011)', '(10)', '(1)'], {}), '(2011, 10, 1)\n', (3873, 3886), False, 'import datetime\n'), ((3926, 3955), 'datetime.datetime', 'datetime.datetime', (['(2014)', '(1)', '(1)'], {}), '(2014, 1, 1)\n', (3943, 3955), False, 'import 
datetime\n'), ((4038, 4068), 'datetime.datetime', 'datetime.datetime', (['(2011)', '(10)', '(1)'], {}), '(2011, 10, 1)\n', (4055, 4068), False, 'import datetime\n'), ((4108, 4137), 'datetime.datetime', 'datetime.datetime', (['(2014)', '(1)', '(1)'], {}), '(2014, 1, 1)\n', (4125, 4137), False, 'import datetime\n'), ((4218, 4248), 'datetime.datetime', 'datetime.datetime', (['(2011)', '(10)', '(1)'], {}), '(2011, 10, 1)\n', (4235, 4248), False, 'import datetime\n'), ((4288, 4317), 'datetime.datetime', 'datetime.datetime', (['(2014)', '(1)', '(1)'], {}), '(2014, 1, 1)\n', (4305, 4317), False, 'import datetime\n'), ((4400, 4430), 'datetime.datetime', 'datetime.datetime', (['(2011)', '(10)', '(1)'], {}), '(2011, 10, 1)\n', (4417, 4430), False, 'import datetime\n'), ((4470, 4499), 'datetime.datetime', 'datetime.datetime', (['(2014)', '(1)', '(1)'], {}), '(2014, 1, 1)\n', (4487, 4499), False, 'import datetime\n'), ((4580, 4610), 'datetime.datetime', 'datetime.datetime', (['(2011)', '(10)', '(1)'], {}), '(2011, 10, 1)\n', (4597, 4610), False, 'import datetime\n'), ((4650, 4679), 'datetime.datetime', 'datetime.datetime', (['(2014)', '(1)', '(1)'], {}), '(2014, 1, 1)\n', (4667, 4679), False, 'import datetime\n'), ((4760, 4790), 'datetime.datetime', 'datetime.datetime', (['(2011)', '(10)', '(1)'], {}), '(2011, 10, 1)\n', (4777, 4790), False, 'import datetime\n'), ((4830, 4859), 'datetime.datetime', 'datetime.datetime', (['(2014)', '(1)', '(1)'], {}), '(2014, 1, 1)\n', (4847, 4859), False, 'import datetime\n')] |
import cv2
import numpy as np
import threading
def test():
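    # Re-read the captured frame from disk and display it in a loop;
    # waitKey(1) gives the window time to refresh between iterations.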
while 1:
img1=cv2.imread('captured car1.jpg')
print("{}".format(img1.shape))
print("{}".format(img1))
cv2.imshow('asd',img1)
cv2.waitKey(1)
t1 = threading.Thread(target=test)
t1.start()
| [
"threading.Thread",
"cv2.waitKey",
"cv2.imread",
"cv2.imshow"
] | [((250, 279), 'threading.Thread', 'threading.Thread', ([], {'target': 'test'}), '(target=test)\n', (266, 279), False, 'import threading\n'), ((86, 117), 'cv2.imread', 'cv2.imread', (['"""captured car1.jpg"""'], {}), "('captured car1.jpg')\n", (96, 117), False, 'import cv2\n'), ((198, 221), 'cv2.imshow', 'cv2.imshow', (['"""asd"""', 'img1'], {}), "('asd', img1)\n", (208, 221), False, 'import cv2\n'), ((229, 243), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (240, 243), False, 'import cv2\n')] |
# Copyright 2013 Cloudbase Solutions Srl
#
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility class for VM related operations.
Based on the "root/virtualization/v2" namespace available starting with
Hyper-V Server / Windows Server 2012.
"""
import sys
if sys.platform == 'win32':
import wmi
from oslo.config import cfg
from ceilometer.compute.virt import inspector
from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import log as logging
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class HyperVException(inspector.InspectorException):
pass
class UtilsV2(object):
_VIRTUAL_SYSTEM_TYPE_REALIZED = 'Microsoft:Hyper-V:System:Realized'
_PROC_SETTING = 'Msvm_ProcessorSettingData'
_SYNTH_ETH_PORT = 'Msvm_SyntheticEthernetPortSettingData'
_ETH_PORT_ALLOC = 'Msvm_EthernetPortAllocationSettingData'
_PORT_ACL_SET_DATA = 'Msvm_EthernetSwitchPortAclSettingData'
_STORAGE_ALLOC = 'Msvm_StorageAllocationSettingData'
_VS_SETTING_DATA = 'Msvm_VirtualSystemSettingData'
_METRICS_ME = 'Msvm_MetricForME'
_BASE_METRICS_VALUE = 'Msvm_BaseMetricValue'
_CPU_METRIC_NAME = 'Aggregated Average CPU Utilization'
_NET_IN_METRIC_NAME = 'Filtered Incoming Network Traffic'
_NET_OUT_METRIC_NAME = 'Filtered Outgoing Network Traffic'
# Disk metrics are supported from Hyper-V 2012 R2
_DISK_RD_METRIC_NAME = 'Disk Data Read'
_DISK_WR_METRIC_NAME = 'Disk Data Written'
def __init__(self, host='.'):
if sys.platform == 'win32':
self._init_hyperv_wmi_conn(host)
self._init_cimv2_wmi_conn(host)
self._host_cpu_info = None
def _init_hyperv_wmi_conn(self, host):
self._conn = wmi.WMI(moniker='//%s/root/virtualization/v2' % host)
def _init_cimv2_wmi_conn(self, host):
self._conn_cimv2 = wmi.WMI(moniker='//%s/root/cimv2' % host)
def get_host_cpu_info(self):
if not self._host_cpu_info:
host_cpus = self._conn_cimv2.Win32_Processor()
self._host_cpu_info = (host_cpus[0].MaxClockSpeed, len(host_cpus))
return self._host_cpu_info
def get_all_vms(self):
vms = [(v.ElementName, v.Name) for v in
self._conn.Msvm_ComputerSystem(['ElementName', 'Name'],
Caption="Virtual Machine")]
return vms
def get_cpu_metrics(self, vm_name):
vm = self._lookup_vm(vm_name)
cpu_sd = self._get_vm_resources(vm, self._PROC_SETTING)[0]
cpu_metrics_def = self._get_metric_def(self._CPU_METRIC_NAME)
cpu_metric_aggr = self._get_metrics(vm, cpu_metrics_def)
cpu_used = 0
if cpu_metric_aggr:
cpu_used = long(cpu_metric_aggr[0].MetricValue)
return (cpu_used,
int(cpu_sd.VirtualQuantity),
long(vm.OnTimeInMilliseconds))
def get_vnic_metrics(self, vm_name):
vm = self._lookup_vm(vm_name)
ports = self._get_vm_resources(vm, self._ETH_PORT_ALLOC)
vnics = self._get_vm_resources(vm, self._SYNTH_ETH_PORT)
metric_def_in = self._get_metric_def(self._NET_IN_METRIC_NAME)
metric_def_out = self._get_metric_def(self._NET_OUT_METRIC_NAME)
for port in ports:
vnic = [v for v in vnics if port.Parent == v.path_()][0]
metric_value_instances = self._get_metric_value_instances(
port.associators(wmi_result_class=self._PORT_ACL_SET_DATA),
self._BASE_METRICS_VALUE)
metric_values = self._sum_metric_values_by_defs(
metric_value_instances, [metric_def_in, metric_def_out])
yield {
'rx_mb': metric_values[0],
'tx_mb': metric_values[1],
'element_name': vnic.ElementName,
'address': vnic.Address
}
def get_disk_metrics(self, vm_name):
vm = self._lookup_vm(vm_name)
metric_def_r = self._get_metric_def(self._DISK_RD_METRIC_NAME)
metric_def_w = self._get_metric_def(self._DISK_WR_METRIC_NAME)
disks = self._get_vm_resources(vm, self._STORAGE_ALLOC)
for disk in disks:
metric_values = self._get_metric_values(
disk, [metric_def_r, metric_def_w])
            # This is e.g. the VHD file location
if disk.HostResource:
host_resource = disk.HostResource[0]
yield {
# Values are in megabytes
'read_mb': metric_values[0],
'write_mb': metric_values[1],
'instance_id': disk.InstanceID,
'host_resource': host_resource
}
def _sum_metric_values(self, metrics):
tot_metric_val = 0
for metric in metrics:
tot_metric_val += long(metric.MetricValue)
return tot_metric_val
def _sum_metric_values_by_defs(self, element_metrics, metric_defs):
metric_values = []
for metric_def in metric_defs:
if metric_def:
metrics = self._filter_metrics(element_metrics, metric_def)
metric_values.append(self._sum_metric_values(metrics))
else:
# In case the metric is not defined on this host
metric_values.append(0)
return metric_values
def _get_metric_value_instances(self, elements, result_class):
instances = []
for el in elements:
associators = el.associators(wmi_result_class=result_class)
if associators:
instances.append(associators[0])
return instances
def _get_metric_values(self, element, metric_defs):
element_metrics = element.associators(
wmi_association_class=self._METRICS_ME)
return self._sum_metric_values_by_defs(element_metrics, metric_defs)
def _lookup_vm(self, vm_name):
vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
n = len(vms)
if n == 0:
raise inspector.InstanceNotFoundException(
_('VM %s not found on Hyper-V') % vm_name)
elif n > 1:
raise HyperVException(_('Duplicate VM name found: %s') % vm_name)
else:
return vms[0]
def _get_metrics(self, element, metric_def):
return self._filter_metrics(
element.associators(
wmi_association_class=self._METRICS_ME), metric_def)
def _filter_metrics(self, all_metrics, metric_def):
return [v for v in all_metrics if
v.MetricDefinitionId == metric_def.Id]
def _get_metric_def(self, metric_def):
metric = self._conn.CIM_BaseMetricDefinition(ElementName=metric_def)
if metric:
return metric[0]
def _get_vm_setting_data(self, vm):
vm_settings = vm.associators(
wmi_result_class=self._VS_SETTING_DATA)
# Avoid snapshots
return [s for s in vm_settings if
s.VirtualSystemType == self._VIRTUAL_SYSTEM_TYPE_REALIZED][0]
def _get_vm_resources(self, vm, resource_class):
setting_data = self._get_vm_setting_data(vm)
return setting_data.associators(wmi_result_class=resource_class)
| [
"ceilometer.openstack.common.log.getLogger",
"ceilometer.openstack.common.gettextutils._",
"wmi.WMI"
] | [((1068, 1095), 'ceilometer.openstack.common.log.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1085, 1095), True, 'from ceilometer.openstack.common import log as logging\n'), ((2286, 2339), 'wmi.WMI', 'wmi.WMI', ([], {'moniker': "('//%s/root/virtualization/v2' % host)"}), "(moniker='//%s/root/virtualization/v2' % host)\n", (2293, 2339), False, 'import wmi\n'), ((2410, 2451), 'wmi.WMI', 'wmi.WMI', ([], {'moniker': "('//%s/root/cimv2' % host)"}), "(moniker='//%s/root/cimv2' % host)\n", (2417, 2451), False, 'import wmi\n'), ((6643, 6674), 'ceilometer.openstack.common.gettextutils._', '_', (['"""VM %s not found on Hyper-V"""'], {}), "('VM %s not found on Hyper-V')\n", (6644, 6674), False, 'from ceilometer.openstack.common.gettextutils import _\n'), ((6740, 6772), 'ceilometer.openstack.common.gettextutils._', '_', (['"""Duplicate VM name found: %s"""'], {}), "('Duplicate VM name found: %s')\n", (6741, 6772), False, 'from ceilometer.openstack.common.gettextutils import _\n')] |
# system
from io import IOBase, StringIO
import os
# 3rd party
import click
# internal
from days import DayFactory
# import logging
# logger = logging.getLogger(__name__)
# logger.setLevel(logging.DEBUG)
# ch = logging.StreamHandler()
# logger.addHandler(ch)
@click.group(invoke_without_command=True)
@click.option('-d', '--day', required=True, type=click.IntRange(1, 31), metavar="<1..31>", help="Day you want to select.")
@click.option('-p', '--puzzle', required=True, type=click.IntRange(1, 2), metavar="<1|2>", help="Puzzle you want to run.")
@click.option('-i', '--input', required=True, type=click.Path(exists=True), help="Path to puzzle data.")
def cli(day: int, puzzle: int, input: str):
filename = os.path.join(input, f"{day:02}_puzzle_{puzzle}.txt")
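    # e.g. day=3, puzzle=1 -> "<input dir>/03_puzzle_1.txt"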
if os.path.exists(filename):
input_stream = open(filename, "r")
else:
input_stream = StringIO('')
avocd = DayFactory(day, input_stream)
try:
print(avocd.run(puzzle))
except NotImplementedError:
print(f"Puzzle {puzzle} for day {day} not implemented.")
if __name__ == "__main__":
# pylint: disable=no-value-for-parameter
cli()
| [
"os.path.exists",
"click.IntRange",
"click.group",
"os.path.join",
"days.DayFactory",
"click.Path",
"io.StringIO"
] | [((266, 306), 'click.group', 'click.group', ([], {'invoke_without_command': '(True)'}), '(invoke_without_command=True)\n', (277, 306), False, 'import click\n'), ((715, 767), 'os.path.join', 'os.path.join', (['input', 'f"""{day:02}_puzzle_{puzzle}.txt"""'], {}), "(input, f'{day:02}_puzzle_{puzzle}.txt')\n", (727, 767), False, 'import os\n'), ((773, 797), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (787, 797), False, 'import os\n'), ((888, 917), 'days.DayFactory', 'DayFactory', (['day', 'input_stream'], {}), '(day, input_stream)\n', (898, 917), False, 'from days import DayFactory\n'), ((865, 877), 'io.StringIO', 'StringIO', (['""""""'], {}), "('')\n", (873, 877), False, 'from io import IOBase, StringIO\n'), ((356, 377), 'click.IntRange', 'click.IntRange', (['(1)', '(31)'], {}), '(1, 31)\n', (370, 377), False, 'import click\n'), ((482, 502), 'click.IntRange', 'click.IntRange', (['(1)', '(2)'], {}), '(1, 2)\n', (496, 502), False, 'import click\n'), ((604, 627), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (614, 627), False, 'import click\n')] |
from matplotlib.colors import LinearSegmentedColormap
from numpy import nan, inf
# Used to reconstruct the colormap in viscm
parameters = {'xp': [-5.4895292543686764, 14.790571669586654, 82.5546687431056, 29.15531114139253, -4.1316769886951761, -13.002076438907238],
'yp': [-35.948168839230306, -42.273376159885785, -28.845467523197698, 52.03426124197, 36.832712600868973, 40.792291220556734],
'min_JK': 16.8314150305,
'max_JK': 95}
cm_data = [[ 5.03832136e-02, 2.98028976e-02, 5.27974883e-01],
[ 6.35363639e-02, 2.84259729e-02, 5.33123681e-01],
[ 7.53531234e-02, 2.72063728e-02, 5.38007001e-01],
[ 8.62217979e-02, 2.61253206e-02, 5.42657691e-01],
[ 9.63786097e-02, 2.51650976e-02, 5.47103487e-01],
[ 1.05979704e-01, 2.43092436e-02, 5.51367851e-01],
[ 1.15123641e-01, 2.35562500e-02, 5.55467728e-01],
[ 1.23902903e-01, 2.28781011e-02, 5.59423480e-01],
[ 1.32380720e-01, 2.22583774e-02, 5.63250116e-01],
[ 1.40603076e-01, 2.16866674e-02, 5.66959485e-01],
[ 1.48606527e-01, 2.11535876e-02, 5.70561711e-01],
[ 1.56420649e-01, 2.06507174e-02, 5.74065446e-01],
[ 1.64069722e-01, 2.01705326e-02, 5.77478074e-01],
[ 1.71573925e-01, 1.97063415e-02, 5.80805890e-01],
[ 1.78950212e-01, 1.92522243e-02, 5.84054243e-01],
[ 1.86212958e-01, 1.88029767e-02, 5.87227661e-01],
[ 1.93374449e-01, 1.83540593e-02, 5.90329954e-01],
[ 2.00445260e-01, 1.79015512e-02, 5.93364304e-01],
[ 2.07434551e-01, 1.74421086e-02, 5.96333341e-01],
[ 2.14350298e-01, 1.69729276e-02, 5.99239207e-01],
[ 2.21196750e-01, 1.64970484e-02, 6.02083323e-01],
[ 2.27982971e-01, 1.60071509e-02, 6.04867403e-01],
[ 2.34714537e-01, 1.55015065e-02, 6.07592438e-01],
[ 2.41396253e-01, 1.49791041e-02, 6.10259089e-01],
[ 2.48032377e-01, 1.44393586e-02, 6.12867743e-01],
[ 2.54626690e-01, 1.38820918e-02, 6.15418537e-01],
[ 2.61182562e-01, 1.33075156e-02, 6.17911385e-01],
[ 2.67702993e-01, 1.27162163e-02, 6.20345997e-01],
[ 2.74190665e-01, 1.21091423e-02, 6.22721903e-01],
[ 2.80647969e-01, 1.14875915e-02, 6.25038468e-01],
[ 2.87076059e-01, 1.08554862e-02, 6.27294975e-01],
[ 2.93477695e-01, 1.02128849e-02, 6.29490490e-01],
[ 2.99855122e-01, 9.56079551e-03, 6.31623923e-01],
[ 3.06209825e-01, 8.90185346e-03, 6.33694102e-01],
[ 3.12543124e-01, 8.23900704e-03, 6.35699759e-01],
[ 3.18856183e-01, 7.57551051e-03, 6.37639537e-01],
[ 3.25150025e-01, 6.91491734e-03, 6.39512001e-01],
[ 3.31425547e-01, 6.26107379e-03, 6.41315649e-01],
[ 3.37683446e-01, 5.61830889e-03, 6.43048936e-01],
[ 3.43924591e-01, 4.99053080e-03, 6.44710195e-01],
[ 3.50149699e-01, 4.38202557e-03, 6.46297711e-01],
[ 3.56359209e-01, 3.79781761e-03, 6.47809772e-01],
[ 3.62553473e-01, 3.24319591e-03, 6.49244641e-01],
[ 3.68732762e-01, 2.72370721e-03, 6.50600561e-01],
[ 3.74897270e-01, 2.24514897e-03, 6.51875762e-01],
[ 3.81047116e-01, 1.81356205e-03, 6.53068467e-01],
[ 3.87182639e-01, 1.43446923e-03, 6.54176761e-01],
[ 3.93304010e-01, 1.11388259e-03, 6.55198755e-01],
[ 3.99410821e-01, 8.59420809e-04, 6.56132835e-01],
[ 4.05502914e-01, 6.78091517e-04, 6.56977276e-01],
[ 4.11580082e-01, 5.77101735e-04, 6.57730380e-01],
[ 4.17642063e-01, 5.63847476e-04, 6.58390492e-01],
[ 4.23688549e-01, 6.45902780e-04, 6.58956004e-01],
[ 4.29719186e-01, 8.31008207e-04, 6.59425363e-01],
[ 4.35733575e-01, 1.12705875e-03, 6.59797077e-01],
[ 4.41732123e-01, 1.53984779e-03, 6.60069009e-01],
[ 4.47713600e-01, 2.07954744e-03, 6.60240367e-01],
[ 4.53677394e-01, 2.75470302e-03, 6.60309966e-01],
[ 4.59622938e-01, 3.57374415e-03, 6.60276655e-01],
[ 4.65549631e-01, 4.54518084e-03, 6.60139383e-01],
[ 4.71456847e-01, 5.67758762e-03, 6.59897210e-01],
[ 4.77343929e-01, 6.97958743e-03, 6.59549311e-01],
[ 4.83210198e-01, 8.45983494e-03, 6.59094989e-01],
[ 4.89054951e-01, 1.01269996e-02, 6.58533677e-01],
[ 4.94877466e-01, 1.19897486e-02, 6.57864946e-01],
[ 5.00677687e-01, 1.40550640e-02, 6.57087561e-01],
[ 5.06454143e-01, 1.63333443e-02, 6.56202294e-01],
[ 5.12206035e-01, 1.88332232e-02, 6.55209222e-01],
[ 5.17932580e-01, 2.15631918e-02, 6.54108545e-01],
[ 5.23632990e-01, 2.45316468e-02, 6.52900629e-01],
[ 5.29306474e-01, 2.77468735e-02, 6.51586010e-01],
[ 5.34952244e-01, 3.12170300e-02, 6.50165396e-01],
[ 5.40569510e-01, 3.49501310e-02, 6.48639668e-01],
[ 5.46157494e-01, 3.89540334e-02, 6.47009884e-01],
[ 5.51715423e-01, 4.31364795e-02, 6.45277275e-01],
[ 5.57242538e-01, 4.73307585e-02, 6.43443250e-01],
[ 5.62738096e-01, 5.15448092e-02, 6.41509389e-01],
[ 5.68201372e-01, 5.57776706e-02, 6.39477440e-01],
[ 5.73631859e-01, 6.00281369e-02, 6.37348841e-01],
[ 5.79028682e-01, 6.42955547e-02, 6.35126108e-01],
[ 5.84391137e-01, 6.85790261e-02, 6.32811608e-01],
[ 5.89718606e-01, 7.28775875e-02, 6.30407727e-01],
[ 5.95010505e-01, 7.71902878e-02, 6.27916992e-01],
[ 6.00266283e-01, 8.15161895e-02, 6.25342058e-01],
[ 6.05485428e-01, 8.58543713e-02, 6.22685703e-01],
[ 6.10667469e-01, 9.02039303e-02, 6.19950811e-01],
[ 6.15811974e-01, 9.45639838e-02, 6.17140367e-01],
[ 6.20918555e-01, 9.89336721e-02, 6.14257440e-01],
[ 6.25986869e-01, 1.03312160e-01, 6.11305174e-01],
[ 6.31016615e-01, 1.07698641e-01, 6.08286774e-01],
[ 6.36007543e-01, 1.12092335e-01, 6.05205491e-01],
[ 6.40959444e-01, 1.16492495e-01, 6.02064611e-01],
[ 6.45872158e-01, 1.20898405e-01, 5.98867442e-01],
[ 6.50745571e-01, 1.25309384e-01, 5.95617300e-01],
[ 6.55579615e-01, 1.29724785e-01, 5.92317494e-01],
[ 6.60374266e-01, 1.34143997e-01, 5.88971318e-01],
[ 6.65129493e-01, 1.38566428e-01, 5.85582301e-01],
[ 6.69845385e-01, 1.42991540e-01, 5.82153572e-01],
[ 6.74522060e-01, 1.47418835e-01, 5.78688247e-01],
[ 6.79159664e-01, 1.51847851e-01, 5.75189431e-01],
[ 6.83758384e-01, 1.56278163e-01, 5.71660158e-01],
[ 6.88318440e-01, 1.60709387e-01, 5.68103380e-01],
[ 6.92840088e-01, 1.65141174e-01, 5.64521958e-01],
[ 6.97323615e-01, 1.69573215e-01, 5.60918659e-01],
[ 7.01769334e-01, 1.74005236e-01, 5.57296144e-01],
[ 7.06177590e-01, 1.78437000e-01, 5.53656970e-01],
[ 7.10548747e-01, 1.82868306e-01, 5.50003579e-01],
[ 7.14883195e-01, 1.87298986e-01, 5.46338299e-01],
[ 7.19181339e-01, 1.91728906e-01, 5.42663338e-01],
[ 7.23443604e-01, 1.96157962e-01, 5.38980786e-01],
[ 7.27670428e-01, 2.00586086e-01, 5.35292612e-01],
[ 7.31862231e-01, 2.05013174e-01, 5.31600995e-01],
[ 7.36019424e-01, 2.09439071e-01, 5.27908434e-01],
[ 7.40142557e-01, 2.13863965e-01, 5.24215533e-01],
[ 7.44232102e-01, 2.18287899e-01, 5.20523766e-01],
[ 7.48288533e-01, 2.22710942e-01, 5.16834495e-01],
[ 7.52312321e-01, 2.27133187e-01, 5.13148963e-01],
[ 7.56303937e-01, 2.31554749e-01, 5.09468305e-01],
[ 7.60263849e-01, 2.35975765e-01, 5.05793543e-01],
[ 7.64192516e-01, 2.40396394e-01, 5.02125599e-01],
[ 7.68090391e-01, 2.44816813e-01, 4.98465290e-01],
[ 7.71957916e-01, 2.49237220e-01, 4.94813338e-01],
[ 7.75795522e-01, 2.53657797e-01, 4.91170517e-01],
[ 7.79603614e-01, 2.58078397e-01, 4.87539124e-01],
[ 7.83382636e-01, 2.62499662e-01, 4.83917732e-01],
[ 7.87132978e-01, 2.66921859e-01, 4.80306702e-01],
[ 7.90855015e-01, 2.71345267e-01, 4.76706319e-01],
[ 7.94549101e-01, 2.75770179e-01, 4.73116798e-01],
[ 7.98215577e-01, 2.80196901e-01, 4.69538286e-01],
[ 8.01854758e-01, 2.84625750e-01, 4.65970871e-01],
[ 8.05466945e-01, 2.89057057e-01, 4.62414580e-01],
[ 8.09052419e-01, 2.93491117e-01, 4.58869577e-01],
[ 8.12611506e-01, 2.97927865e-01, 4.55337565e-01],
[ 8.16144382e-01, 3.02368130e-01, 4.51816385e-01],
[ 8.19651255e-01, 3.06812282e-01, 4.48305861e-01],
[ 8.23132309e-01, 3.11260703e-01, 4.44805781e-01],
[ 8.26587706e-01, 3.15713782e-01, 4.41315901e-01],
[ 8.30017584e-01, 3.20171913e-01, 4.37835947e-01],
[ 8.33422053e-01, 3.24635499e-01, 4.34365616e-01],
[ 8.36801237e-01, 3.29104836e-01, 4.30905052e-01],
[ 8.40155276e-01, 3.33580106e-01, 4.27454836e-01],
[ 8.43484103e-01, 3.38062109e-01, 4.24013059e-01],
[ 8.46787726e-01, 3.42551272e-01, 4.20579333e-01],
[ 8.50066132e-01, 3.47048028e-01, 4.17153264e-01],
[ 8.53319279e-01, 3.51552815e-01, 4.13734445e-01],
[ 8.56547103e-01, 3.56066072e-01, 4.10322469e-01],
[ 8.59749520e-01, 3.60588229e-01, 4.06916975e-01],
[ 8.62926559e-01, 3.65119408e-01, 4.03518809e-01],
[ 8.66077920e-01, 3.69660446e-01, 4.00126027e-01],
[ 8.69203436e-01, 3.74211795e-01, 3.96738211e-01],
[ 8.72302917e-01, 3.78773910e-01, 3.93354947e-01],
[ 8.75376149e-01, 3.83347243e-01, 3.89975832e-01],
[ 8.78422895e-01, 3.87932249e-01, 3.86600468e-01],
[ 8.81442916e-01, 3.92529339e-01, 3.83228622e-01],
[ 8.84435982e-01, 3.97138877e-01, 3.79860246e-01],
[ 8.87401682e-01, 4.01761511e-01, 3.76494232e-01],
[ 8.90339687e-01, 4.06397694e-01, 3.73130228e-01],
[ 8.93249647e-01, 4.11047871e-01, 3.69767893e-01],
[ 8.96131191e-01, 4.15712489e-01, 3.66406907e-01],
[ 8.98983931e-01, 4.20391986e-01, 3.63046965e-01],
[ 9.01807455e-01, 4.25086807e-01, 3.59687758e-01],
[ 9.04601295e-01, 4.29797442e-01, 3.56328796e-01],
[ 9.07364995e-01, 4.34524335e-01, 3.52969777e-01],
[ 9.10098088e-01, 4.39267908e-01, 3.49610469e-01],
[ 9.12800095e-01, 4.44028574e-01, 3.46250656e-01],
[ 9.15470518e-01, 4.48806744e-01, 3.42890148e-01],
[ 9.18108848e-01, 4.53602818e-01, 3.39528771e-01],
[ 9.20714383e-01, 4.58417420e-01, 3.36165582e-01],
[ 9.23286660e-01, 4.63250828e-01, 3.32800827e-01],
[ 9.25825146e-01, 4.68103387e-01, 3.29434512e-01],
[ 9.28329275e-01, 4.72975465e-01, 3.26066550e-01],
[ 9.30798469e-01, 4.77867420e-01, 3.22696876e-01],
[ 9.33232140e-01, 4.82779603e-01, 3.19325444e-01],
[ 9.35629684e-01, 4.87712357e-01, 3.15952211e-01],
[ 9.37990034e-01, 4.92666544e-01, 3.12575440e-01],
[ 9.40312939e-01, 4.97642038e-01, 3.09196628e-01],
[ 9.42597771e-01, 5.02639147e-01, 3.05815824e-01],
[ 9.44843893e-01, 5.07658169e-01, 3.02433101e-01],
[ 9.47050662e-01, 5.12699390e-01, 2.99048555e-01],
[ 9.49217427e-01, 5.17763087e-01, 2.95662308e-01],
[ 9.51343530e-01, 5.22849522e-01, 2.92274506e-01],
[ 9.53427725e-01, 5.27959550e-01, 2.88883445e-01],
[ 9.55469640e-01, 5.33093083e-01, 2.85490391e-01],
[ 9.57468770e-01, 5.38250172e-01, 2.82096149e-01],
[ 9.59424430e-01, 5.43431038e-01, 2.78700990e-01],
[ 9.61335930e-01, 5.48635890e-01, 2.75305214e-01],
[ 9.63202573e-01, 5.53864931e-01, 2.71909159e-01],
[ 9.65023656e-01, 5.59118349e-01, 2.68513200e-01],
[ 9.66798470e-01, 5.64396327e-01, 2.65117752e-01],
[ 9.68525639e-01, 5.69699633e-01, 2.61721488e-01],
[ 9.70204593e-01, 5.75028270e-01, 2.58325424e-01],
[ 9.71835007e-01, 5.80382015e-01, 2.54931256e-01],
[ 9.73416145e-01, 5.85761012e-01, 2.51539615e-01],
[ 9.74947262e-01, 5.91165394e-01, 2.48151200e-01],
[ 9.76427606e-01, 5.96595287e-01, 2.44766775e-01],
[ 9.77856416e-01, 6.02050811e-01, 2.41387186e-01],
[ 9.79232922e-01, 6.07532077e-01, 2.38013359e-01],
[ 9.80556344e-01, 6.13039190e-01, 2.34646316e-01],
[ 9.81825890e-01, 6.18572250e-01, 2.31287178e-01],
[ 9.83040742e-01, 6.24131362e-01, 2.27937141e-01],
[ 9.84198924e-01, 6.29717516e-01, 2.24595006e-01],
[ 9.85300760e-01, 6.35329876e-01, 2.21264889e-01],
[ 9.86345421e-01, 6.40968508e-01, 2.17948456e-01],
[ 9.87332067e-01, 6.46633475e-01, 2.14647532e-01],
[ 9.88259846e-01, 6.52324832e-01, 2.11364122e-01],
[ 9.89127893e-01, 6.58042630e-01, 2.08100426e-01],
[ 9.89935328e-01, 6.63786914e-01, 2.04858855e-01],
[ 9.90681261e-01, 6.69557720e-01, 2.01642049e-01],
[ 9.91364787e-01, 6.75355082e-01, 1.98452900e-01],
[ 9.91984990e-01, 6.81179025e-01, 1.95294567e-01],
[ 9.92540939e-01, 6.87029567e-01, 1.92170500e-01],
[ 9.93031693e-01, 6.92906719e-01, 1.89084459e-01],
[ 9.93456302e-01, 6.98810484e-01, 1.86040537e-01],
[ 9.93813802e-01, 7.04740854e-01, 1.83043180e-01],
[ 9.94103226e-01, 7.10697814e-01, 1.80097207e-01],
[ 9.94323596e-01, 7.16681336e-01, 1.77207826e-01],
[ 9.94473934e-01, 7.22691379e-01, 1.74380656e-01],
[ 9.94553260e-01, 7.28727890e-01, 1.71621733e-01],
[ 9.94560594e-01, 7.34790799e-01, 1.68937522e-01],
[ 9.94494964e-01, 7.40880020e-01, 1.66334918e-01],
[ 9.94355411e-01, 7.46995448e-01, 1.63821243e-01],
[ 9.94140989e-01, 7.53136955e-01, 1.61404226e-01],
[ 9.93850778e-01, 7.59304390e-01, 1.59091984e-01],
[ 9.93482190e-01, 7.65498551e-01, 1.56890625e-01],
[ 9.93033251e-01, 7.71719833e-01, 1.54807583e-01],
[ 9.92505214e-01, 7.77966775e-01, 1.52854862e-01],
[ 9.91897270e-01, 7.84239120e-01, 1.51041581e-01],
[ 9.91208680e-01, 7.90536569e-01, 1.49376885e-01],
[ 9.90438793e-01, 7.96858775e-01, 1.47869810e-01],
[ 9.89587065e-01, 8.03205337e-01, 1.46529128e-01],
[ 9.88647741e-01, 8.09578605e-01, 1.45357284e-01],
[ 9.87620557e-01, 8.15977942e-01, 1.44362644e-01],
[ 9.86509366e-01, 8.22400620e-01, 1.43556679e-01],
[ 9.85314198e-01, 8.28845980e-01, 1.42945116e-01],
[ 9.84031139e-01, 8.35315360e-01, 1.42528388e-01],
[ 9.82652820e-01, 8.41811730e-01, 1.42302653e-01],
[ 9.81190389e-01, 8.48328902e-01, 1.42278607e-01],
[ 9.79643637e-01, 8.54866468e-01, 1.42453425e-01],
[ 9.77994918e-01, 8.61432314e-01, 1.42808191e-01],
[ 9.76264977e-01, 8.68015998e-01, 1.43350944e-01],
[ 9.74443038e-01, 8.74622194e-01, 1.44061156e-01],
[ 9.72530009e-01, 8.81250063e-01, 1.44922913e-01],
[ 9.70532932e-01, 8.87896125e-01, 1.45918663e-01],
[ 9.68443477e-01, 8.94563989e-01, 1.47014438e-01],
[ 9.66271225e-01, 9.01249365e-01, 1.48179639e-01],
[ 9.64021057e-01, 9.07950379e-01, 1.49370428e-01],
[ 9.61681481e-01, 9.14672479e-01, 1.50520343e-01],
[ 9.59275646e-01, 9.21406537e-01, 1.51566019e-01],
[ 9.56808068e-01, 9.28152065e-01, 1.52409489e-01],
[ 9.54286813e-01, 9.34907730e-01, 1.52921158e-01],
[ 9.51726083e-01, 9.41670605e-01, 1.52925363e-01],
[ 9.49150533e-01, 9.48434900e-01, 1.52177604e-01],
[ 9.46602270e-01, 9.55189860e-01, 1.50327944e-01],
[ 9.44151742e-01, 9.61916487e-01, 1.46860789e-01],
[ 9.41896120e-01, 9.68589814e-01, 1.40955606e-01],
[ 9.40015097e-01, 9.75158357e-01, 1.31325517e-01]]
test_cm = LinearSegmentedColormap.from_list(__file__, cm_data)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from viscm import viscm
viscm(test_cm)
except ImportError:
print("viscm not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=test_cm)
plt.show()
| [
"viscm.viscm",
"numpy.linspace",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"matplotlib.pyplot.show"
] | [((16621, 16673), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'LinearSegmentedColormap.from_list', (['__file__', 'cm_data'], {}), '(__file__, cm_data)\n', (16654, 16673), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((17022, 17032), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17030, 17032), True, 'import matplotlib.pyplot as plt\n'), ((16812, 16826), 'viscm.viscm', 'viscm', (['test_cm'], {}), '(test_cm)\n', (16817, 16826), False, 'from viscm import viscm\n'), ((16935, 16959), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(256)'], {}), '(0, 100, 256)\n', (16946, 16959), True, 'import numpy as np\n')] |
from sqlalchemy import Integer, Text, DateTime, func, Boolean, text
from models.database_models import Base, Column
class Comment(Base):
__tablename__ = "comment"
id = Column(Integer, primary_key=True, )
user_id = Column(Integer, nullable=False, comment="่ฏ่ฎบ็จๆท็ ID")
post_id = Column(Integer, nullable=False, comment="Post ๆ็ซ ็ ID")
content = Column(Text, nullable=False, comment="็จๆท็่ฏ่ฎบ")
create_time = Column(DateTime, server_default=func.now(), comment="ๅๅปบๆถ้ด")
update_time = Column(DateTime, server_default=func.now(), onupdate=func.now(), comment="ๆดๆฐๆถ้ด")
deleted = Column(Boolean, default=False, server_default=text('0'), nullable=False, comment="่ฏฅ้กน็ฎๆฏๅฆ่ขซๅ ้ค")
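    # English gloss of the Chinese column comments above, in order: "ID of the
    # commenting user", "ID of the Post article", "the user's comment",
    # "creation time", "update time", "whether this item has been deleted".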
| [
"models.database_models.Column",
"sqlalchemy.func.now",
"sqlalchemy.text"
] | [((180, 213), 'models.database_models.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (186, 213), False, 'from models.database_models import Base, Column\n'), ((230, 281), 'models.database_models.Column', 'Column', (['Integer'], {'nullable': '(False)', 'comment': '"""่ฏ่ฎบ็จๆท็ ID"""'}), "(Integer, nullable=False, comment='่ฏ่ฎบ็จๆท็ ID')\n", (236, 281), False, 'from models.database_models import Base, Column\n'), ((296, 350), 'models.database_models.Column', 'Column', (['Integer'], {'nullable': '(False)', 'comment': '"""Post ๆ็ซ ็ ID"""'}), "(Integer, nullable=False, comment='Post ๆ็ซ ็ ID')\n", (302, 350), False, 'from models.database_models import Base, Column\n'), ((365, 410), 'models.database_models.Column', 'Column', (['Text'], {'nullable': '(False)', 'comment': '"""็จๆท็่ฏ่ฎบ"""'}), "(Text, nullable=False, comment='็จๆท็่ฏ่ฎบ')\n", (371, 410), False, 'from models.database_models import Base, Column\n'), ((462, 472), 'sqlalchemy.func.now', 'func.now', ([], {}), '()\n', (470, 472), False, 'from sqlalchemy import Integer, Text, DateTime, func, Boolean, text\n'), ((540, 550), 'sqlalchemy.func.now', 'func.now', ([], {}), '()\n', (548, 550), False, 'from sqlalchemy import Integer, Text, DateTime, func, Boolean, text\n'), ((561, 571), 'sqlalchemy.func.now', 'func.now', ([], {}), '()\n', (569, 571), False, 'from sqlalchemy import Integer, Text, DateTime, func, Boolean, text\n'), ((649, 658), 'sqlalchemy.text', 'text', (['"""0"""'], {}), "('0')\n", (653, 658), False, 'from sqlalchemy import Integer, Text, DateTime, func, Boolean, text\n')] |
from typing import Callable, Collection, Iterable, List, Union
from data.anagram import anagram_iter
from data.graph import _op_mixin, bloom_mask, bloom_node, bloom_node_reducer
Transformer = Callable[['bloom_node.BloomNode'], 'bloom_node.BloomNode']
_SPACE_MASK = bloom_mask.for_alpha(' ')
def merge_fn(
host: 'bloom_node.BloomNode',
sources: List['bloom_node.BloomNode'],
extra: list,
whitelist: Collection = None,
blacklist: Collection = None,
**kwargs) -> None:
del kwargs
assert len(sources) == 1
exit_node = sources[0]
assert len(extra) == 1
state = _normalize_state(exit_node, extra[0])
children = list(state)
# TODO: Need a cleaner way to inject and rerun these nodes.
if len(children) == 1:
host.op = _op_mixin.Op(_op_mixin.OP_IDENTITY, children)
else:
host.op = _op_mixin.Op(_op_mixin.OP_ADD, children)
# HACK: This duplicates BloomNode._expand, essentially.
for key, reduced in bloom_node_reducer.reduce(
host, whitelist=whitelist, blacklist=blacklist):
host.link(key, reduced)
class _AnagramTransformIndex(object):
"""Singleton object used during anagram traversal."""
def __init__(
self,
exit_node: 'bloom_node.BloomNode',
root: anagram_iter.AnagramIter) -> None:
self._exit_node = exit_node
reference = bloom_node.BloomNode()
reference.distance(0)
reference.weight(1, True)
reference_choice_paths = {}
for choice, _ in root.available():
reference_choice_paths[choice] = choice(reference)
self._reference_choice_paths = reference_choice_paths
self._child_cache = {}
def iter(
self,
anagrams: anagram_iter.AnagramIter,
) -> Iterable['bloom_node.BloomNode']:
for child_choice, child_anagrams in anagrams.items():
key = (child_choice, child_anagrams)
if key not in self._child_cache:
self._child_cache[key] = self._make_child(child_choice, child_anagrams)
yield self._child_cache[key]
def _make_child(
self,
choice: Transformer,
anagrams: anagram_iter.AnagramIter) -> 'bloom_node.BloomNode':
children = list(anagrams.available())
if not children:
return choice(self._exit_node)
elif len(children) == 1:
child_choice, child_duplicates = children[0]
node = self._exit_node
while child_duplicates:
node = child_choice(node)
child_duplicates -= 1
return choice(node)
# Compute requirements from exits.
node = self._exit_node // _AnagramState(self, anagrams)
node.provide_mask = self._exit_node.provide_mask
node.require_mask = self._exit_node.require_mask
node.lengths_mask = self._exit_node.lengths_mask
node.annotate({'anagrams': anagrams})
node.max_weight = self._exit_node.max_weight
nodes_with_spaces = []
for child_choice, child_duplicates in children:
path = self._reference_choice_paths[child_choice]
if path.require_mask and path.require_mask & _SPACE_MASK:
nodes_with_spaces.append(path)
node.provide_mask |= path.provide_mask
node.require_mask |= path.require_mask
node.lengths_mask = bloom_mask.lengths_product(
node.lengths_mask, path.lengths_mask, duplicates=child_duplicates)
if nodes_with_spaces:
# Distance and provide masks should be correct. Reset required values.
# Any route to any of the spaces is now okay but 1+ must be taken.
node.require_mask = bloom_mask.REQUIRE_NOTHING
for node_with_spaces in nodes_with_spaces:
# Only require what all node_with_spaces require.
node.require_mask &= node_with_spaces.require_mask
return choice(node)
class _AnagramState(object):
def __init__(
self,
index: _AnagramTransformIndex,
anagrams: anagram_iter.AnagramIter):
self._index = index
self._anagrams = anagrams
def __iter__(self) -> Iterable['bloom_node.BloomNode']:
yield from self._index.iter(self._anagrams)
def __repr__(self) -> str:
return '_AnagramState(%s)' % self._anagrams
__str__ = __repr__
def _normalize_state(
exit_node: 'bloom_node.BloomNode',
index: Union[Iterable, anagram_iter.AnagramIter]) -> _AnagramState:
if isinstance(index, _AnagramState):
return index
# `index` is an iterable list of ???, one-by-one these will be taken as a
# route to the `exit_node`.
initial_anagrams = anagram_iter.from_choices(index)
index = _AnagramTransformIndex(exit_node, initial_anagrams)
return _AnagramState(index, initial_anagrams)
| [
"data.graph.bloom_mask.lengths_product",
"data.anagram.anagram_iter.from_choices",
"data.graph.bloom_mask.for_alpha",
"data.graph._op_mixin.Op",
"data.graph.bloom_node_reducer.reduce",
"data.graph.bloom_node.BloomNode"
] | [((267, 292), 'data.graph.bloom_mask.for_alpha', 'bloom_mask.for_alpha', (['""" """'], {}), "(' ')\n", (287, 292), False, 'from data.graph import _op_mixin, bloom_mask, bloom_node, bloom_node_reducer\n'), ((946, 1019), 'data.graph.bloom_node_reducer.reduce', 'bloom_node_reducer.reduce', (['host'], {'whitelist': 'whitelist', 'blacklist': 'blacklist'}), '(host, whitelist=whitelist, blacklist=blacklist)\n', (971, 1019), False, 'from data.graph import _op_mixin, bloom_mask, bloom_node, bloom_node_reducer\n'), ((4373, 4405), 'data.anagram.anagram_iter.from_choices', 'anagram_iter.from_choices', (['index'], {}), '(index)\n', (4398, 4405), False, 'from data.anagram import anagram_iter\n'), ((757, 802), 'data.graph._op_mixin.Op', '_op_mixin.Op', (['_op_mixin.OP_IDENTITY', 'children'], {}), '(_op_mixin.OP_IDENTITY, children)\n', (769, 802), False, 'from data.graph import _op_mixin, bloom_mask, bloom_node, bloom_node_reducer\n'), ((825, 865), 'data.graph._op_mixin.Op', '_op_mixin.Op', (['_op_mixin.OP_ADD', 'children'], {}), '(_op_mixin.OP_ADD, children)\n', (837, 865), False, 'from data.graph import _op_mixin, bloom_mask, bloom_node, bloom_node_reducer\n'), ((1316, 1338), 'data.graph.bloom_node.BloomNode', 'bloom_node.BloomNode', ([], {}), '()\n', (1336, 1338), False, 'from data.graph import _op_mixin, bloom_mask, bloom_node, bloom_node_reducer\n'), ((3131, 3229), 'data.graph.bloom_mask.lengths_product', 'bloom_mask.lengths_product', (['node.lengths_mask', 'path.lengths_mask'], {'duplicates': 'child_duplicates'}), '(node.lengths_mask, path.lengths_mask, duplicates\n =child_duplicates)\n', (3157, 3229), False, 'from data.graph import _op_mixin, bloom_mask, bloom_node, bloom_node_reducer\n')] |
import pathlib
import os
from setuptools import setup
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
# specify requirements of your package here
REQUIREMENTS = ['biopython', 'numpy', 'pandas']
setup(name='stacksPairwise',
version='0.0.0',
      description='Calculate pairwise divergence (pairwise pi) from Stacks `samples.fa` output file',
long_description=README,
long_description_content_type="text/markdown",
url='https://github.com/gibsonmatt/stacks-pairwise',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
packages=['stacksPairwise'],
install_requires=REQUIREMENTS,
entry_points={
"console_scripts": [
"stacksPairwise=stacksPairwise.__main__:main"
]
},
keywords='genetics genotyping sequencing Stacks'
)
| [
"setuptools.setup",
"pathlib.Path"
] | [((296, 843), 'setuptools.setup', 'setup', ([], {'name': '"""stacksPairwise"""', 'version': '"""0.0.0"""', 'description': '"""Calculate pairwise divergence (pairwise pi) from Stacks `samples.fa` output fle"""', 'long_description': 'README', 'long_description_content_type': '"""text/markdown"""', 'url': '"""https://github.com/gibsonmatt/stacks-pairwise"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'license': '"""MIT"""', 'packages': "['stacksPairwise']", 'install_requires': 'REQUIREMENTS', 'entry_points': "{'console_scripts': ['stacksPairwise=stacksPairwise.__main__:main']}", 'keywords': '"""genetics genotyping sequencing Stacks"""'}), "(name='stacksPairwise', version='0.0.0', description=\n 'Calculate pairwise divergence (pairwise pi) from Stacks `samples.fa` output fle'\n , long_description=README, long_description_content_type=\n 'text/markdown', url='https://github.com/gibsonmatt/stacks-pairwise',\n author='<NAME>', author_email='<EMAIL>', license='MIT', packages=[\n 'stacksPairwise'], install_requires=REQUIREMENTS, entry_points={\n 'console_scripts': ['stacksPairwise=stacksPairwise.__main__:main']},\n keywords='genetics genotyping sequencing Stacks')\n", (301, 843), False, 'from setuptools import setup\n'), ((99, 121), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (111, 121), False, 'import pathlib\n')] |
from numpy import array
from pickle import load
from pandas import read_csv
import os
from BioCAT.src.Combinatorics import multi_thread_shuffling, multi_thread_calculating_scores, make_combine, get_score, get_max_aminochain, skipper
# Importing random forest model
modelpath = os.path.dirname(os.path.abspath(__file__)) + '/RFC.dump'
Rf = load(open(modelpath, 'rb'))
# The function generates a list of shuffled matrices
def make_shuffle_matrix(matrix, cpu, iterat):
"""
    The function generates arrays of shuffled matrices.
Parameters
----------
matrix : pandas DataFrame
PSSM profile.
cpu : int
        Number of threads used.
iterat : int
Number of iterations of shuffling.
Returns
-------
module_shuffling_matrix : list
List of matrix, shuffled by module.
substrate_shuffling_matrix : list
List of matrix, shuffled by substrate.
"""
module_shuffling_matrix = multi_thread_shuffling(matrix, ShufflingType='module', iterations=iterat, threads=cpu)
substrate_shuffling_matrix = multi_thread_shuffling(matrix, ShufflingType='substrate', iterations=iterat, threads=cpu)
return module_shuffling_matrix, substrate_shuffling_matrix
# The function finds the sequence with the maximum possible value resulting from alignment
def get_MaxSeq(matrix, variant_seq):
"""
    The function finds the sequence of substrates with the maximum possible alignment score.
Parameters
----------
matrix : pandas DataFrame
PSSM profile.
variant_seq : list
Variant of core peptide chain.
Returns
-------
    MaxSeq_full : list
        Sequence of monomers with the maximum alignment score.
    MaxSeq_nan : list
        The same sequence with 'nan' in positions that are 'nan' in variant_seq.
"""
MaxSeq = []
subs = matrix.keys()[1: ]
    # Find the sequence which has the maximum alignment score
for idx in matrix.index:
MAX_value = max(list(matrix.iloc[idx][1:]))
for key in subs:
if matrix[key][idx] == MAX_value:
                MaxSeq.append(key) # If two monomers have the same value, take the first one
break
# Making two variants of MaxSeq
MaxSeq_full = MaxSeq.copy()
MaxSeq_nan = MaxSeq.copy()
for max_sub_idx in range(len(MaxSeq)):
if variant_seq[max_sub_idx] == 'nan':
MaxSeq_nan[max_sub_idx] = 'nan' # Adding nan to MaxSeq
return MaxSeq_full, MaxSeq_nan
# The function gives information about clusters
def get_cluster_info(table, BGC_ID, target_file):
"""
    The function returns information about the cluster.
Parameters
----------
table : pandas DataFrame
        Table with meta information about NRPS clusters.
BGC_ID : str
PSSM cluster ID.
    target_file : str
        Name of the PSSM profile file.
Returns
-------
Name : str
Cluster ID.
Coord_cluster : str
Coordinate of cluster.
strand : str
Strand of cluster.
"""
for ind in table[table['ID'].str.contains(BGC_ID)].index:
Name = table[table['ID'].str.contains(target_file.split('.')[0].split('_A_')[1])]['Name'][ind]
Coord_cluster = table['Coordinates of cluster'][ind]
strand = table['Gen strand'][ind]
break
return Name, Coord_cluster, strand
# Calculate scores
def calculate_scores(variant_seq, matrix, substrate_shuffling_matrix, module_shuffling_matrix, cpu, iterat):
"""
Calculating scores.
Parameters
----------
variant_seq : list
Variant of core peptide chain.
matrix : pandas DataFrame
PSSM profile.
substrate_shuffling_matrix : list
List of matrix, shuffled by substrate.
module_shuffling_matrix : list
List of matrix, shuffled by module.
cpu : int
Number of threads used.
iterat : int
Number of iterations of shuffling.
Returns
-------
Sln_score : float
Mln_score : float
Slt_score : float
Mlt_score : float
Sdn_score : float
Mdn_score : float
Sdt_score : float
Mdt_score : float
Scores, which calculated with shuffling matrix by different variants.
M - module shuffling S - substrate shuffling
l - logarithmic transformation of score d - raw score
n - MaxSeq with nan replacement t - MaxSeq without nan replacement
Relative_score : float
Relative score (Probability of target class)
Binary : float
Binary score of cluster matching.
"""
    # Finding the sequence with the maximum possible value resulting from alignment
MaxSeq_full, MaxSeq_nan = get_MaxSeq(matrix, variant_seq)
# Calculating shuffled scores
Sln_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_nan, substrate_shuffling_matrix, type_value='log', iterations=iterat, threads=cpu))
Mln_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_nan, module_shuffling_matrix, type_value='log', iterations=iterat, threads=cpu))
Slt_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_full, substrate_shuffling_matrix, type_value='log', iterations=iterat, threads=cpu))
Mlt_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_full, module_shuffling_matrix, type_value='log', iterations=iterat, threads=cpu))
Sdn_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_nan, substrate_shuffling_matrix, type_value=None, iterations=iterat, threads=cpu))
Mdn_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_nan, module_shuffling_matrix, type_value=None, iterations=iterat, threads=cpu))
Sdt_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_full, substrate_shuffling_matrix, type_value=None, iterations=iterat, threads=cpu))
Mdt_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_full, module_shuffling_matrix, type_value=None, iterations=iterat, threads=cpu))
# Calculating scores for target sequence
log_target_score = get_score(variant_seq, matrix, type_value='log')
non_log_target_score = get_score(variant_seq, matrix, type_value=None)
# Calculating features scores
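    # Each feature score is the fraction of shuffled (background) scores that fall
    # below the corresponding target score, i.e. an empirical p-value-like value in [0, 1].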
Sln_score = len(Sln_shuffled_score[Sln_shuffled_score < log_target_score])/len(Sln_shuffled_score)
Mln_score = len(Mln_shuffled_score[Mln_shuffled_score < log_target_score])/len(Mln_shuffled_score)
Slt_score = len(Slt_shuffled_score[Slt_shuffled_score < log_target_score])/len(Slt_shuffled_score)
Mlt_score = len(Mlt_shuffled_score[Mlt_shuffled_score < log_target_score])/len(Mlt_shuffled_score)
Sdn_score = len(Sdn_shuffled_score[Sdn_shuffled_score < non_log_target_score])/len(Sdn_shuffled_score)
Mdn_score = len(Mdn_shuffled_score[Mdn_shuffled_score < non_log_target_score])/len(Mdn_shuffled_score)
Sdt_score = len(Sdt_shuffled_score[Sdt_shuffled_score < non_log_target_score])/len(Sdt_shuffled_score)
Mdt_score = len(Mdt_shuffled_score[Mdt_shuffled_score < non_log_target_score])/len(Mdt_shuffled_score)
# Calculating Relative score
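    # predict_proba returns [P(non-match), P(match)] for the single sample,
    # so [0][1] is the probability of the target (match) class.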
Relative_score = round(Rf.predict_proba([[Sln_score, Mln_score,
Sdn_score, Mdn_score,
Sdt_score, Mdt_score,
Slt_score, Mlt_score
]])[0][1], 3)
Binary = Rf.predict([[Sln_score, Mln_score,
Sdn_score, Mdn_score,
Sdt_score, Mdt_score,
Slt_score, Mlt_score
]])[0]
return Sln_score, Mln_score, Slt_score, Mlt_score, Sdn_score, Mdn_score, Sdt_score, Mdt_score, Relative_score, Binary
def give_results(tsv_out, folder, files, table, ID, PeptideSeq, skip, cpu, iterat):
"""
    The function calculates scores for every PSSM profile and records the results.
Parameters
----------
tsv_out : dict
Empty dictionary for adding results.
folder : str
Path to PSSMs.
files : list
List of PSSMs.
table : pandas DataFrame
        Table with meta information about NRPS clusters.
ID : str
Name of substance.
PeptideSeq : dict
Core peptide chains for different biosynthesis types (e.g. A, B, or C).
    skip : int
        Number of presumptive skips.
cpu : int
Number of threads used.
iterat : int
Number of iterations of shuffling.
Returns
-------
tsv_out : dict
Full dictionary for adding results.
"""
for target_file in files:
try:
BGC_ID = target_file.split('.')[0].split('_A_')[1]
except:
continue
if '_A_' not in target_file:
continue
Name, Coord_cluster, strand = get_cluster_info(table, BGC_ID, target_file) # Getting information about cluster
BGC = read_csv(folder + target_file, sep='\t')
# Skipping mode
if skip == 0:
BGC = [BGC]
else:
            BGC = skipper(BGC, skip)
for matrix in BGC:
# Check quality of matrix
if len(matrix) == 1:
continue
check = 0
values = matrix.drop(matrix.columns[0], axis=1).values
for i in values:
if all(i) == 0:
check += 1
            if check == len(values): # If this condition is True, the matrix consists only of unrecognized monomers
continue
# Generating shuffling matrix
module_shuffling_matrix, substrate_shuffling_matrix = make_shuffle_matrix(matrix, cpu, iterat)
            for BS_type in PeptideSeq: # For every biosynthesis profile pathway
                if PeptideSeq[BS_type] is None: # If the sequence contains only nan monomers
                    continue
                if len(PeptideSeq[BS_type]) == 0: # If there is no variant
continue
# Check correctness of PeptideSeq
                length_max = get_max_aminochain(PeptideSeq[BS_type])
                EPs = make_combine(PeptideSeq[BS_type], length_max, matrix, delta=3)
                if EPs is None: # If the sequence length can't be scaled to the cluster size
continue
for variant_seq in EPs:
Sln_score, Mln_score, Slt_score, Mlt_score, Sdn_score, Mdn_score, Sdt_score, Mdt_score, Relative_score, Binary = calculate_scores(variant_seq, matrix, substrate_shuffling_matrix, module_shuffling_matrix, cpu, iterat)
                    # Recording results to the dictionary
tsv_out['Chromosome ID'].append(Name)
tsv_out['Coordinates of cluster'].append(Coord_cluster)
tsv_out['Strand'].append(strand)
tsv_out['Substance'].append(ID)
tsv_out['BGC ID'].append(BGC_ID)
tsv_out['Putative linearized NRP sequence'].append('--'.join(variant_seq))
tsv_out['Biosynthesis profile'].append('Type {}'.format(BS_type))
                    tsv_out['Sln score'].append(Sln_score) # shuffling substrates in matrix with log score and nan in maximally possible sequence
                    tsv_out['Mln score'].append(Mln_score) # shuffling modules matrix with log score and nan in maximally possible sequence
                    tsv_out['Sdn score'].append(Sdn_score) # shuffling substrates matrix without log score and nan in maximally possible sequence
                    tsv_out['Mdn score'].append(Mdn_score) # shuffling modules matrix without log score and nan in maximally possible sequence
                    tsv_out['Sdt score'].append(Sdt_score) # shuffling substrates matrix without log score in maximally possible sequence
                    tsv_out['Mdt score'].append(Mdt_score) # shuffling modules matrix without log score in maximally possible sequence
                    tsv_out['Slt score'].append(Slt_score) # shuffling substrates matrix with log score in maximally possible sequence
                    tsv_out['Mlt score'].append(Mlt_score) # shuffling modules matrix with log score in maximally possible sequence
tsv_out['Relative score'].append(Relative_score) #Final score
tsv_out['Binary'].append(Binary) #Binary value
return tsv_out
| [
"BioCAT.src.Combinatorics.multi_thread_shuffling",
"BioCAT.src.Combinatorics.multi_thread_calculating_scores",
"pandas.read_csv",
"BioCAT.src.Combinatorics.get_max_aminochain",
"BioCAT.src.Combinatorics.skipper",
"BioCAT.src.Combinatorics.make_combine",
"BioCAT.src.Combinatorics.get_score",
"os.path.abspath"
] | [((939, 1029), 'BioCAT.src.Combinatorics.multi_thread_shuffling', 'multi_thread_shuffling', (['matrix'], {'ShufflingType': '"""module"""', 'iterations': 'iterat', 'threads': 'cpu'}), "(matrix, ShufflingType='module', iterations=iterat,\n threads=cpu)\n", (961, 1029), False, 'from BioCAT.src.Combinatorics import multi_thread_shuffling, multi_thread_calculating_scores, make_combine, get_score, get_max_aminochain, skipper\n'), ((1059, 1152), 'BioCAT.src.Combinatorics.multi_thread_shuffling', 'multi_thread_shuffling', (['matrix'], {'ShufflingType': '"""substrate"""', 'iterations': 'iterat', 'threads': 'cpu'}), "(matrix, ShufflingType='substrate', iterations=iterat,\n threads=cpu)\n", (1081, 1152), False, 'from BioCAT.src.Combinatorics import multi_thread_shuffling, multi_thread_calculating_scores, make_combine, get_score, get_max_aminochain, skipper\n'), ((5872, 5920), 'BioCAT.src.Combinatorics.get_score', 'get_score', (['variant_seq', 'matrix'], {'type_value': '"""log"""'}), "(variant_seq, matrix, type_value='log')\n", (5881, 5920), False, 'from BioCAT.src.Combinatorics import multi_thread_shuffling, multi_thread_calculating_scores, make_combine, get_score, get_max_aminochain, skipper\n'), ((5948, 5995), 'BioCAT.src.Combinatorics.get_score', 'get_score', (['variant_seq', 'matrix'], {'type_value': 'None'}), '(variant_seq, matrix, type_value=None)\n', (5957, 5995), False, 'from BioCAT.src.Combinatorics import multi_thread_shuffling, multi_thread_calculating_scores, make_combine, get_score, get_max_aminochain, skipper\n'), ((295, 320), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (310, 320), False, 'import os\n'), ((4615, 4740), 'BioCAT.src.Combinatorics.multi_thread_calculating_scores', 'multi_thread_calculating_scores', (['MaxSeq_nan', 'substrate_shuffling_matrix'], {'type_value': '"""log"""', 'iterations': 'iterat', 'threads': 'cpu'}), "(MaxSeq_nan, substrate_shuffling_matrix,\n type_value='log', iterations=iterat, threads=cpu)\n", (4646, 4740), False, 'from BioCAT.src.Combinatorics import multi_thread_shuffling, multi_thread_calculating_scores, make_combine, get_score, get_max_aminochain, skipper\n'), ((4769, 4891), 'BioCAT.src.Combinatorics.multi_thread_calculating_scores', 'multi_thread_calculating_scores', (['MaxSeq_nan', 'module_shuffling_matrix'], {'type_value': '"""log"""', 'iterations': 'iterat', 'threads': 'cpu'}), "(MaxSeq_nan, module_shuffling_matrix,\n type_value='log', iterations=iterat, threads=cpu)\n", (4800, 4891), False, 'from BioCAT.src.Combinatorics import multi_thread_shuffling, multi_thread_calculating_scores, make_combine, get_score, get_max_aminochain, skipper\n'), ((4920, 5046), 'BioCAT.src.Combinatorics.multi_thread_calculating_scores', 'multi_thread_calculating_scores', (['MaxSeq_full', 'substrate_shuffling_matrix'], {'type_value': '"""log"""', 'iterations': 'iterat', 'threads': 'cpu'}), "(MaxSeq_full, substrate_shuffling_matrix,\n type_value='log', iterations=iterat, threads=cpu)\n", (4951, 5046), False, 'from BioCAT.src.Combinatorics import multi_thread_shuffling, multi_thread_calculating_scores, make_combine, get_score, get_max_aminochain, skipper\n'), ((5075, 5198), 'BioCAT.src.Combinatorics.multi_thread_calculating_scores', 'multi_thread_calculating_scores', (['MaxSeq_full', 'module_shuffling_matrix'], {'type_value': '"""log"""', 'iterations': 'iterat', 'threads': 'cpu'}), "(MaxSeq_full, module_shuffling_matrix,\n type_value='log', iterations=iterat, threads=cpu)\n", (5106, 5198), False, 'from BioCAT.src.Combinatorics import 
multi_thread_shuffling, multi_thread_calculating_scores, make_combine, get_score, get_max_aminochain, skipper\n'), ((5227, 5351), 'BioCAT.src.Combinatorics.multi_thread_calculating_scores', 'multi_thread_calculating_scores', (['MaxSeq_nan', 'substrate_shuffling_matrix'], {'type_value': 'None', 'iterations': 'iterat', 'threads': 'cpu'}), '(MaxSeq_nan, substrate_shuffling_matrix,\n type_value=None, iterations=iterat, threads=cpu)\n', (5258, 5351), False, 'from BioCAT.src.Combinatorics import multi_thread_shuffling, multi_thread_calculating_scores, make_combine, get_score, get_max_aminochain, skipper\n'), ((5380, 5501), 'BioCAT.src.Combinatorics.multi_thread_calculating_scores', 'multi_thread_calculating_scores', (['MaxSeq_nan', 'module_shuffling_matrix'], {'type_value': 'None', 'iterations': 'iterat', 'threads': 'cpu'}), '(MaxSeq_nan, module_shuffling_matrix,\n type_value=None, iterations=iterat, threads=cpu)\n', (5411, 5501), False, 'from BioCAT.src.Combinatorics import multi_thread_shuffling, multi_thread_calculating_scores, make_combine, get_score, get_max_aminochain, skipper\n'), ((5530, 5655), 'BioCAT.src.Combinatorics.multi_thread_calculating_scores', 'multi_thread_calculating_scores', (['MaxSeq_full', 'substrate_shuffling_matrix'], {'type_value': 'None', 'iterations': 'iterat', 'threads': 'cpu'}), '(MaxSeq_full, substrate_shuffling_matrix,\n type_value=None, iterations=iterat, threads=cpu)\n', (5561, 5655), False, 'from BioCAT.src.Combinatorics import multi_thread_shuffling, multi_thread_calculating_scores, make_combine, get_score, get_max_aminochain, skipper\n'), ((5684, 5806), 'BioCAT.src.Combinatorics.multi_thread_calculating_scores', 'multi_thread_calculating_scores', (['MaxSeq_full', 'module_shuffling_matrix'], {'type_value': 'None', 'iterations': 'iterat', 'threads': 'cpu'}), '(MaxSeq_full, module_shuffling_matrix,\n type_value=None, iterations=iterat, threads=cpu)\n', (5715, 5806), False, 'from BioCAT.src.Combinatorics import multi_thread_shuffling, multi_thread_calculating_scores, make_combine, get_score, get_max_aminochain, skipper\n'), ((8769, 8809), 'pandas.read_csv', 'read_csv', (['(folder + target_file)'], {'sep': '"""\t"""'}), "(folder + target_file, sep='\\t')\n", (8777, 8809), False, 'from pandas import read_csv\n'), ((8964, 8982), 'BioCAT.src.Combinatorics.skipper', 'skipper', (['BGC', 'skip'], {}), '(BGC, skip)\n', (8971, 8982), False, 'from BioCAT.src.Combinatorics import multi_thread_shuffling, multi_thread_calculating_scores, make_combine, get_score, get_max_aminochain, skipper\n'), ((9962, 10001), 'BioCAT.src.Combinatorics.get_max_aminochain', 'get_max_aminochain', (['PeptideSeq[BS_type]'], {}), '(PeptideSeq[BS_type])\n', (9980, 10001), False, 'from BioCAT.src.Combinatorics import multi_thread_shuffling, multi_thread_calculating_scores, make_combine, get_score, get_max_aminochain, skipper\n'), ((10024, 10086), 'BioCAT.src.Combinatorics.make_combine', 'make_combine', (['PeptideSeq[BS_type]', 'length_max', 'matrix'], {'delta': '(3)'}), '(PeptideSeq[BS_type], length_max, matrix, delta=3)\n', (10036, 10086), False, 'from BioCAT.src.Combinatorics import multi_thread_shuffling, multi_thread_calculating_scores, make_combine, get_score, get_max_aminochain, skipper\n')] |
import signal
import requests
import time
from math import floor
shutdown = False
MAIN_TAKER = 0.0065
MAIN_MAKER = 0.002
ALT_TAKER = 0.005
ALT_MAKER = 0.0035
TAKER = (MAIN_TAKER + ALT_TAKER)*2
MAKER = MAIN_MAKER + ALT_MAKER
TAKEMAIN = MAIN_TAKER - ALT_MAKER
TAKEALT = ALT_TAKER - MAIN_MAKER
BUFFER = 0.01
NaN = float('nan')
class ApiException(Exception):
pass
class Book(object):
def __init__(self, sym, json):
global NaN
self.sym = sym
self.json = json
# could be cached
self.bids = self.json['bids']
self.asks = self.json['asks']
self.ask_price = 1
self.asks_quantity_left = 0
self.bid_price = 1
self.bids_quantity_left = 0
if self.bids:
self.bid_price = self.bids[0]['price']
if self.asks:
self.ask_price = self.asks[0]['price']
def bids_room(self):
if self.bids:
quantity = sum([b['quantity']
for b in self.bids if b['price'] == self.bid_price])
filled = sum([b['quantity_filled']
for b in self.bids if b['price'] == self.bid_price])
return quantity - filled
else:
return 0
def asks_room(self):
if self.asks:
quantity = sum([b['quantity']
for b in self.asks if b['price'] == self.ask_price])
filled = sum([b['quantity_filled']
for b in self.asks if b['price'] == self.ask_price])
return quantity - filled
else:
return 0
class Limits(dict):
def __init__(self, json):
self.update(json)
self.gross_limit = int(json['gross_limit'])
self.net_limit = int(json['net_limit'])
self.gross = int(json['gross'])
self.net = int(json['net'])
class OHLC(dict):
def __init__(self, sym, json):
self.sym = sym
self.update(json)
self.tick = json['tick']
self.open = json['open']
self.high = json['high']
self.low = json['low']
self.close = json['close']
class Shock(dict):
def __init__(self, news, currtick):
self.ticker = news['ticker']
self.elapsed = currtick - news['tick']
headline = news['headline']
try:
self.amount = float(headline[-6:].replace('$', ''))
except:
self.amount = 0
class Session(object):
def __init__(self, url, key):
self.url = url
self.key = key
self.tick = -1
def __enter__(self):
self.session = requests.Session()
self.session.headers.update({'X-API-Key': self.key})
return self
def __exit__(self, type, value, traceback):
self.session.close()
def get_tick(self):
while True:
resp = self.session.get(self.url + '/v1/case', params=None)
if not resp.ok:
raise ApiException('could not get tick: ' + str(resp))
json = resp.json()
if json['status'] == 'STOPPED' or shutdown:
return False
if json['tick'] != self.tick:
self.tick = json['tick']
print('.', self.tick)
return True
# this timer is unnecessary, network latency should be enough
time.sleep(0.1)
def get_book(self, sym):
resp = self.session.get(
self.url + '/v1/securities/book', params={'ticker': sym})
if not resp.ok:
raise ApiException('could not get book: ' + str(resp))
return Book(sym, resp.json())
def send_order(self, sym, side, price, size):
resp = self.session.post(self.url + '/v1/orders', params={
'ticker': sym, 'type': 'LIMIT', 'action': side, 'quantity': size, 'price': price})
if resp.ok:
print('sent order', side, sym, size, '@', price)
else:
print('failed to send order', side, sym,
size, '@', price, ':', resp.text)
def getLimit(self):
resp = self.session.get(self.url+'/v1/limits')
if not resp.ok:
raise ApiException('could not get limit: '+str(resp))
return Limits(resp.json()[0])
def getSecurities(self, sym=None):
if sym is None:
resp = self.session.get(self.url+'/v1/securities')
else:
resp = self.session.get(
self.url+'/v1/securities', params={'ticker': sym})
if not resp.ok:
raise ApiException('could not get position: '+str(resp))
json = resp.json()
return {sec['ticker']: {k: sec[k] for k in [
"position",
"vwap",
"nlv",
"last",
"bid",
"bid_size",
"ask",
"ask_size",
"unrealized",
"realized"
]} for sec in json}
def get_OHLC(self, sym, ticks=50):
resp = self.session.get(
self.url + '/v1/securities/history', params={'ticker': sym,'limit':ticks})
if not resp.ok:
raise ApiException('could not get OHLC: ' + str(resp))
return [OHLC(sym, ohlc) for ohlc in resp.json()]
def buy(self, sym, price, size):
self.send_order(sym, 'BUY', price, size)
def sell(self, sym, price, size):
self.send_order(sym, 'SELL', price, size)
def send_market(self, sym, side, size):
resp = self.session.post(self.url + '/v1/orders', params={
'ticker': sym, 'type': 'MARKET', 'action': side, 'quantity': size})
if resp.ok:
json = resp.json()
print('market order', side, sym, size, '@', json['vwap'])
return json['vwap']
else:
print('failed to send order', side, sym,
size, '@Market:', resp.text)
return 0
def buyM(self, sym, size):
return self.send_market(sym, 'BUY', size)
def sellM(self, sym, size):
return self.send_market(sym, 'SELL', size)
def getNews(self):
resp = self.session.get(self.url + '/v1/news', params={'limit': 10})
if not resp.ok:
raise ApiException('failed to get news', resp.text)
else:
json = resp.json()
# only care about recent news
return [Shock(news, self.tick) for news in json if news['tick'] > self.tick-4]
def getTrader(self):
resp = self.session.get(self.url + '/v1/trader')
if not resp.ok:
raise ApiException('failed to get trader info', resp.text)
else:
json = resp.json()
return json
def main():
    # prices change every tick
    # check position
    # plain arbitrage
# index arbitrage
# shock handling
# wave riding
# pairTickers = [('WMT-M', 'WMT-A'), ('CAT-M', 'CAT-A'), ('MMM-M', 'MMM-A')]
with Session('http://localhost:9998', 'VHK3DEDE') as session:
while session.get_tick():
try:
shock_runner(session)
exchange_arbitrage(session, "WMT-M", "WMT-A")
exchange_arbitrage(session, "CAT-M", "CAT-A")
exchange_arbitrage(session, "MMM-M", "MMM-A")
index_arbitrage(session, ['WMT', 'MMM', 'CAT'])
except Exception as ex:
print("error", str(ex))
# trader = session.getTrader()
# print(trader['nlv'])
# TODO: position cleaner: try to reduce gross position loss-free
# TODO: implement range runner for the last x ticks
def avg(arr):
return sum(arr)/float(len(arr))
def window_trend(left,right):
leftavg = avg(left)
rightavg = avg(right)
if rightavg > leftavg:
return 1
elif rightavg < leftavg:
return -1
else:
return 0
def splitarr(arr):
n = len(arr)
left = arr[:n//2]
right = arr[n//2:]
return left,right
def wwindow_trend(prices):
left, right = splitarr(prices)
trend = window_trend(left,right)
lleft, lright = splitarr(left)
rleft, rright = splitarr(right)
trendl = window_trend(lleft,lright)
trendr = window_trend(rleft,rright)
return trend + trendl + trendr
def trend_runner(session, ticker):
if session.tick<20:
return
# short term trend
prices = session.get_OHLC(ticker, 20)
highs = [price.high for price in prices]
lows = [price.low for price in prices]
highTrend = wwindow_trend(highs)
lowTrend = wwindow_trend(lows)
if highTrend+lowTrend < -4:
# volatile, but no trend
session.buyM(ticker,1000)
if highTrend+lowTrend > 4:
session.sellM(ticker,1000)
print(ticker,"short hightrend",highTrend,"lowtrend",lowTrend)
if session.tick<100:
return
prices = session.get_OHLC(ticker, 100)
highs = [price.high for price in prices]
lows = [price.low for price in prices]
highTrend = wwindow_trend(highs)
lowTrend = wwindow_trend(lows)
# grown too much
if highTrend+lowTrend < -4:
# volatile, but no trend
session.sellM(ticker,1000)
# dropped too much
if highTrend+lowTrend > 4:
session.buyM(ticker,1000)
print(ticker,"long hightrend",highTrend,"lowtrend",lowTrend)
def shock_runner(session):
shocks = session.getNews()
quantity = 50000
for shock in sorted(shocks, key=lambda s: s.elapsed):
Mticker = shock.ticker+"-M"
Aticker = shock.ticker+"-A"
if shock.elapsed < 2:
if shock.amount > MAIN_TAKER + BUFFER*2:
session.buyM(Mticker, quantity)
session.buyM(Aticker, quantity)
elif - shock.amount > MAIN_TAKER + BUFFER*2:
session.sellM(Mticker, quantity)
session.sellM(Aticker, quantity)
print('shock', shock.ticker, shock.amount)
if shock.elapsed == 2:
if shock.amount > MAIN_TAKER + BUFFER*2:
session.sellM(Mticker, quantity)
session.sellM(Aticker, quantity)
elif - shock.amount > MAIN_TAKER + BUFFER*2:
session.buyM(Mticker, quantity)
session.buyM(Aticker, quantity)
print('post shock', shock.ticker, shock.amount)
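# Illustrative constant (an addition, not referenced by shock_runner above): with
# MAIN_TAKER = 0.0065 and BUFFER = 0.01 the trigger MAIN_TAKER + BUFFER*2 evaluates
# to 0.0265, i.e. only headlines implying a move of more than ~2.7 cents are traded,
# and the position is unwound two ticks later.
SHOCK_MIN_MOVE = MAIN_TAKER + BUFFER * 2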
TAKER4 = MAIN_TAKER * 5
def index_arbitrage(session, tickers):
secs = session.getSecurities()
ETF = secs['ETF']
etfBid = ETF['bid']
etfAsk = ETF['ask']
bestBids = {}
bestBidsQ = {}
bestAsks = {}
bestAsksQ = {}
for ticker in tickers:
tickerM = ticker+"-M"
tickerA = ticker+"-A"
Mticker = secs[tickerM]
Aticker = secs[tickerA]
Mbid = Mticker['bid']
Abid = Aticker['bid']
Mask = Mticker['ask']
Aask = Aticker['ask']
if Mbid >= Abid:
bestBids[tickerM] = Mbid
bestBidsQ[tickerM] = Mticker['bid_size']
else:
bestBids[tickerA] = Abid
bestBidsQ[tickerA] = Aticker['bid_size']
if Mask <= Aask:
bestAsks[tickerM] = Mask
bestAsksQ[tickerM] = Mticker['ask_size']
else:
bestAsks[tickerA] = Aask
bestAsksQ[tickerA] = Aticker['ask_size']
compositBid = sum(bestBids.values())
compositBidQ = min(bestBidsQ.values())
compositAsk = sum(bestAsks.values())
compositAskQ = min(bestAsksQ.values())
boughtprice = 0
soldprice = 0
if etfBid - compositAsk > TAKER4+BUFFER:
quantity = ETF['bid_size'] if ETF['bid_size'] < compositAskQ else compositAskQ
if quantity == 0:
return
quantity = min([quantity, 50000])
soldprice = session.sellM('ETF', quantity)
for ticker in bestAsks:
boughtprice += session.buyM(ticker, quantity)
print('Plan ETF', etfBid, 'Stocks', compositAsk)
print('Actual ETF', soldprice, 'Stocks', boughtprice)
elif compositBid - etfAsk > TAKER4+BUFFER:
quantity = ETF['ask_size'] if ETF['ask_size'] < compositBidQ else compositBidQ
if quantity == 0:
return
quantity = min([quantity, 50000])
for ticker in bestBids:
soldprice += session.sellM(ticker, quantity)
boughtprice = session.buyM('ETF', quantity)
print('Plan Stocks', compositBid, 'ETF', etfAsk)
print('Actual Stocks', soldprice, 'ETF', boughtprice)
# TODO: send limit orders and use market to cover unfilled ones after
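# Illustrative constant (an addition, not referenced above): index_arbitrage only
# trades when the gap between the ETF quote and the composite of the best stock
# quotes exceeds TAKER4 + BUFFER = 0.0325 + 0.01 = 0.0425 given the constants above.
INDEX_ARB_MIN_GAP = TAKER4 + BUFFER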
def exchange_arbitrage(session, mticker, aticker):
global NaN
mbook = session.get_book(mticker)
masks_room = mbook.asks_room()
mbids_room = mbook.bids_room()
abook = session.get_book(aticker)
aasks_room = abook.asks_room()
abids_room = abook.bids_room()
# a lot of room, make market orders
if mbook.bid_price - abook.ask_price > TAKER+BUFFER*2:
quantity = aasks_room if aasks_room < mbids_room else mbids_room
quantity = min([quantity, 50000])
session.sellM(mbook.sym, quantity)
session.buyM(abook.sym, quantity)
elif abook.bid_price - mbook.ask_price > TAKER+BUFFER*2:
        quantity = abids_room if abids_room < masks_room else masks_room
quantity = min([quantity, 50000])
session.sellM(abook.sym, quantity)
session.buyM(mbook.sym, quantity)
# only a little room, make limit orders
if mbook.bid_price - abook.ask_price > BUFFER:
quantity = aasks_room if aasks_room < mbids_room else mbids_room
quantity = min([quantity, 50000])
session.sell(mbook.sym, mbook.bid_price, quantity)
session.buy(abook.sym, abook.ask_price, quantity)
elif abook.bid_price - mbook.ask_price > BUFFER:
        quantity = abids_room if abids_room < masks_room else masks_room
quantity = min([quantity, 50000])
session.sell(abook.sym, abook.bid_price, quantity)
session.buy(mbook.sym, mbook.ask_price, quantity)
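# Hedged helpers (additions for clarity; the strategy functions above do not call
# them). They spell out the minimum price gaps implied by the fee constants at the
# top of this script.
def min_market_arb_gap():
    # Two market legs pay taker fees on both exchanges plus a slippage buffer per side.
    return TAKER + BUFFER * 2   # 0.023 + 0.02 = 0.043 with the constants above
def min_limit_arb_gap():
    # Resting limit orders are gated only by the price buffer.
    return BUFFER               # 0.01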
def sigint(signum, frame):
global shutdown
signal.signal(signal.SIGINT, signal.SIG_DFL)
shutdown = True
if __name__ == '__main__':
signal.signal(signal.SIGINT, sigint)
main()
| [
"signal.signal",
"requests.Session",
"time.sleep"
] | [((14446, 14490), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'signal.SIG_DFL'], {}), '(signal.SIGINT, signal.SIG_DFL)\n', (14459, 14490), False, 'import signal\n'), ((14549, 14585), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'sigint'], {}), '(signal.SIGINT, sigint)\n', (14562, 14585), False, 'import signal\n'), ((2705, 2723), 'requests.Session', 'requests.Session', ([], {}), '()\n', (2721, 2723), False, 'import requests\n'), ((3474, 3489), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (3484, 3489), False, 'import time\n')] |
"""
=============================================
Generating feedthrough from single instance
=============================================
This example demonstrates how to generate a feedthrough wire connection for
a given scalar or vector wire.
**Initial Design**
.. hdl-diagram:: ../../../examples/basic/_initial_design.v
:type: netlistsvg
:align: center
:module: top
**Output1** ``wire0`` feedthrough from ``inst_2_1``
.. hdl-diagram:: ../../../examples/basic/_output_wire.v
:type: netlistsvg
:align: center
:module: top
**Output2** ``bus_in`` feedthrough from ``inst_1_0``
.. hdl-diagram:: ../../../examples/basic/_output_bus.v
:type: netlistsvg
:align: center
:module: top
"""
from os import path
import spydrnet as sdn
import spydrnet_physical as sdnphy
netlist = sdnphy.load_netlist_by_name('basic_hierarchy')
top = netlist.top_instance.reference
cable0 = next(top.get_cables("wire0"))
inst2 = next(top.get_instances("inst_2_0"))
sdn.compose(netlist, '_initial_design.v', skip_constraints=True)
top.create_feedthrough(inst2, cable0)
top.create_unconn_wires()
sdn.compose(netlist, '_output_wire.v', skip_constraints=True)
netlist = sdnphy.load_netlist_by_name('basic_hierarchy')
top = netlist.top_instance.reference
bus_in = next(top.get_cables("bus_in"))
inst1 = next(top.get_instances("inst_1_0"))
cables = top.create_feedthrough(inst1, bus_in)
top.create_unconn_wires()
sdn.compose(netlist, '_output_bus.v', skip_constraints=True)
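# Hedged addition: list the feedthrough cables created for ``bus_in`` above
# (assumes ``create_feedthrough`` returns an iterable of spydrnet Cable objects
# exposing a ``name`` attribute).
for cable in cables:
    print(cable.name)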
| [
"spydrnet.compose",
"spydrnet_physical.load_netlist_by_name"
] | [((801, 847), 'spydrnet_physical.load_netlist_by_name', 'sdnphy.load_netlist_by_name', (['"""basic_hierarchy"""'], {}), "('basic_hierarchy')\n", (828, 847), True, 'import spydrnet_physical as sdnphy\n'), ((970, 1034), 'spydrnet.compose', 'sdn.compose', (['netlist', '"""_initial_design.v"""'], {'skip_constraints': '(True)'}), "(netlist, '_initial_design.v', skip_constraints=True)\n", (981, 1034), True, 'import spydrnet as sdn\n'), ((1101, 1162), 'spydrnet.compose', 'sdn.compose', (['netlist', '"""_output_wire.v"""'], {'skip_constraints': '(True)'}), "(netlist, '_output_wire.v', skip_constraints=True)\n", (1112, 1162), True, 'import spydrnet as sdn\n'), ((1175, 1221), 'spydrnet_physical.load_netlist_by_name', 'sdnphy.load_netlist_by_name', (['"""basic_hierarchy"""'], {}), "('basic_hierarchy')\n", (1202, 1221), True, 'import spydrnet_physical as sdnphy\n'), ((1418, 1478), 'spydrnet.compose', 'sdn.compose', (['netlist', '"""_output_bus.v"""'], {'skip_constraints': '(True)'}), "(netlist, '_output_bus.v', skip_constraints=True)\n", (1429, 1478), True, 'import spydrnet as sdn\n')] |
from __future__ import annotations
from typing import Optional, Union
from tools import tools
from exceptions import workflow_exceptions
class Workflow:
"""A class to represent a workflow.
Workflow class provides set of methods to manage state of the workflow.
It allows for tool insertions, removals and modifications.
    When the workflow is run, a data flow is built and each tool linked to the
    workflow instance is executed in a determined order. Tool outputs are then
    consolidated in JSON format.
"""
TOOL_CHOICES = {
"generic": tools.GenericTool,
"large_generic": tools.LargeGenericTool,
"input": tools.InputTool,
}
def __init__(self) -> None:
"""Initializes Workflow class with root tool.
Workflow class is initialized with root tool with tool ID `0`. `_root`
points to root tool directly.
"""
self._root = tools.RootTool(id=0)
self._tools = {0: self._root}
self._used_ids = {0}
def insert_tool(
self,
tool_choice: str,
input_ids: Optional[Union[list[int], int]] = None,
output_ids: Optional[Union[list[int], int]] = None,
coordinates: Optional[tuple[int, int]] = None,
) -> tools.Tool:
"""Inserts a new tool to the current workflow.
Args:
tool_choice (str): determines what tool is created (based on the
available choices defined within the Workflow class).
            input_ids (list[int], int): starting input or inputs for the tool
identified by their IDs. Defaults to None.
output_ids (list[int], int): starting output or outputs for the tool
identified by their IDs. Defaults to None.
coordinates (tuple[int, int]): coordinates for the tool on canvas.
Defaults to None.
Raises:
workflow_exceptions.ToolNotAvailable: indicates that provided string
does not refer to an available tool from the Workflow class.
Returns:
tools.Tool: instance of a Tool's class.
"""
try:
tool_class = self.TOOL_CHOICES[tool_choice]
except KeyError:
raise workflow_exceptions.ToolNotAvailable
next_id = self._get_next_tool_id()
tool = tool_class(id=next_id)
self._tools[next_id] = tool
self._add_tool_id(next_id)
if input_ids is not None:
self.add_tool_input(tool_id=tool.id, input_ids=input_ids)
if output_ids is not None:
output_ids = self._clean_tool_ids(output_ids)
for output_id in output_ids:
self.add_tool_input(tool_id=output_id, input_ids=tool.id)
if coordinates is not None:
self.set_tool_coordinates(tool_id=tool.id, coordinates=coordinates)
return tool
def remove_tool(self, tool_ids: Union[list[int], int]) -> None:
"""Removes existing tool from the current workflow.
Removes the tool from the workflow and updates inputs and outputs of the
linked tool instances.
Args:
tool_ids (list[int], int): tool ID or IDs that ought to be removed.
Raises:
workflow_exceptions.RootCannotBeDeleted: indicates that selected
tool for removal is a root which cannot be deleted.
"""
tool_ids = self._clean_tool_ids(tool_ids)
for tool_id in tool_ids:
tool = self._get_tool_by_id(tool_id)
if tool.is_root:
raise workflow_exceptions.RootCannotBeDeleted
# remove tool from linked tools' inputs
tool_outputs = tool.outputs
for output_id in tool_outputs:
self.remove_tool_input(tool_id=output_id, input_ids=tool.id)
# remove tool from linked tools' outputs
tool_inputs = tool.inputs
for input_id in tool_inputs:
self.remove_tool_input(tool_id=tool.id, input_ids=input_id)
del self._tools[tool_id]
def add_tool_input(
self, tool_id: int, input_ids: Union[list[int], int]
) -> tools.Tool:
"""Adds new input(s) for the tool existing in the current workflow.
Args:
tool_id (int): tool ID to which input(s) should be added.
            input_ids (list[int], int): input(s) to be added to the tool
identified by their IDs.
Returns:
tools.Tool: instance of a Tool's class.
"""
tool = self._get_tool_by_id(tool_id)
input_ids = self._clean_tool_ids(input_ids)
for input_id in input_ids:
tool.add_input(input_id)
self._tools[input_id].add_output(tool_id)
return tool
def remove_tool_input(
self, tool_id: int, input_ids: Union[list[int], int]
) -> tools.Tool:
"""Removes input(s) from the tool existing in the current workflow.
Args:
tool_id (int): tool ID from which input(s) should be removed.
            input_ids (list[int], int): input(s) to be removed from the tool
identified by their IDs.
Returns:
tools.Tool: instance of a Tool's class.
"""
tool = self._get_tool_by_id(tool_id)
input_ids = self._clean_tool_ids(input_ids)
for input_id in input_ids:
tool.remove_input(input_id)
self._tools[input_id].remove_output(tool_id)
return tool
def set_tool_config(self, tool_id: int, data: dict) -> tools.Tool:
"""Sets tool's config to passed data dict.
Args:
tool_id (int): tool ID for which config should be set.
data (dict): dict of parameters for given tool.
Returns:
tools.Tool: instance of a Tool's class.
"""
tool = self._get_tool_by_id(tool_id)
tool.config = data
return tool
def set_tool_coordinates(
self, tool_id: int, coordinates: Optional[tuple[int, int]] = None
) -> tools.Tool:
"""Sets (x, y) coordinates for the tool existing in the current workflow.
If no coordinates are passed to this method, default coordinates will be
calculated using `_get_default_coordinates()` internal method.
Args:
tool_id (int): tool ID for which coordinates are to be set.
coordinates (tuple[int, int]): tuple of (x, y) coordinates.
Defaults to None.
Returns:
tools.Tool: instance of a Tool's class.
"""
        # TODO: decide where to validate that the coordinates fit the canvas
tool = self._get_tool_by_id(tool_id)
coordinates = (
coordinates if coordinates is not None else self._get_default_coordinates()
)
tool.coordinates = coordinates
return tool
def _get_default_coordinates(self) -> tuple[int, int]:
# might require more sophisticated logic in the future
return (0, 0)
def _get_tool_by_id(self, tool_id: int) -> tools.Tool:
"""Returns an instance of a Tool class selected by its ID.
Args:
tool_id (int): tool ID.
Raises:
workflow_exceptions.ToolDoesNotExist: indicates that for provided ID
there is no tool in this workflow.
Returns:
tools.Tool: instance of a Tool's class.
"""
try:
tool = self._tools[tool_id]
except KeyError:
raise workflow_exceptions.ToolDoesNotExist
return tool
def _clean_tool_ids(self, tool_ids: Union[list[int], int]) -> list[int]:
"""Returns a validated list of tool ID(s).
Checks whether passed tool ID(s) exist in the current workflow
and returns the list of tool IDs. If at least one of the provided tool
IDs is not found, it raises an exception.
Args:
tool_ids (list[int], int): tool ID(s) to be cleaned.
Raises:
workflow_exceptions.ToolDoesNotExist: indicates that at least one of
the provided tool IDs is not present in the current workflow.
Returns:
list[int]: list of checked tool IDs.
"""
cleaned_tool_ids = (
list(set(tool_ids)) if isinstance(tool_ids, list) else [tool_ids]
)
if any(tool_id not in self._tools for tool_id in cleaned_tool_ids):
raise workflow_exceptions.ToolDoesNotExist
return cleaned_tool_ids
def _add_tool_id(self, tool_id: int) -> None:
"""Adds an ID to the used ID pool.
Args:
tool_id (int): ID to be added to the used ID pool.
"""
self._used_ids.add(tool_id)
def _get_next_tool_id(self) -> int:
"""Returns a next available ID to be used for a tool instance.
Returns:
int: next available tool ID.
"""
return max(self._used_ids) + 1
def _build_flow(self) -> None:
        raise NotImplementedError
def __len__(self) -> int:
return len(self._tools) - 1
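# Minimal usage sketch (illustrative only; it assumes the tools package behaves as
# referenced in TOOL_CHOICES above and is not exercised anywhere in this module).
def _example_workflow() -> Workflow:
    wf = Workflow()
    source = wf.insert_tool("input", coordinates=(0, 0))
    sink = wf.insert_tool("generic", input_ids=source.id)
    wf.set_tool_config(sink.id, {"example": True})
    return wf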
| [
"tools.tools.RootTool"
] | [((916, 936), 'tools.tools.RootTool', 'tools.RootTool', ([], {'id': '(0)'}), '(id=0)\n', (930, 936), False, 'from tools import tools\n')] |
from django.apps import apps
from django.test import override_settings
from wagtail_live.signals import live_page_update
def test_live_page_update_signal_receivers():
assert len(live_page_update.receivers) == 0
@override_settings(
WAGTAIL_LIVE_PUBLISHER="tests.testapp.publishers.DummyWebsocketPublisher"
)
def test_live_page_update_signal_receivers_websocket():
app_config = apps.get_app_config("wagtail_live")
app_config.ready()
try:
# Receiver should be connected, no IndexError
receiver = live_page_update.receivers[0]
finally:
live_page_update.disconnect(receiver)
| [
"django.test.override_settings",
"django.apps.apps.get_app_config",
"wagtail_live.signals.live_page_update.disconnect"
] | [((221, 318), 'django.test.override_settings', 'override_settings', ([], {'WAGTAIL_LIVE_PUBLISHER': '"""tests.testapp.publishers.DummyWebsocketPublisher"""'}), "(WAGTAIL_LIVE_PUBLISHER=\n 'tests.testapp.publishers.DummyWebsocketPublisher')\n", (238, 318), False, 'from django.test import override_settings\n'), ((393, 428), 'django.apps.apps.get_app_config', 'apps.get_app_config', (['"""wagtail_live"""'], {}), "('wagtail_live')\n", (412, 428), False, 'from django.apps import apps\n'), ((586, 623), 'wagtail_live.signals.live_page_update.disconnect', 'live_page_update.disconnect', (['receiver'], {}), '(receiver)\n', (613, 623), False, 'from wagtail_live.signals import live_page_update\n')] |
# -*- coding: utf-8 -*-
"""
Script Name:
Author: <NAME>/Jimmy - 3D artist.
Description:
"""
# -------------------------------------------------------------------------------------------------------------
""" Import """
import os
from PySide2.QtWidgets import (QFrame, QStyle, QAbstractItemView, QSizePolicy, QLineEdit, QPlainTextEdit,
QGraphicsItem, QGraphicsView, QGraphicsScene, QRubberBand, QCalendarWidget, )
from PySide2.QtCore import QEvent, QSettings, QSize, Qt, QDateTime
from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor
SingleSelection = QCalendarWidget.SingleSelection
NoSelection = QCalendarWidget.NoSelection
SingleLetterDay = QCalendarWidget.SingleLetterDayNames
ShortDay = QCalendarWidget.ShortDayNames
LongDay = QCalendarWidget.LongDayNames
NoHoriHeader = QCalendarWidget.NoHorizontalHeader
NoVertHeader = QCalendarWidget.NoVerticalHeader
IsoWeekNum = QCalendarWidget.ISOWeekNumbers
SelectMode = QCalendarWidget.SelectionMode
HoriHeaderFm = QCalendarWidget.HorizontalHeaderFormat
VertHeaderFm = QCalendarWidget.VerticalHeaderFormat
DayOfWeek = Qt.DayOfWeek
Sunday = Qt.Sunday
Monday = Qt.Monday
Tuesday = Qt.Tuesday
Wednesday = Qt.Wednesday
Thursday = Qt.Thursday
Friday = Qt.Friday
Saturday = Qt.Saturday
ICONSIZE = 32
ICONBUFFER = -1
BTNTAGSIZE = QSize(87, 20)
TAGBTNSIZE = QSize(87-1, 20-1)
BTNICONSIZE = QSize(ICONSIZE, ICONSIZE)
ICONBTNSIZE = QSize(ICONSIZE+ICONBUFFER, ICONSIZE+ICONBUFFER)
DAMG_LOGO_COLOR = QColor(0, 114, 188, 255)
# Basic color
GlobalColor = Qt.GlobalColor
WHITE = QColor(Qt.white)
LIGHTGRAY = QColor(Qt.lightGray)
GRAY = QColor(Qt.gray)
DARKGRAY = QColor(Qt.darkGray)
BLACK = QColor(Qt.black)
RED = QColor(Qt.red)
GREEN = QColor(Qt.green)
BLUE = QColor(Qt.blue)
DARKRED = QColor(Qt.darkRed)
DARKGREEN = QColor(Qt.darkGreen)
DARKBLUE = QColor(Qt.darkBlue)
CYAN = QColor(Qt.cyan)
MAGENTA = QColor(Qt.magenta)
YELLOW = QColor(Qt.yellow)
DARKCYAN = QColor(Qt.darkCyan)
DARKMAGENTA = QColor(Qt.darkMagenta)
DARKYELLOW = QColor(Qt.darkYellow)
# Dark Palette color
Color_BACKGROUND_LIGHT = QColor('#505F69')
COLOR_BACKGROUND_NORMAL = QColor('#32414B')
COLOR_BACKGROUND_DARK = QColor('#19232D')
COLOR_FOREGROUND_LIGHT = QColor('#F0F0F0')
COLOR_FOREGROUND_NORMAL = QColor('#AAAAAA')
COLOR_FOREGROUND_DARK = QColor('#787878')
COLOR_SELECTION_LIGHT = QColor('#148CD2')
COLOR_SELECTION_NORMAL = QColor('#1464A0')
COLOR_SELECTION_DARK = QColor('#14506E')
# Nice color
blush = QColor(246, 202, 203, 255)
petal = QColor(247, 170, 189, 255)
petunia = QColor(231, 62, 151, 255)
deep_pink = QColor(229, 2, 120, 255)
melon = QColor(241, 118, 110, 255)
pomegranate = QColor(178, 27, 32, 255)
poppy_red = QColor(236, 51, 39, 255)
orange_red = QColor(240, 101, 53, 255)
olive = QColor(174, 188, 43, 255)
spring = QColor(227, 229, 121, 255)
yellow = QColor(255, 240, 29, 255)
mango = QColor(254, 209, 26, 255)
cantaloupe = QColor(250, 176, 98, 255)
tangelo = QColor(247, 151, 47, 255)
burnt_orange = QColor(236, 137, 36, 255)
bright_orange = QColor(242, 124, 53, 255)
moss = QColor(176, 186, 39, 255)
sage = QColor(212, 219, 145, 255)
apple = QColor(178, 215, 140, 255)
grass = QColor(111, 178, 68, 255)
forest = QColor(69, 149, 62, 255)
peacock = QColor(21, 140, 167, 255)
teal = QColor(24, 157, 193, 255)
aqua = QColor(153, 214, 218, 255)
violet = QColor(55, 52, 144, 255)
deep_blue = QColor(15, 86, 163, 255)
hydrangea = QColor(150, 191, 229, 255)
sky = QColor(139, 210, 244, 255)
dusk = QColor(16, 102, 162, 255)
midnight = QColor(14, 90, 131, 255)
seaside = QColor(87, 154, 188, 255)
poolside = QColor(137, 203, 225, 255)
eggplant = QColor(86, 5, 79, 255)
lilac = QColor(222, 192, 219, 255)
chocolate = QColor(87, 43, 3, 255)
blackout = QColor(19, 17, 15, 255)
stone = QColor(125, 127, 130, 255)
gravel = QColor(181, 182, 185, 255)
pebble = QColor(217, 212, 206, 255)
sand = QColor(185, 172, 151, 255)
ignoreARM = Qt.IgnoreAspectRatio
scrollAsNeed = Qt.ScrollBarAsNeeded
scrollOff = Qt.ScrollBarAlwaysOff
scrollOn = Qt.ScrollBarAlwaysOn
SiPoMin = QSizePolicy.Minimum # Size policy
SiPoMax = QSizePolicy.Maximum
SiPoExp = QSizePolicy.Expanding
SiPoPre = QSizePolicy.Preferred
SiPoIgn = QSizePolicy.Ignored
frameStyle = QFrame.Sunken | QFrame.Panel
center = Qt.AlignCenter # Alignment
right = Qt.AlignRight
left = Qt.AlignLeft
top = Qt.AlignTop
bottom = Qt.AlignBottom
hori = Qt.Horizontal
vert = Qt.Vertical
dockL = Qt.LeftDockWidgetArea # Docking area
dockR = Qt.RightDockWidgetArea
dockT = Qt.TopDockWidgetArea
dockB = Qt.BottomDockWidgetArea
dockAll = Qt.AllDockWidgetAreas
datetTimeStamp = QDateTime.currentDateTime().toString("hh:mm - dd MMMM yy") # datestamp
PRS = dict(password = QLineEdit.Password, center = center , left = left , right = right,
spmax = SiPoMax , sppre = SiPoPre, spexp = SiPoExp, spign = SiPoIgn,
expanding = QSizePolicy.Expanding, spmin = SiPoMin,)
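# Illustrative (hypothetical call sites): the presets above are meant to be looked up
# by key, e.g. line_edit.setEchoMode(PRS['password']) or label.setAlignment(PRS['center']).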
# -------------------------------------------------------------------------------------------------------------
""" Event """
NO_WRAP = QPlainTextEdit.NoWrap
NO_FRAME = QPlainTextEdit.NoFrame
ELIDE_RIGHT = Qt.ElideRight
ELIDE_NONE = Qt.ElideNone
# -------------------------------------------------------------------------------------------------------------
""" Window state """
StateNormal = Qt.WindowNoState
StateMax = Qt.WindowMaximized
StateMin = Qt.WindowMinimized
State_Selected = QStyle.State_Selected
# -------------------------------------------------------------------------------------------------------------
""" Nodegraph setting variables """
ASPEC_RATIO = Qt.KeepAspectRatio
SMOOTH_TRANS = Qt.SmoothTransformation
SCROLLBAROFF = Qt.ScrollBarAlwaysOff # Scrollbar
SCROLLBARON = Qt.ScrollBarAlwaysOn
SCROLLBARNEED = Qt.ScrollBarAsNeeded
WORD_WRAP = Qt.TextWordWrap
INTERSECT_ITEM_SHAPE = Qt.IntersectsItemShape
CONTAIN_ITEM_SHAPE = Qt.ContainsItemShape
MATCH_EXACTLY = Qt.MatchExactly
DRAG_ONLY = QAbstractItemView.DragOnly
# -------------------------------------------------------------------------------------------------------------
""" UI flags """
ITEMENABLE = Qt.ItemIsEnabled
ITEMMOVEABLE = QGraphicsItem.ItemIsMovable
ITEMSENDGEOCHANGE = QGraphicsItem.ItemSendsGeometryChanges
ITEMSCALECHANGE = QGraphicsItem.ItemScaleChange
ITEMPOSCHANGE = QGraphicsItem.ItemPositionChange
DEVICECACHE = QGraphicsItem.DeviceCoordinateCache
SELECTABLE = QGraphicsItem.ItemIsSelectable
MOVEABLE = QGraphicsItem.ItemIsMovable
FOCUSABLE = QGraphicsItem.ItemIsFocusable
PANEL = QGraphicsItem.ItemIsPanel
NOINDEX = QGraphicsScene.NoIndex # Scene
RUBBER_DRAG = QGraphicsView.RubberBandDrag # Viewer
RUBBER_REC = QRubberBand.Rectangle
POS_CHANGE = QGraphicsItem.ItemPositionChange
NODRAG = QGraphicsView.NoDrag
NOFRAME = QGraphicsView.NoFrame
ANCHOR_NO = QGraphicsView.NoAnchor
ANCHOR_UNDERMICE = QGraphicsView.AnchorUnderMouse
ANCHOR_CENTER = QGraphicsView.AnchorViewCenter
CACHE_BG = QGraphicsView.CacheBackground
UPDATE_VIEWRECT = QGraphicsView.BoundingRectViewportUpdate
UPDATE_FULLVIEW = QGraphicsView.FullViewportUpdate
UPDATE_SMARTVIEW = QGraphicsView.SmartViewportUpdate
UPDATE_BOUNDINGVIEW = QGraphicsView.BoundingRectViewportUpdate
UPDATE_MINIMALVIEW = QGraphicsView.MinimalViewportUpdate
STAY_ON_TOP = Qt.WindowStaysOnTopHint
STRONG_FOCUS = Qt.StrongFocus
SPLASHSCREEN = Qt.SplashScreen
FRAMELESS = Qt.FramelessWindowHint
CUSTOMIZE = Qt.CustomizeWindowHint
CLOSEBTN = Qt.WindowCloseButtonHint
MINIMIZEBTN = Qt.WindowMinimizeButtonHint
AUTO_COLOR = Qt.AutoColor
# -------------------------------------------------------------------------------------------------------------
""" Drawing """
ANTIALIAS = QPainter.Antialiasing # Painter
ANTIALIAS_TEXT = QPainter.TextAntialiasing
ANTIALIAS_HIGH_QUALITY = QPainter.HighQualityAntialiasing
SMOOTH_PIXMAP_TRANSFORM = QPainter.SmoothPixmapTransform
NON_COSMETIC_PEN = QPainter.NonCosmeticDefaultPen
NO_BRUSH = Qt.NoBrush # Brush
NO_PEN = Qt.NoPen # Pen
ROUND_CAP = Qt.RoundCap
ROUND_JOIN = Qt.RoundJoin
PATTERN_SOLID = Qt.SolidPattern # Pattern
LINE_SOLID = Qt.SolidLine # Line
LINE_DASH = Qt.DashLine
LINE_DOT = Qt.DotLine
LINE_DASH_DOT = Qt.DashDotDotLine
TRANSPARENT = Qt.transparent
TRANSPARENT_MODE = Qt.TransparentMode
# -------------------------------------------------------------------------------------------------------------
""" Meta Object """
QUEUEDCONNECTION = Qt.QueuedConnection
# -------------------------------------------------------------------------------------------------------------
""" Keyboard and cursor """
TEXT_BOLD = QFont.Bold
TEXT_NORMAL = QFont.Normal
MONO_SPACE = QFont.Monospace
TEXT_MENEOMIC = Qt.TextShowMnemonic
KEY_PRESS = QEvent.KeyPress
KEY_RELEASE = QEvent.KeyRelease
KEY_ALT = Qt.Key_Alt
KEY_DEL = Qt.Key_Delete
KEY_TAB = Qt.Key_Tab
KEY_SHIFT = Qt.Key_Shift
KEY_CTRL = Qt.Key_Control
KEY_BACKSPACE = Qt.Key_Backspace
KEY_ENTER = Qt.Key_Enter
KEY_RETURN = Qt.Key_Return
KEY_F = Qt.Key_F
KEY_S = Qt.Key_S
ALT_MODIFIER = Qt.AltModifier
CTRL_MODIFIER = Qt.ControlModifier
SHIFT_MODIFIER = Qt.ShiftModifier
NO_MODIFIER = Qt.NoModifier
CLOSE_HAND_CUSOR = Qt.ClosedHandCursor
SIZEF_CURSOR = Qt.SizeFDiagCursor
windows = os.name == 'nt'
DMK = Qt.AltModifier if windows else CTRL_MODIFIER
MOUSE_LEFT = Qt.LeftButton
MOUSE_RIGHT = Qt.RightButton
MOUSE_MIDDLE = Qt.MiddleButton
NO_BUTTON = Qt.NoButton
ARROW_NONE = Qt.NoArrow # Cursor
CURSOR_ARROW = Qt.ArrowCursor
CURSOR_SIZEALL = Qt.SizeAllCursor
MOVE_OPERATION = QTextCursor.MoveOperation
MOVE_ANCHOR = QTextCursor.MoveMode.MoveAnchor
KEEP_ANCHOR = QTextCursor.MoveMode.KeepAnchor
ACTION_MOVE = Qt.MoveAction # Action
ignoreARM = Qt.IgnoreAspectRatio
# -------------------------------------------------------------------------------------------------------------
""" Set number """
RELATIVE_SIZE = Qt.RelativeSize # Size
INI = QSettings.IniFormat
NATIVE = QSettings.NativeFormat
INVALID = QSettings.InvalidFormat
SYS_SCOPE = QSettings.SystemScope
USER_SCOPE = QSettings.UserScope
# -------------------------------------------------------------------------------------------------------------
# Created by <NAME> on 5/6/2020 - 3:13 AM
# © 2017 - 2020 DAMGteam. All rights reserved
"PySide2.QtGui.QColor",
"PySide2.QtCore.QDateTime.currentDateTime",
"PySide2.QtCore.QSize"
] | [((1731, 1744), 'PySide2.QtCore.QSize', 'QSize', (['(87)', '(20)'], {}), '(87, 20)\n', (1736, 1744), False, 'from PySide2.QtCore import QEvent, QSettings, QSize, Qt, QDateTime\n'), ((1775, 1796), 'PySide2.QtCore.QSize', 'QSize', (['(87 - 1)', '(20 - 1)'], {}), '(87 - 1, 20 - 1)\n', (1780, 1796), False, 'from PySide2.QtCore import QEvent, QSettings, QSize, Qt, QDateTime\n'), ((1823, 1848), 'PySide2.QtCore.QSize', 'QSize', (['ICONSIZE', 'ICONSIZE'], {}), '(ICONSIZE, ICONSIZE)\n', (1828, 1848), False, 'from PySide2.QtCore import QEvent, QSettings, QSize, Qt, QDateTime\n'), ((1879, 1930), 'PySide2.QtCore.QSize', 'QSize', (['(ICONSIZE + ICONBUFFER)', '(ICONSIZE + ICONBUFFER)'], {}), '(ICONSIZE + ICONBUFFER, ICONSIZE + ICONBUFFER)\n', (1884, 1930), False, 'from PySide2.QtCore import QEvent, QSettings, QSize, Qt, QDateTime\n'), ((1959, 1983), 'PySide2.QtGui.QColor', 'QColor', (['(0)', '(114)', '(188)', '(255)'], {}), '(0, 114, 188, 255)\n', (1965, 1983), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((2076, 2092), 'PySide2.QtGui.QColor', 'QColor', (['Qt.white'], {}), '(Qt.white)\n', (2082, 2092), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((2123, 2143), 'PySide2.QtGui.QColor', 'QColor', (['Qt.lightGray'], {}), '(Qt.lightGray)\n', (2129, 2143), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((2174, 2189), 'PySide2.QtGui.QColor', 'QColor', (['Qt.gray'], {}), '(Qt.gray)\n', (2180, 2189), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((2220, 2239), 'PySide2.QtGui.QColor', 'QColor', (['Qt.darkGray'], {}), '(Qt.darkGray)\n', (2226, 2239), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((2270, 2286), 'PySide2.QtGui.QColor', 'QColor', (['Qt.black'], {}), '(Qt.black)\n', (2276, 2286), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((2317, 2331), 'PySide2.QtGui.QColor', 'QColor', (['Qt.red'], {}), '(Qt.red)\n', (2323, 2331), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((2362, 2378), 'PySide2.QtGui.QColor', 'QColor', (['Qt.green'], {}), '(Qt.green)\n', (2368, 2378), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((2409, 2424), 'PySide2.QtGui.QColor', 'QColor', (['Qt.blue'], {}), '(Qt.blue)\n', (2415, 2424), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((2455, 2473), 'PySide2.QtGui.QColor', 'QColor', (['Qt.darkRed'], {}), '(Qt.darkRed)\n', (2461, 2473), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((2504, 2524), 'PySide2.QtGui.QColor', 'QColor', (['Qt.darkGreen'], {}), '(Qt.darkGreen)\n', (2510, 2524), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((2555, 2574), 'PySide2.QtGui.QColor', 'QColor', (['Qt.darkBlue'], {}), '(Qt.darkBlue)\n', (2561, 2574), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((2605, 2620), 'PySide2.QtGui.QColor', 'QColor', (['Qt.cyan'], {}), '(Qt.cyan)\n', (2611, 2620), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((2651, 2669), 'PySide2.QtGui.QColor', 'QColor', (['Qt.magenta'], {}), '(Qt.magenta)\n', (2657, 2669), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((2700, 2717), 'PySide2.QtGui.QColor', 'QColor', (['Qt.yellow'], {}), '(Qt.yellow)\n', (2706, 2717), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((2748, 2767), 
'PySide2.QtGui.QColor', 'QColor', (['Qt.darkCyan'], {}), '(Qt.darkCyan)\n', (2754, 2767), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((2798, 2820), 'PySide2.QtGui.QColor', 'QColor', (['Qt.darkMagenta'], {}), '(Qt.darkMagenta)\n', (2804, 2820), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((2851, 2872), 'PySide2.QtGui.QColor', 'QColor', (['Qt.darkYellow'], {}), '(Qt.darkYellow)\n', (2857, 2872), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((2925, 2942), 'PySide2.QtGui.QColor', 'QColor', (['"""#505F69"""'], {}), "('#505F69')\n", (2931, 2942), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((2973, 2990), 'PySide2.QtGui.QColor', 'QColor', (['"""#32414B"""'], {}), "('#32414B')\n", (2979, 2990), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((3021, 3038), 'PySide2.QtGui.QColor', 'QColor', (['"""#19232D"""'], {}), "('#19232D')\n", (3027, 3038), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((3070, 3087), 'PySide2.QtGui.QColor', 'QColor', (['"""#F0F0F0"""'], {}), "('#F0F0F0')\n", (3076, 3087), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((3118, 3135), 'PySide2.QtGui.QColor', 'QColor', (['"""#AAAAAA"""'], {}), "('#AAAAAA')\n", (3124, 3135), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((3166, 3183), 'PySide2.QtGui.QColor', 'QColor', (['"""#787878"""'], {}), "('#787878')\n", (3172, 3183), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((3215, 3232), 'PySide2.QtGui.QColor', 'QColor', (['"""#148CD2"""'], {}), "('#148CD2')\n", (3221, 3232), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((3263, 3280), 'PySide2.QtGui.QColor', 'QColor', (['"""#1464A0"""'], {}), "('#1464A0')\n", (3269, 3280), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((3311, 3328), 'PySide2.QtGui.QColor', 'QColor', (['"""#14506E"""'], {}), "('#14506E')\n", (3317, 3328), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((3373, 3399), 'PySide2.QtGui.QColor', 'QColor', (['(246)', '(202)', '(203)', '(255)'], {}), '(246, 202, 203, 255)\n', (3379, 3399), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((3430, 3456), 'PySide2.QtGui.QColor', 'QColor', (['(247)', '(170)', '(189)', '(255)'], {}), '(247, 170, 189, 255)\n', (3436, 3456), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((3487, 3512), 'PySide2.QtGui.QColor', 'QColor', (['(231)', '(62)', '(151)', '(255)'], {}), '(231, 62, 151, 255)\n', (3493, 3512), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((3543, 3567), 'PySide2.QtGui.QColor', 'QColor', (['(229)', '(2)', '(120)', '(255)'], {}), '(229, 2, 120, 255)\n', (3549, 3567), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((3598, 3624), 'PySide2.QtGui.QColor', 'QColor', (['(241)', '(118)', '(110)', '(255)'], {}), '(241, 118, 110, 255)\n', (3604, 3624), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((3655, 3679), 'PySide2.QtGui.QColor', 'QColor', (['(178)', '(27)', '(32)', '(255)'], {}), '(178, 27, 32, 255)\n', (3661, 3679), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((3710, 3734), 'PySide2.QtGui.QColor', 'QColor', (['(236)', '(51)', '(39)', '(255)'], {}), '(236, 51, 39, 255)\n', (3716, 3734), False, 'from 
PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((3765, 3790), 'PySide2.QtGui.QColor', 'QColor', (['(240)', '(101)', '(53)', '(255)'], {}), '(240, 101, 53, 255)\n', (3771, 3790), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((3821, 3846), 'PySide2.QtGui.QColor', 'QColor', (['(174)', '(188)', '(43)', '(255)'], {}), '(174, 188, 43, 255)\n', (3827, 3846), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((3877, 3903), 'PySide2.QtGui.QColor', 'QColor', (['(227)', '(229)', '(121)', '(255)'], {}), '(227, 229, 121, 255)\n', (3883, 3903), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((3934, 3959), 'PySide2.QtGui.QColor', 'QColor', (['(255)', '(240)', '(29)', '(255)'], {}), '(255, 240, 29, 255)\n', (3940, 3959), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((3990, 4015), 'PySide2.QtGui.QColor', 'QColor', (['(254)', '(209)', '(26)', '(255)'], {}), '(254, 209, 26, 255)\n', (3996, 4015), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((4046, 4071), 'PySide2.QtGui.QColor', 'QColor', (['(250)', '(176)', '(98)', '(255)'], {}), '(250, 176, 98, 255)\n', (4052, 4071), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((4102, 4127), 'PySide2.QtGui.QColor', 'QColor', (['(247)', '(151)', '(47)', '(255)'], {}), '(247, 151, 47, 255)\n', (4108, 4127), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((4158, 4183), 'PySide2.QtGui.QColor', 'QColor', (['(236)', '(137)', '(36)', '(255)'], {}), '(236, 137, 36, 255)\n', (4164, 4183), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((4214, 4239), 'PySide2.QtGui.QColor', 'QColor', (['(242)', '(124)', '(53)', '(255)'], {}), '(242, 124, 53, 255)\n', (4220, 4239), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((4270, 4295), 'PySide2.QtGui.QColor', 'QColor', (['(176)', '(186)', '(39)', '(255)'], {}), '(176, 186, 39, 255)\n', (4276, 4295), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((4326, 4352), 'PySide2.QtGui.QColor', 'QColor', (['(212)', '(219)', '(145)', '(255)'], {}), '(212, 219, 145, 255)\n', (4332, 4352), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((4383, 4409), 'PySide2.QtGui.QColor', 'QColor', (['(178)', '(215)', '(140)', '(255)'], {}), '(178, 215, 140, 255)\n', (4389, 4409), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((4440, 4465), 'PySide2.QtGui.QColor', 'QColor', (['(111)', '(178)', '(68)', '(255)'], {}), '(111, 178, 68, 255)\n', (4446, 4465), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((4496, 4520), 'PySide2.QtGui.QColor', 'QColor', (['(69)', '(149)', '(62)', '(255)'], {}), '(69, 149, 62, 255)\n', (4502, 4520), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((4551, 4576), 'PySide2.QtGui.QColor', 'QColor', (['(21)', '(140)', '(167)', '(255)'], {}), '(21, 140, 167, 255)\n', (4557, 4576), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((4607, 4632), 'PySide2.QtGui.QColor', 'QColor', (['(24)', '(157)', '(193)', '(255)'], {}), '(24, 157, 193, 255)\n', (4613, 4632), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((4663, 4689), 'PySide2.QtGui.QColor', 'QColor', (['(153)', '(214)', '(218)', '(255)'], {}), '(153, 214, 218, 255)\n', (4669, 4689), False, 'from PySide2.QtGui import QColor, 
QPainter, QFont, QTextCursor\n'), ((4720, 4744), 'PySide2.QtGui.QColor', 'QColor', (['(55)', '(52)', '(144)', '(255)'], {}), '(55, 52, 144, 255)\n', (4726, 4744), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((4775, 4799), 'PySide2.QtGui.QColor', 'QColor', (['(15)', '(86)', '(163)', '(255)'], {}), '(15, 86, 163, 255)\n', (4781, 4799), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((4830, 4856), 'PySide2.QtGui.QColor', 'QColor', (['(150)', '(191)', '(229)', '(255)'], {}), '(150, 191, 229, 255)\n', (4836, 4856), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((4887, 4913), 'PySide2.QtGui.QColor', 'QColor', (['(139)', '(210)', '(244)', '(255)'], {}), '(139, 210, 244, 255)\n', (4893, 4913), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((4944, 4969), 'PySide2.QtGui.QColor', 'QColor', (['(16)', '(102)', '(162)', '(255)'], {}), '(16, 102, 162, 255)\n', (4950, 4969), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((5000, 5024), 'PySide2.QtGui.QColor', 'QColor', (['(14)', '(90)', '(131)', '(255)'], {}), '(14, 90, 131, 255)\n', (5006, 5024), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((5055, 5080), 'PySide2.QtGui.QColor', 'QColor', (['(87)', '(154)', '(188)', '(255)'], {}), '(87, 154, 188, 255)\n', (5061, 5080), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((5111, 5137), 'PySide2.QtGui.QColor', 'QColor', (['(137)', '(203)', '(225)', '(255)'], {}), '(137, 203, 225, 255)\n', (5117, 5137), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((5168, 5190), 'PySide2.QtGui.QColor', 'QColor', (['(86)', '(5)', '(79)', '(255)'], {}), '(86, 5, 79, 255)\n', (5174, 5190), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((5221, 5247), 'PySide2.QtGui.QColor', 'QColor', (['(222)', '(192)', '(219)', '(255)'], {}), '(222, 192, 219, 255)\n', (5227, 5247), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((5278, 5300), 'PySide2.QtGui.QColor', 'QColor', (['(87)', '(43)', '(3)', '(255)'], {}), '(87, 43, 3, 255)\n', (5284, 5300), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((5331, 5354), 'PySide2.QtGui.QColor', 'QColor', (['(19)', '(17)', '(15)', '(255)'], {}), '(19, 17, 15, 255)\n', (5337, 5354), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((5385, 5411), 'PySide2.QtGui.QColor', 'QColor', (['(125)', '(127)', '(130)', '(255)'], {}), '(125, 127, 130, 255)\n', (5391, 5411), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((5442, 5468), 'PySide2.QtGui.QColor', 'QColor', (['(181)', '(182)', '(185)', '(255)'], {}), '(181, 182, 185, 255)\n', (5448, 5468), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((5499, 5525), 'PySide2.QtGui.QColor', 'QColor', (['(217)', '(212)', '(206)', '(255)'], {}), '(217, 212, 206, 255)\n', (5505, 5525), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((5556, 5582), 'PySide2.QtGui.QColor', 'QColor', (['(185)', '(172)', '(151)', '(255)'], {}), '(185, 172, 151, 255)\n', (5562, 5582), False, 'from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor\n'), ((6890, 6917), 'PySide2.QtCore.QDateTime.currentDateTime', 'QDateTime.currentDateTime', ([], {}), '()\n', (6915, 6917), False, 'from PySide2.QtCore import QEvent, QSettings, QSize, Qt, QDateTime\n')] |
"""Contains tests for finpack/core/cli.py
"""
__copyright__ = "Copyright (C) 2021 <NAME>"
import os
import unittest
from importlib import metadata
from docopt import docopt
from finpack.core import cli
class TestCli(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.DATA_DIR = "temp"
os.mkdir(cls.DATA_DIR)
@classmethod
def tearDownClass(cls):
os.rmdir(cls.DATA_DIR)
def test_version_option(self):
argv = ["--version"]
args = docopt(cli.__doc__, argv=argv)
self.assertTrue(args["--version"])
def test_init_no_options(self):
argv = ["init"]
args = docopt(cli.__doc__, argv=argv)
self.assertTrue(args["init"])
def test_init_with_filepath_option(self):
argv = ["init", "--filepath=temp/data.csv"]
args = docopt(cli.__doc__, argv=argv)
self.assertTrue(args["init"])
self.assertEqual(args["--filepath"], "temp/data.csv")
def test_init_with_sample_dataset_option(self):
argv = ["init", "--sample-dataset"]
args = docopt(cli.__doc__, argv=argv)
self.assertTrue(args["init"])
self.assertTrue(args["--sample-dataset"])
def test_init_with_overwrite_option(self):
argv = ["init", "--overwrite"]
args = docopt(cli.__doc__, argv=argv)
self.assertTrue(args["init"])
self.assertTrue(args["--overwrite"])
def test_balsheet_no_option(self):
argv = ["balsheet"]
args = docopt(cli.__doc__, argv=argv)
self.assertTrue(args["balsheet"])
def test_balsheet_with_filepath_option(self):
argv = ["balsheet", "--filepath=temp/data2.csv"]
args = docopt(cli.__doc__, argv=argv)
self.assertTrue(args["balsheet"])
self.assertEqual(args["--filepath"], "temp/data2.csv")
def test_balsheet_with_levels_default(self):
argv = ["balsheet"]
args = docopt(cli.__doc__, argv=argv)
self.assertTrue(args["balsheet"])
self.assertEqual(args["--levels"], "3")
def test_balsheet_with_levels_option(self):
argv = ["balsheet", "--levels=2"]
args = docopt(cli.__doc__, argv=argv)
self.assertTrue(args["balsheet"])
self.assertEqual(args["--levels"], "2")
def test_balsheet_with_date_default(self):
argv = ["balsheet"]
args = docopt(cli.__doc__, argv=argv)
self.assertTrue(args["balsheet"])
self.assertEqual(args["--date"], "today")
def test_balsheet_with_date_option(self):
argv = ["balsheet", "--date=2021-12-01"]
args = docopt(cli.__doc__, argv=argv)
self.assertTrue(args["balsheet"])
self.assertEqual(args["--date"], "2021-12-01")
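# Illustrative shape of the parsed-arguments dict (keys and defaults depend on the
# usage string in finpack.core.cli's docstring):
#   docopt(cli.__doc__, argv=["init", "--overwrite"])
#     -> {"init": True, "--overwrite": True, "balsheet": False, ...}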
| [
"os.rmdir",
"os.mkdir",
"docopt.docopt"
] | [((322, 344), 'os.mkdir', 'os.mkdir', (['cls.DATA_DIR'], {}), '(cls.DATA_DIR)\n', (330, 344), False, 'import os\n'), ((399, 421), 'os.rmdir', 'os.rmdir', (['cls.DATA_DIR'], {}), '(cls.DATA_DIR)\n', (407, 421), False, 'import os\n'), ((503, 533), 'docopt.docopt', 'docopt', (['cli.__doc__'], {'argv': 'argv'}), '(cli.__doc__, argv=argv)\n', (509, 533), False, 'from docopt import docopt\n'), ((655, 685), 'docopt.docopt', 'docopt', (['cli.__doc__'], {'argv': 'argv'}), '(cli.__doc__, argv=argv)\n', (661, 685), False, 'from docopt import docopt\n'), ((840, 870), 'docopt.docopt', 'docopt', (['cli.__doc__'], {'argv': 'argv'}), '(cli.__doc__, argv=argv)\n', (846, 870), False, 'from docopt import docopt\n'), ((1085, 1115), 'docopt.docopt', 'docopt', (['cli.__doc__'], {'argv': 'argv'}), '(cli.__doc__, argv=argv)\n', (1091, 1115), False, 'from docopt import docopt\n'), ((1308, 1338), 'docopt.docopt', 'docopt', (['cli.__doc__'], {'argv': 'argv'}), '(cli.__doc__, argv=argv)\n', (1314, 1338), False, 'from docopt import docopt\n'), ((1507, 1537), 'docopt.docopt', 'docopt', (['cli.__doc__'], {'argv': 'argv'}), '(cli.__doc__, argv=argv)\n', (1513, 1537), False, 'from docopt import docopt\n'), ((1705, 1735), 'docopt.docopt', 'docopt', (['cli.__doc__'], {'argv': 'argv'}), '(cli.__doc__, argv=argv)\n', (1711, 1735), False, 'from docopt import docopt\n'), ((1936, 1966), 'docopt.docopt', 'docopt', (['cli.__doc__'], {'argv': 'argv'}), '(cli.__doc__, argv=argv)\n', (1942, 1966), False, 'from docopt import docopt\n'), ((2165, 2195), 'docopt.docopt', 'docopt', (['cli.__doc__'], {'argv': 'argv'}), '(cli.__doc__, argv=argv)\n', (2171, 2195), False, 'from docopt import docopt\n'), ((2379, 2409), 'docopt.docopt', 'docopt', (['cli.__doc__'], {'argv': 'argv'}), '(cli.__doc__, argv=argv)\n', (2385, 2409), False, 'from docopt import docopt\n'), ((2615, 2645), 'docopt.docopt', 'docopt', (['cli.__doc__'], {'argv': 'argv'}), '(cli.__doc__, argv=argv)\n', (2621, 2645), False, 'from docopt import docopt\n')] |
"""
Author: <NAME>
"""
import numpy as np
import pandas as pd
from sklearn.neighbors import NearestNeighbors
def affinity_graph(X):
'''
    Return a dense (ni x ni) matrix of pairwise squared L2 distances.
'''
ni, nd = X.shape
A = np.zeros((ni, ni))
for i in range(ni):
for j in range(i+1, ni):
            dist = ((X[i] - X[j])**2).sum() # squared L2 distance
A[i][j] = dist
A[j][i] = dist # by symmetry
return A
def knn_graph(X, knn=4):
'''
    Return a dense (ni x ni) adjacency matrix holding, for each point, the
    distances to its `knn` nearest neighbours.
'''
ni, nd = X.shape
nbrs = NearestNeighbors(n_neighbors=(knn+1), algorithm='ball_tree').fit(X)
distances, indices = nbrs.kneighbors(X)
A = np.zeros((ni, ni))
    for dist, ind in zip(distances, indices):
        i0 = ind[0]
        for i in range(1, knn+1):
            j = ind[i]  # index of the i-th nearest neighbour of point i0
            d = dist[i]
            A[i0, j] = d
            A[j, i0] = d  # by symmetry
return A
def sparse_affinity_graph(X):
'''
TODO: This function returns a numpy sparse matrix.
'''
ni, nd = X.shape
A = np.zeros((ni, ni))
for i in range(ni):
for j in range(i+1, ni):
            dist = ((X[i] - X[j])**2).sum() # squared L2 distance
A[i][j] = dist
A[j][i] = dist # by symmetry
return A
def laplacian_graph(X, mode='affinity', knn=3, eta=0.01, sigma=2.5):
'''
    The unnormalized graph Laplacian, L = D - W.
'''
if mode == 'affinity':
W = affinity_graph(X)
W[abs(W) > eta] = 0
elif mode == 'nearestneighbor':
W = knn_graph(X, knn=knn)
elif mode == 'gaussian':
W = affinity_graph(X)
bandwidth = 2.0*(sigma**2)
        W = np.exp(-W / bandwidth)  # Gaussian (RBF) kernel on the squared distances
else:
pass
D = np.diag(W.sum(axis=1))
L = D - W
return L
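# Quick self-check sketch (an addition; synthetic data, illustrative parameter values).
def _laplacian_sanity_check(n=10, d=2):
    X = np.random.rand(n, d)
    L = laplacian_graph(X, mode='gaussian', sigma=1.0)
    # Every row of L = D - W sums to zero because D holds the row sums of W.
    return np.allclose(L.sum(axis=1), 0.0)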
| [
"numpy.exp",
"numpy.zeros",
"sklearn.neighbors.NearestNeighbors"
] | [((208, 226), 'numpy.zeros', 'np.zeros', (['(ni, ni)'], {}), '((ni, ni))\n', (216, 226), True, 'import numpy as np\n'), ((606, 624), 'numpy.zeros', 'np.zeros', (['(ni, ni)'], {}), '((ni, ni))\n', (614, 624), True, 'import numpy as np\n'), ((896, 914), 'numpy.zeros', 'np.zeros', (['(ni, ni)'], {}), '((ni, ni))\n', (904, 914), True, 'import numpy as np\n'), ((492, 552), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': '(knn + 1)', 'algorithm': '"""ball_tree"""'}), "(n_neighbors=knn + 1, algorithm='ball_tree')\n", (508, 552), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((1422, 1431), 'numpy.exp', 'np.exp', (['W'], {}), '(W)\n', (1428, 1431), True, 'import numpy as np\n')] |
import numpy as np
import pytest
from pytest import approx
from pymt.component.grid import GridMixIn
class Port:
def __init__(self, name, uses=None, provides=None):
self._name = name
self._uses = uses or []
self._provides = provides or []
def get_component_name(self):
return self._name
def get_input_item_count(self):
return len(self._uses)
def get_input_item_list(self):
return self._uses
def get_output_item_count(self):
return len(self._provides)
def get_output_item_list(self):
return self._provides
def test_exchange_items():
class Component(GridMixIn):
def __init__(self):
self._port = Port("test", uses=["invar"], provides=["outvar"])
super().__init__()
c = Component()
assert c.input_items == ["invar"]
assert c.output_items == ["outvar"]
def test_no_exchange_items():
class Component(GridMixIn):
def __init__(self):
self._port = Port("test")
super().__init__()
c = Component()
assert c.input_items == []
assert c.output_items == []
def test_raster_1d():
class RasterPort(Port):
def get_grid_shape(self, grid_id):
return (3,)
def get_grid_spacing(self, grid_id):
return (2.0,)
def get_grid_origin(self, grid_id):
return (3.0,)
class Component(GridMixIn):
def __init__(self):
self._port = RasterPort("test", uses=["invar"])
super().__init__()
c = Component()
assert c.get_x("invar") == approx(np.array([3.0, 5.0, 7.0]))
def test_raster_2d():
class RasterPort(Port):
def get_grid_shape(self, grid_id):
return (2, 3)
def get_grid_spacing(self, grid_id):
return (2.0, 1.0)
def get_grid_origin(self, grid_id):
return (0.0, 0.0)
class Component(GridMixIn):
def __init__(self):
self._port = RasterPort("test-2d", uses=["invar"], provides=["outvar"])
super().__init__()
c = Component()
assert c.name == "test-2d"
assert c.get_grid_type(0) == "RASTER"
assert c.get_x(0) == approx(np.array([[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]]))
assert c.get_y(0) == approx(np.array([[0.0, 0.0, 0.0], [2.0, 2.0, 2.0]]))
assert np.all(c.get_connectivity(0) == np.array([0, 1, 4, 3, 1, 2, 5, 4]))
assert np.all(c.get_offset(0) == np.array([4, 8]))
def test_raster_3d():
class RasterPort(Port):
def get_grid_shape(self, grid_id):
return (2, 2, 3)
def get_grid_spacing(self, grid_id):
return (1.0, 2.0, 1.0)
def get_grid_origin(self, grid_id):
return (0.0, 0.0, 0.0)
class Component(GridMixIn):
def __init__(self):
self._port = RasterPort("test-3d", uses=["invar"])
super().__init__()
c = Component()
assert c.get_x(0) == approx(
np.array(
[[[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]], [[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]]]
)
)
assert c.get_y(0) == approx(
np.array(
[[[0.0, 0.0, 0.0], [2.0, 2.0, 2.0]], [[0.0, 0.0, 0.0], [2.0, 2.0, 2.0]]]
)
)
assert c.get_z(0) == approx(
np.array(
[[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]]
)
)
def test_rectilinear():
class RectilinearPort(Port):
def get_grid_shape(self, grid_id):
return (2, 3)
def get_grid_x(self, grid_id):
return (0.0, 3.0, 4)
def get_grid_y(self, grid_id):
return (2.0, 7.0)
class Component(GridMixIn):
def __init__(self):
self._port = RectilinearPort("test", uses=["invar"])
super().__init__()
c = Component()
assert c.get_grid_type(0) == "RECTILINEAR"
assert c.get_x(0) == approx(np.array([[0.0, 3.0, 4.0], [0.0, 3.0, 4.0]]))
assert c.get_y(0) == approx(np.array([[2.0, 2.0, 2.0], [7.0, 7.0, 7.0]]))
def test_structured():
class StructuredPort(Port):
def get_grid_shape(self, grid_id):
return (2, 3)
def get_grid_x(self, grid_id):
return np.array([0.0, 1.0, 2.0, 0.0, 1.0, 2.0])
def get_grid_y(self, grid_id):
return np.array([0.0, 1.0, 2.0, 1.0, 2.0, 3.0])
class Component(GridMixIn):
def __init__(self):
self._port = StructuredPort("test", uses=["invar"])
super().__init__()
c = Component()
assert c.get_grid_type(0) == "STRUCTURED"
assert c.get_x(0) == approx(np.array([0.0, 1.0, 2.0, 0.0, 1.0, 2.0]))
assert c.get_y(0) == approx(np.array([0.0, 1.0, 2.0, 1.0, 2.0, 3.0]))
def test_unstructured():
class UnstructuredPort(Port):
def get_grid_x(self, grid_id):
return np.array([0.0, 1.0, 0.0, 1.0, 2.0])
def get_grid_y(self, grid_id):
return np.array([0.0, 0.0, 1.0, 1.0, 0.0])
def get_grid_connectivity(self, grid_id):
return np.array([0, 1, 3, 2, 4, 3, 1])
def get_grid_offset(self, grid_id):
return np.array([4, 7])
class Component(GridMixIn):
def __init__(self):
self._port = UnstructuredPort("test", uses=["invar"])
super().__init__()
c = Component()
assert c.get_grid_type(0) == "UNSTRUCTURED"
assert c.get_x(0) == approx(np.array([0.0, 1.0, 0.0, 1.0, 2.0]))
assert c.get_y(0) == approx(np.array([0.0, 0.0, 1.0, 1.0, 0.0]))
def test_get_grid_shape_is_none():
class UnstructuredPort(Port):
def get_grid_shape(self, grid_id):
return None
def get_grid_x(self, grid_id):
return np.array([0.0, 1.0, 2.0])
class Component(GridMixIn):
def __init__(self):
self._port = UnstructuredPort("test", uses=["invar"])
super().__init__()
c = Component()
assert c.get_grid_type(0) == "UNSTRUCTURED"
def test_get_grid_shape_raises():
class UnstructuredPort(Port):
def get_grid_shape(self, grid_id):
raise NotImplementedError("get_grid_shape")
def get_grid_x(self, grid_id):
return np.array([0.0, 1.0, 2.0])
class Component(GridMixIn):
def __init__(self):
self._port = UnstructuredPort("test", uses=["invar"])
super().__init__()
c = Component()
assert c.get_grid_type(0) == "UNSTRUCTURED"
def test_structured_1d():
class RectilinearPort(Port):
def get_grid_shape(self, grid_id):
return (2, 3)
def get_grid_x(self, grid_id):
return np.array([0.0, 1.0, 2.0])
def get_grid_y(self, grid_id):
raise NotImplementedError("get_grid_y")
def get_grid_z(self, grid_id):
raise NotImplementedError("get_grid_z")
class Component(GridMixIn):
def __init__(self):
self._port = RectilinearPort("test", uses=["invar"])
super().__init__()
c = Component()
assert c.get_grid_type(0) == "RECTILINEAR"
with pytest.raises(IndexError):
c.get_z(0)
| [
"numpy.array",
"pytest.raises"
] | [((7100, 7125), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (7113, 7125), False, 'import pytest\n'), ((1613, 1638), 'numpy.array', 'np.array', (['[3.0, 5.0, 7.0]'], {}), '([3.0, 5.0, 7.0])\n', (1621, 1638), True, 'import numpy as np\n'), ((2214, 2258), 'numpy.array', 'np.array', (['[[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]]'], {}), '([[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]])\n', (2222, 2258), True, 'import numpy as np\n'), ((2292, 2336), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0], [2.0, 2.0, 2.0]]'], {}), '([[0.0, 0.0, 0.0], [2.0, 2.0, 2.0]])\n', (2300, 2336), True, 'import numpy as np\n'), ((2381, 2415), 'numpy.array', 'np.array', (['[0, 1, 4, 3, 1, 2, 5, 4]'], {}), '([0, 1, 4, 3, 1, 2, 5, 4])\n', (2389, 2415), True, 'import numpy as np\n'), ((2454, 2470), 'numpy.array', 'np.array', (['[4, 8]'], {}), '([4, 8])\n', (2462, 2470), True, 'import numpy as np\n'), ((2974, 3061), 'numpy.array', 'np.array', (['[[[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]], [[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]]]'], {}), '([[[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]], [[0.0, 1.0, 2.0], [0.0, 1.0, \n 2.0]]])\n', (2982, 3061), True, 'import numpy as np\n'), ((3126, 3213), 'numpy.array', 'np.array', (['[[[0.0, 0.0, 0.0], [2.0, 2.0, 2.0]], [[0.0, 0.0, 0.0], [2.0, 2.0, 2.0]]]'], {}), '([[[0.0, 0.0, 0.0], [2.0, 2.0, 2.0]], [[0.0, 0.0, 0.0], [2.0, 2.0, \n 2.0]]])\n', (3134, 3213), True, 'import numpy as np\n'), ((3278, 3365), 'numpy.array', 'np.array', (['[[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]]'], {}), '([[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], [[1.0, 1.0, 1.0], [1.0, 1.0, \n 1.0]]])\n', (3286, 3365), True, 'import numpy as np\n'), ((3917, 3961), 'numpy.array', 'np.array', (['[[0.0, 3.0, 4.0], [0.0, 3.0, 4.0]]'], {}), '([[0.0, 3.0, 4.0], [0.0, 3.0, 4.0]])\n', (3925, 3961), True, 'import numpy as np\n'), ((3995, 4039), 'numpy.array', 'np.array', (['[[2.0, 2.0, 2.0], [7.0, 7.0, 7.0]]'], {}), '([[2.0, 2.0, 2.0], [7.0, 7.0, 7.0]])\n', (4003, 4039), True, 'import numpy as np\n'), ((4226, 4266), 'numpy.array', 'np.array', (['[0.0, 1.0, 2.0, 0.0, 1.0, 2.0]'], {}), '([0.0, 1.0, 2.0, 0.0, 1.0, 2.0])\n', (4234, 4266), True, 'import numpy as np\n'), ((4326, 4366), 'numpy.array', 'np.array', (['[0.0, 1.0, 2.0, 1.0, 2.0, 3.0]'], {}), '([0.0, 1.0, 2.0, 1.0, 2.0, 3.0])\n', (4334, 4366), True, 'import numpy as np\n'), ((4622, 4662), 'numpy.array', 'np.array', (['[0.0, 1.0, 2.0, 0.0, 1.0, 2.0]'], {}), '([0.0, 1.0, 2.0, 0.0, 1.0, 2.0])\n', (4630, 4662), True, 'import numpy as np\n'), ((4696, 4736), 'numpy.array', 'np.array', (['[0.0, 1.0, 2.0, 1.0, 2.0, 3.0]'], {}), '([0.0, 1.0, 2.0, 1.0, 2.0, 3.0])\n', (4704, 4736), True, 'import numpy as np\n'), ((4857, 4892), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0, 1.0, 2.0]'], {}), '([0.0, 1.0, 0.0, 1.0, 2.0])\n', (4865, 4892), True, 'import numpy as np\n'), ((4952, 4987), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0, 1.0, 0.0]'], {}), '([0.0, 0.0, 1.0, 1.0, 0.0])\n', (4960, 4987), True, 'import numpy as np\n'), ((5058, 5089), 'numpy.array', 'np.array', (['[0, 1, 3, 2, 4, 3, 1]'], {}), '([0, 1, 3, 2, 4, 3, 1])\n', (5066, 5089), True, 'import numpy as np\n'), ((5154, 5170), 'numpy.array', 'np.array', (['[4, 7]'], {}), '([4, 7])\n', (5162, 5170), True, 'import numpy as np\n'), ((5430, 5465), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0, 1.0, 2.0]'], {}), '([0.0, 1.0, 0.0, 1.0, 2.0])\n', (5438, 5465), True, 'import numpy as np\n'), ((5499, 5534), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0, 1.0, 0.0]'], {}), '([0.0, 0.0, 1.0, 1.0, 0.0])\n', (5507, 5534), True, 'import 
numpy as np\n'), ((5733, 5758), 'numpy.array', 'np.array', (['[0.0, 1.0, 2.0]'], {}), '([0.0, 1.0, 2.0])\n', (5741, 5758), True, 'import numpy as np\n'), ((6214, 6239), 'numpy.array', 'np.array', (['[0.0, 1.0, 2.0]'], {}), '([0.0, 1.0, 2.0])\n', (6222, 6239), True, 'import numpy as np\n'), ((6656, 6681), 'numpy.array', 'np.array', (['[0.0, 1.0, 2.0]'], {}), '([0.0, 1.0, 2.0])\n', (6664, 6681), True, 'import numpy as np\n')] |
"""Exercise 1
Usage:
$ CUDA_VISIBLE_DEVICES=2 python practico_1_train_petfinder.py --dataset_dir ../ --epochs 30 --dropout 0.1 0.1 --hidden_layer_sizes 200 100
To know which GPU to use, you can check it with the command
$ nvidia-smi
"""
import argparse
import os
import mlflow
import pickle
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.keras import layers, models
import warnings
warnings.filterwarnings("ignore")
from auxiliary import process_features, load_dataset, build_columns, log_dir_name
TARGET_COL = 'AdoptionSpeed'
def read_args():
parser = argparse.ArgumentParser(
description='Training a MLP on the petfinder dataset')
# Here you have some examples of classifier parameters. You can add
# more arguments or change these if you need to.
parser.add_argument('--experiment_name', type=str, default='Base model',
help='Name of the experiment, used in mlflow.')
parser.add_argument('--dataset_dir', default='../petfinder_dataset', type=str,
help='Directory with the training and test files.')
parser.add_argument('--hidden_layer_sizes', nargs='+', default=[100], type=int,
help='Number of hidden units of each hidden layer.')
parser.add_argument('--epochs', default=50, type=int,
help='Number of epochs to train.')
parser.add_argument('--dropout', nargs='+', default=[0.5], type=float,
help='Dropout ratio for every layer.')
parser.add_argument('--batch_size', type=int, default=32,
help='Number of instances in each batch.')
parser.add_argument('--learning_rate', default=1e-3, type=float,
help='Learning rate.')
args = parser.parse_args()
assert len(args.hidden_layer_sizes) == len(args.dropout)
return args
def print_args(args):
print('-------------------------------------------')
print('PARAMS ------------------------------------')
print('-------------------------------------------')
print('--experiment_name ', args.experiment_name)
print('--dataset_dir ', args.dataset_dir)
print('--epochs ', args.epochs)
print('--hidden_layer_sizes', args.hidden_layer_sizes)
print('--dropout ', args.dropout)
print('--batch_size ', args.batch_size)
print('--learning_rate ', args.learning_rate)
print('-------------------------------------------')
def main():
args = read_args()
print_args(args)
experiment_name = args.experiment_name
batch_size = args.batch_size
learning_rate = args.learning_rate
hidden_layer_sizes = args.hidden_layer_sizes
dropout = args.dropout
epochs = args.epochs
### Output directory
dir_name = log_dir_name(args)
print()
print(dir_name)
print()
output_dir = os.path.join('experiments', experiment_name, dir_name)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
dataset, dev_dataset, test_dataset = load_dataset(args.dataset_dir)
nlabels = dataset[TARGET_COL].unique().shape[0]
columns = [
'Gender', 'Color1', 'Vaccinated', 'Dewormed',
'Breed1',
'Age', 'Fee', 'Quantity']
one_hot_columns, embedded_columns, numeric_columns = build_columns(dataset, columns)
# TODO (optional) put these three types of columns in the same dictionary with "column types"
X_train, y_train = process_features(dataset, one_hot_columns, numeric_columns, embedded_columns)
direct_features_input_shape = (X_train['direct_features'].shape[1],)
X_dev, y_dev = process_features(dev_dataset, one_hot_columns, numeric_columns, embedded_columns)
###########################################################################################################
### TODO: Shuffle train dataset - Done
###########################################################################################################
shuffle_len = X_train['direct_features'].shape[0]
train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train)).shuffle(shuffle_len).batch(batch_size)
###########################################################################################################
dev_ds = tf.data.Dataset.from_tensor_slices((X_dev, y_dev)).batch(batch_size)
test_ds = tf.data.Dataset.from_tensor_slices(process_features(
test_dataset, one_hot_columns, numeric_columns, embedded_columns, test=True)[0]).batch(batch_size)
###########################################################################################################
### TODO: Build the Keras model - Done
###########################################################################################################
tf.keras.backend.clear_session()
# Add one input and one embedding for each embedded column
embedding_layers = []
inputs = []
for embedded_col, max_value in embedded_columns.items():
input_layer = layers.Input(shape=(1,), name=embedded_col)
inputs.append(input_layer)
# Define the embedding layer
embedding_size = int(max_value / 4)
embedding_layers.append(
tf.squeeze(layers.Embedding(input_dim=max_value, output_dim=embedding_size)(input_layer), axis=-2))
print('Adding embedding of size {} for layer {}'.format(embedding_size, embedded_col))
# Add the direct features already calculated
direct_features_input = layers.Input(shape=direct_features_input_shape, name='direct_features')
inputs.append(direct_features_input)
# Concatenate everything together
features = layers.concatenate(embedding_layers + [direct_features_input])
denses = []
dense1 = layers.Dense(hidden_layer_sizes[0], activation='relu')(features)
denses.append(dense1)
if len(hidden_layer_sizes) > 1:
for hidden_layer_size in hidden_layer_sizes[1:]:
dense = layers.Dense(hidden_layer_size, activation='relu')(denses[-1])
denses.append(dense)
    output_layer = layers.Dense(nlabels, activation='softmax')(denses[-1])  # attach to the last hidden layer, not only the first
model = models.Model(inputs=inputs, outputs=output_layer)
###########################################################################################################
###########################################################################################################
### TODO: Fit the model - Done
###########################################################################################################
mlflow.set_experiment(experiment_name)
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
model.compile(loss='categorical_crossentropy', optimizer=optimizer,
metrics=['accuracy'])
logdir = "logs/scalars/" + dir_name
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir)
with mlflow.start_run(nested=True):
# Log model hiperparameters first
mlflow.log_param('hidden_layer_size', hidden_layer_sizes)
mlflow.log_param('dropout', dropout)
mlflow.log_param('embedded_columns', embedded_columns)
mlflow.log_param('one_hot_columns', one_hot_columns)
mlflow.log_param('numeric_columns', numeric_columns) # Not using these yet
mlflow.log_param('epochs', epochs)
mlflow.log_param('batch_size', batch_size)
mlflow.log_param('learning_rate', learning_rate)
# Train
history = model.fit(train_ds, epochs=epochs,
validation_data=dev_ds,
callbacks=[tensorboard_callback])
#######################################################################################################
### TODO: analyze history to see if model converges/overfits
#######################################################################################################
output_csv = os.path.join(output_dir, 'history.pickle')
with open(output_csv, 'bw') as f:
pickle.dump(history.history, f)
#######################################################################################################
#######################################################################################################
### TODO: Evaluate the model, calculating the metrics. - Done
#######################################################################################################
loss, accuracy = model.evaluate(dev_ds)
print("*** Dev loss: {} - accuracy: {}".format(loss, accuracy))
mlflow.log_metric('loss', loss)
mlflow.log_metric('accuracy', accuracy)
predictions = model.predict(test_ds)
#######################################################################################################
#######################################################################################################
### TODO: Convert predictions to classes - Done
#######################################################################################################
prediction_classes = np.argmax(predictions, axis=1)
#######################################################################################################
#######################################################################################################
### TODO: Save the results for submission - Done
#######################################################################################################
output_csv = os.path.join(output_dir, 'submit.csv')
submissions = pd.DataFrame(prediction_classes, columns=[TARGET_COL], index=test_dataset.PID)
submissions.to_csv(output_csv)
#######################################################################################################
###########################################################################################################
print('All operations completed')
if __name__ == '__main__':
main()
| [
"mlflow.set_experiment",
"mlflow.log_param",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Input",
"os.path.exists",
"argparse.ArgumentParser",
"tensorflow.data.Dataset.from_tensor_slices",
"mlflow.log_metric",
"auxiliary.log_dir_name",
"auxiliary.load_dataset",
"tensorflow.keras.models.Model",
"mlflow.start_run",
"pandas.DataFrame",
"tensorflow.keras.callbacks.TensorBoard",
"numpy.argmax",
"tensorflow.keras.layers.Embedding",
"warnings.filterwarnings",
"pickle.dump",
"os.makedirs",
"auxiliary.build_columns",
"os.path.join",
"tensorflow.keras.layers.concatenate",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.backend.clear_session",
"auxiliary.process_features"
] | [((475, 508), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (498, 508), False, 'import warnings\n'), ((654, 732), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Training a MLP on the petfinder dataset"""'}), "(description='Training a MLP on the petfinder dataset')\n", (677, 732), False, 'import argparse\n'), ((2879, 2897), 'auxiliary.log_dir_name', 'log_dir_name', (['args'], {}), '(args)\n', (2891, 2897), False, 'from auxiliary import process_features, load_dataset, build_columns, log_dir_name\n'), ((2959, 3013), 'os.path.join', 'os.path.join', (['"""experiments"""', 'experiment_name', 'dir_name'], {}), "('experiments', experiment_name, dir_name)\n", (2971, 3013), False, 'import os\n'), ((3131, 3161), 'auxiliary.load_dataset', 'load_dataset', (['args.dataset_dir'], {}), '(args.dataset_dir)\n', (3143, 3161), False, 'from auxiliary import process_features, load_dataset, build_columns, log_dir_name\n'), ((3395, 3426), 'auxiliary.build_columns', 'build_columns', (['dataset', 'columns'], {}), '(dataset, columns)\n', (3408, 3426), False, 'from auxiliary import process_features, load_dataset, build_columns, log_dir_name\n'), ((3549, 3626), 'auxiliary.process_features', 'process_features', (['dataset', 'one_hot_columns', 'numeric_columns', 'embedded_columns'], {}), '(dataset, one_hot_columns, numeric_columns, embedded_columns)\n', (3565, 3626), False, 'from auxiliary import process_features, load_dataset, build_columns, log_dir_name\n'), ((3719, 3804), 'auxiliary.process_features', 'process_features', (['dev_dataset', 'one_hot_columns', 'numeric_columns', 'embedded_columns'], {}), '(dev_dataset, one_hot_columns, numeric_columns,\n embedded_columns)\n', (3735, 3804), False, 'from auxiliary import process_features, load_dataset, build_columns, log_dir_name\n'), ((4881, 4913), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (4911, 4913), True, 'import tensorflow as tf\n'), ((5581, 5652), 'tensorflow.keras.layers.Input', 'layers.Input', ([], {'shape': 'direct_features_input_shape', 'name': '"""direct_features"""'}), "(shape=direct_features_input_shape, name='direct_features')\n", (5593, 5652), False, 'from tensorflow.keras import layers, models\n'), ((5756, 5818), 'tensorflow.keras.layers.concatenate', 'layers.concatenate', (['(embedding_layers + [direct_features_input])'], {}), '(embedding_layers + [direct_features_input])\n', (5774, 5818), False, 'from tensorflow.keras import layers, models\n'), ((6233, 6282), 'tensorflow.keras.models.Model', 'models.Model', ([], {'inputs': 'inputs', 'outputs': 'output_layer'}), '(inputs=inputs, outputs=output_layer)\n', (6245, 6282), False, 'from tensorflow.keras import layers, models\n'), ((6664, 6702), 'mlflow.set_experiment', 'mlflow.set_experiment', (['experiment_name'], {}), '(experiment_name)\n', (6685, 6702), False, 'import mlflow\n'), ((6720, 6773), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (6744, 6773), True, 'import tensorflow as tf\n'), ((6950, 6996), 'tensorflow.keras.callbacks.TensorBoard', 'tf.keras.callbacks.TensorBoard', ([], {'log_dir': 'logdir'}), '(log_dir=logdir)\n', (6980, 6996), True, 'import tensorflow as tf\n'), ((3025, 3051), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (3039, 3051), False, 'import os\n'), ((3061, 3084), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), 
'(output_dir)\n', (3072, 3084), False, 'import os\n'), ((5103, 5146), 'tensorflow.keras.layers.Input', 'layers.Input', ([], {'shape': '(1,)', 'name': 'embedded_col'}), '(shape=(1,), name=embedded_col)\n', (5115, 5146), False, 'from tensorflow.keras import layers, models\n'), ((5849, 5903), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['hidden_layer_sizes[0]'], {'activation': '"""relu"""'}), "(hidden_layer_sizes[0], activation='relu')\n", (5861, 5903), False, 'from tensorflow.keras import layers, models\n'), ((6168, 6211), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['nlabels'], {'activation': '"""softmax"""'}), "(nlabels, activation='softmax')\n", (6180, 6211), False, 'from tensorflow.keras import layers, models\n'), ((7007, 7036), 'mlflow.start_run', 'mlflow.start_run', ([], {'nested': '(True)'}), '(nested=True)\n', (7023, 7036), False, 'import mlflow\n'), ((7088, 7145), 'mlflow.log_param', 'mlflow.log_param', (['"""hidden_layer_size"""', 'hidden_layer_sizes'], {}), "('hidden_layer_size', hidden_layer_sizes)\n", (7104, 7145), False, 'import mlflow\n'), ((7154, 7190), 'mlflow.log_param', 'mlflow.log_param', (['"""dropout"""', 'dropout'], {}), "('dropout', dropout)\n", (7170, 7190), False, 'import mlflow\n'), ((7199, 7253), 'mlflow.log_param', 'mlflow.log_param', (['"""embedded_columns"""', 'embedded_columns'], {}), "('embedded_columns', embedded_columns)\n", (7215, 7253), False, 'import mlflow\n'), ((7262, 7314), 'mlflow.log_param', 'mlflow.log_param', (['"""one_hot_columns"""', 'one_hot_columns'], {}), "('one_hot_columns', one_hot_columns)\n", (7278, 7314), False, 'import mlflow\n'), ((7323, 7375), 'mlflow.log_param', 'mlflow.log_param', (['"""numeric_columns"""', 'numeric_columns'], {}), "('numeric_columns', numeric_columns)\n", (7339, 7375), False, 'import mlflow\n'), ((7407, 7441), 'mlflow.log_param', 'mlflow.log_param', (['"""epochs"""', 'epochs'], {}), "('epochs', epochs)\n", (7423, 7441), False, 'import mlflow\n'), ((7450, 7492), 'mlflow.log_param', 'mlflow.log_param', (['"""batch_size"""', 'batch_size'], {}), "('batch_size', batch_size)\n", (7466, 7492), False, 'import mlflow\n'), ((7501, 7549), 'mlflow.log_param', 'mlflow.log_param', (['"""learning_rate"""', 'learning_rate'], {}), "('learning_rate', learning_rate)\n", (7517, 7549), False, 'import mlflow\n'), ((8058, 8100), 'os.path.join', 'os.path.join', (['output_dir', '"""history.pickle"""'], {}), "(output_dir, 'history.pickle')\n", (8070, 8100), False, 'import os\n'), ((8723, 8754), 'mlflow.log_metric', 'mlflow.log_metric', (['"""loss"""', 'loss'], {}), "('loss', loss)\n", (8740, 8754), False, 'import mlflow\n'), ((8763, 8802), 'mlflow.log_metric', 'mlflow.log_metric', (['"""accuracy"""', 'accuracy'], {}), "('accuracy', accuracy)\n", (8780, 8802), False, 'import mlflow\n'), ((9278, 9308), 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(1)'}), '(predictions, axis=1)\n', (9287, 9308), True, 'import numpy as np\n'), ((9725, 9763), 'os.path.join', 'os.path.join', (['output_dir', '"""submit.csv"""'], {}), "(output_dir, 'submit.csv')\n", (9737, 9763), False, 'import os\n'), ((9786, 9864), 'pandas.DataFrame', 'pd.DataFrame', (['prediction_classes'], {'columns': '[TARGET_COL]', 'index': 'test_dataset.PID'}), '(prediction_classes, columns=[TARGET_COL], index=test_dataset.PID)\n', (9798, 9864), True, 'import pandas as pd\n'), ((4365, 4415), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(X_dev, y_dev)'], {}), '((X_dev, y_dev))\n', (4399, 4415), True, 'import tensorflow as 
tf\n'), ((8155, 8186), 'pickle.dump', 'pickle.dump', (['history.history', 'f'], {}), '(history.history, f)\n', (8166, 8186), False, 'import pickle\n'), ((6053, 6103), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['hidden_layer_size'], {'activation': '"""relu"""'}), "(hidden_layer_size, activation='relu')\n", (6065, 6103), False, 'from tensorflow.keras import layers, models\n'), ((4145, 4199), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(X_train, y_train)'], {}), '((X_train, y_train))\n', (4179, 4199), True, 'import tensorflow as tf\n'), ((4483, 4580), 'auxiliary.process_features', 'process_features', (['test_dataset', 'one_hot_columns', 'numeric_columns', 'embedded_columns'], {'test': '(True)'}), '(test_dataset, one_hot_columns, numeric_columns,\n embedded_columns, test=True)\n', (4499, 4580), False, 'from auxiliary import process_features, load_dataset, build_columns, log_dir_name\n'), ((5319, 5383), 'tensorflow.keras.layers.Embedding', 'layers.Embedding', ([], {'input_dim': 'max_value', 'output_dim': 'embedding_size'}), '(input_dim=max_value, output_dim=embedding_size)\n', (5335, 5383), False, 'from tensorflow.keras import layers, models\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from pkg_resources import parse_version
from warnings import warn
from copy import deepcopy
import networkx as nx
from networkx.readwrite import json_graph
from catpy.applications.base import CatmaidClientApplication
NX_VERSION_INFO = parse_version(nx.__version__)._key[1]
err_msg = (
"Tried to treat the edge's source/target fields as indices into the list of nodes, but failed. "
"See issue #26 [1]. "
"Has CATMAID upgraded to networkx 2.x? [2]\n\n"
"[1]: https://github.com/catmaid/catpy/issues/26\n"
"[2]: https://github.com/catmaid/CATMAID/blob/master/django/requirements.txt"
)
def convert_nodelink_data(jso):
"""NetworkX serialises graphs differently in v1.x and v2.x.
This converts v1-style data (as emitted by CATMAID) to v2-style data.
See issue #26 https://github.com/catmaid/catpy/issues/26
Parameters
----------
jso : dict
Returns
-------
dict
"""
if NX_VERSION_INFO < (2, 0):
warn(
"You are converting networkx v1-style JSON (emitted by CATMAID) to v2-style JSON,"
" but you are using networkx v1"
)
out = deepcopy(jso)
for edge in out["links"]:
for label in ["source", "target"]:
try:
edge[label] = out["nodes"][edge[label]]["id"]
except (KeyError, IndexError):
raise RuntimeError(err_msg)
return out
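# Illustration (added; the node/edge values are made up): networkx 1.x node-link data
# refers to edge endpoints by their index in the "nodes" list, 2.x by node id, so
#   {"nodes": [{"id": "a"}, {"id": "b"}], "links": [{"source": 0, "target": 1}]}
# becomes, after convert_nodelink_data(),
#   {"nodes": [{"id": "a"}, {"id": "b"}], "links": [{"source": "a", "target": "b"}]}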
class ExportWidget(CatmaidClientApplication):
def get_swc(self, skeleton_id, linearize_ids=False):
"""
Get a single skeleton in SWC format.
Parameters
----------
skeleton_id : int or str
linearize_ids : bool
Returns
-------
str
"""
return self.get(
(self.project_id, "skeleton", skeleton_id, "swc"),
{"linearize_ids": "true" if linearize_ids else "false"},
)
def get_connector_archive(self, *args, **kwargs):
"""Not implemented: requires an async job"""
raise NotImplementedError("Requires an async job")
def get_treenode_archive(self, *args, **kwargs):
"""Not implemented: requires an async job"""
raise NotImplementedError("Requires an async job")
def get_networkx_dict(self, *skeleton_ids):
"""
Get the data for a networkx graph of the given skeletons in node-link format.
In networkx 1.x, as used by CATMAID and therefore returned by this method,
"source" and "target" in the dicts in "links" refer to nodes by their indices in the "nodes" array.
See ``convert_nodelink_data`` function to convert into networkx 2.x-compatible format.
https://networkx.readthedocs.io/en/networkx-1.11/reference/generated/networkx.readwrite.json_graph.node_link_data.html
Parameters
----------
skeleton_ids : array-like of (int or str)
Returns
-------
dict
"""
return self.post(
(self.project_id, "graphexport", "json"),
data={"skeleton_list": list(skeleton_ids)},
)
def get_networkx(self, *skeleton_ids):
"""
Get a networkx MultiDiGraph of the given skeletons.
Parameters
----------
skeleton_ids : array-like of (int or str)
Returns
-------
networkx.MultiDiGraph
"""
data = self.get_networkx_dict(*skeleton_ids)
if NX_VERSION_INFO >= (2, 0):
data = convert_nodelink_data(data)
return json_graph.node_link_graph(data, directed=True)
def get_neuroml(self, skeleton_ids, skeleton_inputs=tuple()):
"""
Get NeuroML v1.8.1 (level 3, NetworkML) for the given skeletons, possibly with their input synapses
constrained to another set of skeletons.
N.B. If len(skeleton_ids) > 1, skeleton_inputs will be ignored and only synapses within the first skeleton
set will be used in the model.
Parameters
----------
skeleton_ids : array-like
Skeletons whose NeuroML to return
skeleton_inputs : array-like, optional
If specified, only input synapses from these skeletons will be added to the NeuroML
Returns
-------
str
NeuroML output string
"""
data = {"skids": list(skeleton_ids)}
if skeleton_inputs:
if len(skeleton_ids) > 1:
warn(
"More than one skeleton ID was selected: ignoring skeleton input constraints"
)
else:
data["inputs"] = list(skeleton_inputs)
return self.post((self.project_id, "neuroml", "neuroml_level3_v181"), data=data)
def get_treenode_and_connector_geometry(self, *skeleton_ids):
"""
Get the treenode and connector information for the given skeletons. The returned dictionary will be of the form
{
"skeletons": {
skeleton_id1: {
"treenodes": {
treenode_id1: {
"location": [x, y, z],
"parent_id": id_of_parent_treenode
},
treenode_id2: ...
},
"connectors": {
connector_id1: {
"location": [x, y, z],
"presynaptic_to": [list, of, treenode, ids],
"postsynaptic_to": [list, of, treenode, ids]
},
connector_id2: ...
}
},
skeleton_id2: ...
}
}
Parameters
----------
skeleton_ids : array-like of (int or str)
Returns
-------
dict
"""
# todo: factor API call into MorphologyFetcher
skeletons = dict()
warnings = set()
        relation_names = {0: "presynaptic_to", 1: "postsynaptic_to"}
for skeleton_id in skeleton_ids:
data = self.get(
"{}/{}/1/0/compact-skeleton".format(self.project_id, skeleton_id)
)
skeleton = {"treenodes": dict(), "connectors": dict()}
for treenode in data[0]:
skeleton["treenodes"][int(treenode[0])] = {
"location": treenode[3:6],
"parent_id": None if treenode[1] is None else int(treenode[1]),
}
for connector in data[1]:
# NOT the database relation ID
# {pre: 0, post: 1, gj: 2}
relation_number = connector[2]
                if relation_number not in relation_names:
                    warnings.add(str(relation_number))
                    continue
conn_id = int(connector[1])
if conn_id not in skeleton["connectors"]:
skeleton["connectors"][conn_id] = {
rn: [] for rn in relation_names.values()
}
skeleton["connectors"][conn_id]["location"] = connector[3:6]
skeleton["connectors"][conn_id][relation_names[relation_number]].append(
connector[0]
)
skeletons[int(skeleton_id)] = skeleton
        if warnings:
            warn(
                "Skeleton representations contained some unknown treenode->connector relation IDs:\n\t"
                + "\n\t".join(sorted(warnings))
            )
return {"skeletons": skeletons}
| [
"warnings.warn",
"pkg_resources.parse_version",
"networkx.readwrite.json_graph.node_link_graph",
"copy.deepcopy"
] | [((1205, 1218), 'copy.deepcopy', 'deepcopy', (['jso'], {}), '(jso)\n', (1213, 1218), False, 'from copy import deepcopy\n'), ((303, 332), 'pkg_resources.parse_version', 'parse_version', (['nx.__version__'], {}), '(nx.__version__)\n', (316, 332), False, 'from pkg_resources import parse_version\n'), ((1038, 1166), 'warnings.warn', 'warn', (['"""You are converting networkx v1-style JSON (emitted by CATMAID) to v2-style JSON, but you are using networkx v1"""'], {}), "(\n 'You are converting networkx v1-style JSON (emitted by CATMAID) to v2-style JSON, but you are using networkx v1'\n )\n", (1042, 1166), False, 'from warnings import warn\n'), ((3582, 3629), 'networkx.readwrite.json_graph.node_link_graph', 'json_graph.node_link_graph', (['data'], {'directed': '(True)'}), '(data, directed=True)\n', (3608, 3629), False, 'from networkx.readwrite import json_graph\n'), ((4504, 4597), 'warnings.warn', 'warn', (['"""More than one skeleton ID was selected: ignoring skeleton input constraints"""'], {}), "(\n 'More than one skeleton ID was selected: ignoring skeleton input constraints'\n )\n", (4508, 4597), False, 'from warnings import warn\n')] |
from typing import Optional
from watchmen_auth import PrincipalService
from watchmen_data_kernel.cache import CacheService
from watchmen_data_kernel.common import DataKernelException
from watchmen_data_kernel.external_writer import find_external_writer_create, register_external_writer_creator
from watchmen_meta.common import ask_meta_storage, ask_snowflake_generator
from watchmen_meta.system import ExternalWriterService as ExternalWriterStorageService
from watchmen_model.common import ExternalWriterId
from watchmen_model.system import ExternalWriter
def register_external_writer(external_writer: ExternalWriter) -> None:
create = find_external_writer_create(external_writer.type)
if create is None:
raise DataKernelException(f'Creator not found for external writer[{external_writer.dict()}].')
register_external_writer_creator(external_writer.writerCode, create())
class ExternalWriterService:
def __init__(self, principal_service: PrincipalService):
self.principalService = principal_service
def find_by_id(self, writer_id: ExternalWriterId) -> Optional[ExternalWriter]:
external_writer = CacheService.external_writer().get(writer_id)
if external_writer is not None:
if external_writer.tenantId != self.principalService.get_tenant_id():
raise DataKernelException(
f'External writer[id={writer_id}] not belongs to '
f'current tenant[id={self.principalService.get_tenant_id()}].')
register_external_writer(external_writer)
return external_writer
storage_service = ExternalWriterStorageService(
ask_meta_storage(), ask_snowflake_generator(), self.principalService)
storage_service.begin_transaction()
try:
# noinspection PyTypeChecker
external_writer: ExternalWriter = storage_service.find_by_id(writer_id)
if external_writer is None:
return None
CacheService.external_writer().put(external_writer)
register_external_writer(external_writer)
return external_writer
finally:
storage_service.close_transaction()
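# Example usage (added sketch; `principal` and the writer id are illustrative placeholders):
# service = ExternalWriterService(principal)  # principal: an authenticated PrincipalService
# writer = service.find_by_id('writer-1')     # cache hit, or storage lookup plus writer registration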
| [
"watchmen_data_kernel.cache.CacheService.external_writer",
"watchmen_data_kernel.external_writer.find_external_writer_create",
"watchmen_meta.common.ask_snowflake_generator",
"watchmen_meta.common.ask_meta_storage"
] | [((640, 689), 'watchmen_data_kernel.external_writer.find_external_writer_create', 'find_external_writer_create', (['external_writer.type'], {}), '(external_writer.type)\n', (667, 689), False, 'from watchmen_data_kernel.external_writer import find_external_writer_create, register_external_writer_creator\n'), ((1547, 1565), 'watchmen_meta.common.ask_meta_storage', 'ask_meta_storage', ([], {}), '()\n', (1563, 1565), False, 'from watchmen_meta.common import ask_meta_storage, ask_snowflake_generator\n'), ((1567, 1592), 'watchmen_meta.common.ask_snowflake_generator', 'ask_snowflake_generator', ([], {}), '()\n', (1590, 1592), False, 'from watchmen_meta.common import ask_meta_storage, ask_snowflake_generator\n'), ((1113, 1143), 'watchmen_data_kernel.cache.CacheService.external_writer', 'CacheService.external_writer', ([], {}), '()\n', (1141, 1143), False, 'from watchmen_data_kernel.cache import CacheService\n'), ((1820, 1850), 'watchmen_data_kernel.cache.CacheService.external_writer', 'CacheService.external_writer', ([], {}), '()\n', (1848, 1850), False, 'from watchmen_data_kernel.cache import CacheService\n')] |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import enchant
import os
import pickle
import re
class SpellChecker:
"""
A basic spell checker.
"""
# These must be all lower case for comparisons
uimsgs = {
# OK words
"adaptively", "adaptivity",
"aren", # aren't
"betweens", # yuck! in-betweens!
"boolean", "booleans",
"chamfer",
"couldn", # couldn't
"decrement",
"derivate",
"deterministically",
"doesn", # doesn't
"duplications",
"effector",
"equi", # equi-angular, etc.
"fader",
"globbing",
"hasn", # hasn't
"hetero",
"hoc", # ad-hoc
"incompressible",
"indices",
"instantiation",
"iridas",
"isn", # isn't
"iterable",
"kyrgyz",
"latin",
"merchantability",
"mplayer",
"ons", # add-ons
"pong", # ping pong
"scalable",
"shadeless",
"shouldn", # shouldn't
"smoothen",
"spacings",
"teleport", "teleporting",
"vertices",
"wasn", # wasn't
# Merged words
"antialiasing", "antialias",
"arcsine", "arccosine", "arctangent",
"autoclip",
"autocomplete",
"autoexec",
"autoexecution",
"autogenerated",
"autolock",
"automasking",
"autoname",
"autopack",
"autosave",
"autoscale",
"autosmooth",
"autosplit",
"backface", "backfacing",
"backimage",
"backscattered",
"bandnoise",
"bindcode",
"bitdepth",
"bitflag", "bitflags",
"bitrate",
"blackbody",
"blendfile",
"blendin",
"bonesize",
"boundbox",
"boxpack",
"buffersize",
"builtin", "builtins",
"bytecode",
"chunksize",
"customdata",
"dataset", "datasets",
"de",
"deadzone",
"deconstruct",
"defocus",
"denoise", "denoised", "denoising", "denoiser",
"deselect", "deselecting", "deselection",
"despill", "despilling",
"dirtree",
"editcurve",
"editmesh",
"filebrowser",
"filelist",
"filename", "filenames",
"filepath", "filepaths",
"forcefield", "forcefields",
"fulldome", "fulldomes",
"fullscreen",
"gridline",
"hardlight",
"hemi",
"hostname",
"inbetween",
"inscatter", "inscattering",
"libdata",
"lightprobe", "lightprobes",
"lightless",
"lineset",
"linestyle", "linestyles",
"localview",
"lookup", "lookups",
"mathutils",
"micropolygon",
"midlevel",
"midground",
"mixdown",
"multi",
"multifractal",
"multiframe",
"multilayer",
"multipaint",
"multires", "multiresolution",
"multisampling",
"multiscatter",
"multitexture",
"multithreaded",
"multiuser",
"multiview",
"namespace",
"nodetree", "nodetrees",
"keyconfig",
"offscreen",
"online",
"playhead",
"popup", "popups",
"pre",
"precache", "precaching",
"precalculate",
"precomputing",
"prefetch",
"premultiply", "premultiplied",
"prepass",
"prepend",
"preprocess", "preprocessing",
"preseek",
"promillage",
"pushdown",
"raytree",
"readonly",
"realtime",
"reinject", "reinjected",
"rekey",
"remesh",
"reprojection", "reproject", "reprojecting",
"resize",
"restpose",
"retarget", "retargets", "retargeting", "retargeted",
"retiming",
"rigidbody",
"ringnoise",
"rolloff",
"runtime",
"scanline",
"screenshot", "screenshots",
"seekability",
"selfcollision",
"shadowbuffer", "shadowbuffers",
"singletexture",
"spellcheck", "spellchecking",
"startup",
"stateful",
"starfield",
"studiolight",
"subflare", "subflares",
"subframe", "subframes",
"subclass", "subclasses", "subclassing",
"subdirectory", "subdirectories", "subdir", "subdirs",
"subitem",
"submode",
"submodule", "submodules",
"subpath",
"subsize",
"substep", "substeps",
"targetless",
"textbox", "textboxes",
"tilemode",
"timestamp", "timestamps",
"timestep", "timesteps",
"todo",
"tradeoff",
"un",
"unassociate", "unassociated",
"unbake",
"unclosed",
"uncomment",
"unculled",
"undeformed",
"undistort", "undistorted", "undistortion",
"ungroup", "ungrouped",
"unhide",
"unindent",
"unkeyed",
"unlink", "unlinked",
"unmute",
"unphysical",
"unpremultiply",
"unprojected",
"unprotect",
"unreacted",
"unreferenced",
"unregister",
"unselect", "unselected", "unselectable",
"unsets",
"unshadowed",
"unspill",
"unstitchable", "unstitch",
"unsubdivided", "unsubdivide",
"untrusted",
"vectorscope",
"whitespace", "whitespaces",
"worldspace",
"workflow",
"workspace", "workspaces",
# Neologisms, slangs
"affectable",
"animatable",
"automagic", "automagically",
"blobby",
"blockiness", "blocky",
"collider", "colliders",
"deformer", "deformers",
"determinator",
"editability",
"effectors",
"expander",
"instancer",
"keyer",
"lacunarity",
"linkable",
"numerics",
"occluder", "occluders",
"overridable",
"passepartout",
"perspectively",
"pixelate",
"pointiness",
"polycount",
"polygonization", "polygonalization", # yuck!
"scalings",
"selectable", "selectability",
"shaper",
"smoothen", "smoothening",
"spherize", "spherized",
"stitchable",
"symmetrize",
"trackability",
"transmissivity",
"rasterized", "rasterization", "rasterizer",
"renderer", "renderers", "renderable", "renderability",
# Really bad!!!
"convertor",
"fullscr",
# Abbreviations
"aero",
"amb",
"anim",
"aov",
"app",
"bbox", "bboxes",
"bksp", # Backspace
"bool",
"calc",
"cfl",
"config", "configs",
"const",
"coord", "coords",
"degr",
"diff",
"dof",
"dupli", "duplis",
"eg",
"esc",
"expr",
"fac",
"fra",
"fract",
"frs",
"grless",
"http",
"init",
"irr", # Irradiance
"kbit", "kb",
"lang", "langs",
"lclick", "rclick",
"lensdist",
"loc", "rot", "pos",
"lorem",
"luma",
"mbs", # mouse button 'select'.
"mem",
"multicam",
"num",
"ok",
"orco",
"ortho",
"pano",
"persp",
"pref", "prefs",
"prev",
"param",
"premul",
"quad", "quads",
"quat", "quats",
"recalc", "recalcs",
"refl",
"sce",
"sel",
"spec",
"struct", "structs",
"subdiv",
"sys",
"tex",
"texcoord",
"tmr", # timer
"tri", "tris",
"udim", "udims",
"upres", # Upresolution
"usd",
"uv", "uvs", "uvw", "uw", "uvmap",
"ve",
"vec",
"vel", # velocity!
"vert", "verts",
"vis",
"vram",
"xor",
"xyz", "xzy", "yxz", "yzx", "zxy", "zyx",
"xy", "xz", "yx", "yz", "zx", "zy",
# General computer/science terms
"affine",
"albedo",
"anamorphic",
"anisotropic", "anisotropy",
"bitangent",
"boid", "boids",
"ceil",
"compressibility",
"curvilinear",
"equiangular",
"equisolid",
"euler", "eulers",
"fribidi",
"gettext",
"hashable",
"hotspot",
"interocular",
"intrinsics",
"irradiance",
"isosurface",
"jitter", "jittering", "jittered",
"keymap", "keymaps",
"lambertian",
"laplacian",
"metadata",
"msgfmt",
"nand", "xnor",
"normals",
"numpad",
"octahedral",
"octree",
"omnidirectional",
"opengl",
"openmp",
"parametrization",
"photoreceptor",
"poly",
"polyline", "polylines",
"probabilistically",
"pulldown", "pulldowns",
"quantized",
"quartic",
"quaternion", "quaternions",
"quintic",
"samplerate",
"sawtooth",
"scrollback",
"scrollbar",
"scroller",
"searchable",
"spacebar",
"subtractive",
"superellipse",
"tooltip", "tooltips",
"trackpad",
"tuple",
"unicode",
"viewport", "viewports",
"viscoelastic",
"vorticity",
"waveform", "waveforms",
"wildcard", "wildcards",
"wintab", # Some Windows tablet API
# General computer graphics terms
"anaglyph",
"bezier", "beziers",
"bicubic",
"bilinear",
"bindpose",
"binormal",
"blackpoint", "whitepoint",
"blinn",
"bokeh",
"catadioptric",
"centroid",
"chroma",
"chrominance",
"clearcoat",
"codec", "codecs",
"collada",
"compositing",
"crossfade",
"cubemap", "cubemaps",
"cuda",
"deinterlace",
"dropoff",
"duotone",
"dv",
"eigenvectors",
"emissive",
"equirectangular",
"fisheye",
"framerate",
"gimbal",
"grayscale",
"icosphere",
"inpaint",
"kerning",
"lightmap",
"linearlight",
"lossless", "lossy",
"luminance",
"mantaflow",
"matcap",
"midtones",
"mipmap", "mipmaps", "mip",
"ngon", "ngons",
"ntsc",
"nurb", "nurbs",
"perlin",
"phong",
"pinlight",
"qi",
"radiosity",
"raycasting",
"raytrace", "raytracing", "raytraced",
"refractions",
"remesher", "remeshing", "remesh",
"renderfarm",
"scanfill",
"shader", "shaders",
"shadowmap", "shadowmaps",
"softlight",
"specular", "specularity",
"spillmap",
"sobel",
"stereoscopy",
"texel",
"timecode",
"tonemap",
"toon",
"transmissive",
"vividlight",
"volumetrics",
"voronoi",
"voxel", "voxels",
"vsync",
"wireframe",
"zmask",
"ztransp",
# Blender terms
"audaspace",
"azone", # action zone
"backwire",
"bbone",
"bendy", # bones
"bmesh",
"breakdowner",
"bspline",
"bweight",
"colorband",
"datablock", "datablocks",
"despeckle",
"depsgraph",
"dopesheet",
"dupliface", "duplifaces",
"dupliframe", "dupliframes",
"dupliobject", "dupliob",
"dupligroup",
"duplivert",
"dyntopo",
"editbone",
"editmode",
"eevee",
"fcurve", "fcurves",
"fedge", "fedges",
"filmic",
"fluidsim",
"freestyle",
"enum", "enums",
"gizmogroup",
"gons", # N-Gons
"gpencil",
"idcol",
"keyframe", "keyframes", "keyframing", "keyframed",
"lookdev",
"luminocity",
"mathvis",
"metaball", "metaballs", "mball",
"metaelement", "metaelements",
"metastrip", "metastrips",
"movieclip",
"mpoly",
"mtex",
"nabla",
"navmesh",
"outliner",
"overscan",
"paintmap", "paintmaps",
"polygroup", "polygroups",
"poselib",
"pushpull",
"pyconstraint", "pyconstraints",
"qe", # keys...
"shaderfx", "shaderfxs",
"shapekey", "shapekeys",
"shrinkfatten",
"shrinkwrap",
"softbody",
"stucci",
"subdiv",
"subtype",
"sunsky",
"tessface", "tessfaces",
"texface",
"timeline", "timelines",
"tosphere",
"uilist",
"userpref",
"vcol", "vcols",
"vgroup", "vgroups",
"vinterlace",
"vse",
"wasd", "wasdqe", # keys...
"wetmap", "wetmaps",
"wpaint",
"uvwarp",
# UOC (Ugly Operator Categories)
"cachefile",
"paintcurve",
"ptcache",
"dpaint",
# Algorithm/library names
"ashikhmin", # Ashikhmin-Shirley
"arsloe", # Texel-Marsen-Arsloe
"beckmann",
"blackman", # Blackman-Harris
"blosc",
"burley", # Christensen-Burley
"catmull",
"catrom",
"chebychev",
"courant",
"cryptomatte", "crypto",
"embree",
"hosek",
"kutta",
"lennard",
"marsen", # Texel-Marsen-Arsloe
"mikktspace",
"minkowski",
"minnaert",
"moskowitz", # Pierson-Moskowitz
"musgrave",
"nayar",
"netravali",
"nishita",
"ogawa",
"oren",
"peucker", # Ramer-Douglas-Peucker
"pierson", # Pierson-Moskowitz
"preetham",
"prewitt",
"ramer", # Ramer-Douglas-Peucker
"runge",
"sobol",
"verlet",
"wilkie",
"worley",
# Acronyms
"aa", "msaa",
"ao",
"api",
"asc", "cdl",
"ascii",
"atrac",
"avx",
"bsdf",
"bssrdf",
"bw",
"ccd",
"cmd",
"cmos",
"cpus",
"ctrl",
"cw", "ccw",
"dev",
"djv",
"dpi",
"dvar",
"dx",
"eo",
"fh",
"fk",
"fov",
"fft",
"futura",
"fx",
"gfx",
"ggx",
"gl",
"glsl",
"gpl",
"gpu", "gpus",
"hc",
"hdc",
"hdr", "hdri", "hdris",
"hh", "mm", "ss", "ff", # hh:mm:ss:ff timecode
"hsv", "hsva", "hsl",
"id",
"ies",
"ior",
"itu",
"jonswap",
"lhs",
"lmb", "mmb", "rmb",
"kb",
"mocap",
"msgid", "msgids",
"mux",
"ndof",
"ppc",
"precisa",
"px",
"qmc",
"rdp",
"rgb", "rgba",
"rhs",
"rv",
"sdl",
"sl",
"smpte",
"ssao",
"ssr",
"svn",
"tma",
"ui",
"unix",
"vbo", "vbos",
"vr",
"wxyz",
"xr",
"ycc", "ycca",
"yrgb",
"yuv", "yuva",
# Blender acronyms
"bli",
"bpy",
"bvh",
"dbvt",
"dop", # BLI K-Dop BVH
"ik",
"nla",
"py",
"qbvh",
"rna",
"rvo",
"simd",
"sph",
"svbvh",
# Files types/formats
"avi",
"attrac",
"autocad",
"autodesk",
"bmp",
"btx",
"cineon",
"dpx",
"dwaa",
"dwab",
"dxf",
"eps",
"exr",
"fbx",
"fbxnode",
"ffmpeg",
"flac",
"gltf",
"gzip",
"ico",
"jpg", "jpeg", "jpegs",
"json",
"matroska",
"mdd",
"mkv",
"mpeg", "mjpeg",
"mtl",
"ogg",
"openjpeg",
"osl",
"oso",
"piz",
"png", "pngs",
"po",
"quicktime",
"rle",
"sgi",
"stl",
"svg",
"targa", "tga",
"tiff",
"theora",
"vorbis",
"vp9",
"wav",
"webm",
"xiph",
"xml",
"xna",
"xvid",
}
_valid_before = "(?<=[\\s*'\"`])|(?<=[a-zA-Z][/-])|(?<=^)"
_valid_after = "(?=[\\s'\"`.!?,;:])|(?=[/-]\\s*[a-zA-Z])|(?=$)"
_valid_words = "(?:{})(?:(?:[A-Z]+[a-z]*)|[A-Z]*|[a-z]*)(?:{})".format(_valid_before, _valid_after)
_split_words = re.compile(_valid_words).findall
@classmethod
def split_words(cls, text):
return [w for w in cls._split_words(text) if w]
def __init__(self, settings, lang="en_US"):
self.settings = settings
self.dict_spelling = enchant.Dict(lang)
self.cache = set(self.uimsgs)
cache = self.settings.SPELL_CACHE
if cache and os.path.exists(cache):
with open(cache, 'rb') as f:
self.cache |= set(pickle.load(f))
def __del__(self):
cache = self.settings.SPELL_CACHE
if cache and os.path.exists(cache):
with open(cache, 'wb') as f:
pickle.dump(self.cache, f)
def check(self, txt):
ret = []
if txt in self.cache:
return ret
for w in self.split_words(txt):
w_lower = w.lower()
if w_lower in self.cache:
continue
if not self.dict_spelling.check(w):
ret.append((w, self.dict_spelling.suggest(w)))
else:
self.cache.add(w_lower)
if not ret:
self.cache.add(txt)
return ret
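# Example usage (added sketch; `prefs` stands in for a settings object exposing a
# SPELL_CACHE path, and the en_US enchant dictionary must be installed):
# checker = SpellChecker(prefs)
# checker.check("Colour of the viewport")  # -> e.g. [("Colour", ["Color", ...])]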
| [
"os.path.exists",
"pickle.dump",
"re.compile",
"enchant.Dict",
"pickle.load"
] | [((17963, 17987), 're.compile', 're.compile', (['_valid_words'], {}), '(_valid_words)\n', (17973, 17987), False, 'import re\n'), ((18213, 18231), 'enchant.Dict', 'enchant.Dict', (['lang'], {}), '(lang)\n', (18225, 18231), False, 'import enchant\n'), ((18334, 18355), 'os.path.exists', 'os.path.exists', (['cache'], {}), '(cache)\n', (18348, 18355), False, 'import os\n'), ((18535, 18556), 'os.path.exists', 'os.path.exists', (['cache'], {}), '(cache)\n', (18549, 18556), False, 'import os\n'), ((18615, 18641), 'pickle.dump', 'pickle.dump', (['self.cache', 'f'], {}), '(self.cache, f)\n', (18626, 18641), False, 'import pickle\n'), ((18432, 18446), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (18443, 18446), False, 'import pickle\n')] |
from pythonforandroid.toolchain import Recipe, shprint, current_directory, ArchARM
from os.path import exists, join, realpath
from os import uname
import glob
import sh
class LibX264Recipe(Recipe):
version = 'x264-snapshot-20170608-2245-stable' # using mirror url since can't use ftp
url = 'http://mirror.yandex.ru/mirrors/ftp.videolan.org/x264/snapshots/{version}.tar.bz2'
md5sum = 'adf3b87f759b5cc9f100f8cf99276f77'
def should_build(self, arch):
build_dir = self.get_build_dir(arch.arch)
return not exists(join(build_dir, 'lib', 'libx264.a'))
def build_arch(self, arch):
with current_directory(self.get_build_dir(arch.arch)):
env = self.get_recipe_env(arch)
configure = sh.Command('./configure')
shprint(configure,
'--cross-prefix=arm-linux-androideabi-',
'--host=arm-linux',
'--disable-asm',
'--disable-cli',
'--enable-pic',
'--disable-shared',
'--enable-static',
'--prefix={}'.format(realpath('.')),
_env=env)
shprint(sh.make, '-j4', _env=env)
shprint(sh.make, 'install', _env=env)
recipe = LibX264Recipe()
| [
"pythonforandroid.toolchain.shprint",
"os.path.realpath",
"os.path.join",
"sh.Command"
] | [((745, 770), 'sh.Command', 'sh.Command', (['"""./configure"""'], {}), "('./configure')\n", (755, 770), False, 'import sh\n'), ((1191, 1224), 'pythonforandroid.toolchain.shprint', 'shprint', (['sh.make', '"""-j4"""'], {'_env': 'env'}), "(sh.make, '-j4', _env=env)\n", (1198, 1224), False, 'from pythonforandroid.toolchain import Recipe, shprint, current_directory, ArchARM\n'), ((1237, 1274), 'pythonforandroid.toolchain.shprint', 'shprint', (['sh.make', '"""install"""'], {'_env': 'env'}), "(sh.make, 'install', _env=env)\n", (1244, 1274), False, 'from pythonforandroid.toolchain import Recipe, shprint, current_directory, ArchARM\n'), ((544, 579), 'os.path.join', 'join', (['build_dir', '"""lib"""', '"""libx264.a"""'], {}), "(build_dir, 'lib', 'libx264.a')\n", (548, 579), False, 'from os.path import exists, join, realpath\n'), ((1133, 1146), 'os.path.realpath', 'realpath', (['"""."""'], {}), "('.')\n", (1141, 1146), False, 'from os.path import exists, join, realpath\n')] |
#coding=utf-8
try:
if __name__.startswith('qgb.Win'):
from .. import py
else:
import py
except Exception as ei:
raise ei
raise EnvironmentError(__name__)
if py.is2():
import _winreg as winreg
from _winreg import *
else:
import winreg
from winreg import *
def get(skey,name,root=HKEY_CURRENT_USER,returnType=True):
''' from qgb.Win import reg
reg.get(r'Software\Microsoft\Windows\CurrentVersion\Internet Settings','ProxyEnable')
reg.get(r'HKLM\SYSTEM\CurrentControlSet\Services\LanmanServer\Parameters\Size' )
There are seven predefined root keys, traditionally named according to their constant handles defined in the Win32 API
	skey must not contain name; otherwise FileNotFoundError: [WinError 2] The system cannot find the file specified.
'''
r = OpenKey(root,skey)
r = QueryValueEx(r,name)
if returnType:return r[0],'{} : {}'.format(REG_TYPE[r[1]],r[1])
else :return r[0]
def set(skey,name,value,root=HKEY_CURRENT_USER,type='auto,or REG_TYPE int',returnType=True):
r = OpenKey(root,skey,0,KEY_SET_VALUE)
if not py.isint(type):
if py.isint(value):type=4
if py.istr(value):type=1
if py.isbyte(value):type=3 #TODO test,and add more rule
	SetValueEx(r, name, 0, type, value)  # write the requested value name (was hard-coded to 'ProxyEnable')
if get(skey,name,root=root,returnType=False)==value:
return 'reg.set [{}] {}={} sucess!'.format(skey[-55:],name,value)
else:
return 'reg.set [{}] {}={} Failed !'.format(skey,name,value)
REG_TYPE={ 0 : 'REG_NONE',
1 : 'REG_SZ',
2 : 'REG_EXPAND_SZ',
3 : 'REG_BINARY',
4 : 'REG_DWORD',
5 : 'REG_DWORD_BIG_ENDIAN',
6 : 'REG_LINK',
7 : 'REG_MULTI_SZ',
8 : 'REG_RESOURCE_LIST',
9 : 'REG_FULL_RESOURCE_DESCRIPTOR',
10: 'REG_RESOURCE_REQUIREMENTS_LIST',
11: 'REG_QWORD'}
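# Example usage (added sketch; the key and value are only illustrative):
# from qgb.Win import reg
# reg.set(r'Software\Microsoft\Windows\CurrentVersion\Internet Settings', 'ProxyEnable', 1)
# reg.get(r'Software\Microsoft\Windows\CurrentVersion\Internet Settings', 'ProxyEnable')  # -> (1, 'REG_DWORD : 4')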
| [
"py.is2",
"py.isint",
"py.isbyte",
"py.istr"
] | [((167, 175), 'py.is2', 'py.is2', ([], {}), '()\n', (173, 175), False, 'import py\n'), ((1014, 1028), 'py.isint', 'py.isint', (['type'], {}), '(type)\n', (1022, 1028), False, 'import py\n'), ((1035, 1050), 'py.isint', 'py.isint', (['value'], {}), '(value)\n', (1043, 1050), False, 'import py\n'), ((1063, 1077), 'py.istr', 'py.istr', (['value'], {}), '(value)\n', (1070, 1077), False, 'import py\n'), ((1090, 1106), 'py.isbyte', 'py.isbyte', (['value'], {}), '(value)\n', (1099, 1106), False, 'import py\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import os
sys.path.insert(0, os.path.abspath('..'))
from clint import resources
resources.init('kennethreitz', 'clint')
lorem = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'
print('%s created.' % resources.user.path)
resources.user.write('lorem.txt', lorem)
print('lorem.txt created')
assert resources.user.read('lorem.txt') == lorem
print('lorem.txt has correct contents')
resources.user.delete('lorem.txt')
print('lorem.txt deleted')
assert resources.user.read('lorem.txt') == None
print('lorem.txt deletion confirmed')
| [
"clint.resources.user.read",
"clint.resources.user.write",
"clint.resources.user.delete",
"os.path.abspath",
"clint.resources.init"
] | [((180, 219), 'clint.resources.init', 'resources.init', (['"""kennethreitz"""', '"""clint"""'], {}), "('kennethreitz', 'clint')\n", (194, 219), False, 'from clint import resources\n'), ((724, 764), 'clint.resources.user.write', 'resources.user.write', (['"""lorem.txt"""', 'lorem'], {}), "('lorem.txt', lorem)\n", (744, 764), False, 'from clint import resources\n'), ((883, 917), 'clint.resources.user.delete', 'resources.user.delete', (['"""lorem.txt"""'], {}), "('lorem.txt')\n", (904, 917), False, 'from clint import resources\n'), ((127, 148), 'os.path.abspath', 'os.path.abspath', (['""".."""'], {}), "('..')\n", (142, 148), False, 'import os\n'), ((800, 832), 'clint.resources.user.read', 'resources.user.read', (['"""lorem.txt"""'], {}), "('lorem.txt')\n", (819, 832), False, 'from clint import resources\n'), ((953, 985), 'clint.resources.user.read', 'resources.user.read', (['"""lorem.txt"""'], {}), "('lorem.txt')\n", (972, 985), False, 'from clint import resources\n')] |
from django.conf.urls import url
from django.conf import settings
from django.conf.urls.static import static
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^image/$', views.add_image, name='upload_image'),
url(r'^profile/$', views.profile_info, name='profile'),
url(r'^update/$', views.profile_update, name='update'),
url(r'^comment/(?P<image_id>\d+)', views.comment, name='comment'),
url(r'^search/', views.search_results, name = 'search_results'),
url(r'^follow/(?P<user_id>\d+)', views.follow, name = 'follow'),
url(r'^unfollow/(?P<user_id>\d+)', views.unfollow, name='unfollow'),
url(r'^likes/(\d+)/$', views.like_images,name='likes')
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"django.conf.urls.static.static",
"django.conf.urls.url"
] | [((151, 187), 'django.conf.urls.url', 'url', (['"""^$"""', 'views.index'], {'name': '"""index"""'}), "('^$', views.index, name='index')\n", (154, 187), False, 'from django.conf.urls import url\n'), ((194, 247), 'django.conf.urls.url', 'url', (['"""^image/$"""', 'views.add_image'], {'name': '"""upload_image"""'}), "('^image/$', views.add_image, name='upload_image')\n", (197, 247), False, 'from django.conf.urls import url\n'), ((254, 307), 'django.conf.urls.url', 'url', (['"""^profile/$"""', 'views.profile_info'], {'name': '"""profile"""'}), "('^profile/$', views.profile_info, name='profile')\n", (257, 307), False, 'from django.conf.urls import url\n'), ((314, 367), 'django.conf.urls.url', 'url', (['"""^update/$"""', 'views.profile_update'], {'name': '"""update"""'}), "('^update/$', views.profile_update, name='update')\n", (317, 367), False, 'from django.conf.urls import url\n'), ((374, 439), 'django.conf.urls.url', 'url', (['"""^comment/(?P<image_id>\\\\d+)"""', 'views.comment'], {'name': '"""comment"""'}), "('^comment/(?P<image_id>\\\\d+)', views.comment, name='comment')\n", (377, 439), False, 'from django.conf.urls import url\n'), ((445, 505), 'django.conf.urls.url', 'url', (['"""^search/"""', 'views.search_results'], {'name': '"""search_results"""'}), "('^search/', views.search_results, name='search_results')\n", (448, 505), False, 'from django.conf.urls import url\n'), ((514, 575), 'django.conf.urls.url', 'url', (['"""^follow/(?P<user_id>\\\\d+)"""', 'views.follow'], {'name': '"""follow"""'}), "('^follow/(?P<user_id>\\\\d+)', views.follow, name='follow')\n", (517, 575), False, 'from django.conf.urls import url\n'), ((583, 650), 'django.conf.urls.url', 'url', (['"""^unfollow/(?P<user_id>\\\\d+)"""', 'views.unfollow'], {'name': '"""unfollow"""'}), "('^unfollow/(?P<user_id>\\\\d+)', views.unfollow, name='unfollow')\n", (586, 650), False, 'from django.conf.urls import url\n'), ((656, 711), 'django.conf.urls.url', 'url', (['"""^likes/(\\\\d+)/$"""', 'views.like_images'], {'name': '"""likes"""'}), "('^likes/(\\\\d+)/$', views.like_images, name='likes')\n", (659, 711), False, 'from django.conf.urls import url\n'), ((751, 812), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (757, 812), False, 'from django.conf.urls.static import static\n')] |
import datetime
from unittest.mock import patch
import dns.resolver
import dns.rrset
import pytest
import pytz
from django.utils import timezone
from freezegun import freeze_time
from rest_framework import status
from posthog.models import Organization, OrganizationDomain, OrganizationMembership, Team
from posthog.test.base import APIBaseTest, BaseTest
class FakeAnswer(object):
def __init__(self, answer):
self.answer = answer
class FakeDNSResponse(object):
def __init__(self, answer):
self.response = FakeAnswer(answer)
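# The two fakes above mimic the shape of a dnspython resolver result: the rrset
# list is exposed as `.response.answer`, which is presumably what the
# domain-verification code under test reads when checking the TXT challenge.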
class TestOrganizationDomains(BaseTest):
def test_continuous_verification_task(self):
"""
Tests the task that re-verifies domains to ensure ownership is maintained.
"""
pass
class TestOrganizationDomainsAPI(APIBaseTest):
domain: OrganizationDomain = None # type: ignore
another_domain: OrganizationDomain = None # type: ignore
another_org: Organization = None # type: ignore
@classmethod
def setUpTestData(cls):
super().setUpTestData()
cls.domain = OrganizationDomain.objects.create(organization=cls.organization, domain="myposthog.com")
cls.another_org = Organization.objects.create(name="Another Org")
Team.objects.create(organization=cls.another_org)
cls.another_domain = OrganizationDomain.objects.create(organization=cls.another_org, domain="org.posthog.net")
# List & retrieve domains
def test_can_list_and_retrieve_domains(self):
response = self.client.get("/api/organizations/@current/domains")
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = response.json()
self.assertEqual(response_data["count"], 1)
item = response_data["results"][0]
self.assertEqual(item["domain"], "myposthog.com")
self.assertEqual(item["verified_at"], None)
self.assertEqual(item["is_verified"], False)
self.assertEqual(item["jit_provisioning_enabled"], False)
self.assertEqual(item["sso_enforcement"], "")
self.assertRegex(item["verification_challenge"], r"[0-9A-Za-z_-]{32}")
retrieve_response = self.client.get(f"/api/organizations/{self.organization.id}/domains/{self.domain.id}")
self.assertEqual(retrieve_response.status_code, status.HTTP_200_OK)
self.assertEqual(retrieve_response.json(), response_data["results"][0])
def test_cannot_list_or_retrieve_domains_for_other_org(self):
self.organization_membership.level = OrganizationMembership.Level.ADMIN
self.organization_membership.save()
response = self.client.get(f"/api/organizations/@current/domains/{self.another_domain.id}")
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(response.json(), self.not_found_response())
response = self.client.get(f"/api/organizations/{self.another_org.id}/domains/{self.another_domain.id}")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(response.json(), self.permission_denied_response())
# Create domains
def test_create_domain(self):
self.organization_membership.level = OrganizationMembership.Level.ADMIN
self.organization_membership.save()
with self.settings(MULTI_TENANCY=True):
response = self.client.post(
"/api/organizations/@current/domains/",
{
"domain": "the.posthog.com",
"verified_at": "2022-01-01T14:25:25.000Z", # ignore me
"verification_challenge": "123", # ignore me
"jit_provisioning_enabled": True, # ignore me
"sso_enforcement": "saml", # ignore me
},
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response_data = response.json()
self.assertEqual(response_data["domain"], "the.posthog.com")
self.assertEqual(response_data["verified_at"], None)
self.assertEqual(response_data["jit_provisioning_enabled"], False)
self.assertRegex(response_data["verification_challenge"], r"[0-9A-Za-z_-]{32}")
instance = OrganizationDomain.objects.get(id=response_data["id"])
self.assertEqual(instance.domain, "the.posthog.com")
self.assertEqual(instance.verified_at, None)
self.assertEqual(instance.last_verification_retry, None)
self.assertEqual(instance.sso_enforcement, "")
@pytest.mark.skip_on_multitenancy
def test_creating_domain_on_self_hosted_is_automatically_verified(self):
self.organization_membership.level = OrganizationMembership.Level.ADMIN
self.organization_membership.save()
with freeze_time("2021-08-08T20:20:08Z"):
response = self.client.post(
"/api/organizations/@current/domains/",
{
"domain": "the.posthog.com",
"verified_at": "2022-01-01T14:25:25.000Z", # ignore me
"verification_challenge": "123", # ignore me
"jit_provisioning_enabled": True, # ignore me
"sso_enforcement": "saml", # ignore me
},
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response_data = response.json()
self.assertEqual(response_data["domain"], "the.posthog.com")
self.assertEqual(
response_data["verified_at"], "2021-08-08T20:20:08Z",
)
self.assertEqual(response_data["jit_provisioning_enabled"], False)
self.assertRegex(response_data["verification_challenge"], r"[0-9A-Za-z_-]{32}")
instance = OrganizationDomain.objects.get(id=response_data["id"])
self.assertEqual(instance.domain, "the.posthog.com")
self.assertEqual(
instance.verified_at, datetime.datetime(2021, 8, 8, 20, 20, 8, tzinfo=pytz.UTC),
)
self.assertEqual(instance.last_verification_retry, None)
self.assertEqual(instance.sso_enforcement, "")
def test_cannot_create_duplicate_domain(self):
OrganizationDomain.objects.create(domain="i-registered-first.com", organization=self.another_org)
count = OrganizationDomain.objects.count()
self.organization_membership.level = OrganizationMembership.Level.ADMIN
self.organization_membership.save()
response = self.client.post("/api/organizations/@current/domains/", {"domain": "i-registered-first.com"},)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json(),
{
"type": "validation_error",
"code": "unique",
"detail": "domain with this domain already exists.",
"attr": "domain",
},
)
self.assertEqual(OrganizationDomain.objects.count(), count)
def test_cannot_create_invalid_domain(self):
count = OrganizationDomain.objects.count()
self.organization_membership.level = OrganizationMembership.Level.ADMIN
self.organization_membership.save()
        invalid_domains = ["<EMAIL>", "🦔🦔🦔.com", "one.two.c", "--alpha.com", "javascript: alert(1)"]
for _domain in invalid_domains:
response = self.client.post("/api/organizations/@current/domains/", {"domain": _domain,},)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json(),
{
"type": "validation_error",
"code": "invalid_input",
"detail": "Please enter a valid domain or subdomain name.",
"attr": "domain",
},
)
self.assertEqual(OrganizationDomain.objects.count(), count)
@patch("posthog.models.organization_domain.dns.resolver.resolve")
def test_can_request_verification_for_unverified_domains(self, mock_dns_query):
self.organization_membership.level = OrganizationMembership.Level.ADMIN
self.organization_membership.save()
mock_dns_query.return_value = FakeDNSResponse(
[
dns.rrset.from_text(
"_posthog-challenge.myposthog.com.", 3600, "IN", "TXT", self.domain.verification_challenge,
)
],
)
with freeze_time("2021-08-08T20:20:08Z"):
response = self.client.post(f"/api/organizations/@current/domains/{self.domain.id}/verify")
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = response.json()
self.domain.refresh_from_db()
self.assertEqual(response_data["domain"], "myposthog.com")
self.assertEqual(
response_data["verified_at"], self.domain.verified_at.strftime("%Y-%m-%dT%H:%M:%SZ"),
)
self.assertEqual(response_data["is_verified"], True)
self.assertEqual(
self.domain.verified_at, datetime.datetime(2021, 8, 8, 20, 20, 8, tzinfo=pytz.UTC),
)
self.assertEqual(self.domain.is_verified, True)
@patch("posthog.models.organization_domain.dns.resolver.resolve")
def test_domain_is_not_verified_with_missing_challenge(self, mock_dns_query):
self.organization_membership.level = OrganizationMembership.Level.ADMIN
self.organization_membership.save()
mock_dns_query.side_effect = dns.resolver.NoAnswer()
with freeze_time("2021-10-10T10:10:10Z"):
with self.settings(MULTI_TENANCY=True):
response = self.client.post(f"/api/organizations/@current/domains/{self.domain.id}/verify")
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = response.json()
self.domain.refresh_from_db()
self.assertEqual(response_data["domain"], "myposthog.com")
self.assertEqual(response_data["verified_at"], None)
self.assertEqual(self.domain.verified_at, None)
self.assertEqual(
self.domain.last_verification_retry, datetime.datetime(2021, 10, 10, 10, 10, 10, tzinfo=pytz.UTC),
)
@patch("posthog.models.organization_domain.dns.resolver.resolve")
def test_domain_is_not_verified_with_incorrect_challenge(self, mock_dns_query):
self.organization_membership.level = OrganizationMembership.Level.ADMIN
self.organization_membership.save()
mock_dns_query.return_value = FakeDNSResponse(
[dns.rrset.from_text("_posthog-challenge.myposthog.com.", 3600, "IN", "TXT", "incorrect_challenge",)],
)
with freeze_time("2021-10-10T10:10:10Z"):
with self.settings(MULTI_TENANCY=True):
response = self.client.post(f"/api/organizations/@current/domains/{self.domain.id}/verify")
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = response.json()
self.domain.refresh_from_db()
self.assertEqual(response_data["domain"], "myposthog.com")
self.assertEqual(response_data["verified_at"], None)
self.assertEqual(self.domain.verified_at, None)
self.assertEqual(
self.domain.last_verification_retry, datetime.datetime(2021, 10, 10, 10, 10, 10, tzinfo=pytz.UTC),
)
def test_cannot_request_verification_for_verified_domains(self):
self.organization_membership.level = OrganizationMembership.Level.ADMIN
self.organization_membership.save()
self.domain.verified_at = timezone.now()
self.domain.save()
response = self.client.post(f"/api/organizations/@current/domains/{self.domain.id}/verify")
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json(),
{
"type": "validation_error",
"code": "already_verified",
"detail": "This domain has already been verified.",
"attr": None,
},
)
def test_only_admin_can_create_verified_domains(self):
count = OrganizationDomain.objects.count()
response = self.client.post("/api/organizations/@current/domains/", {"domain": "evil.posthog.com"})
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(
response.json(), self.permission_denied_response("Your organization access level is insufficient."),
)
self.assertEqual(OrganizationDomain.objects.count(), count)
def test_only_admin_can_request_verification(self):
response = self.client.post(f"/api/organizations/@current/domains/{self.domain.id}/verify")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(
response.json(), self.permission_denied_response("Your organization access level is insufficient."),
)
self.domain.refresh_from_db()
self.assertEqual(self.domain.verified_at, None)
# Update domains
def test_can_update_jit_provisioning_and_sso_enforcement(self):
self.organization_membership.level = OrganizationMembership.Level.ADMIN
self.organization_membership.save()
self.domain.verified_at = timezone.now()
self.domain.save()
response = self.client.patch(
f"/api/organizations/@current/domains/{self.domain.id}/",
{"sso_enforcement": "google-oauth2", "jit_provisioning_enabled": True},
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json()["sso_enforcement"], "google-oauth2")
self.assertEqual(response.json()["jit_provisioning_enabled"], True)
self.domain.refresh_from_db()
self.assertEqual(self.domain.sso_enforcement, "google-oauth2")
self.assertEqual(self.domain.jit_provisioning_enabled, True)
def test_cannot_enforce_sso_or_enable_jit_provisioning_on_unverified_domain(self):
self.organization_membership.level = OrganizationMembership.Level.ADMIN
self.organization_membership.save()
# SSO Enforcement
response = self.client.patch(
f"/api/organizations/@current/domains/{self.domain.id}/", {"sso_enforcement": "google-oauth2"},
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json(),
{
"type": "validation_error",
"code": "verification_required",
"detail": "This attribute cannot be updated until the domain is verified.",
"attr": "sso_enforcement",
},
)
self.domain.refresh_from_db()
self.assertEqual(self.domain.sso_enforcement, "")
# JIT Provisioning
response = self.client.patch(
f"/api/organizations/@current/domains/{self.domain.id}/", {"jit_provisioning_enabled": True},
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json(),
{
"type": "validation_error",
"code": "verification_required",
"detail": "This attribute cannot be updated until the domain is verified.",
"attr": "jit_provisioning_enabled",
},
)
self.domain.refresh_from_db()
self.assertEqual(self.domain.jit_provisioning_enabled, False)
def test_only_allowed_parameters_can_be_updated(self):
self.organization_membership.level = OrganizationMembership.Level.ADMIN
self.organization_membership.save()
response = self.client.patch(
f"/api/organizations/@current/domains/{self.domain.id}/",
{"verified_at": "2020-01-01T12:12:12Z", "verification_challenge": "123"},
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json()["verified_at"], None)
self.assertRegex(response.json()["verification_challenge"], r"[0-9A-Za-z_-]{32}")
def test_only_admin_can_update_domain(self):
self.domain.verified_at = timezone.now()
self.domain.save()
response = self.client.patch(
f"/api/organizations/{self.organization.id}/domains/{self.domain.id}/",
{"sso_enforcement": "google-oauth2", "jit_provisioning_enabled": True},
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(
response.json(), self.permission_denied_response("Your organization access level is insufficient."),
)
self.domain.refresh_from_db()
self.assertEqual(self.domain.jit_provisioning_enabled, False)
self.assertEqual(self.domain.sso_enforcement, "")
def test_cannot_update_domain_for_another_org(self):
self.organization_membership.level = OrganizationMembership.Level.ADMIN
self.organization_membership.save()
self.another_domain.verified_at = timezone.now()
self.another_domain.save()
response = self.client.patch(
f"/api/organizations/{self.another_org.id}/domains/{self.another_domain.id}/",
{"sso_enforcement": "google-oauth2", "jit_provisioning_enabled": True},
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(response.json(), self.permission_denied_response())
self.another_domain.refresh_from_db()
self.assertEqual(self.another_domain.jit_provisioning_enabled, False)
self.assertEqual(self.another_domain.sso_enforcement, "")
# Delete domains
def test_admin_can_delete_domain(self):
self.organization_membership.level = OrganizationMembership.Level.ADMIN
self.organization_membership.save()
response = self.client.delete(f"/api/organizations/@current/domains/{self.domain.id}")
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(response.content, b"")
self.assertFalse(OrganizationDomain.objects.filter(id=self.domain.id).exists())
def test_only_admin_can_delete_domain(self):
response = self.client.delete(f"/api/organizations/@current/domains/{self.domain.id}")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(
response.json(), self.permission_denied_response("Your organization access level is insufficient."),
)
self.domain.refresh_from_db()
def test_cannot_delete_domain_for_another_org(self):
self.organization_membership.level = OrganizationMembership.Level.ADMIN
self.organization_membership.save()
response = self.client.delete(f"/api/organizations/{self.another_org.id}/domains/{self.another_domain.id}")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(response.json(), self.permission_denied_response())
self.another_domain.refresh_from_db()
| [
"posthog.models.OrganizationDomain.objects.create",
"datetime.datetime",
"posthog.models.OrganizationDomain.objects.count",
"posthog.models.OrganizationDomain.objects.get",
"django.utils.timezone.now",
"posthog.models.OrganizationDomain.objects.filter",
"posthog.models.Team.objects.create",
"freezegun.freeze_time",
"posthog.models.Organization.objects.create",
"unittest.mock.patch"
] | [((7935, 7999), 'unittest.mock.patch', 'patch', (['"""posthog.models.organization_domain.dns.resolver.resolve"""'], {}), "('posthog.models.organization_domain.dns.resolver.resolve')\n", (7940, 7999), False, 'from unittest.mock import patch\n'), ((9227, 9291), 'unittest.mock.patch', 'patch', (['"""posthog.models.organization_domain.dns.resolver.resolve"""'], {}), "('posthog.models.organization_domain.dns.resolver.resolve')\n", (9232, 9291), False, 'from unittest.mock import patch\n'), ((10253, 10317), 'unittest.mock.patch', 'patch', (['"""posthog.models.organization_domain.dns.resolver.resolve"""'], {}), "('posthog.models.organization_domain.dns.resolver.resolve')\n", (10258, 10317), False, 'from unittest.mock import patch\n'), ((1084, 1177), 'posthog.models.OrganizationDomain.objects.create', 'OrganizationDomain.objects.create', ([], {'organization': 'cls.organization', 'domain': '"""myposthog.com"""'}), "(organization=cls.organization, domain=\n 'myposthog.com')\n", (1117, 1177), False, 'from posthog.models import Organization, OrganizationDomain, OrganizationMembership, Team\n'), ((1200, 1247), 'posthog.models.Organization.objects.create', 'Organization.objects.create', ([], {'name': '"""Another Org"""'}), "(name='Another Org')\n", (1227, 1247), False, 'from posthog.models import Organization, OrganizationDomain, OrganizationMembership, Team\n'), ((1256, 1305), 'posthog.models.Team.objects.create', 'Team.objects.create', ([], {'organization': 'cls.another_org'}), '(organization=cls.another_org)\n', (1275, 1305), False, 'from posthog.models import Organization, OrganizationDomain, OrganizationMembership, Team\n'), ((1335, 1429), 'posthog.models.OrganizationDomain.objects.create', 'OrganizationDomain.objects.create', ([], {'organization': 'cls.another_org', 'domain': '"""org.posthog.net"""'}), "(organization=cls.another_org, domain=\n 'org.posthog.net')\n", (1368, 1429), False, 'from posthog.models import Organization, OrganizationDomain, OrganizationMembership, Team\n'), ((4239, 4293), 'posthog.models.OrganizationDomain.objects.get', 'OrganizationDomain.objects.get', ([], {'id': "response_data['id']"}), "(id=response_data['id'])\n", (4269, 4293), False, 'from posthog.models import Organization, OrganizationDomain, OrganizationMembership, Team\n'), ((5751, 5805), 'posthog.models.OrganizationDomain.objects.get', 'OrganizationDomain.objects.get', ([], {'id': "response_data['id']"}), "(id=response_data['id'])\n", (5781, 5805), False, 'from posthog.models import Organization, OrganizationDomain, OrganizationMembership, Team\n'), ((6176, 6277), 'posthog.models.OrganizationDomain.objects.create', 'OrganizationDomain.objects.create', ([], {'domain': '"""i-registered-first.com"""', 'organization': 'self.another_org'}), "(domain='i-registered-first.com',\n organization=self.another_org)\n", (6209, 6277), False, 'from posthog.models import Organization, OrganizationDomain, OrganizationMembership, Team\n'), ((6290, 6324), 'posthog.models.OrganizationDomain.objects.count', 'OrganizationDomain.objects.count', ([], {}), '()\n', (6322, 6324), False, 'from posthog.models import Organization, OrganizationDomain, OrganizationMembership, Team\n'), ((7051, 7085), 'posthog.models.OrganizationDomain.objects.count', 'OrganizationDomain.objects.count', ([], {}), '()\n', (7083, 7085), False, 'from posthog.models import Organization, OrganizationDomain, OrganizationMembership, Team\n'), ((11622, 11636), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (11634, 11636), False, 'from django.utils 
import timezone\n'), ((12197, 12231), 'posthog.models.OrganizationDomain.objects.count', 'OrganizationDomain.objects.count', ([], {}), '()\n', (12229, 12231), False, 'from posthog.models import Organization, OrganizationDomain, OrganizationMembership, Team\n'), ((13356, 13370), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (13368, 13370), False, 'from django.utils import timezone\n'), ((16280, 16294), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (16292, 16294), False, 'from django.utils import timezone\n'), ((17152, 17166), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (17164, 17166), False, 'from django.utils import timezone\n'), ((4782, 4817), 'freezegun.freeze_time', 'freeze_time', (['"""2021-08-08T20:20:08Z"""'], {}), "('2021-08-08T20:20:08Z')\n", (4793, 4817), False, 'from freezegun import freeze_time\n'), ((5927, 5984), 'datetime.datetime', 'datetime.datetime', (['(2021)', '(8)', '(8)', '(20)', '(20)', '(8)'], {'tzinfo': 'pytz.UTC'}), '(2021, 8, 8, 20, 20, 8, tzinfo=pytz.UTC)\n', (5944, 5984), False, 'import datetime\n'), ((6942, 6976), 'posthog.models.OrganizationDomain.objects.count', 'OrganizationDomain.objects.count', ([], {}), '()\n', (6974, 6976), False, 'from posthog.models import Organization, OrganizationDomain, OrganizationMembership, Team\n'), ((7886, 7920), 'posthog.models.OrganizationDomain.objects.count', 'OrganizationDomain.objects.count', ([], {}), '()\n', (7918, 7920), False, 'from posthog.models import Organization, OrganizationDomain, OrganizationMembership, Team\n'), ((8484, 8519), 'freezegun.freeze_time', 'freeze_time', (['"""2021-08-08T20:20:08Z"""'], {}), "('2021-08-08T20:20:08Z')\n", (8495, 8519), False, 'from freezegun import freeze_time\n'), ((9096, 9153), 'datetime.datetime', 'datetime.datetime', (['(2021)', '(8)', '(8)', '(20)', '(20)', '(8)'], {'tzinfo': 'pytz.UTC'}), '(2021, 8, 8, 20, 20, 8, tzinfo=pytz.UTC)\n', (9113, 9153), False, 'import datetime\n'), ((9574, 9609), 'freezegun.freeze_time', 'freeze_time', (['"""2021-10-10T10:10:10Z"""'], {}), "('2021-10-10T10:10:10Z')\n", (9585, 9609), False, 'from freezegun import freeze_time\n'), ((10175, 10235), 'datetime.datetime', 'datetime.datetime', (['(2021)', '(10)', '(10)', '(10)', '(10)', '(10)'], {'tzinfo': 'pytz.UTC'}), '(2021, 10, 10, 10, 10, 10, tzinfo=pytz.UTC)\n', (10192, 10235), False, 'import datetime\n'), ((10721, 10756), 'freezegun.freeze_time', 'freeze_time', (['"""2021-10-10T10:10:10Z"""'], {}), "('2021-10-10T10:10:10Z')\n", (10732, 10756), False, 'from freezegun import freeze_time\n'), ((11322, 11382), 'datetime.datetime', 'datetime.datetime', (['(2021)', '(10)', '(10)', '(10)', '(10)', '(10)'], {'tzinfo': 'pytz.UTC'}), '(2021, 10, 10, 10, 10, 10, tzinfo=pytz.UTC)\n', (11339, 11382), False, 'import datetime\n'), ((12589, 12623), 'posthog.models.OrganizationDomain.objects.count', 'OrganizationDomain.objects.count', ([], {}), '()\n', (12621, 12623), False, 'from posthog.models import Organization, OrganizationDomain, OrganizationMembership, Team\n'), ((18203, 18255), 'posthog.models.OrganizationDomain.objects.filter', 'OrganizationDomain.objects.filter', ([], {'id': 'self.domain.id'}), '(id=self.domain.id)\n', (18236, 18255), False, 'from posthog.models import Organization, OrganizationDomain, OrganizationMembership, Team\n')] |
import numpy as np
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
from os import listdir
from tensorflow.keras.callbacks import ModelCheckpoint
dataDir = "./data/trainSmallFA/"
files = listdir(dataDir)
files.sort()
totalLength = len(files)
inputs = np.empty((len(files), 3, 64, 64))
targets = np.empty((len(files), 3, 64, 64))
for i, file in enumerate(files):
npfile = np.load(dataDir + file)
d = npfile['a']
inputs[i] = d[0:3] # inx, iny, mask
targets[i] = d[3:6] # p, velx, vely
# print("inputs shape = ", inputs.shape)
print(np.shape(targets[:, 1, :, :].flatten()))
maxvel = np.amax(np.sqrt(targets[:, 1, :, :]* targets[:, 1, :, :]
+ targets[:, 2, :, :]* targets[:, 2, :, :]))
print(maxvel)
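# Normalize the targets: both velocity channels by the peak speed, and the
# pressure channel (index 0) by its own maximum value.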
targets[:, 1:3, :, :] /= maxvel
targets[:, 0, :, :] /= np.amax(targets[:, 0, :, :])
for input in inputs:
plt.figure(num=None, figsize=(20, 10), dpi=80, facecolor='w', edgecolor='k')
# predicted data
plt.subplot(331)
plt.title('x vel')
plt.imshow(input[0, :, :], cmap='jet') # vmin=-100,vmax=100, cmap='jet')
plt.colorbar()
plt.subplot(332)
plt.title('y vel')
plt.imshow(input[1, :, :], cmap='jet')
plt.colorbar()
plt.show() | [
"matplotlib.pyplot.imshow",
"os.listdir",
"numpy.sqrt",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.load",
"numpy.amax",
"matplotlib.pyplot.show"
] | [((224, 240), 'os.listdir', 'listdir', (['dataDir'], {}), '(dataDir)\n', (231, 240), False, 'from os import listdir\n'), ((833, 861), 'numpy.amax', 'np.amax', (['targets[:, 0, :, :]'], {}), '(targets[:, 0, :, :])\n', (840, 861), True, 'import numpy as np\n'), ((413, 436), 'numpy.load', 'np.load', (['(dataDir + file)'], {}), '(dataDir + file)\n', (420, 436), True, 'import numpy as np\n'), ((645, 743), 'numpy.sqrt', 'np.sqrt', (['(targets[:, 1, :, :] * targets[:, 1, :, :] + targets[:, 2, :, :] * targets[\n :, 2, :, :])'], {}), '(targets[:, 1, :, :] * targets[:, 1, :, :] + targets[:, 2, :, :] *\n targets[:, 2, :, :])\n', (652, 743), True, 'import numpy as np\n'), ((887, 963), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': 'None', 'figsize': '(20, 10)', 'dpi': '(80)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(num=None, figsize=(20, 10), dpi=80, facecolor='w', edgecolor='k')\n", (897, 963), True, 'import matplotlib.pyplot as plt\n'), ((990, 1006), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(331)'], {}), '(331)\n', (1001, 1006), True, 'import matplotlib.pyplot as plt\n'), ((1011, 1029), 'matplotlib.pyplot.title', 'plt.title', (['"""x vel"""'], {}), "('x vel')\n", (1020, 1029), True, 'import matplotlib.pyplot as plt\n'), ((1034, 1072), 'matplotlib.pyplot.imshow', 'plt.imshow', (['input[0, :, :]'], {'cmap': '"""jet"""'}), "(input[0, :, :], cmap='jet')\n", (1044, 1072), True, 'import matplotlib.pyplot as plt\n'), ((1112, 1126), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1124, 1126), True, 'import matplotlib.pyplot as plt\n'), ((1131, 1147), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(332)'], {}), '(332)\n', (1142, 1147), True, 'import matplotlib.pyplot as plt\n'), ((1152, 1170), 'matplotlib.pyplot.title', 'plt.title', (['"""y vel"""'], {}), "('y vel')\n", (1161, 1170), True, 'import matplotlib.pyplot as plt\n'), ((1175, 1213), 'matplotlib.pyplot.imshow', 'plt.imshow', (['input[1, :, :]'], {'cmap': '"""jet"""'}), "(input[1, :, :], cmap='jet')\n", (1185, 1213), True, 'import matplotlib.pyplot as plt\n'), ((1218, 1232), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1230, 1232), True, 'import matplotlib.pyplot as plt\n'), ((1238, 1248), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1246, 1248), True, 'import matplotlib.pyplot as plt\n')] |
# ~~~
# This file is part of the paper:
#
# " An Online Efficient Two-Scale Reduced Basis Approach
# for the Localized Orthogonal Decomposition "
#
# https://github.com/TiKeil/Two-scale-RBLOD.git
#
# Copyright 2019-2021 all developers. All rights reserved.
# License: Licensed as BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
# Authors:
# <NAME>
# <NAME>
# ~~~
from setuptools import setup
setup(name='rblod',
version='2021.1',
description='Pymor support for RBLOD',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
packages=['rblod'])
| [
"setuptools.setup"
] | [((450, 606), 'setuptools.setup', 'setup', ([], {'name': '"""rblod"""', 'version': '"""2021.1"""', 'description': '"""Pymor support for RBLOD"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'license': '"""MIT"""', 'packages': "['rblod']"}), "(name='rblod', version='2021.1', description='Pymor support for RBLOD',\n author='<NAME>', author_email='<EMAIL>', license='MIT', packages=['rblod'])\n", (455, 606), False, 'from setuptools import setup\n')] |
import os
import shutil
import numpy as np
import pandas as pd
import seaborn as sns
import cosmicfish as cf
import matplotlib.pyplot as plt
import dill
# Instruct pyplot to use seaborn
sns.set()
# Set project, data, CLASS directories
projectdir = os.environ['STORAGE_DIR']
datastore = os.environ['DATASTORE_DIR']
classpath = os.environ['CLASS_DIR']
fidx = int(os.environ['FORECAST_INDEX'])
# Generate output paths
fp_resultsdir = projectdir
cf.makedirectory(fp_resultsdir)
# Specify resolution of numerical integrals
derivative_step = 0.008 # How much to vary parameter to calculate numerical derivative
g_derivative_step = 0.1
mu_integral_step = 0.05 # For calculating numerical integral wrt mu between -1 and 1
# Linda Fiducial Cosmology
fp_fid = {
"A_s" : 2.2321e-9,
"n_s" : 0.967,
"omega_b" : 0.02226,
"omega_cdm" : 0.1127,
"tau_reio" : 0.0598,
"h" : 0.701,
"T_cmb" : 2.726, # Units [K]
"N_ncdm" : 4.,
"deg_ncdm" : 1.0,
"T_ncdm" : (0.79/2.726), # Units [T_cmb].
"m_ncdm" : 0.01, # Units [eV]
"b0" : 1.0,
"beta0" : 1.7,
"beta1" : 1.0,
"alphak2" : 1.0,
"sigma_fog_0" : 250000, #Units [m s^-2]
"N_eff" : 0.0064, #We allow relativistic neutrinos in addition to our DM relic
"relic_vary" : "N_ncdm", # Fix T_ncdm or m_ncdm
"m_nu" : 0.02
}
# EUCLID values
z_table = np.array([0.65, 0.75, 0.85, 0.95, 1.05, 1.15, 1.25, 1.35, 1.45, 1.55, 1.65, 1.75, 1.85, 1.95])
dNdz = np.array([2434.280, 4364.812, 4728.559, 4825.798, 4728.797, 4507.625, 4269.851, 3720.657, 3104.309,
2308.975, 1514.831, 1474.707, 893.716, 497.613])
skycover = 0.3636
# Run Fisher Forecast
full_masses = np.geomspace(0.01, 10., 21)
full_temps = np.array([0.79, 0.91, 0.94, 1.08])
mass_index=(fidx % 21)
temp_index=(fidx // 21)
masses = np.array([full_masses[mass_index]])
temps = np.array([full_temps[temp_index]])
omegacdm_set = np.array([
fp_fid['omega_cdm']
- ((masses/cf.NEUTRINO_SCALE_FACTOR)* np.power(tval / 1.95, 3.))
for tidx, tval in enumerate(temps)])
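# Each relic's contribution, (m / NEUTRINO_SCALE_FACTOR) * (T / 1.95)^3, is
# subtracted from the fiducial omega_cdm, presumably so the total dark-matter
# density stays at its fiducial value as the relic mass and temperature vary.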
fp_fiducialset = [[
dict(fp_fid, **{
'm_ncdm' : masses[midx],
'omega_cdm' : omegacdm_set[tidx, midx],
'T_ncdm' : temps[tidx]/2.726})
for midx, mval in enumerate(masses)]
for tidx, tval in enumerate(temps)]
fp_forecastset = [[cf.forecast(
classpath,
datastore,
'2relic',
fidval,
z_table,
"EUCLID",
dNdz,
fsky=skycover,
dstep=derivative_step,
gstep=g_derivative_step,
RSD=True,
FOG=True,
AP=True,
COV=True)
for fididx, fidval in enumerate(fidrowvals)]
for fidrowidx, fidrowvals in enumerate(fp_fiducialset)]
#dill.load_session('')
for frowidx, frowval in enumerate(fp_forecastset):
for fidx, fcst in enumerate(frowval):
if type(fcst.fisher)==type(None):
fcst.gen_pm()
fcst.gen_fisher(
fisher_order=[
'omega_b',
'omega_cdm',
'n_s',
'A_s',
'tau_reio',
'h',
'N_ncdm',
'M_ncdm',
'sigma_fog',
'beta0',
'beta1',
'alpha_k2'],
mu_step=mu_integral_step,
skipgen=False)
print("Relic Forecast ", fidx, " complete...")
dill.dump_session(os.path.join(fp_resultsdir, 'fp_'+str(temp_index)+'_'+str(mass_index)+'.db'))
else:
print('Fisher matrix already generated!')
| [
"seaborn.set",
"numpy.power",
"numpy.geomspace",
"numpy.array",
"cosmicfish.makedirectory",
"cosmicfish.forecast"
] | [((665, 674), 'seaborn.set', 'sns.set', ([], {}), '()\n', (672, 674), True, 'import seaborn as sns\n'), ((1328, 1359), 'cosmicfish.makedirectory', 'cf.makedirectory', (['fp_resultsdir'], {}), '(fp_resultsdir)\n', (1344, 1359), True, 'import cosmicfish as cf\n'), ((3270, 3368), 'numpy.array', 'np.array', (['[0.65, 0.75, 0.85, 0.95, 1.05, 1.15, 1.25, 1.35, 1.45, 1.55, 1.65, 1.75, \n 1.85, 1.95]'], {}), '([0.65, 0.75, 0.85, 0.95, 1.05, 1.15, 1.25, 1.35, 1.45, 1.55, 1.65,\n 1.75, 1.85, 1.95])\n', (3278, 3368), True, 'import numpy as np\n'), ((3372, 3529), 'numpy.array', 'np.array', (['[2434.28, 4364.812, 4728.559, 4825.798, 4728.797, 4507.625, 4269.851, \n 3720.657, 3104.309, 2308.975, 1514.831, 1474.707, 893.716, 497.613]'], {}), '([2434.28, 4364.812, 4728.559, 4825.798, 4728.797, 4507.625, \n 4269.851, 3720.657, 3104.309, 2308.975, 1514.831, 1474.707, 893.716, \n 497.613])\n', (3380, 3529), True, 'import numpy as np\n'), ((3675, 3703), 'numpy.geomspace', 'np.geomspace', (['(0.01)', '(10.0)', '(21)'], {}), '(0.01, 10.0, 21)\n', (3687, 3703), True, 'import numpy as np\n'), ((3768, 3802), 'numpy.array', 'np.array', (['[0.79, 0.91, 0.94, 1.08]'], {}), '([0.79, 0.91, 0.94, 1.08])\n', (3776, 3802), True, 'import numpy as np\n'), ((3865, 3900), 'numpy.array', 'np.array', (['[full_masses[mass_index]]'], {}), '([full_masses[mass_index]])\n', (3873, 3900), True, 'import numpy as np\n'), ((3909, 3943), 'numpy.array', 'np.array', (['[full_temps[temp_index]]'], {}), '([full_temps[temp_index]])\n', (3917, 3943), True, 'import numpy as np\n'), ((4805, 4991), 'cosmicfish.forecast', 'cf.forecast', (['classpath', 'datastore', '"""2relic"""', 'fidval', 'z_table', '"""EUCLID"""', 'dNdz'], {'fsky': 'skycover', 'dstep': 'derivative_step', 'gstep': 'g_derivative_step', 'RSD': '(True)', 'FOG': '(True)', 'AP': '(True)', 'COV': '(True)'}), "(classpath, datastore, '2relic', fidval, z_table, 'EUCLID', dNdz,\n fsky=skycover, dstep=derivative_step, gstep=g_derivative_step, RSD=True,\n FOG=True, AP=True, COV=True)\n", (4816, 4991), True, 'import cosmicfish as cf\n'), ((4147, 4173), 'numpy.power', 'np.power', (['(tval / 1.95)', '(3.0)'], {}), '(tval / 1.95, 3.0)\n', (4155, 4173), True, 'import numpy as np\n')] |
import argparse
import warnings
warnings.simplefilter("ignore", UserWarning)
import files
from tensorboardX import SummaryWriter
import os
import numpy as np
import time
import torch
import torch.optim
import torch.nn as nn
import torch.utils.data
import torchvision
import torchvision.transforms as tfs
from data import DataSet,return_model_loader
from util import weight_init, write_conv, setup_runtime, AverageMeter, MovingAverage
def RotationDataLoader(image_dir, is_validation=False,
batch_size=256, crop_size=224, num_workers=4,shuffle=True):
normalize = tfs.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
transforms = tfs.Compose([
tfs.RandomResizedCrop(crop_size),
tfs.RandomGrayscale(p=0.2),
tfs.ColorJitter(0.4, 0.4, 0.4, 0.4),
tfs.RandomHorizontalFlip(),
tfs.Lambda(lambda img: torch.stack([normalize(tfs.ToTensor()(
tfs.functional.rotate(img, angle))) for angle in [0, 90, 180, 270]]
))
])
if is_validation:
dataset = DataSet(torchvision.datasets.ImageFolder(image_dir + '/val', transforms))
else:
dataset = DataSet(torchvision.datasets.ImageFolder(image_dir + '/train', transforms))
loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_workers,
pin_memory=True,
drop_last=False
)
return loader
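# Usage sketch (hypothetical path and sizes): each element yielded by this loader
# stacks the four rotations of one crop, so a batch unpacks to a tensor of shape
# (batch_size, 4, 3, crop_size, crop_size) together with labels and indices, e.g.
# loader = RotationDataLoader('/path/to/imagenet', batch_size=8, crop_size=224)
# data, label, selected = next(iter(loader))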
class Optimizer:
def __init__(self):
self.num_epochs = 30
self.lr = 0.05
self.lr_schedule = lambda epoch: (self.lr * (0.1 ** (epoch//args.lrdrop)))*(epoch<80) + (epoch>=80)*self.lr*(0.1**3)
self.momentum = 0.9
self.weight_decay = 10**(-5)
self.resume = True
self.checkpoint_dir = None
self.writer = None
self.K = args.ncl
self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.val_loader = RotationDataLoader(args.imagenet_path, is_validation=True,
batch_size=args.batch_size, num_workers=args.workers,shuffle=True)
def optimize_epoch(self, model, optimizer, loader, epoch, validation=False):
print(f"Starting epoch {epoch}, validation: {validation} " + "="*30)
loss_value = AverageMeter()
rotacc_value = AverageMeter()
# house keeping
if not validation:
model.train()
lr = self.lr_schedule(epoch)
for pg in optimizer.param_groups:
pg['lr'] = lr
else:
model.eval()
XE = torch.nn.CrossEntropyLoss().to(self.dev)
l_dl = 0 # len(loader)
now = time.time()
batch_time = MovingAverage(intertia=0.9)
for iter, (data, label, selected) in enumerate(loader):
now = time.time()
if not validation:
niter = epoch * len(loader.dataset) + iter*args.batch_size
data = data.to(self.dev)
mass = data.size(0)
where = np.arange(mass,dtype=int) * 4
data = data.view(mass * 4, 3, data.size(3), data.size(4))
rotlabel = torch.tensor(range(4)).view(-1, 1).repeat(mass, 1).view(-1).to(self.dev)
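            # data is now (mass*4, 3, H, W) with the four rotated copies of each image
            # stored consecutively, so `where` (0, 4, 8, ...) selects the unrotated copies
            # used for the pseudo-label loss, while rotlabel cycles 0..3 over every copy.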
#################### train CNN ###########################################
if not validation:
final = model(data)
if args.onlyrot:
loss = torch.Tensor([0]).to(self.dev)
else:
if args.hc == 1:
loss = XE(final[0][where], self.L[selected])
else:
loss = torch.mean(torch.stack([XE(final[k][where], self.L[k, selected]) for k in range(args.hc)]))
rotloss = XE(final[-1], rotlabel)
pred = torch.argmax(final[-1], 1)
total_loss = loss + rotloss
optimizer.zero_grad()
total_loss.backward()
optimizer.step()
correct = (pred == rotlabel).to(torch.float)
rotacc = correct.sum() / float(mass)
else:
final = model(data)
pred = torch.argmax(final[-1], 1)
correct = (pred == rotlabel.cuda()).to(torch.float)
rotacc = correct.sum() / float(mass)
total_loss = torch.Tensor([0])
loss = torch.Tensor([0])
rotloss = torch.Tensor([0])
rotacc_value.update(rotacc.item(), mass)
loss_value.update(total_loss.item(), mass)
batch_time.update(time.time() - now)
now = time.time()
print(
f"Loss: {loss_value.avg:03.3f}, RotAcc: {rotacc_value.avg:03.3f} | {epoch: 3}/{iter:05}/{l_dl:05} Freq: {mass / batch_time.avg:04.1f}Hz:",
end='\r', flush=True)
# every few iter logging
if (iter % args.logiter == 0):
if not validation:
print(niter, " Loss: {0:.3f}".format(loss.item()), flush=True)
with torch.no_grad():
if not args.onlyrot:
pred = torch.argmax(final[0][where], dim=1)
pseudoloss = XE(final[0][where], pred)
if not args.onlyrot:
self.writer.add_scalar('Pseudoloss', pseudoloss.item(), niter)
self.writer.add_scalar('lr', self.lr_schedule(epoch), niter)
self.writer.add_scalar('Loss', loss.item(), niter)
self.writer.add_scalar('RotLoss', rotloss.item(), niter)
self.writer.add_scalar('RotAcc', rotacc.item(), niter)
if iter > 0:
self.writer.add_scalar('Freq(Hz)', mass/(time.time() - now), niter)
# end of epoch logging
if self.writer and (epoch % self.log_interval == 0):
write_conv(self.writer, model, epoch)
if validation:
print('val Rot-Acc: ', rotacc_value.avg)
self.writer.add_scalar('val Rot-Acc', rotacc_value.avg, epoch)
files.save_checkpoint_all(self.checkpoint_dir, model, args.arch,
optimizer, self.L, epoch,lowest=False)
return {'loss': loss_value.avg}
def optimize(self, model, train_loader):
"""Perform full optimization."""
first_epoch = 0
model = model.to(self.dev)
self.optimize_times = [0]
optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()),
weight_decay=self.weight_decay,
momentum=self.momentum,
lr=self.lr)
if self.checkpoint_dir is not None and self.resume:
self.L, first_epoch = files.load_checkpoint_all(self.checkpoint_dir, model=None, opt=None)
print('loaded from: ', self.checkpoint_dir,flush=True)
print('first five entries of L: ', self.L[:5], flush=True)
print('found first epoch to be', first_epoch, flush=True)
first_epoch = 0
self.optimize_times = [0]
self.L = self.L.cuda()
print("model.headcount ", model.headcount, flush=True)
#####################################################################################
# Perform optmization ###############################################################
lowest_loss = 1e9
epoch = first_epoch
while epoch < (self.num_epochs+1):
if not args.val_only:
m = self.optimize_epoch(model, optimizer, train_loader, epoch, validation=False)
if m['loss'] < lowest_loss:
lowest_loss = m['loss']
files.save_checkpoint_all(self.checkpoint_dir, model, args.arch,
optimizer, self.L, epoch, lowest=True)
else:
print('='*30 +' doing only validation ' + "="*30)
epoch = self.num_epochs
m = self.optimize_epoch(model, optimizer, self.val_loader, epoch, validation=True)
epoch += 1
print(f"Model optimization completed. Saving final model to {os.path.join(self.checkpoint_dir, 'model_final.pth.tar')}")
torch.save(model, os.path.join(self.checkpoint_dir, 'model_final.pth.tar'))
return model
def get_parser():
parser = argparse.ArgumentParser(description='Retrain with given labels combined with RotNet loss')
# optimizer
parser.add_argument('--epochs', default=90, type=int, metavar='N', help='number of epochs')
parser.add_argument('--batch-size', default=64, type=int, metavar='BS', help='batch size')
parser.add_argument('--lr', default=0.05, type=float, metavar='FLOAT', help='initial learning rate')
parser.add_argument('--lrdrop', default=30, type=int, metavar='INT', help='multiply LR by 0.1 every')
# architecture
parser.add_argument('--arch', default='alexnet', type=str, help='alexnet or resnet')
parser.add_argument('--archspec', default='big', type=str, help='big or small for alexnet ')
parser.add_argument('--ncl', default=1000, type=int, metavar='INT', help='number of clusters')
parser.add_argument('--hc', default=1, type=int, metavar='INT', help='number of heads')
parser.add_argument('--init', default=False, action='store_true', help='initialization of network to PyTorch 0.4')
# what we do in this code
parser.add_argument('--val-only', default=False, action='store_true', help='if we run only validation set')
parser.add_argument('--onlyrot', default=False, action='store_true', help='if train only RotNet')
# housekeeping
parser.add_argument('--data', default="Imagenet", type=str)
parser.add_argument('--device', default="0", type=str, metavar='N', help='GPU device')
parser.add_argument('--exp', default='./rot-retrain', metavar='DIR', help='path to result dirs')
parser.add_argument('--workers', default=6, type=int, metavar='N', help='number workers (default: 6)')
parser.add_argument('--imagenet-path', default='/home/ubuntu/data/imagenet', type=str, help='')
parser.add_argument('--comment', default='rot-retrain', type=str, help='comment for tensorboardX')
parser.add_argument('--log-interval', default=1, type=int, metavar='INT', help='save stuff every x epochs')
parser.add_argument('--logiter', default=200, type=int, metavar='INT', help='log every x-th batch')
return parser
if __name__ == "__main__":
args = get_parser().parse_args()
name = "%s" % args.comment.replace('/', '_')
try:
args.device = [int(item) for item in args.device.split(',')]
except AttributeError:
args.device = [int(args.device)]
setup_runtime(seed=42, cuda_dev_id=args.device)
print(args, flush=True)
print()
print(name,flush=True)
writer = SummaryWriter('./runs/%s/%s'%(args.data,name))
writer.add_text('args', " \n".join(['%s %s' % (arg, getattr(args, arg)) for arg in vars(args)]))
# Setup model and train_loader
print('Commencing!', flush=True)
model, train_loader = return_model_loader(args)
train_loader = RotationDataLoader(args.imagenet_path, is_validation=False,
crop_size=224, batch_size=args.batch_size, num_workers=args.workers,
shuffle=True)
# add additional head to the network for RotNet loss.
if args.arch == 'alexnet':
if args.hc == 1:
model.__setattr__("top_layer0", nn.Linear(4096, args.ncl))
model.top_layer = None
model.headcount = args.hc+1
model.__setattr__("top_layer%s" % args.hc, nn.Linear(4096, 4))
else:
if args.hc == 1:
model.__setattr__("top_layer0", nn.Linear(2048*int(args.archspec), args.ncl))
model.top_layer = None
model.headcount = args.hc+1
model.__setattr__("top_layer%s" % args.hc, nn.Linear(2048*int(args.archspec), 4))
if args.init:
for mod in model.modules():
mod.apply(weight_init)
# Setup optimizer
o = Optimizer()
o.writer = writer
o.lr = args.lr
o.num_epochs = args.epochs
o.resume = True
o.log_interval = args.log_interval
o.checkpoint_dir = os.path.join(args.exp, 'checkpoints')
# Optimize
o.optimize(model, train_loader)
| [
"torch.nn.CrossEntropyLoss",
"torchvision.transforms.ColorJitter",
"torchvision.transforms.functional.rotate",
"torch.cuda.is_available",
"files.save_checkpoint_all",
"numpy.arange",
"util.setup_runtime",
"tensorboardX.SummaryWriter",
"argparse.ArgumentParser",
"util.write_conv",
"torchvision.datasets.ImageFolder",
"util.MovingAverage",
"files.load_checkpoint_all",
"warnings.simplefilter",
"torchvision.transforms.ToTensor",
"torchvision.transforms.RandomResizedCrop",
"torch.argmax",
"torchvision.transforms.RandomHorizontalFlip",
"torch.Tensor",
"torchvision.transforms.Normalize",
"data.return_model_loader",
"util.AverageMeter",
"time.time",
"torchvision.transforms.RandomGrayscale",
"os.path.join",
"torch.nn.Linear",
"torch.utils.data.DataLoader",
"torch.no_grad"
] | [((32, 76), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'UserWarning'], {}), "('ignore', UserWarning)\n", (53, 76), False, 'import warnings\n'), ((594, 662), 'torchvision.transforms.Normalize', 'tfs.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (607, 662), True, 'import torchvision.transforms as tfs\n'), ((1444, 1583), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': 'shuffle', 'num_workers': 'num_workers', 'pin_memory': '(True)', 'drop_last': '(False)'}), '(dataset, batch_size=batch_size, shuffle=shuffle,\n num_workers=num_workers, pin_memory=True, drop_last=False)\n', (1471, 1583), False, 'import torch\n'), ((8746, 8841), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Retrain with given labels combined with RotNet loss"""'}), "(description=\n 'Retrain with given labels combined with RotNet loss')\n", (8769, 8841), False, 'import argparse\n'), ((11101, 11148), 'util.setup_runtime', 'setup_runtime', ([], {'seed': '(42)', 'cuda_dev_id': 'args.device'}), '(seed=42, cuda_dev_id=args.device)\n', (11114, 11148), False, 'from util import weight_init, write_conv, setup_runtime, AverageMeter, MovingAverage\n'), ((11230, 11279), 'tensorboardX.SummaryWriter', 'SummaryWriter', (["('./runs/%s/%s' % (args.data, name))"], {}), "('./runs/%s/%s' % (args.data, name))\n", (11243, 11279), False, 'from tensorboardX import SummaryWriter\n'), ((11478, 11503), 'data.return_model_loader', 'return_model_loader', (['args'], {}), '(args)\n', (11497, 11503), False, 'from data import DataSet, return_model_loader\n'), ((12643, 12680), 'os.path.join', 'os.path.join', (['args.exp', '"""checkpoints"""'], {}), "(args.exp, 'checkpoints')\n", (12655, 12680), False, 'import os\n'), ((2512, 2526), 'util.AverageMeter', 'AverageMeter', ([], {}), '()\n', (2524, 2526), False, 'from util import weight_init, write_conv, setup_runtime, AverageMeter, MovingAverage\n'), ((2550, 2564), 'util.AverageMeter', 'AverageMeter', ([], {}), '()\n', (2562, 2564), False, 'from util import weight_init, write_conv, setup_runtime, AverageMeter, MovingAverage\n'), ((2900, 2911), 'time.time', 'time.time', ([], {}), '()\n', (2909, 2911), False, 'import time\n'), ((2933, 2960), 'util.MovingAverage', 'MovingAverage', ([], {'intertia': '(0.9)'}), '(intertia=0.9)\n', (2946, 2960), False, 'from util import weight_init, write_conv, setup_runtime, AverageMeter, MovingAverage\n'), ((6392, 6500), 'files.save_checkpoint_all', 'files.save_checkpoint_all', (['self.checkpoint_dir', 'model', 'args.arch', 'optimizer', 'self.L', 'epoch'], {'lowest': '(False)'}), '(self.checkpoint_dir, model, args.arch, optimizer,\n self.L, epoch, lowest=False)\n', (6417, 6500), False, 'import files\n'), ((726, 758), 'torchvision.transforms.RandomResizedCrop', 'tfs.RandomResizedCrop', (['crop_size'], {}), '(crop_size)\n', (747, 758), True, 'import torchvision.transforms as tfs\n'), ((792, 818), 'torchvision.transforms.RandomGrayscale', 'tfs.RandomGrayscale', ([], {'p': '(0.2)'}), '(p=0.2)\n', (811, 818), True, 'import torchvision.transforms as tfs\n'), ((852, 887), 'torchvision.transforms.ColorJitter', 'tfs.ColorJitter', (['(0.4)', '(0.4)', '(0.4)', '(0.4)'], {}), '(0.4, 0.4, 0.4, 0.4)\n', (867, 887), True, 'import torchvision.transforms as tfs\n'), ((921, 947), 'torchvision.transforms.RandomHorizontalFlip', 'tfs.RandomHorizontalFlip', ([], {}), '()\n', (945, 
947), True, 'import torchvision.transforms as tfs\n'), ((1261, 1325), 'torchvision.datasets.ImageFolder', 'torchvision.datasets.ImageFolder', (["(image_dir + '/val')", 'transforms'], {}), "(image_dir + '/val', transforms)\n", (1293, 1325), False, 'import torchvision\n'), ((1363, 1429), 'torchvision.datasets.ImageFolder', 'torchvision.datasets.ImageFolder', (["(image_dir + '/train')", 'transforms'], {}), "(image_dir + '/train', transforms)\n", (1395, 1429), False, 'import torchvision\n'), ((3043, 3054), 'time.time', 'time.time', ([], {}), '()\n', (3052, 3054), False, 'import time\n'), ((4870, 4881), 'time.time', 'time.time', ([], {}), '()\n', (4879, 4881), False, 'import time\n'), ((6182, 6219), 'util.write_conv', 'write_conv', (['self.writer', 'model', 'epoch'], {}), '(self.writer, model, epoch)\n', (6192, 6219), False, 'from util import weight_init, write_conv, setup_runtime, AverageMeter, MovingAverage\n'), ((7114, 7182), 'files.load_checkpoint_all', 'files.load_checkpoint_all', (['self.checkpoint_dir'], {'model': 'None', 'opt': 'None'}), '(self.checkpoint_dir, model=None, opt=None)\n', (7139, 7182), False, 'import files\n'), ((8632, 8688), 'os.path.join', 'os.path.join', (['self.checkpoint_dir', '"""model_final.pth.tar"""'], {}), "(self.checkpoint_dir, 'model_final.pth.tar')\n", (8644, 8688), False, 'import os\n'), ((12051, 12069), 'torch.nn.Linear', 'nn.Linear', (['(4096)', '(4)'], {}), '(4096, 4)\n', (12060, 12069), True, 'import torch.nn as nn\n'), ((2095, 2120), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2118, 2120), False, 'import torch\n'), ((2813, 2840), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (2838, 2840), False, 'import torch\n'), ((3251, 3277), 'numpy.arange', 'np.arange', (['mass'], {'dtype': 'int'}), '(mass, dtype=int)\n', (3260, 3277), True, 'import numpy as np\n'), ((4042, 4068), 'torch.argmax', 'torch.argmax', (['final[-1]', '(1)'], {}), '(final[-1], 1)\n', (4054, 4068), False, 'import torch\n'), ((4414, 4440), 'torch.argmax', 'torch.argmax', (['final[-1]', '(1)'], {}), '(final[-1], 1)\n', (4426, 4440), False, 'import torch\n'), ((4591, 4608), 'torch.Tensor', 'torch.Tensor', (['[0]'], {}), '([0])\n', (4603, 4608), False, 'import torch\n'), ((4632, 4649), 'torch.Tensor', 'torch.Tensor', (['[0]'], {}), '([0])\n', (4644, 4649), False, 'import torch\n'), ((4676, 4693), 'torch.Tensor', 'torch.Tensor', (['[0]'], {}), '([0])\n', (4688, 4693), False, 'import torch\n'), ((11902, 11927), 'torch.nn.Linear', 'nn.Linear', (['(4096)', 'args.ncl'], {}), '(4096, args.ncl)\n', (11911, 11927), True, 'import torch.nn as nn\n'), ((4833, 4844), 'time.time', 'time.time', ([], {}), '()\n', (4842, 4844), False, 'import time\n'), ((8085, 8192), 'files.save_checkpoint_all', 'files.save_checkpoint_all', (['self.checkpoint_dir', 'model', 'args.arch', 'optimizer', 'self.L', 'epoch'], {'lowest': '(True)'}), '(self.checkpoint_dir, model, args.arch, optimizer,\n self.L, epoch, lowest=True)\n', (8110, 8192), False, 'import files\n'), ((8546, 8602), 'os.path.join', 'os.path.join', (['self.checkpoint_dir', '"""model_final.pth.tar"""'], {}), "(self.checkpoint_dir, 'model_final.pth.tar')\n", (8558, 8602), False, 'import os\n'), ((5318, 5333), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5331, 5333), False, 'import torch\n'), ((3661, 3678), 'torch.Tensor', 'torch.Tensor', (['[0]'], {}), '([0])\n', (3673, 3678), False, 'import torch\n'), ((5415, 5451), 'torch.argmax', 'torch.argmax', (['final[0][where]'], {'dim': '(1)'}), 
'(final[0][where], dim=1)\n', (5427, 5451), False, 'import torch\n'), ((1027, 1041), 'torchvision.transforms.ToTensor', 'tfs.ToTensor', ([], {}), '()\n', (1039, 1041), True, 'import torchvision.transforms as tfs\n'), ((1079, 1112), 'torchvision.transforms.functional.rotate', 'tfs.functional.rotate', (['img', 'angle'], {}), '(img, angle)\n', (1100, 1112), True, 'import torchvision.transforms as tfs\n'), ((6050, 6061), 'time.time', 'time.time', ([], {}), '()\n', (6059, 6061), False, 'import time\n')] |
# -*- coding: utf-8 -*-
from tests import HangulizeTestCase
from hangulize.langs.vie import Vietnamese
class VietnameseTestCase(HangulizeTestCase):
""" http://korean.go.kr/09_new/dic/rule/rule_foreign_0218.jsp """
lang = Vietnamese()
def test_1st(self):
"""์ 1ํญ
nh๋ ์ด์ด์ง๋ ๋ชจ์๊ณผ ํฉ์ณ์ ํ ์์ ๋ก ์ ๋๋ค. ์ด๋ง์ด๋ ์์ ์์์๋
๋ฐ์นจ โใด' ์ผ๋ก ์ ๋, ๊ทธ ์์ ๋ชจ์์ด a์ธ ๊ฒฝ์ฐ์๋ a์ ํฉ์ณ โ์์ธ'์ผ๋ก
์ ๋๋ค.
"""
self.assert_examples({
            # u'Nha Trang': u'나짱',
            # u'<NAME>': u'호찌민',
            # u'Thanh Hoa': u'타인호아',
            # u'Đông Khanh': u'동카인',
})
def test_2nd(self):
"""์ 2ํญ
qu๋ ์ด์ด์ง๋ ๋ชจ์์ด a์ผ ๊ฒฝ์ฐ์๋ ํฉ์ณ์ โ๊ฝ'๋ก ์ ๋๋ค.
"""
self.assert_examples({
            'Quang': '꽝',
            # u'hat quan ho': u'핫꽌호',
            'Quôc': '꾸옥',
            'Quyên': '꾸옌',
})
def test_3rd(self):
"""์ 3ํญ
y๋ ๋ค๋ฐ๋ฅด๋ ๋ชจ์๊ณผ ํฉ์ณ์ ํ ์์ ๋ก ์ ๋๋ค.
"""
self.assert_examples({
            'yên': '옌',
            'Nguyên': '응우옌',
})
def test_4th(self):
"""์ 4ํญ
์ด์ค์ l์ด ๋ชจ์ ์์ ์ฌ ๋์๋ โในใน'๋ก ์ ๋๋ค.
๋ค๋ง, ์ธ๋ช
์ ์ฑ๊ณผ ์ด๋ฆ์ ๋ณ๊ฐ์ ๋จ์ด๋ก ๋ณด์ ์ด ๊ท์น์ ์ ์ฉํ์ง ์๋๋ค.
"""
self.assert_examples({
# u'klรดng put': u'๋๋กฑ์ซ',
'Pleiku': '์ ๋์ด๊พธ',
# u'Ha Long': u'ํ ๋กฑ',
# u'My Lay': u'๋ฐ๋ผ์ด',
}) | [
"hangulize.langs.vie.Vietnamese"
] | [((232, 244), 'hangulize.langs.vie.Vietnamese', 'Vietnamese', ([], {}), '()\n', (242, 244), False, 'from hangulize.langs.vie import Vietnamese\n')] |
from mathlibpy.functions import *
import unittest
class SinTester(unittest.TestCase):
def setUp(self):
self.sin = Sin()
def test_call(self):
self.assertEqual(self.sin(0), 0)
def test_eq(self):
self.assertEqual(self.sin, Sin())
def test_get_derivative_call(self):
self.assertEqual(self.sin.get_derivative()(0), 1)
class CosTester(unittest.TestCase):
def setUp(self):
self.cos = Cos()
def test_call(self):
self.assertEqual(self.cos(0), 1)
def test_eq(self):
self.assertEqual(self.cos, Cos())
def test_get_derivative_call(self):
self.assertEqual(self.cos.get_derivative()(math.pi/2), -1)
class TanTester(unittest.TestCase):
def setUp(self):
self.tan = Tan()
def test_call(self):
self.assertEqual(self.tan(0), 0)
def test_eq(self):
self.assertEqual(self.tan, Tan())
def test_get_derivative(self):
self.assertEqual(self.tan.get_derivative()(0), 1)
if __name__ == "__main__":
unittest.main()
| [
"unittest.main"
] | [((1038, 1053), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1051, 1053), False, 'import unittest\n')] |
import numpy as np
from something import Top
i = 0
while i < 10:
a = np.ndarray((10,4))
b = np.ones((10, Top))
i += 1
del Top
# show_store()
| [
"numpy.ndarray",
"numpy.ones"
] | [((74, 93), 'numpy.ndarray', 'np.ndarray', (['(10, 4)'], {}), '((10, 4))\n', (84, 93), True, 'import numpy as np\n'), ((101, 119), 'numpy.ones', 'np.ones', (['(10, Top)'], {}), '((10, Top))\n', (108, 119), True, 'import numpy as np\n')] |
from aiohttp_admin2.mappers import Mapper
from aiohttp_admin2.mappers import fields
class FloatMapper(Mapper):
field = fields.FloatField()
def test_correct_float_type():
"""
In this test we check success convert to float type.
"""
mapper = FloatMapper({"field": 1})
mapper.is_valid()
assert mapper.data["field"] == 1.0
mapper = FloatMapper({"field": 2})
mapper.is_valid()
assert mapper.data["field"] == 2.0
mapper = FloatMapper({"field": -3})
mapper.is_valid()
assert mapper.data["field"] == -3.0
mapper = FloatMapper({"field": 0})
mapper.is_valid()
assert mapper.data["field"] == 0.0
def test_wrong_float_type():
"""
In this test we check error when we received wrong float type.
"""
assert FloatMapper({"field": "string"}).is_valid() is False
assert FloatMapper({"field": []}).is_valid() is False
| [
"aiohttp_admin2.mappers.fields.FloatField"
] | [((125, 144), 'aiohttp_admin2.mappers.fields.FloatField', 'fields.FloatField', ([], {}), '()\n', (142, 144), False, 'from aiohttp_admin2.mappers import fields\n')] |
#!/usr/bin/env python3
import os
from argparse import ArgumentParser, ArgumentTypeError, FileType, Namespace
from typing import Any
def DirType(string: str) -> str:
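    # argparse "type" callable: return the path unchanged if it names an existing directory,
    # otherwise raise ArgumentTypeError with the absolute path in the message.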
if os.path.isdir(string):
return string
raise ArgumentTypeError(
'Directory does not exist: "{}"'.format(os.path.abspath(string)))
class Cli(ArgumentParser):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
def arg(self, *args: Any, **kwargs: Any) -> None:
self.add_argument(*args, **kwargs)
def arg_bool(self, *args: Any, **kwargs: Any) -> None:
self.add_argument(*args, **kwargs, action='store_true')
def arg_dir(self, *args: Any, **kwargs: Any) -> None:
self.add_argument(*args, **kwargs, type=DirType)
def arg_file(self, *args: Any, mode: str = 'r', **kwargs: Any) -> None:
self.add_argument(*args, **kwargs, type=FileType(mode))
def parse(self) -> Namespace:
return self.parse_args()
| [
"os.path.abspath",
"argparse.FileType",
"os.path.isdir"
] | [((174, 195), 'os.path.isdir', 'os.path.isdir', (['string'], {}), '(string)\n', (187, 195), False, 'import os\n'), ((296, 319), 'os.path.abspath', 'os.path.abspath', (['string'], {}), '(string)\n', (311, 319), False, 'import os\n'), ((915, 929), 'argparse.FileType', 'FileType', (['mode'], {}), '(mode)\n', (923, 929), False, 'from argparse import ArgumentParser, ArgumentTypeError, FileType, Namespace\n')] |
import torch
import torch.nn as nn
from torch.nn.functional import max_pool1d
from utility.model_parameter import Configuration, ModelParameter
class CNNLayer(nn.Module):
def __init__(self, config: Configuration, vocab_size=30000, use_embeddings=True, embed_dim=-1, **kwargs):
super(CNNLayer, self).__init__()
# set parameters
self.max_seq_length = config.get_int(ModelParameter.MAX_LENGTH)
self.use_gpu = torch.cuda.is_available()
if embed_dim == -1:
self.embedding_dim = config.get_int(ModelParameter.EMBEDDING_SIZE)
else:
self.embedding_dim = embed_dim
self.max_length = config.get_int(ModelParameter.MAX_LENGTH)
self.use_embeddings = use_embeddings
self.conv_out_channels = config.get_int(ModelParameter.CHANNELS)
self.filter_sizes = [2]
# create and initialize layers
self.embedding = nn.Embedding(vocab_size, self.embedding_dim)
self.relu = nn.ReLU()
self.convolutions = nn.ModuleList(
[nn.Conv2d(1, self.conv_out_channels, (K, self.embedding_dim)) for K in self.filter_sizes])
self.dropout = nn.Dropout(0.3)
def get_output_length(self):
return len(self.filter_sizes) * self.conv_out_channels
def forward(self, samples, **kwargs):
encoded_samples = self.encode(samples)
return encoded_samples
def encode(self, samples):
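        # Embed the token ids, run each parallel Conv2d over the embedding matrix,
        # max-pool over time, concatenate the channel outputs, and apply dropout.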
x = self.embedding(samples)
x = x.unsqueeze(1)
x = [self.relu(conv(x)).squeeze(3) for conv in self.convolutions]
x = [max_pool1d(i, i.size(2)).squeeze(2) for i in x]
x = self.dropout(torch.cat(x, 1))
return x
| [
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.nn.Embedding",
"torch.nn.Conv2d",
"torch.cuda.is_available",
"torch.cat"
] | [((446, 471), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (469, 471), False, 'import torch\n'), ((919, 963), 'torch.nn.Embedding', 'nn.Embedding', (['vocab_size', 'self.embedding_dim'], {}), '(vocab_size, self.embedding_dim)\n', (931, 963), True, 'import torch.nn as nn\n'), ((984, 993), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (991, 993), True, 'import torch.nn as nn\n'), ((1164, 1179), 'torch.nn.Dropout', 'nn.Dropout', (['(0.3)'], {}), '(0.3)\n', (1174, 1179), True, 'import torch.nn as nn\n'), ((1653, 1668), 'torch.cat', 'torch.cat', (['x', '(1)'], {}), '(x, 1)\n', (1662, 1668), False, 'import torch\n'), ((1050, 1111), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', 'self.conv_out_channels', '(K, self.embedding_dim)'], {}), '(1, self.conv_out_channels, (K, self.embedding_dim))\n', (1059, 1111), True, 'import torch.nn as nn\n')] |
'''
<xs:complexType name="backup">
<xs:annotation>
<xs:documentation></xs:documentation>
</xs:annotation>
<xs:sequence>
<xs:group ref="duration"/>
<xs:group ref="editorial"/>
</xs:sequence>
</xs:complexType>
'''
from musicscore.dtd.dtd import Sequence, GroupReference, Element
from musicscore.musicxml.groups.common import Editorial
from musicscore.musicxml.elements.note import Duration
from musicscore.musicxml.types.complextypes.complextype import ComplexType
class ComplexTypeBackup(ComplexType):
"""
The backup and forward elements are required to coordinate multiple voices in one part, including music on multiple
staves. The backup type is generally used to move between voices and staves. Thus the backup element does not
include voice or staff elements. Duration values should always be positive, and should not cross measure boundaries
or mid-measure changes in the divisions value.
"""
_DTD = Sequence(
Element(Duration),
GroupReference(Editorial)
)
def __init__(self, tag, *args, **kwargs):
super().__init__(tag=tag, *args, **kwargs)
| [
"musicscore.dtd.dtd.GroupReference",
"musicscore.dtd.dtd.Element"
] | [((972, 989), 'musicscore.dtd.dtd.Element', 'Element', (['Duration'], {}), '(Duration)\n', (979, 989), False, 'from musicscore.dtd.dtd import Sequence, GroupReference, Element\n'), ((999, 1024), 'musicscore.dtd.dtd.GroupReference', 'GroupReference', (['Editorial'], {}), '(Editorial)\n', (1013, 1024), False, 'from musicscore.dtd.dtd import Sequence, GroupReference, Element\n')] |
import torch
import numpy as np
import hashlib
from torch.autograd import Variable
import os
def deterministic_random(min_value, max_value, data):
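    # Derive a reproducible integer between min_value and max_value from a SHA-256 hash of the input string.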
digest = hashlib.sha256(data.encode()).digest()
raw_value = int.from_bytes(digest[:4], byteorder='little', signed=False)
return int(raw_value / (2 ** 32 - 1) * (max_value - min_value)) + min_value
def mpjpe_cal(predicted, target):
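    # Mean per-joint position error: mean Euclidean distance between predicted and target joint positions.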
assert predicted.shape == target.shape
return torch.mean(torch.norm(predicted - target, dim=len(target.shape) - 1))
def test_calculation(predicted, target, action, error_sum, data_type, subject, MAE=False):
error_sum = mpjpe_by_action_p1(predicted, target, action, error_sum)
if not MAE:
error_sum = mpjpe_by_action_p2(predicted, target, action, error_sum)
return error_sum
def mpjpe_by_action_p1(predicted, target, action, action_error_sum):
assert predicted.shape == target.shape
batch_num = predicted.size(0)
frame_num = predicted.size(1)
dist = torch.mean(torch.norm(predicted - target, dim=len(target.shape) - 1), dim=len(target.shape) - 2)
if len(set(list(action))) == 1:
end_index = action[0].find(' ')
if end_index != -1:
action_name = action[0][:end_index]
else:
action_name = action[0]
action_error_sum[action_name]['p1'].update(torch.mean(dist).item()*batch_num*frame_num, batch_num*frame_num)
else:
for i in range(batch_num):
end_index = action[i].find(' ')
if end_index != -1:
action_name = action[i][:end_index]
else:
action_name = action[i]
action_error_sum[action_name]['p1'].update(torch.mean(dist[i]).item()*frame_num, frame_num)
return action_error_sum
def mpjpe_by_action_p2(predicted, target, action, action_error_sum):
assert predicted.shape == target.shape
num = predicted.size(0)
pred = predicted.detach().cpu().numpy().reshape(-1, predicted.shape[-2], predicted.shape[-1])
gt = target.detach().cpu().numpy().reshape(-1, target.shape[-2], target.shape[-1])
dist = p_mpjpe(pred, gt)
if len(set(list(action))) == 1:
end_index = action[0].find(' ')
if end_index != -1:
action_name = action[0][:end_index]
else:
action_name = action[0]
action_error_sum[action_name]['p2'].update(np.mean(dist) * num, num)
else:
for i in range(num):
end_index = action[i].find(' ')
if end_index != -1:
action_name = action[i][:end_index]
else:
action_name = action[i]
action_error_sum[action_name]['p2'].update(np.mean(dist), 1)
return action_error_sum
def p_mpjpe(predicted, target):
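    # MPJPE after similarity (Procrustes) alignment: solve for scale, rotation and
    # translation via SVD before measuring the per-joint error.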
assert predicted.shape == target.shape
muX = np.mean(target, axis=1, keepdims=True)
muY = np.mean(predicted, axis=1, keepdims=True)
X0 = target - muX
Y0 = predicted - muY
normX = np.sqrt(np.sum(X0 ** 2, axis=(1, 2), keepdims=True))
normY = np.sqrt(np.sum(Y0 ** 2, axis=(1, 2), keepdims=True))
X0 /= normX
Y0 /= normY
H = np.matmul(X0.transpose(0, 2, 1), Y0)
U, s, Vt = np.linalg.svd(H)
V = Vt.transpose(0, 2, 1)
R = np.matmul(V, U.transpose(0, 2, 1))
sign_detR = np.sign(np.expand_dims(np.linalg.det(R), axis=1))
V[:, :, -1] *= sign_detR
s[:, -1] *= sign_detR.flatten()
R = np.matmul(V, U.transpose(0, 2, 1))
tr = np.expand_dims(np.sum(s, axis=1, keepdims=True), axis=2)
a = tr * normX / normY
t = muX - a * np.matmul(muY, R)
predicted_aligned = a * np.matmul(predicted, R) + t
return np.mean(np.linalg.norm(predicted_aligned - target, axis=len(target.shape) - 1), axis=len(target.shape) - 2)
def define_actions( action ):
actions = ["Directions","Discussion","Eating","Greeting",
"Phoning","Photo","Posing","Purchases",
"Sitting","SittingDown","Smoking","Waiting",
"WalkDog","Walking","WalkTogether"]
if action == "All" or action == "all" or action == '*':
return actions
if not action in actions:
        raise ValueError("Unrecognized action: %s" % action)
return [action]
def define_error_list(actions):
error_sum = {}
error_sum.update({actions[i]: {'p1':AccumLoss(), 'p2':AccumLoss()} for i in range(len(actions))})
return error_sum
class AccumLoss(object):
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val
self.count += n
self.avg = self.sum / self.count
def get_varialbe(split, target):
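    # Cast every tensor in `target` to a CUDA FloatTensor Variable (moves a batch onto the GPU).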
num = len(target)
var = []
if split == 'train':
for i in range(num):
temp = Variable(target[i], requires_grad=False).contiguous().type(torch.cuda.FloatTensor)
var.append(temp)
else:
for i in range(num):
temp = Variable(target[i]).contiguous().cuda().type(torch.cuda.FloatTensor)
var.append(temp)
return var
def print_error(data_type, action_error_sum, is_train):
mean_error_p1, mean_error_p2 = print_error_action(action_error_sum, is_train)
return mean_error_p1, mean_error_p2
def print_error_action(action_error_sum, is_train):
mean_error_each = {'p1': 0.0, 'p2': 0.0}
mean_error_all = {'p1': AccumLoss(), 'p2': AccumLoss()}
if is_train == 0:
print("{0:=^12} {1:=^10} {2:=^8}".format("Action", "p#1 mm", "p#2 mm"))
for action, value in action_error_sum.items():
if is_train == 0:
print("{0:<12} ".format(action), end="")
mean_error_each['p1'] = action_error_sum[action]['p1'].avg * 1000.0
mean_error_all['p1'].update(mean_error_each['p1'], 1)
mean_error_each['p2'] = action_error_sum[action]['p2'].avg * 1000.0
mean_error_all['p2'].update(mean_error_each['p2'], 1)
if is_train == 0:
print("{0:>6.2f} {1:>10.2f}".format(mean_error_each['p1'], mean_error_each['p2']))
if is_train == 0:
print("{0:<12} {1:>6.2f} {2:>10.2f}".format("Average", mean_error_all['p1'].avg, \
mean_error_all['p2'].avg))
return mean_error_all['p1'].avg, mean_error_all['p2'].avg
def save_model(previous_name, save_dir,epoch, data_threshold, model, model_name):
# if os.path.exists(previous_name):
# os.remove(previous_name)
torch.save(model.state_dict(),
'%s/%s_%d_%d.pth' % (save_dir, model_name, epoch, data_threshold * 100))
previous_name = '%s/%s_%d_%d.pth' % (save_dir, model_name, epoch, data_threshold * 100)
return previous_name
def save_model_new(save_dir,epoch, data_threshold, lr, optimizer, model, model_name):
# if os.path.exists(previous_name):
# os.remove(previous_name)
# torch.save(model.state_dict(),
# '%s/%s_%d_%d.pth' % (save_dir, model_name, epoch, data_threshold * 100))
torch.save({
'epoch': epoch,
'lr': lr,
'optimizer': optimizer.state_dict(),
'model_pos': model.state_dict(),
},
'%s/%s_%d_%d.pth' % (save_dir, model_name, epoch, data_threshold * 100))
| [
"numpy.mean",
"torch.mean",
"numpy.linalg.det",
"numpy.sum",
"numpy.matmul",
"numpy.linalg.svd",
"torch.autograd.Variable"
] | [((2852, 2890), 'numpy.mean', 'np.mean', (['target'], {'axis': '(1)', 'keepdims': '(True)'}), '(target, axis=1, keepdims=True)\n', (2859, 2890), True, 'import numpy as np\n'), ((2901, 2942), 'numpy.mean', 'np.mean', (['predicted'], {'axis': '(1)', 'keepdims': '(True)'}), '(predicted, axis=1, keepdims=True)\n', (2908, 2942), True, 'import numpy as np\n'), ((3216, 3232), 'numpy.linalg.svd', 'np.linalg.svd', (['H'], {}), '(H)\n', (3229, 3232), True, 'import numpy as np\n'), ((3012, 3055), 'numpy.sum', 'np.sum', (['(X0 ** 2)'], {'axis': '(1, 2)', 'keepdims': '(True)'}), '(X0 ** 2, axis=(1, 2), keepdims=True)\n', (3018, 3055), True, 'import numpy as np\n'), ((3077, 3120), 'numpy.sum', 'np.sum', (['(Y0 ** 2)'], {'axis': '(1, 2)', 'keepdims': '(True)'}), '(Y0 ** 2, axis=(1, 2), keepdims=True)\n', (3083, 3120), True, 'import numpy as np\n'), ((3506, 3538), 'numpy.sum', 'np.sum', (['s'], {'axis': '(1)', 'keepdims': '(True)'}), '(s, axis=1, keepdims=True)\n', (3512, 3538), True, 'import numpy as np\n'), ((3346, 3362), 'numpy.linalg.det', 'np.linalg.det', (['R'], {}), '(R)\n', (3359, 3362), True, 'import numpy as np\n'), ((3594, 3611), 'numpy.matmul', 'np.matmul', (['muY', 'R'], {}), '(muY, R)\n', (3603, 3611), True, 'import numpy as np\n'), ((3641, 3664), 'numpy.matmul', 'np.matmul', (['predicted', 'R'], {}), '(predicted, R)\n', (3650, 3664), True, 'import numpy as np\n'), ((2399, 2412), 'numpy.mean', 'np.mean', (['dist'], {}), '(dist)\n', (2406, 2412), True, 'import numpy as np\n'), ((2705, 2718), 'numpy.mean', 'np.mean', (['dist'], {}), '(dist)\n', (2712, 2718), True, 'import numpy as np\n'), ((1347, 1363), 'torch.mean', 'torch.mean', (['dist'], {}), '(dist)\n', (1357, 1363), False, 'import torch\n'), ((1700, 1719), 'torch.mean', 'torch.mean', (['dist[i]'], {}), '(dist[i])\n', (1710, 1719), False, 'import torch\n'), ((4831, 4871), 'torch.autograd.Variable', 'Variable', (['target[i]'], {'requires_grad': '(False)'}), '(target[i], requires_grad=False)\n', (4839, 4871), False, 'from torch.autograd import Variable\n'), ((5001, 5020), 'torch.autograd.Variable', 'Variable', (['target[i]'], {}), '(target[i])\n', (5009, 5020), False, 'from torch.autograd import Variable\n')] |