content
stringlengths 0
894k
| type
stringclasses 2
values |
---|---|
# Copyright (c) 2011-2020 Eric Froemling
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
"""Settings UI functionality related to the remote app."""
from __future__ import annotations
import ba
class RemoteAppSettingsWindow(ba.Window):
    """Window showing info/settings related to the remote app."""

    def __init__(self) -> None:
        # Local import: only needed here, avoids a module-level dependency.
        from ba.internal import get_remote_app_name

        # Translation-resource prefix shared by this window's strings.
        self._r = 'connectMobileDevicesWindow'
        width = 700
        height = 390
        spacing = 40
        # Root container; scaled up on small/medium UIs so text stays legible.
        super().__init__(root_widget=ba.containerwidget(
            size=(width, height),
            transition='in_right',
            scale=(1.85 if ba.app.small_ui else 1.3 if ba.app.med_ui else 1.0),
            stack_offset=(-10, 0) if ba.app.small_ui else (0, 0)))
        # Back button; also registered as the container's cancel button below.
        btn = ba.buttonwidget(parent=self._root_widget,
                              position=(40, height - 67),
                              size=(140, 65),
                              scale=0.8,
                              label=ba.Lstr(resource='backText'),
                              button_type='back',
                              text_scale=1.1,
                              autoselect=True,
                              on_activate_call=self._back)
        ba.containerwidget(edit=self._root_widget, cancel_button=btn)
        # Window title.
        ba.textwidget(parent=self._root_widget,
                      position=(width * 0.5, height - 42),
                      size=(0, 0),
                      text=ba.Lstr(resource=self._r + '.titleText'),
                      maxwidth=370,
                      color=ba.app.title_color,
                      scale=0.8,
                      h_align='center',
                      v_align='center')
        # Restyle the back button into the small arrow form.
        ba.buttonwidget(edit=btn,
                        button_type='backSmall',
                        size=(60, 60),
                        label=ba.charstr(ba.SpecialChar.BACK))
        # Vertical layout cursor, advanced top-to-bottom for each row.
        v = height - 70.0
        v -= spacing * 1.2
        # Explanation paragraph with app/remote-app names substituted in.
        ba.textwidget(parent=self._root_widget,
                      position=(15, v - 26),
                      size=(width - 30, 30),
                      maxwidth=width * 0.95,
                      color=(0.7, 0.9, 0.7, 1.0),
                      scale=0.8,
                      text=ba.Lstr(resource=self._r + '.explanationText',
                                   subs=[('${APP_NAME}',
                                          ba.Lstr(resource='titleText')),
                                         ('${REMOTE_APP_NAME}',
                                          get_remote_app_name())]),
                      max_height=100,
                      h_align='center',
                      v_align='center')
        v -= 90
        # hmm the itms:// version doesnt bounce through safari but is kinda
        # apple-specific-ish
        # Update: now we just show link to the remote webpage.
        ba.textwidget(parent=self._root_widget,
                      position=(width * 0.5, v + 5),
                      size=(0, 0),
                      color=(0.7, 0.9, 0.7, 1.0),
                      scale=1.4,
                      text='bombsquadgame.com/remote',
                      maxwidth=width * 0.95,
                      max_height=60,
                      h_align='center',
                      v_align='center')
        v -= 30
        # Smaller hint text below the link.
        ba.textwidget(parent=self._root_widget,
                      position=(width * 0.5, v - 35),
                      size=(0, 0),
                      color=(0.7, 0.9, 0.7, 0.8),
                      scale=0.65,
                      text=ba.Lstr(resource=self._r + '.bestResultsText'),
                      maxwidth=width * 0.95,
                      max_height=height * 0.19,
                      h_align='center',
                      v_align='center')
        # The checkbox is phrased as "disable", so its value is the inverse
        # of the stored 'Enable Remote App' config entry.
        ba.checkboxwidget(
            parent=self._root_widget,
            position=(width * 0.5 - 150, v - 116),
            size=(300, 30),
            maxwidth=300,
            scale=0.8,
            value=not ba.app.config.resolve('Enable Remote App'),
            autoselect=True,
            text=ba.Lstr(resource='disableRemoteAppConnectionsText'),
            on_value_change_call=self._on_check_changed)

    def _on_check_changed(self, value: bool) -> None:
        # Invert: the checkbox means "disable", the config key means "enable".
        cfg = ba.app.config
        cfg['Enable Remote App'] = not value
        cfg.apply_and_commit()

    def _back(self) -> None:
        # Slide back out to the controls settings window.
        from bastd.ui.settings import controls
        ba.containerwidget(edit=self._root_widget, transition='out_right')
        ba.app.main_menu_window = (controls.ControlsSettingsWindow(
            transition='in_left').get_root_widget())
|
python
|
#!/usr/bin/env python
from avro.io import BinaryEncoder, BinaryDecoder
from avro.io import DatumWriter, DatumReader
import avro.schema
from io import BytesIO
import argo_ams_library
from argo_ams_library import ArgoMessagingService
import argparse
import base64
import logging
import logging.handlers
import sys
import json
import time
# set up logging
# Module-level logger; handlers and levels are attached in main().
LOGGER = logging.getLogger("AMS republish script")
def extract_messages(ams, ingest_sub, bulk_size, schema, verify):
    """Pull up to ``bulk_size`` messages from an AMS subscription and decode
    their avro payloads.

    :param ams: ArgoMessagingService client.
    :param ingest_sub: name of the subscription to pull from.
    :param bulk_size: maximum number of messages to pull.
    :param schema: parsed avro schema used to decode each payload.
    :param verify: SSL verification flag forwarded to the AMS client.
    :return: tuple of (list of (ack_id, decoded_payload) pairs,
             ack id of the last consumed message or "-1" when none).
    """
    # consume metric data messages
    consumed_msgs = ams.pull_sub(ingest_sub, num=bulk_size, return_immediately=True, verify=verify)
    # initialise the avro reader
    avro_reader = DatumReader(writers_schema=schema)
    # all the decoded messages that will be returned
    decoded_msgs = []
    # decode the messages
    for msg in consumed_msgs:
        try:
            # decode the data field again using the provided avro schema
            msg_bytes = BytesIO(msg[1].get_data())
            msg_decoder = BinaryDecoder(msg_bytes)
            avro_msg = avro_reader.read(msg_decoder)
            # check that the tags field is present
            if avro_msg["tags"] is None:
                raise KeyError("tags field is empty")
            # append to decoded messages
            decoded_msgs.append((msg[0], avro_msg))
        except Exception as e:
            # Bug fix: log the exception itself, not e.message — exceptions
            # have no .message attribute in Python 3, so the old code raised
            # AttributeError here and hid the original decode error.
            LOGGER.warning("Could not extract data from ams message {}, {}".format(msg[0], e))
    last_msg_id = "-1"
    if len(consumed_msgs) > 0:
        last_msg_id = consumed_msgs.pop()[0]
    return decoded_msgs, last_msg_id
def filter_messages(consumed_msgs, sites):
    """Keep only messages whose ``endpoint_group`` tag is one of *sites*.

    Messages lacking the tag entirely are logged and dropped.

    :param consumed_msgs: iterable of (msg_id, decoded_payload) pairs.
    :param sites: collection of accepted endpoint_group values.
    :return: list with the matching (msg_id, decoded_payload) pairs.
    """
    filtered = []
    for msg in consumed_msgs:
        tags = msg[1]["tags"]
        if "endpoint_group" not in tags:
            LOGGER.warning("Message {} has no endpoint_group".format(msg[0]))
            continue
        if tags["endpoint_group"] in sites:
            filtered.append(msg)
    return filtered
def republish_messages(filtered_msgs, ams, verify):
    """Repackage each filtered message and publish it to the AMS topic named
    after its ``endpoint_group`` tag.

    Empty header/summary fields are logged and replaced with "".

    :param filtered_msgs: iterable of (msg_id, decoded_payload) pairs.
    :param ams: ArgoMessagingService client used for publishing.
    :param verify: SSL verification flag forwarded to the AMS client.
    """
    # Header fields copied verbatim from the decoded payload.
    fields = ["status", "service", "timestamp", "metric", "hostname", "monitoring_host"]
    for msg in filtered_msgs:
        msg_id, payload = msg[0], msg[1]
        topic = payload["tags"]["endpoint_group"]
        header = {}
        for fl in fields:
            value = payload[fl]
            if value is None:
                LOGGER.warning("Message {} contains empty field {}".format(msg_id, fl))
                value = ""
            header[fl] = value
        summary = payload["summary"]
        if summary is None:
            LOGGER.warning("Message {} contains no summary field".format(msg_id))
            summary = ""
        # Key order matches the original: body, header, text.
        data = {"body": summary, "header": header, "text": "true"}
        ams_msg = argo_ams_library.AmsMessage(data=json.dumps(data))
        ams.publish(topic, ams_msg, verify=verify)
def main(args):
    """Entry point: endlessly consume, filter and republish AMS messages.

    :param args: parsed argparse namespace (ConfigPath, verify, debug).
    """
    # Read the configuration file first, then fill in defaults for settings
    # it does not provide.  Bug fix: the original assigned the defaults and
    # then discarded them by rebinding `config` to json.load()'s result.
    with open(args.ConfigPath, 'r') as f:
        config = json.load(f)
    config.setdefault("bulk_size", 100)
    config.setdefault("interval", 10)
    # stream(console) handler
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(logging.Formatter('%(asctime)s %(name)s[%(process)d]: %(levelname)s %(message)s'))
    LOGGER.addHandler(console_handler)
    LOGGER.setLevel(logging.DEBUG if args.debug else logging.INFO)
    # sys log handler
    syslog_handler = logging.handlers.SysLogHandler(config["syslog_socket"])
    syslog_handler.setFormatter(logging.Formatter('%(asctime)s %(name)s[%(process)d]: %(levelname)s %(message)s'))
    # Bug fix: a stray unconditional setLevel(INFO) used to follow this,
    # silently overriding --debug for the syslog handler.
    syslog_handler.setLevel(logging.DEBUG if args.debug else logging.INFO)
    LOGGER.addHandler(syslog_handler)
    # start the process of republishing messages
    ams_endpoint = "{}:{}".format(config["ams_host"], config["ams_port"])
    ams = ArgoMessagingService(endpoint=ams_endpoint, token=config["ams_token"], project=config["ams_project"])
    # Close the schema file instead of leaking its handle.
    with open(config["avro_schema"], "rb") as schema_file:
        schema = avro.schema.parse(schema_file.read())
    while True:
        start_time = time.time()
        try:
            consumed_msgs, last_msg_id = extract_messages(ams, config["ingest_subscription"], config["bulk_size"], schema, args.verify)
            if last_msg_id == "-1":
                LOGGER.info("No new messages")
                time.sleep(config["interval"])
                continue
            LOGGER.debug("Consumed messages \n {}".format(consumed_msgs))
            filtered_msgs = filter_messages(consumed_msgs, config["sites"])
            LOGGER.debug("Filtered messages \n {}".format(filtered_msgs))
            republish_messages(filtered_msgs, ams, args.verify)
            # make sure that the acknowledgment happens
            try:
                # try to acknowledge
                ams.ack_sub(config["ingest_subscription"], [last_msg_id], verify=args.verify)
            except Exception as e:
                # Bug fix (here and below): log `e`, not the nonexistent
                # Python 3 attribute `e.message`.
                LOGGER.critical("Retrying to acknowledge message {} after error {}".format(last_msg_id, e))
                while True:
                    try:
                        # consume again in order to refresh the TTL
                        ams.pull_sub(config["ingest_subscription"], config["bulk_size"], True, verify=args.verify)
                        # try to ack again using the msg_id from the first consumption
                        ams.ack_sub(config["ingest_subscription"], [last_msg_id], verify=args.verify)
                        break
                    except Exception as e:
                        LOGGER.critical(
                            "Retrying to acknowledge message {} after error {}".format(last_msg_id, e))
                        time.sleep(config["interval"])
            end_time = time.time()
            LOGGER.info("Consumed {} and Republished {} messages. in {}".format(
                len(consumed_msgs),
                len(filtered_msgs),
                end_time - start_time))
        except Exception as e:
            LOGGER.critical("Could not republish, {}".format(e))
            time.sleep(config["interval"])
if __name__ == "__main__":
    # Command-line entry point: parse flags and hand off to main().
    parser = argparse.ArgumentParser(description="Republish messages for specific SITES")
    parser.add_argument(
        "-c", "--ConfigPath", type=str, help="Path for the config file", default="/etc/argo-messaging/republisher.json")
    parser.add_argument(
        "--verify", help="SSL verification for requests", dest="verify", action="store_true")
    parser.add_argument(
        "--debug", help="DEBUG mode", dest="debug", action="store_true")
    # main() loops forever; sys.exit only propagates an abnormal exit.
    sys.exit(main(parser.parse_args()))
|
python
|
from datetime import datetime
from itertools import chain
from random import randint
from django.contrib.auth.decorators import login_required
from django.contrib.formtools.wizard.views import SessionWizardView
from django.http import HttpResponseRedirect, Http404, HttpResponse
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_control
from django.views.generic import ListView, DetailView, RedirectView, TemplateView
from django.views.generic.edit import UpdateView, DeleteView
from registration.backends.simple.views import RegistrationView
from django.template.loader import get_template
from django.template import Context
from sysrev.forms import *
class SRRegistrationView(RegistrationView):
    """Registration view that sends newly registered users to the home page."""

    def get_success_url(self, user=None):
        # Always redirect to the site root after successful registration.
        return "/"
class ProfileView(UpdateView):
    """Allow the logged-in user to edit their own profile."""

    template_name = "sysrev/profile_form.html"
    form_class = ProfileForm
    model = User
    # "#" keeps the user on the same page after a successful update.
    success_url = "#"

    def get_object(self, queryset=None):
        # Always edit the current user; no pk is taken from the URL.
        return self.request.user

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(ProfileView, self).dispatch(*args, **kwargs)
class AboutView(TemplateView):
    """Static "about" page."""

    template_name = "sysrev/about.html"
class ReviewListView(ListView):
    """List the current user's reviews: in-progress first, then completed.

    Each context entry bundles the review with its paper-pool counts and
    percentages for the template.
    """

    model = Review

    def get_context_data(self, **kwargs):
        context = super(ReviewListView, self).get_context_data(**kwargs)
        by_recency = Review.objects.order_by('-last_modified')
        in_progress_reviews = by_recency.filter(participants=self.request.user, completed=False)
        completed_reviews = by_recency.filter(participants=self.request.user, completed=True)
        context["reviews"] = [{"review": review,
                               "count": review.paper_pool_counts(),
                               "percent": review.paper_pool_percentages()}
                              for review in chain(in_progress_reviews, completed_reviews)]
        return context

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(ReviewListView, self).dispatch(*args, **kwargs)
class ReviewDetailView(DetailView):
    """Detail page for a single review, with its papers grouped by pool."""

    model = Review

    def get_context_data(self, object=None):
        # NOTE(review): super() is called with object=None instead of
        # forwarding the received object — presumably DetailView has already
        # stored it on self.object; confirm templates don't rely on a
        # context value derived from this argument.
        context = super(ReviewDetailView, self).get_context_data(object=None)
        try:
            # Only participants of the review may view it.
            if self.request.user in object.participants.all():
                context["count"] = object.paper_pool_counts()
                context["percent"] = object.paper_pool_percentages()
                # Papers bucketed by pool code: A/D/F/R.
                context["abstract_papers"] = Paper.objects.filter(review=object, pool="A")
                context["document_papers"] = Paper.objects.filter(review=object, pool="D")
                context["final_papers"] = Paper.objects.filter(review=object, pool="F")
                context["rejected_papers"] = Paper.objects.filter(review=object, pool="R")
            else:
                # Hide existence of the review from non-participants.
                raise Http404("Review not found")
        except Review.DoesNotExist:
            raise Http404("Review not found")
        return context

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(ReviewDetailView, self).dispatch(*args, **kwargs)
class ReviewDownloadView(ReviewDetailView):
    """Download a review (same context as the detail view) as plain text."""

    model = Review
    template_name = "sysrev/review_download.txt"

    def render_to_response(self, context, **response_kwargs):
        t = get_template(self.template_name)
        resp = HttpResponse(t.render(Context(context)), content_type="text/plain")
        # Serve as an attachment named after the review's slug.
        resp["Content-Disposition"] = 'attachment; filename="' + context['review'].slug + '.txt' + '"'
        return resp
class ReviewUpdateView(UpdateView):
    """Edit a review's fields and re-run its query afterwards."""

    model = Review
    form_class = ReviewUpdate

    def get_success_url(self):
        return Review.objects.get(pk=self.kwargs['pk']).get_absolute_url()

    def post(self, request, *args, **kwargs):
        result = super(ReviewUpdateView, self).post(request, *args, **kwargs)
        if result:
            # Refresh the review's paper list after a successful update.
            Review.objects.get(pk=kwargs['pk']).perform_query()
        return result

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(ReviewUpdateView, self).dispatch(*args, **kwargs)
class ReviewDeleteView(DeleteView):
    """Delete a review and return to the home page."""

    model = Review
    success_url = "/"

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(ReviewDeleteView, self).dispatch(*args, **kwargs)
class ReviewWorkView(RedirectView):
    """Redirect the user to the next paper that needs screening.

    Abstract-pool papers are screened before document-pool papers.  When
    both pools are empty the review is marked completed and the user is
    sent back to the review's detail page.
    """

    permanent = False

    def get(self, request, *args, **kwargs):
        try:
            review = Review.objects.get(pk=self.kwargs['pk'])
            papers = Paper.objects.filter(review=review)
            counts = review.paper_pool_counts()
            if counts["abstract"] == 0 and counts["document"] == 0:
                # Nothing left to screen: mark the review completed.
                review.completed = True
                review.date_completed = datetime.now()
                review.save()
                self.url = review.get_absolute_url()
                # Bug fix: unpack positional args (`*args`); the original
                # passed the tuple itself as one positional argument.
                return super(ReviewWorkView, self).get(request, *args, **kwargs)
            elif counts["abstract"] > 0:
                papers = papers.filter(pool="A")
            elif counts["document"] > 0:
                papers = papers.filter(pool="D")
            # Pick a random paper from the selected pool.
            paper = papers.all()[randint(0, papers.count() - 1)]
            self.url = paper.get_absolute_url()
            return super(ReviewWorkView, self).get(request, *args, **kwargs)
        except Review.DoesNotExist:
            raise Http404("Paper not found")
        except Paper.DoesNotExist:
            raise Http404("Paper not found")

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(ReviewWorkView, self).dispatch(*args, **kwargs)
class PaperDetailView(DetailView):
    """Show a single paper within a review, with screening context."""

    model = Paper

    def get_context_data(self, object=None):
        # NOTE(review): builds the context from scratch rather than calling
        # super(); templates only see the keys assigned below.
        context = {}
        try:
            review = Review.objects.get(pk=self.kwargs['pk'])
            # Only participants of the review may view its papers.
            if self.request.user in review.participants.all():
                paper = Paper.objects.get(pk=self.kwargs['pk2'])
                context["paper"] = paper
                context["review"] = review
                # Page title derived from the paper's pool code.
                titles = {'A': 'Abstract screening', 'D': 'Document screening', 'F': 'Final document', 'R': 'Rejected document'}
                context["title"] = titles[paper.pool]
                # Pools in which judging controls / full-document embeds show.
                context["to_judge"] = ('A', 'D')
                context["to_embed_full"] = ('D', 'F')
                context["count"] = review.paper_pool_counts()
                context["percent"] = review.paper_pool_percentages()
            else:
                # Hide existence from non-participants.
                raise Http404("Paper not found")
        except Review.DoesNotExist:
            raise Http404("Paper not found")
        except Paper.DoesNotExist:
            raise Http404("Paper not found")
        return context

    @method_decorator(login_required)
    @cache_control(no_cache=True, must_revalidate=True, no_store=True)
    def dispatch(self, *args, **kwargs):
        return super(PaperDetailView, self).dispatch(*args, **kwargs)
class PaperChoiceView(RedirectView):
    """Move a paper into a new pool based on the URL's 'choice' segment,
    then redirect back to the review's work view."""

    permanent = False

    # URL choice segment -> paper pool code.
    _POOLS = {"document": "D", "final": "F", "rejected": "R"}

    def get(self, request, *args, **kwargs):
        try:
            review = Review.objects.get(pk=self.kwargs['pk'])
            paper = Paper.objects.get(pk=self.kwargs['pk2'], review=review)
            choice = self.kwargs['choice']
            if choice not in self._POOLS:
                raise Http404("Invalid choice")
            paper.pool = self._POOLS[choice]
            paper.save()
            self.url = review.get_absolute_url() + "/work/"
            # Bug fix: unpack positional args (`*args`); the original passed
            # the tuple itself as one positional argument.
            return super(PaperChoiceView, self).get(request, *args, **kwargs)
        except Review.DoesNotExist:
            raise Http404("Review not found")
        except Paper.DoesNotExist:
            raise Http404("Paper not found")

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(PaperChoiceView, self).dispatch(*args, **kwargs)
class ReviewCreateWizard(SessionWizardView):
    """Two-step wizard for creating a review: details first, then the query."""

    form_list = [ReviewCreateStep1, ReviewCreateStep2]
    template_name = "sysrev/review_create_wizard.html"

    def done(self, form_list, **kwargs):
        s1 = form_list[0].cleaned_data
        s2 = form_list[1].cleaned_data
        review = Review()
        review.title = s1["title"]
        review.description = s1["description"]
        review.query = s2["query"]
        # Must save before touching the participants m2m relation.
        review.save()
        review.participants.add(self.request.user)
        # One invitee per line; strip whitespace and drop blank lines.
        invited = filter(lambda i: i, map(lambda l: str.strip(str(l)), s1["invited"].splitlines()))
        review.invite(invited)
        review.perform_query()
        review.save()
        return HttpResponseRedirect(review.get_absolute_url())

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(ReviewCreateWizard, self).dispatch(*args, **kwargs)
|
python
|
'''
visualize VIL-100 datasets in points form or curves form.
datasets name:vil-100
paper link: https://arxiv.org/abs/2108.08482
reference: https://github.com/yujun0-0/MMA-Net/tree/main/dataset
datasets structure:
VIL-100
|----Annotations
|----data
|----JPEGImages
|----Json
|----train.json
*********** A sample of one json-file ***********
{
"camera_id": 8272,
"info": {
"height": 1080 ,
"width": 1920,
"date": "2020-11-24",
"image_path": "0_Road014_Trim005_frames/XXXXXX.jpg"
},
"annotations": {
"lane": [{
"id": 1,
"lane_id": 1,
"attribute": 1,
"occlusion": 0,
"points": [[412.6, 720],[423.7, 709.9], ...]
}, {...}, {...}, {...}]
}
}
'''
import os
import cv2
import numpy as np
import json
# BGR colour palette: one entry per lane index drawn by get_points/get_curves
# (supports up to 14 lanes per frame).
color = [(218,112,214), (255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255), (0, 255, 255), (255, 255, 255),
         (100, 255, 0), (100, 0, 255), (255, 100, 0), (0, 100, 255), (255, 0, 100), (0, 255, 100)]
def get_points(mask, label):
    """Draw every annotated lane point of `label` onto `mask` as a 5px disc.

    :param mask: image (numpy array) to draw on; modified in place.
    :param label: path to a per-frame VIL-100 json annotation file.
    :return: the drawn-on image (same array as `mask`).
    """
    # Read the label; close the handle instead of leaking it.
    with open(label) as label_file:
        label_info = json.load(label_file)['annotations']
    # NOTE(review): color[index] will raise IndexError for frames with more
    # than len(color) lanes — confirm against the dataset.
    for index, line in enumerate(label_info['lane']):
        # Integer pixel coordinates sorted along y (then x).  Bug fix: the
        # original called sorted() and discarded its result, so the points
        # were never actually reordered.
        points = sorted(((int(float(p[0])), int(float(p[1])))
                         for p in line['points']),
                        key=lambda k: (k[1], k[0]))
        for point in points:
            # cv2.circle draws in place and returns the same array.
            mask = cv2.circle(mask, point, 5, color[index], -1)
    # Bug fix: return mask (== the drawn-on image).  The original returned a
    # loop-local `image` that was unbound for frames with no lane points.
    return mask
def get_curves(mask, label):
    """Draw every annotated lane of `label` onto `mask` as a polyline.

    :param mask: image (numpy array) to draw on; modified in place.
    :param label: path to a per-frame VIL-100 json annotation file.
    :return: the drawn-on image (same array as `mask`).
    """
    # Read the label; close the handle instead of leaking it.
    with open(label) as label_file:
        label_info = json.load(label_file)['annotations']
    for index, line in enumerate(label_info['lane']):
        # Integer pixel coordinates sorted along y (then x).  Bug fix: the
        # original called sorted() and discarded its result, so the points
        # were never actually reordered before being connected.
        points = sorted(((int(float(p[0])), int(float(p[1])))
                         for p in line['points']),
                        key=lambda k: (k[1], k[0]))
        # Connect consecutive points with 4px segments.
        for start, end in zip(points, points[1:]):
            mask = cv2.line(mask, start, end, color[index], 4, lineType=8)
    return mask
if __name__ == '__main__':
    # choose datasets category from: 'train', 'test'
    datasets_category = 'test'
    # choose vis_mode between 'points' and 'curves'
    vis_mod = 'curves'
    # datasets dir
    dataset_dir = '../dataset/VIL-100'
    # directory where the visualized masks are written
    save_mask_dir = '{}/{}_{}'.format(dataset_dir, "vis_datasets", vis_mod)
    if not os.path.exists(save_mask_dir):
        os.makedirs(save_mask_dir)
    # the train/test split is given as one relative image path per line
    txt_file = dataset_dir + '/data/{}.txt'.format(datasets_category)
    # NOTE(review): file handle is never closed; harmless for a one-shot
    # script but a `with` block would be cleaner.
    file_list = open(txt_file)
    for file in file_list:
        file = file.strip()
        full_img_path = dataset_dir + file
        if not os.path.exists(full_img_path):
            continue
        print("Now dealing with:", file)
        file_name = os.path.splitext(file.strip().split('/')[-1])[0]  # image name without extension
        # per-frame json annotation lives in Json/ mirroring JPEGImages/
        json_file = dataset_dir + file.replace('JPEGImages', 'Json') + '.json'
        img = cv2.imread(full_img_path)
        # datasets have different height and width.
        # NOTE(review): h and w are computed but unused below.
        h = img.shape[0]
        w = img.shape[1]
        # visualize either discrete points or connected curves
        if vis_mod == 'points':
            label_mask = get_points(img, json_file)
        else:
            # visualize curves
            label_mask = get_curves(img, json_file)
        # imencode+tofile handles non-ASCII paths that cv2.imwrite may not
        cv2.imencode('.png',label_mask)[1].tofile('{}/{}.png'.format(save_mask_dir,file_name))
    print("finished~~")
|
python
|
from __future__ import print_function
import os
import re
import sqlite3
import sys
import traceback
import simpy
from vcd import VCDWriter
from . import probe
from .util import partial_format
from .timescale import parse_time, scale_time
from .queue import Queue
from .pool import Pool
class Tracer(object):
    """Common machinery shared by the concrete tracers.

    Reads the ``sim.<name>.*`` configuration keys (registering defaults via
    ``setdefault``), opens the backend when enabled, and provides scope-based
    include/exclude regex filtering.
    """

    name = ''

    def __init__(self, env):
        self.env = env
        prefix = 'sim.' + self.name + '.'
        self.enabled = env.config.setdefault(prefix + 'enable', False)
        self.persist = env.config.setdefault(prefix + 'persist', True)
        if self.enabled:
            self.open()
            include_patterns = env.config.setdefault(prefix + 'include_pat',
                                                     ['.*'])
            exclude_patterns = env.config.setdefault(prefix + 'exclude_pat',
                                                     [])
            self._include_re = [re.compile(pattern)
                                for pattern in include_patterns]
            self._exclude_re = [re.compile(pattern)
                                for pattern in exclude_patterns]

    def is_scope_enabled(self, scope):
        """Return True when tracing is on and `scope` passes the filters."""
        if not self.enabled:
            return False
        if not any(regex.match(scope) for regex in self._include_re):
            return False
        return not any(regex.match(scope) for regex in self._exclude_re)

    def open(self):
        raise NotImplementedError()  # pragma: no cover

    def close(self):
        # Only tracers that actually opened a backend need closing.
        if self.enabled:
            self._close()

    def _close(self):
        raise NotImplementedError()  # pragma: no cover

    def remove_files(self):
        raise NotImplementedError()

    def flush(self):
        # Default: nothing buffered.
        pass

    def activate_probe(self, scope, target, **hints):
        raise NotImplementedError()  # pragma: no cover

    def activate_trace(self, scope, **hints):
        raise NotImplementedError()  # pragma: no cover

    def trace_exception(self):
        # Default: exceptions are not recorded.
        pass
class LogTracer(Tracer):
    """Tracer that writes human-readable log lines to a file or stderr."""

    name = 'log'
    default_format = '{level:7} {ts:.3f} {ts_unit}: {scope}:'
    # Severity rank per level; lower is more severe.  A message is emitted
    # when its rank is <= the configured maximum rank.
    levels = {
        'ERROR': 1,
        'WARNING': 2,
        'INFO': 3,
        'PROBE': 4,
        'DEBUG': 5,
    }

    def open(self):
        # Resolve configuration, registering defaults via setdefault.
        self.filename = self.env.config.setdefault('sim.log.file', 'sim.log')
        buffering = self.env.config.setdefault('sim.log.buffering', -1)
        level = self.env.config.setdefault('sim.log.level', 'INFO')
        self.max_level = self.levels[level]
        self.format_str = self.env.config.setdefault('sim.log.format',
                                                     self.default_format)
        ts_n, ts_unit = self.env.timescale
        if ts_n == 1:
            self.ts_unit = ts_unit
        else:
            # Non-unit timescale: show the multiplier, e.g. '(10us)'.
            self.ts_unit = '({}{})'.format(ts_n, ts_unit)
        if self.filename:
            self.file = open(self.filename, 'w', buffering)
            self.should_close = True
        else:
            # Empty filename means log to stderr (which must not be closed).
            self.file = sys.stderr
            self.should_close = False

    def flush(self):
        self.file.flush()

    def _close(self):
        if self.should_close:
            self.file.close()

    def remove_files(self):
        if os.path.isfile(self.filename):
            os.remove(self.filename)

    def is_scope_enabled(self, scope, level=None):
        # Level threshold first, then the base include/exclude filters.
        return ((level is None or self.levels[level] <= self.max_level) and
                super(LogTracer, self).is_scope_enabled(scope))

    def activate_probe(self, scope, target, **hints):
        # Returns None when the scope/level is filtered out.
        level = hints.get('level', 'PROBE')
        if not self.is_scope_enabled(scope, level):
            return None
        # Pre-bake the static fields; only `ts` varies per callback call.
        format_str = partial_format(self.format_str,
                                    level=level,
                                    ts_unit=self.ts_unit,
                                    scope=scope)

        def probe_callback(value):
            print(format_str.format(ts=self.env.now), value, file=self.file)

        return probe_callback

    def activate_trace(self, scope, **hints):
        # Same as activate_probe but accepts multiple trace values.
        level = hints.get('level', 'DEBUG')
        if not self.is_scope_enabled(scope, level):
            return None
        format_str = partial_format(self.format_str,
                                    level=level,
                                    ts_unit=self.ts_unit,
                                    scope=scope)

        def trace_callback(*value):
            print(format_str.format(ts=self.env.now), *value, file=self.file)

        return trace_callback

    def trace_exception(self):
        # Log the full traceback at ERROR level, with the final line (the
        # exception message itself) printed first for quick scanning.
        tb_lines = traceback.format_exception(*sys.exc_info())
        print(self.format_str.format(level='ERROR',
                                     ts=self.env.now,
                                     ts_unit=self.ts_unit,
                                     scope='Exception'),
              tb_lines[-1], '\n',
              *tb_lines,
              file=self.file)
class VCDTracer(Tracer):
    """Tracer that dumps value changes to a VCD file (viewable in GTKWave)."""

    name = 'vcd'

    def open(self):
        dump_filename = self.env.config.setdefault('sim.vcd.dump_file',
                                                   'sim.vcd')
        # The VCD file may use its own timescale; otherwise inherit the
        # simulation's timescale.
        if 'sim.vcd.timescale' in self.env.config:
            vcd_ts_str = self.env.config.setdefault(
                'sim.vcd.timescale',
                self.env.config['sim.timescale'])
            vcd_timescale = parse_time(vcd_ts_str)
        else:
            vcd_timescale = self.env.timescale
        # Factor converting simulation time into VCD time units.
        self.scale_factor = scale_time(self.env.timescale, vcd_timescale)
        check_values = self.env.config.setdefault('sim.vcd.check_values', True)
        self.dump_file = open(dump_filename, 'w')
        self.vcd = VCDWriter(self.dump_file,
                             timescale=vcd_timescale,
                             check_values=check_values)
        self.save_filename = self.env.config.setdefault('sim.gtkw.file',
                                                        'sim.gtkw')
        if self.env.config.setdefault('sim.gtkw.live'):
            # Launch GTKWave to watch the dump file as it is written.
            from vcd.gtkw import spawn_gtkwave_interactive
            quiet = self.env.config.setdefault('sim.gtkw.quiet', True)
            spawn_gtkwave_interactive(dump_filename, self.save_filename,
                                      quiet=quiet)
        # Optional window of simulation time during which dumping is active;
        # empty strings mean "from the beginning" / "until the end".
        start_time = self.env.config.setdefault('sim.vcd.start_time', '')
        stop_time = self.env.config.setdefault('sim.vcd.stop_time', '')
        t_start = (scale_time(parse_time(start_time), self.env.timescale)
                   if start_time else None)
        t_stop = (scale_time(parse_time(stop_time), self.env.timescale)
                  if stop_time else None)
        self.env.process(self._start_stop(t_start, t_stop))

    def vcd_now(self):
        # Current simulation time expressed in VCD time units.
        return self.env.now * self.scale_factor

    def flush(self):
        self.dump_file.flush()

    def _close(self):
        self.vcd.close(self.vcd_now())
        self.dump_file.close()

    def remove_files(self):
        if os.path.isfile(self.dump_file.name):
            os.remove(self.dump_file.name)
        if os.path.isfile(self.save_filename):
            os.remove(self.save_filename)

    def activate_probe(self, scope, target, **hints):
        """Register a VCD variable for `target`; return its change callback."""
        assert self.enabled
        var_type = hints.get('var_type')
        if var_type is None:
            # Infer the VCD var_type from the probed object's kind.
            if isinstance(target, (simpy.Container, Pool)):
                if isinstance(target.level, float):
                    var_type = 'real'
                else:
                    var_type = 'integer'
            elif isinstance(target, (simpy.Resource, simpy.Store, Queue)):
                var_type = 'integer'
            else:
                raise ValueError(
                    'Could not infer VCD var_type for {}'.format(scope))
        kwargs = {k: hints[k]
                  for k in ['size', 'init', 'ident']
                  if k in hints}
        if 'init' not in kwargs:
            # Derive an initial value from the target's current state.
            if isinstance(target, (simpy.Container, Pool)):
                kwargs['init'] = target.level
            elif isinstance(target, simpy.Resource):
                # 'z' (high-impedance) marks a resource with no users yet.
                kwargs['init'] = len(target.users) if target.users else 'z'
            elif isinstance(target, (simpy.Store, Queue)):
                kwargs['init'] = len(target.items)
        parent_scope, name = scope.rsplit('.', 1)
        var = self.vcd.register_var(parent_scope, name, var_type, **kwargs)

        def probe_callback(value):
            self.vcd.change(var, self.vcd_now(), value)

        return probe_callback

    def activate_trace(self, scope, **hints):
        """Register a VCD variable for an explicit trace; var_type required."""
        assert self.enabled
        var_type = hints['var_type']
        kwargs = {k: hints[k]
                  for k in ['size', 'init', 'ident']
                  if k in hints}
        parent_scope, name = scope.rsplit('.', 1)
        var = self.vcd.register_var(parent_scope, name, var_type, **kwargs)
        # Vector variables (tuple size) take multiple values per change.
        if isinstance(var.size, tuple):
            def trace_callback(*value):
                self.vcd.change(var, self.vcd_now(), value)
        else:
            def trace_callback(value):
                self.vcd.change(var, self.vcd_now(), value)
        return trace_callback

    def _start_stop(self, t_start, t_stop):
        # Wait for simulation to start to ensure all variable registration is
        # complete before doing and dump_on()/dump_off() calls.
        yield self.env.timeout(0)
        if t_start is None and t_stop is None:
            # |vvvvvvvvvvvvvv|
            pass
        elif t_start is None:
            # |vvvvvv--------|
            yield self.env.timeout(t_stop)
            self.vcd.dump_off(self.vcd_now())
        elif t_stop is None:
            # |--------vvvvvv|
            self.vcd.dump_off(self.vcd_now())
            yield self.env.timeout(t_start)
            self.vcd.dump_on(self.vcd_now())
        elif t_start <= t_stop:
            # |---vvvvvv-----|
            self.vcd.dump_off(self.vcd_now())
            yield self.env.timeout(t_start)
            self.vcd.dump_on(self.vcd_now())
            yield self.env.timeout(t_stop - t_start)
            self.vcd.dump_off(self.vcd_now())
        else:
            # |vvv-------vvvv|
            yield self.env.timeout(t_stop)
            self.vcd.dump_off(self.vcd_now())
            yield self.env.timeout(t_start - t_stop)
            self.vcd.dump_on(self.vcd_now())
class SQLiteTracer(Tracer):
    """Tracer that records trace events as rows in a SQLite database."""

    name = 'db'

    def open(self):
        config = self.env.config
        self.filename = config.setdefault('sim.db.file', 'sim.sqlite')
        self.trace_table = config.setdefault('sim.db.trace_table', 'trace')
        # Start from a clean database file each run.
        self.remove_files()
        self.db = sqlite3.connect(self.filename)
        # Table creation is deferred until the first trace is activated.
        self._is_trace_table_created = False

    def _create_trace_table(self):
        # Create the trace table lazily, exactly once.
        if self._is_trace_table_created:
            return
        self.db.execute('CREATE TABLE {} ('
                        'timestamp FLOAT, '
                        'scope TEXT, '
                        'value)'.format(self.trace_table))
        self._is_trace_table_created = True

    def flush(self):
        self.db.commit()

    def _close(self):
        self.db.commit()
        self.db.close()

    def remove_files(self):
        # Nothing on disk for in-memory databases.
        if self.filename == ':memory:':
            return
        for name in (self.filename, self.filename + '-journal'):
            if os.path.exists(name):
                os.remove(name)

    def activate_probe(self, scope, target, **hints):
        # Probes and traces are recorded identically in the database.
        return self.activate_trace(scope, **hints)

    def activate_trace(self, scope, **hints):
        assert self.enabled
        self._create_trace_table()
        insert_sql = (
            'INSERT INTO {} (timestamp, scope, value) VALUES (?, ?, ?)'
            .format(self.trace_table))

        def trace_callback(value):
            self.db.execute(insert_sql, (self.env.now, scope, value))

        return trace_callback
class TraceManager(object):
    """Create and coordinate the individual tracer instances."""

    def __init__(self, env):
        self.tracers = []
        try:
            # Register each tracer immediately after construction so that a
            # failure partway through still closes the ones already built.
            self.log_tracer = LogTracer(env)
            self.tracers.append(self.log_tracer)
            self.vcd_tracer = VCDTracer(env)
            self.tracers.append(self.vcd_tracer)
            self.sqlite_tracer = SQLiteTracer(env)
            self.tracers.append(self.sqlite_tracer)
        except BaseException:
            self.close()
            raise

    def flush(self):
        """Flush all managed tracers instances.

        The effect of flushing is tracer-dependent.
        """
        for tracer in self.tracers:
            if not tracer.enabled:
                continue
            tracer.flush()

    def close(self):
        """Close every tracer, removing its files when not persistent."""
        for tracer in self.tracers:
            tracer.close()
            if tracer.enabled and not tracer.persist:
                tracer.remove_files()

    def auto_probe(self, scope, target, **hints):
        """Attach a probe on `target` for each tracer hinted for `scope`."""
        callbacks = []
        for tracer in self.tracers:
            if tracer.name not in hints:
                continue
            if not tracer.is_scope_enabled(scope):
                continue
            callback = tracer.activate_probe(scope, target,
                                             **hints[tracer.name])
            if callback:
                callbacks.append(callback)
        if callbacks:
            probe.attach(scope, target, callbacks, **hints)

    def get_trace_function(self, scope, **hints):
        """Return a function fanning a traced value out to interested tracers."""
        callbacks = []
        for tracer in self.tracers:
            if tracer.name not in hints:
                continue
            if not tracer.is_scope_enabled(scope):
                continue
            callback = tracer.activate_trace(scope, **hints[tracer.name])
            if callback:
                callbacks.append(callback)

        def trace_function(*value):
            for callback in callbacks:
                callback(*value)

        return trace_function

    def trace_exception(self):
        """Let every enabled tracer record the in-flight exception."""
        for tracer in self.tracers:
            if not tracer.enabled:
                continue
            tracer.trace_exception()
|
python
|
from pyhafas.profile import ProfileInterface
from pyhafas.profile.interfaces.helper.parse_lid import ParseLidHelperInterface
from pyhafas.types.fptf import Station
class BaseParseLidHelper(ParseLidHelperInterface):
    """Helpers for parsing HaFAS location identifiers (LIDs)."""

    def parse_lid(self: ProfileInterface, lid: str) -> dict:
        """
        Converts the LID given by HaFAS

        Splits the LID (e.g. A=1@O=Siegburg/Bonn) in multiple elements (e.g. A=1 and O=Siegburg/Bonn).
        These are converted into a dict where the part before the equal sign is the key and the part after the value.

        Elements without an equal sign are skipped (the previous code raised
        IndexError on them).

        :param lid: Location identifier (given by HaFAS)
        :return: Dict of the elements of the dict
        """
        parsedLid = {}
        for lidElementGroup in lid.split("@"):
            if not lidElementGroup:
                continue
            parts = lidElementGroup.split("=")
            # Keeps the original semantics: the value is the text between
            # the first and second '=' only.
            if len(parts) >= 2:
                parsedLid[parts[0]] = parts[1]
        return parsedLid

    def parse_lid_to_station(
            self: ProfileInterface,
            lid: str,
            name: str = "",
            latitude: float = 0,
            longitude: float = 0) -> Station:
        """
        Parses the LID given by HaFAS to a station object

        :param lid: Location identifier (given by HaFAS)
        :param name: Station name (optional, if not given, LID is used)
        :param latitude: Latitude of the station (optional, if not given, LID is used)
        :param longitude: Longitude of the station (optional, if not given, LID is used)
        :return: Parsed LID as station object
        """
        parsedLid = self.parse_lid(lid)
        # Bug fix: use .get() so a LID without X/Y elements no longer raises
        # KeyError; coordinates in the LID are given in micro-degrees.
        if latitude == 0 and longitude == 0 and parsedLid.get('X') and parsedLid.get('Y'):
            latitude = float(parsedLid['Y']) / 1000000
            longitude = float(parsedLid['X']) / 1000000
        return Station(
            id=parsedLid['L'],
            name=name or parsedLid['O'],
            latitude=latitude,
            longitude=longitude
        )
|
python
|
from __future__ import print_function, absolute_import
import torch
from torch.optim.lr_scheduler import _LRScheduler
from bisect import bisect_right
AVAI_SCH = ['single_step', 'multi_step', 'cosine', 'multi_step_warmup']
def build_lr_scheduler(optimizer,
                       lr_scheduler='single_step',
                       stepsize=1,
                       gamma=0.1,
                       lr_scales=None,
                       max_epoch=1,
                       frozen=20,
                       warmup=10,
                       warmup_factor_base=0.1,
                       frozen_factor_base=0.1):
    """A function wrapper for building a learning rate scheduler.
    Args:
        optimizer (Optimizer): an Optimizer.
        lr_scheduler (str, optional): learning rate scheduler method. Default is single_step.
        stepsize (int or list, optional): step size to decay learning rate. When ``lr_scheduler``
            is "single_step", ``stepsize`` should be an integer. When ``lr_scheduler`` is
            "multi_step" or "multi_step_warmup", ``stepsize`` is a list. Default is 1.
        gamma (float, optional): decay rate. Default is 0.1.
        lr_scales (list, optional): explicit per-stage LR scales used by
            "multi_step_warmup" instead of ``gamma`` decay. Default is None.
        max_epoch (int, optional): maximum epoch (for cosine annealing). Default is 1.
        frozen (int, optional): number of initial frozen epochs ("multi_step_warmup"). Default is 20.
        warmup (int, optional): number of warm-up epochs ("multi_step_warmup"). Default is 10.
        warmup_factor_base (float, optional): starting LR factor of the warm-up. Default is 0.1.
        frozen_factor_base (float, optional): LR factor during the frozen phase. Default is 0.1.
    Returns:
        a ``torch.optim.lr_scheduler`` instance.
    Raises:
        ValueError: if ``lr_scheduler`` is not one of ``AVAI_SCH``.
        TypeError: if ``stepsize`` has the wrong type for the chosen scheduler.
    Examples::
        >>> # Decay learning rate by every 20 epochs.
        >>> scheduler = torchreid.optim.build_lr_scheduler(
        >>>     optimizer, lr_scheduler='single_step', stepsize=20
        >>> )
        >>> # Decay learning rate at 30, 50 and 55 epochs.
        >>> scheduler = torchreid.optim.build_lr_scheduler(
        >>>     optimizer, lr_scheduler='multi_step', stepsize=[30, 50, 55]
        >>> )
    """
    if lr_scheduler not in AVAI_SCH:
        raise ValueError('Unsupported scheduler: {}. Must be one of {}'.format(lr_scheduler, AVAI_SCH))
    if lr_scheduler == 'single_step':
        # Accept a list for convenience and use its last entry.
        if isinstance(stepsize, list):
            stepsize = stepsize[-1]
        if not isinstance(stepsize, int):
            raise TypeError(
                'For single_step lr_scheduler, stepsize must '
                'be an integer, but got {}'.format(type(stepsize))
            )
        scheduler = torch.optim.lr_scheduler.StepLR(
            optimizer, step_size=stepsize, gamma=gamma
        )
    elif lr_scheduler == 'multi_step':
        if not isinstance(stepsize, list):
            raise TypeError(
                'For multi_step lr_scheduler, stepsize must '
                'be a list, but got {}'.format(type(stepsize))
            )
        scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer, milestones=stepsize, gamma=gamma
        )
    elif lr_scheduler == 'cosine':
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, float(max_epoch)
        )
    elif lr_scheduler == 'multi_step_warmup':
        if not isinstance(stepsize, list):
            # Fixed: the original message wrongly said "multi_step" here.
            raise TypeError(
                'For multi_step_warmup lr_scheduler, stepsize must '
                'be a list, but got {}'.format(type(stepsize))
            )
        scheduler = MultiStepLRWithWarmUp(
            optimizer, milestones=stepsize, frozen_iters=frozen, gamma=gamma, lr_scales=lr_scales,
            warmup_factor_base=warmup_factor_base, frozen_factor_base=frozen_factor_base, warmup_iters=warmup
        )
    else:
        # Defensive: only reachable if AVAI_SCH gains an entry without a branch.
        raise ValueError('Unknown scheduler: {}'.format(lr_scheduler))
    return scheduler
class MultiStepLRWithWarmUp(_LRScheduler):
    """Multi-step LR schedule preceded by a frozen phase and a warm-up phase.

    Phases, measured in scheduler steps (``last_epoch``):
      1. ``[0, frozen_iters)``: LR held at ``frozen_factor_base * base_lr``.
      2. ``[frozen_iters, frozen_iters + warmup_iters]``: LR ramps from
         ``warmup_factor_base * base_lr`` up to ``base_lr`` (constant or
         linear ramp, per ``warmup_method``).
      3. Afterwards: multi-step schedule driven by ``milestones``, using
         either explicit ``lr_scales`` or ``gamma`` decay per stage.
    """
    def __init__(self,
                 optimizer,
                 milestones,
                 warmup_iters,
                 frozen_iters,
                 lr_scales=None,
                 warmup_method='linear',
                 warmup_factor_base=0.1,
                 frozen_factor_base=1.0,
                 gamma=0.1,
                 last_epoch=-1):
        """
        :param optimizer: wrapped torch optimizer.
        :param milestones: iterable of step indices at which the LR stage
            advances (sorted internally).
        :param warmup_iters: length of the warm-up phase in steps.
        :param frozen_iters: length of the initial frozen phase in steps.
        :param lr_scales: optional explicit LR scale per stage; must have
            ``len(milestones) + 1`` entries when given.
        :param warmup_method: 'constant' or 'linear'.
        :param warmup_factor_base: LR factor at the start of warm-up.
        :param frozen_factor_base: LR factor during the frozen phase.
        :param gamma: per-milestone decay used when lr_scales is not given.
        :param last_epoch: resume step index, or -1 to start fresh.
        :raises KeyError: for an unknown ``warmup_method``.
        :raises ValueError: for ``last_epoch < -1``.
        """
        if warmup_method not in {'constant', 'linear'}:
            raise KeyError('Unknown warm up method: {}'.format(warmup_method))
        self.milestones = sorted(milestones)
        self.gamma = gamma
        self.lr_scales = lr_scales
        self.warmup_iters = warmup_iters
        self.frozen_iters = frozen_iters
        self.warmup_method = warmup_method
        self.warmup_factor_base = warmup_factor_base
        self.frozen_factor_base = frozen_factor_base
        # Explicit per-stage scales replace gamma decay when provided.
        self.uses_lr_scales = self.lr_scales is not None and len(self.lr_scales) > 0
        if self.uses_lr_scales:
            # One scale per stage: before the first milestone, between each
            # pair of milestones, and after the last one.
            assert len(self.lr_scales) == len(self.milestones) + 1
        # Base class calls method `step` which increases `last_epoch` by 1 and then calls
        # method `get_lr` with this value. If `last_epoch` is not equal to -1, we drop
        # the first step, so to avoid this dropping do small fix by subtracting 1
        if last_epoch > -1:
            last_epoch = last_epoch - 1
        elif last_epoch < -1:
            raise ValueError('Learning rate scheduler got incorrect parameter last_epoch = {}'.format(last_epoch))
        super(MultiStepLRWithWarmUp, self).__init__(optimizer, last_epoch)
    def get_lr(self):
        """Return the LR for each param group at step ``self.last_epoch``."""
        # Frozen phase: hold LR at frozen_factor_base * base_lr.
        if self.last_epoch < self.frozen_iters:
            return [self.frozen_factor_base * base_lr for base_lr in self.base_lrs]
        # Warm-up phase: change learning rate on every step per warmup_factor.
        if self.last_epoch < self.frozen_iters + self.warmup_iters:
            if self.warmup_method == 'constant':
                warmup_factor = self.warmup_factor_base
            elif self.warmup_method == 'linear':
                # alpha goes 0 -> 1 across the warm-up window.
                alpha = (self.last_epoch - self.frozen_iters) / self.warmup_iters
                warmup_factor = self.warmup_factor_base * (1 - alpha) + alpha
            return [base_lr * warmup_factor for base_lr in self.base_lrs]
        # On the last step of warm up set learning rate equal to base LR
        elif self.last_epoch == self.frozen_iters + self.warmup_iters:
            return [base_lr for base_lr in self.base_lrs]
        # After warm-up, scale LR by the stage reached in `milestones`
        # (explicit lr_scales if given, otherwise gamma ** stage).
        else:
            if self.uses_lr_scales:
                lr_scale = self.lr_scales[bisect_right(self.milestones, self.last_epoch)]
            else:
                lr_scale = self.gamma ** bisect_right(self.milestones, self.last_epoch)
            return [base_lr * lr_scale for base_lr in self.base_lrs]
    def __repr__(self):
        format_string = self.__class__.__name__ + \
            '[warmup_method = {}, warmup_factor_base = {}, warmup_iters = {},' \
            ' milestones = {}, gamma = {}]'.format(self.warmup_method, self.warmup_factor_base,
                                                   self.warmup_iters, str(list(self.milestones)),
                                                   self.gamma)
        return format_string
|
python
|
"""
File: rocket.py
Name:Claire Lin
-----------------------
This program should implement a console program
that draws ASCII art - a rocket.
The size of rocket is determined by a constant
defined as SIZE at top of the file.
Output format should match what is shown in the sample
run in the Assignment 2 Handout.
"""
# This constant determines rocket size.
SIZE = 3
def main():
    """Draw the full rocket: nose cone, belt, upper body, lower body, belt, tail."""
    for draw_section in (head, belt, upper, lower, belt, head):
        draw_section()
def head():
    """Print the nose cone: SIZE centered rows of widening '/'s and '\\'s."""
    for row in range(SIZE):
        pad = ' ' * (SIZE - row)
        print(pad + '/' * (row + 1) + '\\' * (row + 1))
def belt():
    """Print a horizontal separator: '+', 2*SIZE equal signs, '+'."""
    print('+' + '=' * (SIZE * 2) + '+')
def upper():
    """Print the upper body: rows of '/\\' pairs flanked by dots, widening downward."""
    for row in range(SIZE):
        dots = '.' * (SIZE - 1 - row)
        print('|' + dots + '/\\' * (row + 1) + dots + '|')
def lower():
    """Print the lower body: rows of '\\/' pairs flanked by dots, narrowing downward."""
    for row in range(SIZE):
        dots = '.' * row
        print('|' + dots + '\\/' * (SIZE - row) + dots + '|')
###### DO NOT EDIT CODE BELOW THIS LINE ######
if __name__ == "__main__":
main()
|
python
|
from enum import Enum
class RecipeStyle(Enum):
    """
    Enum of allrecipes.com style labels: dietary restrictions and cuisines.
    """
    # Dietary-restriction labels.
    diabetic = 'diabetic'
    dairy_free = 'dairy_free'
    # NOTE(review): this value uses a hyphen unlike the other *_free labels —
    # presumably it mirrors the site's label verbatim; confirm before changing.
    sugar_free = 'sugar-free'
    gluten_free = 'gluten_free'
    low_cholesterol = 'low_cholesterol'
    # Cuisine labels.
    mediterranean = 'mediterranean'
    chinese = 'chinese'
    indian = 'indian'
    japanese = 'japanese'
    korean = 'korean'
    thai = 'thai'
    european = 'european'
    italian = 'italian'
    american = 'american'
    mexican = 'mexican'
    eastern = 'eastern'
|
python
|
from typing import Union, List, Optional
from pyspark.sql.types import StructType, StructField, StringType, ArrayType, DataType
# This file is auto-generated by generate_schema so do not edit manually
# noinspection PyPep8Naming
class ImplementationGuide_PageSchema:
    """
    A set of rules of how FHIR is used to solve a particular problem. This
    resource is used to gather all the parts of an implementation guide into a
    logical whole and to publish a computable definition of all the parts.
    """
    # noinspection PyDefaultArgument
    # NOTE: the mutable list defaults below are shared across calls; this
    # method only reads them (count / list concatenation), so that is safe
    # here — but callers must not mutate the returned defaults.
    @staticmethod
    def get_schema(
        max_nesting_depth: Optional[int] = 6,
        nesting_depth: int = 0,
        nesting_list: List[str] = [],
        max_recursion_limit: Optional[int] = 2,
        include_extension: Optional[bool] = False,
        extension_fields: Optional[List[str]] = [
            "valueBoolean",
            "valueCode",
            "valueDate",
            "valueDateTime",
            "valueDecimal",
            "valueId",
            "valueInteger",
            "valuePositiveInt",
            "valueString",
            "valueTime",
            "valueUnsignedInt",
            "valueUri",
            "valueQuantity",
        ],
        extension_depth: int = 0,
        max_extension_depth: Optional[int] = 2,
    ) -> Union[StructType, DataType]:
        """
        A set of rules of how FHIR is used to solve a particular problem. This
        resource is used to gather all the parts of an implementation guide into a
        logical whole and to publish a computable definition of all the parts.
        id: unique id for the element within a resource (for internal references). This
        may be any string value that does not contain spaces.
        extension: May be used to represent additional information that is not part of the basic
        definition of the element. In order to make the use of extensions safe and
        manageable, there is a strict set of governance applied to the definition and
        use of extensions. Though any implementer is allowed to define an extension,
        there is a set of requirements that SHALL be met as part of the definition of
        the extension.
        source: The source address for the page.
        title: A short title used to represent this page in navigational structures such as
        table of contents, bread crumbs, etc.
        kind: The kind of page that this is. Some pages are autogenerated (list, example),
        and other kinds are of interest so that tools can navigate the user to the
        page of interest.
        type: For constructed pages, what kind of resources to include in the list.
        package: For constructed pages, a list of packages to include in the page (or else
        empty for everything).
        format: The format of the page.
        page: Nested Pages/Sections under this page.
        """
        from spark_fhir_schemas.stu3.complex_types.extension import ExtensionSchema
        # Truncate the schema to a bare id once either the recursion limit for
        # this type or the overall nesting depth has been reached.
        if (
            max_recursion_limit
            and nesting_list.count("ImplementationGuide_Page") >= max_recursion_limit
        ) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
            return StructType([StructField("id", StringType(), True)])
        # add my name to recursion list for later
        my_nesting_list: List[str] = nesting_list + ["ImplementationGuide_Page"]
        schema = StructType(
            [
                # unique id for the element within a resource (for internal references). This
                # may be any string value that does not contain spaces.
                StructField("id", StringType(), True),
                # May be used to represent additional information that is not part of the basic
                # definition of the element. In order to make the use of extensions safe and
                # manageable, there is a strict set of governance applied to the definition and
                # use of extensions. Though any implementer is allowed to define an extension,
                # there is a set of requirements that SHALL be met as part of the definition of
                # the extension.
                StructField(
                    "extension",
                    ArrayType(
                        ExtensionSchema.get_schema(
                            max_nesting_depth=max_nesting_depth,
                            nesting_depth=nesting_depth + 1,
                            nesting_list=my_nesting_list,
                            max_recursion_limit=max_recursion_limit,
                            include_extension=include_extension,
                            extension_fields=extension_fields,
                            extension_depth=extension_depth,
                            max_extension_depth=max_extension_depth,
                        )
                    ),
                    True,
                ),
                # The source address for the page.
                StructField("source", StringType(), True),
                # A short title used to represent this page in navigational structures such as
                # table of contents, bread crumbs, etc.
                StructField("title", StringType(), True),
                # The kind of page that this is. Some pages are autogenerated (list, example),
                # and other kinds are of interest so that tools can navigate the user to the
                # page of interest.
                StructField("kind", StringType(), True),
                # For constructed pages, what kind of resources to include in the list.
                StructField("type", ArrayType(StringType()), True),
                # For constructed pages, a list of packages to include in the page (or else
                # empty for everything).
                StructField("package", ArrayType(StringType()), True),
                # The format of the page.
                StructField("format", StringType(), True),
                # Nested Pages/Sections under this page.
                # Recursive self-reference; bounded by the guard above.
                StructField(
                    "page",
                    ArrayType(
                        ImplementationGuide_PageSchema.get_schema(
                            max_nesting_depth=max_nesting_depth,
                            nesting_depth=nesting_depth + 1,
                            nesting_list=my_nesting_list,
                            max_recursion_limit=max_recursion_limit,
                            include_extension=include_extension,
                            extension_fields=extension_fields,
                            extension_depth=extension_depth,
                            max_extension_depth=max_extension_depth,
                        )
                    ),
                    True,
                ),
            ]
        )
        # When extensions are excluded, collapse the extension struct into a
        # plain string column so the field still exists in the schema.
        if not include_extension:
            schema.fields = [
                c
                if c.name != "extension"
                else StructField("extension", StringType(), True)
                for c in schema.fields
            ]
        return schema
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-15 01:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates Article and Category, then links them.

    Auto-generated by Django; do not hand-edit operations once applied.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        # Article: a piece of content identified by file type and title.
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('file_type', models.CharField(max_length=100)),
                ('article_title', models.CharField(max_length=1000)),
            ],
        ),
        # Category: a topic with its author and the author's avatar URL/path.
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('topic', models.CharField(max_length=250)),
                ('author', models.CharField(max_length=250)),
                ('author_avatar', models.CharField(max_length=1000)),
            ],
        ),
        # Added separately so Category exists before the FK is created;
        # deleting a Category cascades to its Articles.
        migrations.AddField(
            model_name='article',
            name='category',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='post.Category'),
        ),
    ]
|
python
|
from rallf.sdk.logger import Logger
from rallf.sdk.network import Network
from rallf.sdk.listener import Listener
class Task:
    """Lifecycle container for a robot task: wires up network, logging and
    message listening, and tracks the task's status string."""
    def __init__(self, manifest, robot, input, output):
        """Set up the task's channels from its manifest and owning robot."""
        fqtn = manifest['fqtn']
        self.manifest = manifest
        self.robot = robot
        self.finished = False
        self.status = "stopped"
        self.network = Network(input, output, fqtn)
        self.logger = Logger(input, output, fqtn)
        self.listener = Listener(input, output)
        self.home = "%s/data/%s" % (robot.home, fqtn)
    def warmup(self):
        """Mark the task as ready to receive work."""
        self.status = "ready"
    def waitloop(self):
        """Block dispatching incoming messages until finish() is called."""
        while not self.finished:
            self.listener.listen(self)
    def main(self, input):
        """Task entry point; subclasses override this with real behavior."""
        pass
    def cooldown(self):
        """Mark the task as finished."""
        self.status = "finished"
    def finish(self):
        """Request termination so waitloop() exits after the current message."""
        self.status = "terminating"
        self.finished = True
|
python
|
from django.urls import re_path
from . import views
urlpatterns = [
    # Site index: matches the empty path.
    re_path(r'^$',
            views.IndexView.as_view(),
            name='index'),
    # Page detail: <pk> accepts word characters and hyphens (slug-like ids).
    re_path(r'^(?P<pk>[-\w]+)/$',
            views.PageDetailView.as_view(),
            name='page_detail'),
]
|
python
|
import unittest
import time
from liquidtap.client import Client
class TestClient(unittest.TestCase):
    """Smoke test: connect to LiquidTap, subscribe to the BTC/JPY
    price-ladder channels, and print any updates received within 3s."""
    def setUp(self) -> None:
        self.client = Client()
        # Defer subscribing until the websocket connection is established.
        self.client.pusher.connection.bind('pusher:connection_established', self._on_connect)
    def _on_connect(self, data: str):
        """Connection callback: subscribe to the buy/sell price ladders."""
        print('on_connect:', data)
        # Plain string literals: the original used f-strings with no
        # placeholders (lint F541); values are unchanged.
        self.client.pusher.subscribe("price_ladders_cash_btcjpy_buy").bind(
            'updated', self._callback)
        self.client.pusher.subscribe("price_ladders_cash_btcjpy_sell").bind(
            'updated', self._callback)
    @staticmethod
    def _callback(data: str):
        """Print each price-ladder update payload as received."""
        print(data)
    def test_connect(self):
        """Connect and wait briefly for updates.

        Example output: ``on_connect: {"activity_timeout":120,...}`` followed
        by JSON arrays of [price, amount] ladder rows for each update.
        """
        self.client.pusher.connect()
        time.sleep(3)
if __name__ == '__main__':
unittest.main()
|
python
|
class NopBackpressureManager:
    """No-op backpressure manager: accepts all register/unregister calls
    and never reports that the limit has been reached."""
    def __init__(self):
        pass
    def register_pressure(self):
        """No-op."""
        pass
    def unregister_pressure(self):
        """No-op."""
        pass
    def reached(self) -> bool:
        """Always False: the no-op manager never saturates."""
        return False
class BackpressureManager:
    """Counts outstanding pressure registrations against a fixed limit."""
    def __init__(self, max):
        # `max` shadows the builtin but is kept for interface compatibility.
        self.max = max
        self.pressure = 0
    def register_pressure(self):
        """Record one more unit of outstanding pressure."""
        self.pressure = self.pressure + 1
    def unregister_pressure(self):
        """Release one unit of outstanding pressure."""
        self.pressure = self.pressure - 1
    def reached(self) -> bool:
        """True once outstanding pressure meets or exceeds the limit."""
        return not self.pressure < self.max
|
python
|
import re
import croniter
from datetime import timedelta, datetime
from functools import partial
from airflow import DAG
from airflow.sensors.external_task import ExternalTaskSensor
from dagger import conf
from dagger.alerts.alert import airflow_task_fail_alerts
from dagger.dag_creator.airflow.operator_factory import OperatorFactory
from dagger.dag_creator.airflow.utils.macros import user_defined_macros
from dagger.dag_creator.graph_traverser_base import GraphTraverserBase
from dagger.graph.task_graph import Graph
# noinspection PyStatementEffect
class DagCreator(GraphTraverserBase):
    """Traverses a dagger task graph and materializes it as Airflow DAGs:
    one DAG per pipeline, one control-flow gate task per DAG, job operators
    for task nodes, optional dataset operators for data nodes, and
    ExternalTaskSensors for cross-DAG dependencies."""
    def __init__(self, task_graph: Graph, with_data_nodes: bool = conf.WITH_DATA_NODES):
        super().__init__(task_graph=task_graph, with_data_nodes=with_data_nodes)
        self._operator_factory = OperatorFactory()
    @staticmethod
    def _get_control_flow_task_id(pipe_id):
        """Return the task id of a pipeline's control-flow gate task."""
        return "control_flow:{}".format(pipe_id)
    @staticmethod
    def _get_default_args():
        """Default Airflow task args applied to every generated DAG."""
        return {
            "depends_on_past": True,
            "retries": 0,
            "retry_delay": timedelta(minutes=5),
        }
    @staticmethod
    def _get_execution_date_fn(from_dag_schedule: str, to_dag_schedule: str):
        """Build the execution_date_fn for an ExternalTaskSensor: maps this
        DAG's execution date to the upstream DAG's execution date to wait on.

        Both schedules must be cron expressions (callers guard against
        @-aliases before using this).
        """
        def execution_date_fn(execution_date, **kwargs):
            # Next run of the downstream DAG after the current execution date.
            to_dag_cron = croniter.croniter(to_dag_schedule, execution_date)
            to_dag_next_schedule = to_dag_cron.get_next(datetime)
            from_dag_cron = croniter.croniter(from_dag_schedule, to_dag_next_schedule)
            from_dag_cron.get_next(datetime)
            # skipping one schedule
            from_dag_cron.get_prev(datetime)
            # NOTE(review): the get_next/get_prev sequence above positions the
            # iterator so this second get_prev lands on the upstream run to
            # wait for — confirm against croniter's boundary-inclusion rules.
            from_dag_target_schedule = from_dag_cron.get_prev(datetime)
            return from_dag_target_schedule
        return execution_date_fn
    def _get_external_task_sensor(self, from_task_id: str, to_task_id: str) -> ExternalTaskSensor:
        """Create a sensor that waits for an upstream task in another DAG."""
        from_pipeline_name = self._task_graph.get_node(from_task_id).obj.pipeline_name
        from_task_name = self._task_graph.get_node(from_task_id).obj.name
        from_pipeline_schedule = self._task_graph.get_node(from_task_id).obj.pipeline.schedule
        to_pipeline_schedule = self._task_graph.get_node(to_task_id).obj.pipeline.schedule
        return ExternalTaskSensor(
            task_id=f"{from_pipeline_name}-{from_task_name}-sensor",
            external_dag_id=from_pipeline_name,
            external_task_id=from_task_name,
            execution_date_fn=self._get_execution_date_fn(from_pipeline_schedule, to_pipeline_schedule),
            mode=conf.EXTERNAL_SENSOR_MODE,
            poke_interval=conf.EXTERNAL_SENSOR_POKE_INTERVAL,
            timeout=conf.EXTERNAL_SENSOR_TIMEOUT
        )
    def _create_control_flow_task(self, pipe_id, dag):
        """Create the gate task that heads every generated DAG."""
        control_flow_task_id = self._get_control_flow_task_id(pipe_id)
        self._tasks[
            control_flow_task_id
        ] = self._operator_factory.create_control_flow_operator(conf.IS_DUMMY_OPERATOR_SHORT_CIRCUIT, dag)
    def _create_dag(self, pipe_id, node):
        """Build the Airflow DAG for one pipeline node, incl. its gate task."""
        pipeline = node.obj
        default_args = DagCreator._get_default_args()
        # Pipeline-level overrides win over the global defaults.
        default_args.update(pipeline.default_args)
        # Airflow's "owner" is the local part of the owner e-mail address.
        default_args["owner"] = pipeline.owner.split("@")[0]
        if len(pipeline.alerts) > 0:
            default_args["on_failure_callback"] = partial(
                airflow_task_fail_alerts, pipeline.alerts
            )
        dag = DAG(
            pipeline.name,
            description=pipeline.description,
            default_args=default_args,
            start_date=pipeline.start_date,
            schedule_interval=pipeline.schedule,
            user_defined_macros=user_defined_macros,
            **pipeline.parameters,
        )
        self._create_control_flow_task(pipe_id, dag)
        return dag
    def _create_job_task(self, node):
        """Create the operator for one job node inside its pipeline's DAG."""
        pipeline_id = node.obj.pipeline_name
        return self._operator_factory.create_operator(node.obj, self._dags[pipeline_id])
    def _create_data_task(self, pipe_id, node):
        """Create (once per pipeline) the operator representing a dataset."""
        if pipe_id not in self._data_tasks:
            self._data_tasks[pipe_id] = {}
        dataset_id = node.obj.airflow_name
        if dataset_id not in self._data_tasks[pipe_id]:
            self._data_tasks[pipe_id][
                dataset_id
            ] = self._operator_factory.create_dataset_operator(
                # Airflow task ids only allow alphanumerics, '-' and '_'.
                re.sub("[^0-9a-zA-Z-_]+", "_", dataset_id), self._dags[pipe_id]
            )
    def _create_edge_without_data(self, from_task_id, to_task_ids, node):
        """Wire task-to-task dependencies when data nodes are disabled.

        Same-pipeline edges become direct Airflow dependencies; cross-pipeline
        edges either go through an ExternalTaskSensor (when requested and both
        schedules are plain cron expressions) or just hang off the downstream
        DAG's control-flow gate task.
        """
        from_pipe = (
            self._task_graph.get_node(from_task_id).obj.pipeline_name
            if from_task_id
            else None
        )
        for to_task_id in to_task_ids:
            edge_properties = self._task_graph.get_edge(node.obj.alias(), to_task_id)
            to_pipe = self._task_graph.get_node(to_task_id).obj.pipeline_name
            if from_pipe and from_pipe == to_pipe:
                self._tasks[from_task_id] >> self._tasks[to_task_id]
            elif from_pipe and from_pipe != to_pipe and edge_properties.follow_external_dependency:
                from_schedule = self._task_graph.get_node(from_task_id).obj.pipeline.schedule
                to_schedule = self._task_graph.get_node(to_task_id).obj.pipeline.schedule
                # Sensors need cron expressions; '@daily'-style aliases can't
                # be fed to croniter, so fall back to the gate task.
                if not from_schedule.startswith('@') and not to_schedule.startswith('@'):
                    external_task_sensor = self._get_external_task_sensor(from_task_id, to_task_id)
                    self._tasks[self._get_control_flow_task_id(to_pipe)] >> external_task_sensor >> self._tasks[to_task_id]
            else:
                self._tasks[self._get_control_flow_task_id(to_pipe)] >> self._tasks[
                    to_task_id
                ]
    def _create_edge_with_data(self, from_task_id, to_task_ids, node):
        """Wire dependencies through dataset tasks when data nodes are enabled.

        Producer task >> its dataset task; each consumer pipeline's dataset
        task >> consumer task; cross-pipeline (or producer-less) datasets are
        additionally gated by the consumer DAG's control-flow task.
        """
        from_pipe = (
            self._task_graph.get_node(from_task_id).obj.pipeline_name
            if from_task_id
            else None
        )
        data_id = node.obj.airflow_name
        if from_pipe:
            self._tasks[from_task_id] >> self._data_tasks[from_pipe][data_id]
        for to_task_id in to_task_ids:
            to_pipe = self._task_graph.get_node(to_task_id).obj.pipeline_name
            self._data_tasks[to_pipe][data_id] >> self._tasks[to_task_id]
            if not from_pipe or (from_pipe != to_pipe):
                self._tasks[
                    self._get_control_flow_task_id(to_pipe)
                ] >> self._data_tasks[to_pipe][data_id]
|
python
|
from __future__ import absolute_import
from collections import defaultdict, Sequence, OrderedDict
import operator
from string import capwords
import numpy as np
from .elements import ELEMENTS
# records
# PDB record-name prefixes occupying columns 1-6 of each line.
MODEL = 'MODEL '
ATOM = 'ATOM '
HETATM = 'HETATM'
TER = 'TER '
# Fixed-width output templates for the PDB records written by tofile().
MODEL_LINE = 'MODEL ' + ' ' * 4 + '{:>4d}\n'
ENDMDL_LINE = 'ENDMDL\n'
TER_LINE = 'TER ' + '{:>5d}' + ' ' * 6 + '{:3s}' + ' ' + '{:1s}' + \
    '{:>4d}' + '{:1s}' + ' ' * 53 + '\n'
ATOM_LINE = '{:6s}' + '{:>5d}' + ' ' + '{:4s}' + '{:1s}' + '{:3s}' + ' ' + \
    '{:1s}' + '{:>4d}' + '{:1s}' + ' ' * 3 + '{:8.3f}' * 3 + '{:6.2f}' * 2 + \
    ' ' * 10 + '{:<2s}' + '{:2s}\n'
END_LINE = 'END \n'
# Column keys of an ATOM/HETATM record, in the order they appear in ATOM_LINE.
ATOM_DATA = ('record id name alt resn chain resi i x y z q b ' \
    'e charge').split()
# Column keys of a TER record, in the order they appear in TER_LINE.
TER_DATA = 'id resn chain resi i'.split()
def parse_pdb(infile):
    """Parse a PDB file into a dict of per-atom column lists.

    :param infile: an open file-like object or a path string.
    :return: defaultdict(list) keyed by column name ('record', 'id', 'name',
        ..., 'model'), each value a list with one entry per atom.
    :raises TypeError: if infile is neither file-like nor a string.

    NOTE: the input stream is closed before returning, even when it was
    supplied by the caller (original behavior, preserved).
    """
    if hasattr(infile, 'read'):
        # Duck-typed file check: the original tested `isinstance(infile, file)`,
        # but the `file` builtin only exists in Python 2 and raises NameError
        # under Python 3.
        f = infile
    elif isinstance(infile, str):
        f = open(infile)
    else:
        raise TypeError('Input should be either a file or string.')
    pdb = defaultdict(list)
    model_number = 1
    for line in f:
        record = line[:6]
        if record in (ATOM, HETATM):
            pdb['model'].append(model_number)
            pdb['record'].append(record)
            pdb['id'].append(int(line[6:11]))
            name = line[12:16].strip()
            pdb['name'].append(name)
            pdb['alt'].append(line[16])
            pdb['resn'].append(line[17:20].strip())
            pdb['chain'].append(line[21])
            pdb['resi'].append(int(line[22:26]))
            pdb['i'].append(line[26])
            pdb['x'].append(float(line[30:38]))
            pdb['y'].append(float(line[38:46]))
            pdb['z'].append(float(line[46:54]))
            pdb['q'].append(float(line[54:60]))
            pdb['b'].append(float(line[60:66]))
            # Be forgiving when determining the element
            e = line[76:78].strip()
            if not e:
                # If element is not given, take the first non-numeric letter of
                # the name as element.
                for e in name:
                    if e.isalpha():
                        break
            pdb['e'].append(e)
            pdb['charge'].append(line[78: 80].strip())
        elif record == MODEL:
            model_number = int(line[10: 14])
    f.close()
    return pdb
def tofile(pdb, out):
    """Write a parsed PDB dict (as produced by parse_pdb) to path *out*.

    Emits MODEL/ENDMDL wrappers when the dict holds multiple models, TER
    records at chain breaks after ATOM records, and a final END record.

    :param pdb: dict of per-atom column lists including a 'model' column.
    :param out: output file path.
    """
    f = open(out, 'w')
    nmodels = len(set(pdb['model']))
    natoms = len(pdb['id'])
    # Floor division: under Python 3 the original '/' yielded a float that
    # then broke range() and the list indexing below; all models are assumed
    # to contain the same number of atoms.
    natoms_per_model = natoms // nmodels
    for nmodel in range(nmodels):
        offset = nmodel * natoms_per_model
        # write MODEL record
        if nmodels > 1:
            f.write(MODEL_LINE.format(nmodel + 1))
        prev_chain = pdb['chain'][offset]
        for natom in range(natoms_per_model):
            index = offset + natom
            # write TER record
            current_chain = pdb['chain'][index]
            if prev_chain != current_chain:
                prev_record = pdb['record'][index - 1]
                if prev_record == ATOM:
                    line_data = [pdb[data][index - 1] for data in TER_DATA]
                    # TER serial number follows the last atom's serial.
                    line_data[0] += 1
                    f.write(TER_LINE.format(*line_data))
                prev_chain = current_chain
            # write ATOM/HETATM record
            line_data = [pdb[data][index] for data in ATOM_DATA]
            # take care of the rules for atom name position
            e = pdb['e'][index]
            name = pdb['name'][index]
            if len(e) == 1 and len(name) != 4:
                line_data[2] = ' ' + name
            f.write(ATOM_LINE.format(*line_data))
        # write ENDMDL record
        if nmodels > 1:
            f.write(ENDMDL_LINE)
    f.write(END_LINE)
    f.close()
def pdb_dict_to_array(pdb):
    """Convert a parsed PDB dict (see parse_pdb) into a numpy structured
    array with one row per atom."""
    dtype = [('record', np.str_, 6), ('id', np.int32),
             ('name', np.str_, 4), ('alt', np.str_, 1),
             ('resn', np.str_, 4), ('chain', np.str_, 2),
             ('resi', np.int32), ('i', np.str_, 1), ('x', np.float64),
             ('y', np.float64), ('z', np.float64),
             ('q', np.float64), ('b', np.float64),
             ('e', np.str_, 2), ('charge', np.str_, 2),
             ('model', np.int32)]
    n = len(pdb['id'])
    out = np.empty(n, dtype=dtype)
    for field in ATOM_DATA + ['model']:
        out[field] = pdb[field]
    return out
def pdb_array_to_dict(pdb_array):
    """Convert a structured atom array back into a dict of plain lists,
    the inverse of pdb_dict_to_array."""
    out = defaultdict(list)
    for field in ATOM_DATA + ['model']:
        out[field] = pdb_array[field].tolist()
    return out
class Structure(object):
    """In-memory molecular structure backed by a numpy structured array
    (one row per atom, fields as in pdb_dict_to_array's dtype)."""
    @classmethod
    def fromfile(cls, fid):
        """Initialize Structure from PDB-file.

        Also accepts mmCIF files (by '.cif' extension). *fid* may be a path
        string or an open file object exposing a ``name`` attribute.
        """
        try:
            fname = fid.name
        except AttributeError:
            fname = fid
        if fname[-3:] in ('pdb', 'ent'):
            arr = pdb_dict_to_array(parse_pdb(fid))
        elif fname[-3:] == 'cif':
            arr = mmcif_dict_to_array(parse_mmcif(fid))
        else:
            raise IOError('Filetype not recognized.')
        return cls(arr)
    def __init__(self, pdb):
        # pdb: numpy structured array as produced by pdb_dict_to_array /
        # mmcif_dict_to_array.
        self.data = pdb
    @property
    def atomnumber(self):
        """Return array of atom numbers"""
        return self._get_property('number')
    @property
    def chain_list(self):
        """Unique chain identifiers present in the structure."""
        return np.unique(self.data['chain'])
    def combine(self, structure):
        """Return a new Structure containing this structure's atoms followed
        by the other structure's atoms."""
        return Structure(np.hstack((self.data, structure.data)))
    @property
    def coor(self):
        """Return the coordinates"""
        # Shape (3, natoms): rows are x, y, z.
        return np.asarray([self.data['x'], self.data[ 'y'], self.data['z']])
    def duplicate(self):
        """Duplicate the object"""
        return Structure(self.data.copy())
    def _get_property(self, ptype):
        # Look the property up once per unique element, then broadcast back
        # to per-atom order via the inverse index.
        elements, ind = np.unique(self.data['e'], return_inverse=True)
        return np.asarray([getattr(ELEMENTS[capwords(e)], ptype)
                           for e in elements], dtype=np.float64)[ind]
    @property
    def mass(self):
        """Per-atom atomic masses."""
        return self._get_property('mass')
    def rmsd(self, structure):
        """Root-mean-square deviation to another structure with the same
        atom ordering (no alignment is performed)."""
        return np.sqrt(((self.coor - structure.coor) ** 2).mean() * 3)
    def rotate(self, rotmat):
        """Rotate atoms"""
        # In-place: applies the 3x3 rotation matrix to all coordinates.
        self.data['x'], self.data['y'], self.data['z'] = (
            np.asmatrix(rotmat) * np.asmatrix(self.coor)
        )
    def select(self, identifier, values, loperator='==', return_ind=False):
        """A simple way of selecting atoms.

        :param identifier: field name to test (e.g. 'chain', 'resi', 'e').
        :param values: a single value or a sequence of values.
        :param loperator: one of '==', '<', '>', '>=', '<=', '!='.
        :param return_ind: if True return the boolean mask instead of a
            new Structure.

        NOTE(review): `basestring` below exists only in Python 2; under
        Python 3 this line raises NameError. The module appears to target
        Python 2 throughout.
        """
        if loperator == '==':
            oper = operator.eq
        elif loperator == '<':
            oper = operator.lt
        elif loperator == '>':
            oper = operator.gt
        elif loperator == '>=':
            oper = operator.ge
        elif loperator == '<=':
            oper = operator.le
        elif loperator == '!=':
            oper = operator.ne
        else:
            raise ValueError('Logic operator not recognized.')
        if not isinstance(values, Sequence) or isinstance(values, basestring):
            values = (values,)
        selection = oper(self.data[identifier], values[0])
        if len(values) > 1:
            # '!=' must hold for ALL values (intersection); every other
            # operator accepts a match on ANY value (union).
            for v in values[1:]:
                if loperator == '!=':
                    selection &= oper(self.data[identifier], v)
                else:
                    selection |= oper(self.data[identifier], v)
        if return_ind:
            return selection
        else:
            return Structure(self.data[selection])
    @property
    def sequence(self):
        # NOTE(review): np.unique sorts residue ids and ignores chains, so
        # for multi-chain structures or non-sequential numbering this may
        # not reflect true sequence order — verify before relying on it.
        resids, indices = np.unique(self.data['resi'], return_index=True)
        return self.data['resn'][indices]
    def translate(self, trans):
        """Translate atoms"""
        self.data['x'] += trans[0]
        self.data['y'] += trans[1]
        self.data['z'] += trans[2]
    def tofile(self, fid):
        """Write instance to PDB-file"""
        tofile(pdb_array_to_dict(self.data), fid)
    @property
    def rvdw(self):
        """Per-atom van der Waals radii."""
        return self._get_property('vdwrad')
def parse_mmcif(infile):
    """Parse the _atom_site table of an mmCIF file.

    :param infile: an open file-like object or a path string.
    :return: OrderedDict mapping each _atom_site column name to a list of
        string values, one per ATOM line, in file order.
    :raises TypeError: if infile is neither file-like nor a string.

    NOTE: the input stream is closed before returning (via the with-block),
    even when it was supplied by the caller (original behavior, preserved).
    """
    if hasattr(infile, 'read'):
        # Duck-typed file check: the original tested `isinstance(infile, file)`,
        # but the `file` builtin only exists in Python 2 and raises NameError
        # under Python 3.
        pass
    elif isinstance(infile, str):
        infile = open(infile)
    else:
        raise TypeError("Input should either be a file or string.")
    atom_site = OrderedDict()
    with infile as f:
        for line in f:
            # Column declarations, e.g. "_atom_site.Cartn_x".
            if line.startswith('_atom_site.'):
                words = line.split('.')
                atom_site[words[1].strip()] = []
            # Data rows: whitespace-separated values zipped with the
            # declared columns in order.
            if line.startswith('ATOM'):
                words = line.split()
                for key, word in zip(atom_site, words):
                    atom_site[key].append(word)
    return atom_site
def mmcif_dict_to_array(atom_site):
    """Convert a parsed _atom_site table (see parse_mmcif) into a numpy
    structured array with one row per atom; numpy casts the string values
    to each field's dtype on assignment."""
    dtype = [('record', np.str_, 6), ('id', np.int32),
             ('name', np.str_, 4), ('alt', np.str_, 1),
             ('resn', np.str_, 4), ('chain', np.str_, 2),
             ('resi', np.int32), ('i', np.str_, 1), ('x', np.float64),
             ('y', np.float64), ('z', np.float64),
             ('q', np.float64), ('b', np.float64),
             ('e', np.str_, 2), ('charge', np.str_, 2),
             ('model', np.int32)]
    n = len(atom_site['id'])
    arr = np.zeros(n, dtype=dtype)
    arr['record'] = 'ATOM '
    # (structured-array field, _atom_site column) pairs.
    field_map = [
        ('id', 'id'), ('name', 'label_atom_id'), ('resn', 'label_comp_id'),
        ('chain', 'label_asym_id'), ('resi', 'label_seq_id'),
        ('x', 'Cartn_x'), ('y', 'Cartn_y'), ('z', 'Cartn_z'),
        ('q', 'occupancy'), ('b', 'B_iso_or_equiv'), ('e', 'type_symbol'),
        ('charge', 'pdbx_formal_charge'), ('model', 'pdbx_PDB_model_num'),
    ]
    for dst, src in field_map:
        arr[dst] = atom_site[src]
    return arr
|
python
|
# Copyright 2017-2020 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division

import errno
import logging
import os
import re
import subprocess
class Scheme(object):
    """Configuration for reading commit/status info from one VCS type.

    Each ``(cmd, pattern, ok_errors)`` triple holds the command to run,
    the regex whose first group extracts the value of interest from the
    command output, and the exit codes treated as "no result" rather
    than errors.
    """

    def __init__(self, name, commit_cmd, commit_pattern, commit_ok_errors,
                 status_cmd, status_pattern, status_ok_errors):
        self.name = name
        # Commit lookup configuration.
        self.commit_cmd = commit_cmd
        self.commit_pattern = commit_pattern
        self.commit_ok_errors = commit_ok_errors
        # Workspace status configuration.
        self.status_cmd = status_cmd
        self.status_pattern = status_pattern
        self.status_ok_errors = status_ok_errors
# Known VCS schemes, consulted in order by commit_for_dir.
SCHEMES = [
    Scheme(
        name="git",
        commit_cmd=["git", "log", "-1", "."],
        commit_pattern=re.compile(r"commit ([a-f0-9]+)"),
        commit_ok_errors=[128],
        status_cmd=["git", "status", "-s"],
        status_pattern=re.compile(r"(.)"),
        status_ok_errors=[],
    )
]

log = logging.getLogger("guild")
class NoCommit(Exception):
    """Raised when no VCS scheme can provide a commit for a directory."""
class CommitReadError(Exception):
    """Raised when running a VCS command fails in an unexpected way."""
def commit_for_dir(dir):
    """Return a tuple of commit and workspace status for *dir*.

    Each known VCS scheme is tried in order; the first scheme that
    yields a commit wins.  The commit is formatted as
    ``"<scheme>:<hash>"`` and the status element is True when the
    workspace has uncommitted changes.

    Raises NoCommit if no scheme can provide a commit.
    """
    dir = os.path.abspath(dir)
    for scheme in SCHEMES:
        commit = _apply_scheme(
            dir,
            scheme.commit_cmd,
            scheme.commit_pattern,
            scheme.commit_ok_errors,
        )
        if commit is None:
            # This scheme has no commit for dir - try the next scheme
            # instead of failing outright.  (Raising here would make any
            # scheme after the first unreachable.)
            continue
        status = _apply_scheme(
            dir,
            scheme.status_cmd,
            scheme.status_pattern,
            scheme.status_ok_errors,
        )
        return _format_commit(commit, scheme), _format_status(status)
    raise NoCommit(dir)
def _apply_scheme(repo_dir, cmd_template, pattern, ok_errors):
    """Run a scheme command in *repo_dir* and extract its result.

    Returns the first group of *pattern* matched against the command
    output, or None when the command is unavailable, exits with a code
    listed in *ok_errors*, or its output does not match *pattern*.

    Raises CommitReadError for any other failure.
    """
    cmd = [arg.format(repo=repo_dir) for arg in cmd_template]
    log.debug("vcs scheme cmd for repo %s: %s", repo_dir, cmd)
    try:
        out = subprocess.check_output(
            cmd, cwd=repo_dir, env=os.environ, stderr=subprocess.STDOUT
        )
    except OSError as e:
        # VCS executable not installed - treat as "no result".
        if e.errno == errno.ENOENT:
            return None
        raise CommitReadError(e)
    except subprocess.CalledProcessError as e:
        if e.returncode in ok_errors:
            return None
        raise CommitReadError(e, e.output)
    else:
        out = out.decode("ascii", errors="replace")
        log.debug("vcs scheme result: %s", out)
        m = pattern.match(out)
        if not m:
            return None
        return m.group(1)
def _format_commit(commit, scheme):
return "%s:%s" % (scheme.name, commit)
def _format_status(status):
return bool(status)
|
python
|
"""Module for raceplan exceptions."""
class CompetitionFormatNotSupportedException(Exception):
    """Raised when an event's competition format is not supported."""

    def __init__(self, message: str) -> None:
        """Store the explanatory *message* on the exception."""
        super().__init__(message)
class NoRaceclassesInEventException(Exception):
    """Raised when an event has no raceclasses."""

    def __init__(self, message: str) -> None:
        """Store the explanatory *message* on the exception."""
        super().__init__(message)
class InconsistentValuesInContestantsException(Exception):
    """Raised when contestant data contains inconsistent values."""

    def __init__(self, message: str) -> None:
        """Store the explanatory *message* on the exception."""
        super().__init__(message)
class InconsistentValuesInRaceclassesException(Exception):
    """Raised when raceclass data contains inconsistent values."""

    def __init__(self, message: str) -> None:
        """Store the explanatory *message* on the exception."""
        super().__init__(message)
class MissingPropertyException(Exception):
    """Raised when a required property is missing."""

    def __init__(self, message: str) -> None:
        """Store the explanatory *message* on the exception."""
        super().__init__(message)
class InvalidDateFormatException(Exception):
    """Raised when a date string has an invalid format."""

    def __init__(self, message: str) -> None:
        """Store the explanatory *message* on the exception."""
        super().__init__(message)
class NoRaceplanInEventException(Exception):
    """Raised when an event has no raceplan."""

    def __init__(self, message: str) -> None:
        """Store the explanatory *message* on the exception."""
        super().__init__(message)
class DuplicateRaceplansInEventException(Exception):
    """Raised when an event has more than one raceplan."""

    def __init__(self, message: str) -> None:
        """Store the explanatory *message* on the exception."""
        super().__init__(message)
class InconsistentInputDataException(Exception):  # pragma: no cover
    """Raised when input data is internally inconsistent."""

    def __init__(self, message: str) -> None:
        """Store the explanatory *message* on the exception."""
        super().__init__(message)
class NoRacesInRaceplanException(Exception):
    """Raised when a raceplan contains no races."""

    def __init__(self, message: str) -> None:
        """Store the explanatory *message* on the exception."""
        super().__init__(message)
class CouldNotCreateRaceplanException(Exception):
    """Raised when a raceplan could not be created."""

    def __init__(self, message: str) -> None:
        """Store the explanatory *message* on the exception."""
        super().__init__(message)
class CouldNotCreateRaceException(Exception):
    """Raised when a race could not be created."""

    def __init__(self, message: str) -> None:
        """Store the explanatory *message* on the exception."""
        super().__init__(message)
|
python
|
################ Running: F:\users\emiwar\edited_new\catAtt.py #################
COM1
Serial<id=0x52b47f0, open=True>(port='COM1', baudrate=115200, bytesize=8, parity='N', stopbits=1, timeout=2.5, xonxoff=False, rtscts=False, dsrdtr=False)
492
1 False [('b', 1.378630934454577)]
1 finished.
2 desert [('b', 1.3728325826727996)]
2 finished.
3 woods [('b', 0.2293752072882853)]
3 finished.
4 chair [('a', 0.6698945223711235)]
4 finished.
5 bathroom [('a', 1.068733033890112)]
5 finished.
6 car [('a', 0.7773190108109702)]
6 finished.
7 bathroom [('a', 0.7775817591646046)]
7 finished.
8 coast [('b', 0.5513196506735767)]
8 finished.
9 car [('b', 0.48504929610680847)]
9 finished.
10 bathroom [('a', 0.5399842292335961)]
10 finished.
11 desert [('a', 0.6085093517594942)]
11 finished.
12 False [('a', 0.8668944992050456)]
12 finished.
13 car [('b', 0.7476574781967429)]
13 finished.
14 car [('b', 0.3686060290774549)]
14 finished.
15 bathroom [('a', 1.200669363160614)]
15 finished.
16 desert [('a', 0.8890058288493492)]
16 finished.
17 chair [('a', 0.21403903161990456)]
17 finished.
18 desert [('b', 0.39270703235979454)]
18 finished.
19 car [('a', 0.5123246865859983)]
19 finished.
20 chair [('a', 0.3190014272281587)]
20 finished.
21 flower [('a', 0.5525155075779367)]
21 finished.
22 shoe [('b', 0.8203921460526544)]
22 finished.
23 coast [('b', 0.23459146589812008)]
23 finished.
24 coast [('b', 0.2188092631699874)]
24 finished.
25 chair [('a', 0.36736120010345985)]
25 finished.
26 car [('B', 0.16185797102525612)]
26 finished.
27 False [('b', 0.753368149445123)]
27 finished.
28 desert [('a', 0.35343201840896654)]
28 finished.
29 desert [('a', 0.41572713114283033)]
29 finished.
30 woods [('b', 0.6208063266049066)]
30 finished.
31 coast [('b', 0.24567528232955738)]
31 finished.
32 desert [('b', 0.4705515105538325)]
32 finished.
33 flower [('b', 0.18913981291507298)]
33 finished.
34 False [('b', 0.5904081602129736)]
34 finished.
35 woods [('b', 0.3012846810966039)]
35 finished.
36 False [('a', 0.7722517211336708)]
36 finished.
37 False [('b', 0.6514379167574589)]
37 finished.
38 bathroom [('b', 0.43740240408851605)]
38 finished.
39 flower [('a', 0.311466766292142)]
39 finished.
40 flower [('B', 0.197050121887969)]
40 finished.
41 False [('b', 0.6815756807627622)]
41 finished.
42 coast [('a', 0.4003460888470727)]
42 finished.
43 flower [('b', 0.4615614700655897)]
43 finished.
44 False [('a', 0.7023237100775077)]
44 finished.
45 shoe [('a', 0.9297171613679893)]
45 finished.
46 chair [('a', 0.696176982130055)]
46 finished.
47 False [('a', 0.6817715690444857)]
47 finished.
48 desert [('b', 0.3936137487767155)]
48 finished.
49 bathroom [('a', 0.40891092273659524)]
49 finished.
50 flower [('b', 0.27923933178635707)]
50 finished.
51 woods [('a', 0.3456536701046389)]
51 finished.
52 chair [('a', 0.7068977600997641)]
52 finished.
53 woods [('b', 0.8326407353197283)]
53 finished.
54 desert [('b', 0.27951996814181257)]
54 finished.
55 desert [('b', 0.9122499675231666)]
55 finished.
56 bathroom [('a', 0.31191337984409984)]
56 finished.
57 woods [('a', 0.3375410214398471)]
57 finished.
58 False [('a', 0.8180435393960579)]
58 finished.
59 chair [('a', 0.3158859824634419)]
59 finished.
60 desert [('a', 0.25162934770105494)]
60 finished.
61 shoe [('b', 0.6090976030955062)]
61 finished.
62 False [('a', 0.6494162793133)]
62 finished.
63 chair [('b', 0.37276894830574747)]
63 finished.
64 shoe [('b', 0.7006548474873853)]
64 finished.
65 woods [('b', 0.6106966731540524)]
65 finished.
66 coast [('b', 0.4466754269001285)]
66 finished.
67 desert [('b', 0.4042518313940491)]
67 finished.
68 False [('a', 0.6298359552938564)]
68 finished.
69 woods [('a', 1.6945318724606295)]
69 finished.
70 False [('a', 0.6370166683918796)]
70 finished.
71 car [('a', 0.8047627244541218)]
71 finished.
72 coast [('b', 0.3362410622303287)]
72 finished.
73 chair [('a', 0.19146877209414015)]
73 finished.
74 woods [('b', 0.974377636830468)]
74 finished.
75 car [('b', 0.3174794808492152)]
75 finished.
76 coast [('b', 0.8973571797014301)]
76 finished.
77 car [('b', 0.28512360462582365)]
77 finished.
78 False [('a', 0.6534501703317801)]
78 finished.
79 False [('b', 0.6501467549255722)]
79 finished.
80 woods [('a', 0.3114943314099037)]
80 finished.
81 bathroom [('a', 0.6465943737252928)]
81 finished.
82 shoe [('a', 0.28474238491617143)]
82 finished.
83 car [('a', 0.41531775982412)]
83 finished.
84 flower [('b', 0.21339330408090973)]
84 finished.
85 flower [('a', 0.29591564135671433)]
85 finished.
86 chair [('a', 0.5676411326680864)]
86 finished.
87 shoe [('b', 0.49978959604459305)]
87 finished.
88 False [('b', 0.6761151482933201)]
88 finished.
89 False [('a', 0.7256792822040552)]
89 finished.
90 bathroom [('a', 0.23374427840553835)]
90 finished.
91 False [('b', 0.5538893647617442)]
91 finished.
92 ('coast', 'car') [('b', 0.38653655149209953)]
92 finished.
93 desert [('a', 0.8663742809244468)]
93 finished.
94 car [('a', 1.045911462602362)]
94 finished.
95 coast [('b', 0.4071798920094807)]
95 finished.
96 coast [('a', 1.171189349776796)]
96 finished.
97 False [('b', 0.6872901642086617)]
97 finished.
98 False [('a', 0.714222456949301)]
98 finished.
99 chair [('a', 0.4120336986493385)]
99 finished.
100 car [('a', 0.5754499785493863)]
100 finished.
101 shoe [('a', 0.6480617763609189)]
101 finished.
102 woods [('b', 1.133653870685066)]
102 finished.
103 shoe [('a', 0.9177019958610799)]
103 finished.
104 coast [('b', 0.4209076137494776)]
104 finished.
105 bathroom [('a', 0.35304552027264435)]
105 finished.
106 bathroom [('a', 0.24814939824454996)]
106 finished.
107 False [('a', 0.6320100806219671)]
107 finished.
108 bathroom [('a', 0.7060127438812742)]
108 finished.
109 False [('b', 0.6406238865820342)]
109 finished.
110 bathroom [('a', 0.3036060158815417)]
110 finished.
111 flower [('b', 0.5142762382529327)]
111 finished.
112 chair [('a', 0.33121365399983915)]
112 finished.
113 woods [('a', 1.0191791635861591)]
113 finished.
114 coast [('b', 1.114690242857705)]
114 finished.
115 False [('a', 0.7907886761327063)]
115 finished.
116 flower [('b', 0.6469744204505332)]
116 finished.
117 chair [('a', 0.5013276709496495)]
117 finished.
118 coast [('a', 0.4840006486601851)]
118 finished.
119 desert [('a', 0.1940100413271466)]
119 finished.
120 False [('b', 0.7016685986686753)]
120 finished.
121 car [('b', 0.22344782729669532)]
121 finished.
122 coast [('b', 0.4494753390436017)]
122 finished.
123 shoe [('b', 1.4954950093942898)]
123 finished.
124 False [('a', 0.6772907712279448)]
124 finished.
125 woods [('a', 0.7492442319257862)]
125 finished.
126 bathroom [('b', 0.40211054961037007)]
126 finished.
127 False [('a', 0.5090705364964379)]
127 finished.
128 car [('b', 0.023127426793507766)]
128 finished.
129 shoe [('b', 0.21782219736815023)]
129 finished.
130 desert [('b', 0.7362953713768547)]
130 finished.
131 chair [('b', 0.5802260750174355)]
131 finished.
132 ('woods', 'chair') [('a', 1.5374714048562055)]
132 finished.
133 woods [('a', 0.2848174558748724)]
133 finished.
134 shoe [('a', 0.8176444316850393)]
134 finished.
135 flower [('b', 0.2320947700468423)]
135 finished.
136 False [('a', 0.6502578951335636)]
136 finished.
137 coast [('b', 0.24363018521125923)]
137 finished.
138 flower [('b', 0.2172087268818359)]
138 finished.
139 chair [('a', 0.4209718346082809)]
139 finished.
140 bathroom [('a', 0.5872669098066581)]
140 finished.
141 ('shoe', 'bathroom') [('b', 0.8831324061093255)]
141 finished.
142 shoe [('a', 0.6499930940585728)]
142 finished.
143 flower [('b', 0.16684696403945054)]
143 finished.
144 shoe [('a', 0.3412628987885)]
144 finished.
145 woods [('a', 1.1364839871594086)]
145 finished.
146 ('car', 'bathroom') [('b', 1.1306917935421552)]
146 finished.
147 shoe [('a', 1.0913197151880922)]
147 finished.
148 shoe [('b', 0.3711220791606138)]
148 finished.
149 False [('b', 0.7091097141528735)]
149 finished.
150 car [('a', 0.6771951730547698)]
150 finished.
151 flower [('b', 0.3703760615135252)]
151 finished.
152 False [('a', 0.6412144838859604)]
152 finished.
153 bathroom [('a', 0.2778027199733515)]
153 finished.
154 desert [('b', 0.2928975542408807)]
154 finished.
155 shoe [('a', 0.7593480204577645)]
155 finished.
156 coast [('a', 0.36913475149003716)]
156 finished.
157 False [('a', 0.6590250687145272)]
157 finished.
158 False [('a', 0.5562634838142912)]
158 finished.
159 flower [('a', 0.7076115206941722)]
159 finished.
160 chair [('b', 0.43174950228785747)]
160 finished.
161 flower [('a', 0.5486302922454342)]
161 finished.
162 woods [('b', 0.5503469539216894)]
162 finished.
163 car [('b', 0.7138388912726441)]
163 finished.
164 False [('a', 0.5912779276422953)]
164 finished.
165 False [('b', 0.6128054046403122)]
165 finished.
166 flower [('b', 0.23906082712346688)]
166 finished.
167 False [('a', 0.5863760286702018)]
167 finished.
168 chair [('b', 0.690766301468102)]
168 finished.
169 coast [('a', 0.8249151712834646)]
169 finished.
170 False [('b', 0.7172326264244475)]
170 finished.
171 bathroom [('a', 0.30437168100615963)]
171 finished.
172 desert [('a', 1.0236837143238517)]
172 finished.
173 car [('B', 0.20328775607322314)]
173 finished.
174 coast [('b', 0.6416587514704588)]
174 finished.
175 shoe [('b', 0.22412493215051654)]
175 finished.
176 False [('a', 0.7122958311865659)]
176 finished.
177 coast [('a', 0.208278801808774)]
177 finished.
178 coast [('a', 0.21451790222363343)]
178 finished.
179 desert [('a', 1.3078243585905511)]
179 finished.
180 ('shoe', 'desert') [('b', 1.4747094446011033)]
180 finished.
181 False [('b', 0.7989890053304407)]
181 finished.
182 flower [('b', 0.20828935866211395)]
182 finished.
183 bathroom [('a', 0.764251385659918)]
183 finished.
184 False [('', 2.527974342154266)]
184 finished.
185 shoe [('a', 1.5437530259314372)]
185 finished.
186 False [('a', 0.6155419756623814)]
186 finished.
187 False [('b', 0.6065824327506562)]
187 finished.
188 woods [('b', 0.21979955467668333)]
188 finished.
189 car [('b', 0.2621266722717337)]
189 finished.
190 False [('a', 0.6501435292202586)]
190 finished.
191 bathroom [('a', 0.16326144604772708)]
191 finished.
192 bathroom [('b', 0.3192292793164597)]
192 finished.
193 car [('a', 1.6502678654951524)]
193 finished.
194 False [('a', 0.5887747803808452)]
194 finished.
195 desert [('b', 0.2822017021753709)]
195 finished.
196 False [('a', 0.9592959986302958)]
196 finished.
197 woods [('a', 0.36274462942037644)]
197 finished.
198 woods [('a', 0.2755819685426104)]
198 finished.
199 False [('a', 0.7049177635772139)]
199 finished.
200 False [('a', 0.862835095790615)]
200 finished.
201 flower [('b', 0.8977046761292513)]
201 finished.
202 desert [('b', 0.18756185656366142)]
202 finished.
203 woods [('a', 0.4184634089197061)]
203 finished.
204 chair [('b', 0.8067166220889703)]
204 finished.
205 desert [('a', 0.4726728516143339)]
205 finished.
206 woods [('b', 0.3456610012526653)]
206 finished.
207 coast [('b', 0.22088984304627957)]
207 finished.
208 bathroom [('a', 0.06553254780556017)]
208 finished.
209 False [('b', 0.6243285034752262)]
209 finished.
210 car [('b', 0.35554984051850624)]
210 finished.
211 shoe [('a', 0.8698850212031175)]
211 finished.
212 desert [('b', 0.48484431720225984)]
212 finished.
213 car [('a', 0.47801520597477065)]
213 finished.
214 chair [('b', 0.37932064888263994)]
214 finished.
215 chair [('a', 0.7124920127143923)]
215 finished.
216 False [('a', 0.7216453911860299)]
216 finished.
217 chair [('a', 0.6548307721723177)]
217 finished.
218 car [('b', 0.28273628950591956)]
218 finished.
219 flower [('b', 0.3117356728098457)]
219 finished.
220 desert [('a', 0.5760065593249237)]
220 finished.
221 car [('a', 0.4983609018718198)]
221 finished.
222 car [('a', 0.2945347462700738)]
222 finished.
223 desert [('b', 0.5016191574040931)]
223 finished.
224 bathroom [('a', 0.4226327795577163)]
224 finished.
225 bathroom [('a', 0.708325281289035)]
225 finished.
226 False [('b', 0.6616414089057798)]
226 finished.
227 woods [('b', 0.04953158361331589)]
227 finished.
228 chair [('a', 1.302906037845787)]
228 finished.
229 flower [('b', 0.17862548008088197)]
229 finished.
230 car [('b', 0.37965289652220235)]
230 finished.
231 car [('B', 0.12259820586314163)]
231 finished.
232 False [('', 2.52378033885725)]
232 finished.
233 False [('b', 0.5865009514363919)]
233 finished.
234 coast [('b', 1.3435962566572925)]
234 finished.
235 shoe [('b', 0.3308212909450958)]
235 finished.
236 flower [('a', 0.7318702902875884)]
236 finished.
237 desert [('a', 0.6951975407228019)]
237 finished.
238 desert [('b', 0.4632320921309656)]
238 finished.
239 shoe [('b', 0.6035470441247526)]
239 finished.
240 woods [('b', 0.40509432695216674)]
240 finished.
241 car [('b', 0.20552434278488363)]
241 finished.
242 chair [('b', 0.4935842189115647)]
242 finished.
243 flower [('a', 0.20660876623514923)]
243 finished.
244 shoe [('b', 1.0181472311578545)]
244 finished.
245 desert [('a', 0.233792663983877)]
245 finished.
246 shoe [('b', 1.3972670065777493)]
246 finished.
247 bathroom [('a', 0.24647496398210933)]
247 finished.
248 chair [('a', 0.5309199975245065)]
248 finished.
249 False [('a', 0.5520383964485518)]
249 finished.
250 flower [('a', 0.3397377267046977)]
250 finished.
251 flower [('b', 1.352215927535326)]
251 finished.
252 bathroom [('a', 0.3173794839876791)]
252 finished.
253 desert [('a', 0.15316557515689055)]
253 finished.
254 coast [('b', 0.2759678801867267)]
254 finished.
255 chair [('a', 0.2639477294987955)]
255 finished.
256 chair [('a', 1.3918539799487917)]
256 finished.
257 coast [('a', 0.1973967385774813)]
257 finished.
258 flower [('b', 0.2560166000657773)]
258 finished.
259 flower [('a', 0.2797865286929664)]
259 finished.
260 coast [('b', 0.3157405324818683)]
260 finished.
261 desert [('a', 0.5886208262672881)]
261 finished.
262 ('desert', 'flower') [('a', 0.7331655575617333)]
262 finished.
263 chair [('a', 0.5034912394239655)]
263 finished.
264 flower [('b', 0.26835550907981087)]
264 finished.
265 flower [('a', 0.4129931993338687)]
265 finished.
266 shoe [('b', 0.30528719480025757)]
266 finished.
267 False [('b', 0.6625102965972474)]
267 finished.
268 chair [('b', 0.3745867798279505)]
268 finished.
269 chair [('b', 0.35715741470903595)]
269 finished.
270 coast [('a', 0.27323600109866675)]
270 finished.
271 flower [('b', 0.5279989748123626)]
271 finished.
272 coast [('b', 0.04906092389501282)]
272 finished.
273 shoe [('b', 0.20160364469484193)]
273 finished.
274 shoe [('a', 0.24021650933536876)]
274 finished.
275 woods [('b', 0.5104804629299906)]
275 finished.
276 ('bathroom', 'chair') [('b', 0.852873824780545)]
276 finished.
277 shoe [('a', 0.6472879003504204)]
277 finished.
278 bathroom [('a', 0.11819775743060745)]
278 finished.
279 bathroom [('a', 0.641296885992233)]
279 finished.
280 bathroom [('b', 0.42985161462638644)]
280 finished.
281 chair [('a', 0.3419268075749642)]
281 finished.
282 bathroom [('b', 0.23643539630938903)]
282 finished.
283 woods [('a', 0.40150939545310393)]
283 finished.
284 desert [('b', 0.6091820579240448)]
284 finished.
285 False [('a', 0.6700525819269387)]
285 finished.
286 False [('a', 0.7045893281356257)]
286 finished.
287 shoe [('a', 0.8672144305146503)]
287 finished.
288 False [('a', 0.6573937416042099)]
288 finished.
289 chair [('b', 0.8348104619590231)]
289 finished.
290 woods [('a', 0.4034005384582997)]
290 finished.
291 coast [('a', 0.8878392965380044)]
291 finished.
292 car [('a', 0.9283570867428352)]
292 finished.
293 False [('b', 0.6721463578714975)]
293 finished.
294 False [('a', 0.683560369220686)]
294 finished.
295 woods [('b', 1.268982762124324)]
295 finished.
296 coast [('b', 0.35359359691665304)]
296 finished.
297 car [('a', 0.3204178050718838)]
297 finished.
298 shoe [('b', 0.8818952015290051)]
298 finished.
299 False [('', 2.5198048037791523)]
299 finished.
300 chair [('a', 0.5857455499199204)]
300 finished.
301 car [('a', 0.3576535868232895)]
301 finished.
302 woods [('b', 0.4302580534867957)]
302 finished.
303 False [('b', 0.6325953994992233)]
303 finished.
304 False [('a', 0.6426912703909693)]
304 finished.
305 desert [('a', 0.13612974607531214)]
305 finished.
306 car [('b', 0.931864014824896)]
306 finished.
307 coast [('a', 0.25447060752048856)]
307 finished.
308 shoe [('b', 0.3096392576535436)]
308 finished.
309 False [('b', 1.4347497014023247)]
309 finished.
310 flower [('b', 0.37518734749392024)]
310 finished.
311 False [('a', 0.6429745459599872)]
311 finished.
312 flower [('a', 0.40895989480668504)]
312 finished.
313 car [('a', 0.46108523867405893)]
313 finished.
314 woods [('b', 0.26235833655664464)]
314 finished.
315 woods [('b', 0.16201808330333733)]
315 finished.
316 coast [('b', 0.06016086885210825)]
316 finished.
317 desert [('a', 0.3651202147029835)]
317 finished.
318 woods [('A', 0.16641706550581148)]
318 finished.
319 ('shoe', 'coast') [('a', 1.0337138981267344)]
319 finished.
320 woods [('a', 0.6613804200278537)]
320 finished.
321 shoe [('a', 0.22566916521918756)]
321 finished.
322 bathroom [('a', 0.26007131154528906)]
322 finished.
323 bathroom [('a', 0.3380371935536459)]
323 finished.
324 bathroom [('a', 0.9085884988362523)]
324 finished.
325 False [('a', 0.6686960262541106)]
325 finished.
326 shoe [('a', 0.929356762118914)]
326 finished.
327 False [('b', 0.7821836675502709)]
327 finished.
328 coast [('a', 0.9256378172303812)]
328 finished.
329 False [('a', 0.7059007239358834)]
329 finished.
330 False [('a', 0.6027934020839893)]
330 finished.
331 shoe [('a', 0.3181439761274305)]
331 finished.
332 desert [('a', 0.40932967792559793)]
332 finished.
333 woods [('b', 0.231437019425357)]
333 finished.
334 car [('B', 0.1370766371846912)]
334 finished.
335 car [('a', 0.1773205356894323)]
335 finished.
336 chair [('a', 0.6904798001933159)]
336 finished.
337 desert [('b', 0.5567018864803686)]
337 finished.
338 woods [('a', 0.3249936145693937)]
338 finished.
339 desert [('a', 0.30636223438159504)]
339 finished.
340 False [('a', 0.7145215677983288)]
340 finished.
341 flower [('a', 0.8080423869405422)]
341 finished.
342 woods [('a', 0.19285846455841238)]
342 finished.
343 False [('a', 0.7109454336768977)]
343 finished.
344 shoe [('b', 0.7865333844365523)]
344 finished.
345 bathroom [('b', 0.2408308595595372)]
345 finished.
346 desert [('a', 0.3854550605010445)]
346 finished.
347 chair [('b', 0.16525815758814133)]
347 finished.
348 coast [('b', 0.22066257744972972)]
348 finished.
349 coast [('b', 0.2821852804036098)]
349 finished.
350 car [('a', 0.30902432093807874)]
350 finished.
351 coast [('a', 0.1809869895578231)]
351 finished.
352 car [('b', 0.09572837450559746)]
352 finished.
353 chair [('a', 0.2373943105003491)]
353 finished.
354 car [('B', 0.02922049073549715)]
354 finished.
355 car [('a', 0.2867270733731857)]
355 finished.
356 coast [('b', 0.244228700154963)]
356 finished.
357 desert [('b', 0.6803622291026841)]
357 finished.
358 shoe [('a', 0.3092985058829072)]
358 finished.
359 shoe [('a', 0.28733907762944)]
359 finished.
360 chair [('b', 0.4682653652798763)]
360 finished.
361 shoe [('a', 0.442187004689913)]
361 finished.
362 chair [('a', 1.1985664965932301)]
362 finished.
363 chair [('b', 0.24968688665740046)]
363 finished.
364 False [('b', 0.67074464232428)]
364 finished.
365 desert [('a', 1.030670298615405)]
365 finished.
366 shoe [('b', 0.244121665390594)]
366 finished.
367 flower [('b', 0.017281275925597583)]
367 finished.
368 False [('b', 0.5719451031891367)]
368 finished.
369 bathroom [('a', 0.4958601005773744)]
369 finished.
370 car [('b', 0.83554240380181)]
370 finished.
371 car [('b', 0.06390737885885756)]
371 finished.
372 False [('a', 0.6734275493408859)]
372 finished.
373 woods [('a', 0.4551226691710326)]
373 finished.
374 False [('a', 0.5882184928505012)]
374 finished.
375 flower [('b', 0.333303324499866)]
375 finished.
376 flower [('a', 0.42850092387106997)]
376 finished.
377 coast [('b', 0.09649198690840421)]
377 finished.
378 car [('A', 0.1578748115534836)]
378 finished.
379 False [('b', 0.7024518585485566)]
379 finished.
380 car [('b', 0.04831373326396715)]
380 finished.
381 coast [('b', 0.48233119957876625)]
381 finished.
382 False [('a', 0.536959397461942)]
382 finished.
383 desert [('a', 0.3840052526211366)]
383 finished.
384 flower [('b', 0.2796762682219196)]
384 finished.
385 desert [('b', 0.27793116169141285)]
385 finished.
386 False [('a', 0.7687137089833413)]
386 finished.
387 desert [('b', 0.35663485046097776)]
387 finished.
388 desert [('b', 0.7045107382264177)]
388 finished.
389 False [('b', 1.1900723349735927)]
389 finished.
390 False [('a', 0.7229054689487384)]
390 finished.
391 False [('b', 0.5227387293125503)]
391 finished.
392 woods [('b', 0.11168036662729719)]
392 finished.
393 shoe [('b', 1.0824205809376508)]
393 finished.
394 bathroom [('b', 0.5544588483589905)]
394 finished.
395 shoe [('a', 0.5650291911661043)]
395 finished.
396 woods [('a', 0.34274173801532015)]
396 finished.
397 desert [('a', 0.20438537559130054)]
397 finished.
398 ('woods', 'shoe') [('b', 1.3715540304156093)]
398 finished.
399 False [('b', 0.6459457137270874)]
399 finished.
400 shoe [('a', 0.7641874580476724)]
400 finished.
401 bathroom [('b', 0.6211793354286783)]
401 finished.
402 False [('b', 0.9119100954894748)]
402 finished.
403 woods [('a', 0.24774295938550495)]
403 finished.
404 False [('a', 0.6737032005148649)]
404 finished.
405 False [('b', 0.7049602842371314)]
405 finished.
406 coast [('a', 0.4452303109546847)]
406 finished.
407 bathroom [('a', 0.7983400520861323)]
407 finished.
408 False [('b', 0.9654661862514331)]
408 finished.
409 False [('b', 0.7803860699968936)]
409 finished.
410 ('bathroom', 'car') [('b', 1.2188136618588032)]
410 finished.
411 flower [('b', 0.4508791073130851)]
411 finished.
412 woods [('a', 0.28899503739921784)]
412 finished.
413 coast [('b', 0.8116765837548883)]
413 finished.
414 False [('a', 1.0566454366389735)]
414 finished.
415 False [('b', 0.8033953186804865)]
415 finished.
416 bathroom [('b', 0.4729951288918528)]
416 finished.
417 flower [('b', 1.027734027114093)]
417 finished.
418 bathroom [('a', 0.4469748309948045)]
418 finished.
419 desert [('a', 1.1000508195193106)]
419 finished.
420 False [('a', 1.1430263038664634)]
420 finished.
421 shoe [('A', 0.2065603806568106)]
421 finished.
422 coast [('a', 0.7298504123182283)]
422 finished.
423 False [('a', 0.5772376057411748)]
423 finished.
424 bathroom [('a', 0.35366954761229863)]
424 finished.
425 False [('', 2.5224956284355358)]
425 finished.
426 chair [('b', 0.40128095687305176)]
426 finished.
427 False [('b', 0.5931356406117629)]
427 finished.
428 desert [('b', 0.5002021930686169)]
428 finished.
429 flower [('a', 0.3996161997265517)]
429 finished.
430 shoe [('a', 0.7058570302924636)]
430 finished.
431 ('chair', 'desert') [('b', 0.6933257519485778)]
431 finished.
432 car [('a', 0.2511941707398364)]
432 finished.
433 woods [('a', 0.16340572304579837)]
433 finished.
434 coast [('a', 0.7265601929793775)]
434 finished.
435 bathroom [('a', 0.4782116807473358)]
435 finished.
436 flower [('b', 0.275969053170229)]
436 finished.
437 car [('b', 0.7238508938280575)]
437 finished.
438 bathroom [('a', 0.19028317879747192)]
438 finished.
439 False [('b', 1.011443629187852)]
439 finished.
440 False [('b', 0.45072749916653265)]
440 finished.
441 flower [('a', 0.836957608661578)]
441 finished.
442 coast [('a', 0.1634077757680643)]
442 finished.
443 car [('b', 0.2932714428025065)]
443 finished.
444 shoe [('a', 1.0665662397104825)]
444 finished.
445 coast [('a', 0.3662298573035514)]
445 finished.
446 woods [('b', 0.27638692862092284)]
446 finished.
447 chair [('a', 0.32204444024773693)]
447 finished.
448 car [('b', 0.3332939406300284)]
448 finished.
449 ('chair', 'bathroom') [('a', 1.466034643488456)]
449 finished.
450 woods [('a', 1.1373050757638339)]
450 finished.
451 bathroom [('a', 0.650595421199796)]
451 finished.
452 coast [('b', 0.27028448081000533)]
452 finished.
453 flower [('b', 0.29694816027767956)]
453 finished.
454 car [('a', 0.3973966212788582)]
454 finished.
455 shoe [('a', 2.038953029919867)]
455 finished.
456 bathroom [('b', 0.3161581146869139)]
456 finished.
457 bathroom [('a', 1.4285736488618568)]
457 finished.
458 woods [('b', 0.2047830170722591)]
458 finished.
459 bathroom [('a', 0.3488837740287636)]
459 finished.
460 desert [('a', 0.30357375882977067)]
460 finished.
461 shoe [('B', 0.27115600771503523)]
461 finished.
462 coast [('b', 0.3067170619569879)]
462 finished.
463 coast [('a', 0.06388538541432354)]
463 finished.
464 woods [('a', 0.262398511248648)]
464 finished.
465 woods [('a', 0.28990116732347815)]
465 finished.
466 chair [('a', 0.4877676858823179)]
466 finished.
467 woods [('b', 0.36861834540650307)]
467 finished.
468 flower [('b', 0.5426979270741867)]
468 finished.
469 flower [('a', 0.528810972793508)]
469 finished.
470 bathroom [('a', 0.2125569666877709)]
470 finished.
471 chair [('b', 0.6296169005845513)]
471 finished.
472 chair [('a', 0.24945962106176012)]
472 finished.
473 coast [('a', 0.31549567212914553)]
473 finished.
474 flower [('b', 0.34411207624907547)]
474 finished.
475 chair [('a', 0.5656769714269103)]
475 finished.
476 False [('a', 0.45977736182521767)]
476 finished.
477 False [('b', 0.6086427786576678)]
477 finished.
478 bathroom [('a', 0.20282618697910948)]
478 finished.
479 desert [('b', 0.9308262174763513)]
479 finished.
480 chair [('b', 0.30117471387256955)]
480 finished.
481 chair [('a', 0.4742115130111415)]
481 finished.
482 desert [('a', 0.665592604572339)]
482 finished.
483 flower [('a', 0.7205970369841452)]
483 finished.
484 False [('b', 0.6841383569490063)]
484 finished.
485 shoe [('a', 0.3065056316409027)]
485 finished.
486 False [('a', 0.5851074467755097)]
486 finished.
487 shoe [('a', 0.4984019563016773)]
487 finished.
488 flower [('b', 0.3000955688485192)]
488 finished.
489 car [('b', 0.2350351469913221)]
489 finished.
490 woods [('a', 0.23135432407252665)]
490 finished.
491 chair [('a', 0.18968583683817997)]
491 finished.
492 chair [('b', 1.0585213308559105)]
492 finished.
|
python
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: server_admin.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='server_admin.proto',
package='protos',
syntax='proto3',
serialized_pb=_b('\n\x12server_admin.proto\x12\x06protos\x1a\x1bgoogle/protobuf/empty.proto\"\x9a\x01\n\x0cServerStatus\x12/\n\x06status\x18\x01 \x01(\x0e\x32\x1f.protos.ServerStatus.StatusCode\"Y\n\nStatusCode\x12\r\n\tUNDEFINED\x10\x00\x12\x0b\n\x07STARTED\x10\x01\x12\x0b\n\x07STOPPED\x10\x02\x12\n\n\x06PAUSED\x10\x03\x12\t\n\x05\x45RROR\x10\x04\x12\x0b\n\x07UNKNOWN\x10\x05\x32\xc1\x01\n\x05\x41\x64min\x12;\n\tGetStatus\x12\x16.google.protobuf.Empty\x1a\x14.protos.ServerStatus\"\x00\x12=\n\x0bStartServer\x12\x16.google.protobuf.Empty\x1a\x14.protos.ServerStatus\"\x00\x12<\n\nStopServer\x12\x16.google.protobuf.Empty\x1a\x14.protos.ServerStatus\"\x00\x62\x06proto3')
,
dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Generated EnumDescriptor for protos.ServerStatus.StatusCode; the values
# mirror server_admin.proto and must not be edited by hand.
_SERVERSTATUS_STATUSCODE = _descriptor.EnumDescriptor(
  name='StatusCode',
  full_name='protos.ServerStatus.StatusCode',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='UNDEFINED', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='STARTED', index=1, number=1,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='STOPPED', index=2, number=2,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='PAUSED', index=3, number=3,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ERROR', index=4, number=4,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='UNKNOWN', index=5, number=5,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  # Byte offsets into the serialized file descriptor above.
  serialized_start=125,
  serialized_end=214,
)
_sym_db.RegisterEnumDescriptor(_SERVERSTATUS_STATUSCODE)
# Generated message Descriptor for protos.ServerStatus: a single enum-typed
# `status` field (proto field type 14 = TYPE_ENUM).
_SERVERSTATUS = _descriptor.Descriptor(
  name='ServerStatus',
  full_name='protos.ServerStatus',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='status', full_name='protos.ServerStatus.status', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _SERVERSTATUS_STATUSCODE,
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=60,
  serialized_end=214,
)
# Cross-link the field to its enum type, then register the descriptor and
# build the concrete ServerStatus message class via the reflection machinery.
_SERVERSTATUS.fields_by_name['status'].enum_type = _SERVERSTATUS_STATUSCODE
_SERVERSTATUS_STATUSCODE.containing_type = _SERVERSTATUS
DESCRIPTOR.message_types_by_name['ServerStatus'] = _SERVERSTATUS
ServerStatus = _reflection.GeneratedProtocolMessageType('ServerStatus', (_message.Message,), dict(
  DESCRIPTOR = _SERVERSTATUS,
  __module__ = 'server_admin_pb2'
  # @@protoc_insertion_point(class_scope:protos.ServerStatus)
  ))
_sym_db.RegisterMessage(ServerStatus)
import abc
import six
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
# Generated gRPC beta-API servicer base class: subclass and override the
# methods; each base implementation only signals UNIMPLEMENTED.
class BetaAdminServicer(object):
  """Interface exported by the server.
  """
  def GetStatus(self, request, context):
    """Return the serve status.
    """
    context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
  def StartServer(self, request, context):
    """Placeholder for the StartServer RPC; reports UNIMPLEMENTED."""
    context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
  def StopServer(self, request, context):
    """Placeholder for the StopServer RPC; reports UNIMPLEMENTED."""
    context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
# Generated client-side stub interface. Concrete stubs are produced at
# runtime by beta_create_Admin_stub below; these methods only document the
# call shape and always raise.
class BetaAdminStub(object):
  """Interface exported by the server.
  """
  def GetStatus(self, request, timeout):
    """Return the serve status.
    """
    raise NotImplementedError()
  GetStatus.future = None
  def StartServer(self, request, timeout):
    """Placeholder for the StartServer RPC call signature."""
    raise NotImplementedError()
  StartServer.future = None
  def StopServer(self, request, timeout):
    """Placeholder for the StopServer RPC call signature."""
    raise NotImplementedError()
  StopServer.future = None
# Generated factory for a gRPC beta server hosting the protos.Admin service.
def beta_create_Admin_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
  # Duplicate imports below are emitted once per RPC by the code generator.
  import google.protobuf.empty_pb2
  import server_admin_pb2
  import google.protobuf.empty_pb2
  import server_admin_pb2
  import google.protobuf.empty_pb2
  import server_admin_pb2
  # (service, method) -> deserializer for incoming request bytes.
  request_deserializers = {
    ('protos.Admin', 'GetStatus'): google.protobuf.empty_pb2.Empty.FromString,
    ('protos.Admin', 'StartServer'): google.protobuf.empty_pb2.Empty.FromString,
    ('protos.Admin', 'StopServer'): google.protobuf.empty_pb2.Empty.FromString,
  }
  # (service, method) -> serializer for outgoing response messages.
  response_serializers = {
    ('protos.Admin', 'GetStatus'): server_admin_pb2.ServerStatus.SerializeToString,
    ('protos.Admin', 'StartServer'): server_admin_pb2.ServerStatus.SerializeToString,
    ('protos.Admin', 'StopServer'): server_admin_pb2.ServerStatus.SerializeToString,
  }
  # All three RPCs are unary-unary wrappers around the servicer's methods.
  method_implementations = {
    ('protos.Admin', 'GetStatus'): face_utilities.unary_unary_inline(servicer.GetStatus),
    ('protos.Admin', 'StartServer'): face_utilities.unary_unary_inline(servicer.StartServer),
    ('protos.Admin', 'StopServer'): face_utilities.unary_unary_inline(servicer.StopServer),
  }
  server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
  return beta_implementations.server(method_implementations, options=server_options)
# Generated factory for a dynamic gRPC beta client stub for protos.Admin.
def beta_create_Admin_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
  # Duplicate imports below are emitted once per RPC by the code generator.
  import google.protobuf.empty_pb2
  import server_admin_pb2
  import google.protobuf.empty_pb2
  import server_admin_pb2
  import google.protobuf.empty_pb2
  import server_admin_pb2
  # (service, method) -> serializer for outgoing request messages.
  request_serializers = {
    ('protos.Admin', 'GetStatus'): google.protobuf.empty_pb2.Empty.SerializeToString,
    ('protos.Admin', 'StartServer'): google.protobuf.empty_pb2.Empty.SerializeToString,
    ('protos.Admin', 'StopServer'): google.protobuf.empty_pb2.Empty.SerializeToString,
  }
  # (service, method) -> deserializer for incoming response bytes.
  response_deserializers = {
    ('protos.Admin', 'GetStatus'): server_admin_pb2.ServerStatus.FromString,
    ('protos.Admin', 'StartServer'): server_admin_pb2.ServerStatus.FromString,
    ('protos.Admin', 'StopServer'): server_admin_pb2.ServerStatus.FromString,
  }
  cardinalities = {
    'GetStatus': cardinality.Cardinality.UNARY_UNARY,
    'StartServer': cardinality.Cardinality.UNARY_UNARY,
    'StopServer': cardinality.Cardinality.UNARY_UNARY,
  }
  stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
  return beta_implementations.dynamic_stub(channel, 'protos.Admin', cardinalities, options=stub_options)
# @@protoc_insertion_point(module_scope)
|
python
|
import tensorflow as tf
from tensorflow.keras.applications import InceptionV3
__all__ = ["inception_score"]
def inception_score(images):
    r"""
    Compute the Inception Score: exp(E_x[KL(p(y|x) || p(y))]), where
    p(y|x) is InceptionV3's class distribution for one image and p(y)
    is the marginal over the batch.

    Args:
        images: a numpy array/tensor of images. Shape: NxHxWxC
    Return:
        inception score (float)
    """
    img_shape = images.shape
    # InceptionV3 expects 299x299 inputs; resize if *either* spatial
    # dimension differs (the original only checked the height).
    if img_shape[1] != 299 or img_shape[2] != 299:
        images = tf.image.resize(images, size=(299, 299))
    assert images.shape[1:] == (299, 299, 3), "images must be of shape 299x299x3"
    inception = InceptionV3(weights="imagenet")
    predictions = inception(images)
    # Marginal class distribution p(y), averaged over the batch.
    mean_pred = tf.reduce_mean(predictions, axis=0)
    kl_div = tf.keras.losses.KLDivergence()
    # KLDivergence(y_true, y_pred) = sum(y_true * log(y_true / y_pred)),
    # so the per-image conditional p(y|x) must be the first argument to get
    # KL(p(y|x) || p(y)); the original had the arguments reversed.
    in_scores = [kl_div(predictions[i, :], mean_pred)
                 for i in range(predictions.shape[0])]
    return tf.math.exp(tf.reduce_mean(in_scores)).numpy()
|
python
|
from django.dispatch import receiver
from django.db.models.signals import post_save, post_delete
from django.core.mail import send_mail
from .models import Profile
from django.conf import settings
# Subject and body of the welcome email sent to newly registered users.
SUBJECT = 'WELCOME TO DEVCONNECT'
# Typos fixed: "Congratulation" -> "Congratulations", "definatly" -> "definitely".
MESSAGE = """ Congratulations, I have to say thank you for creating a new account
with our team. We'll definitely try so hard to make you happy with our service """
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_profile(sender, instance, created, **kwargs):
    """Create a Profile for a newly created user and send a welcome email.

    Fires on every save of the user model but only acts on creation.
    """
    if created:
        profile = Profile.objects.create(user=instance,
                                         username=instance.username,
                                         email=instance.email,
                                         name=instance.first_name)
        # send_mail(subject, message, from_email, recipient_list): from_email
        # must be a single address string — the original wrapped it in a list,
        # which Django rejects when building the message.
        send_mail(SUBJECT,
                  MESSAGE,
                  settings.EMAIL_HOST_USER,
                  [profile.email],
                  fail_silently=False,
                  )
@receiver(post_save, sender=Profile)
def update_user(sender, instance, created, **kwargs):
    """Propagate edits on an existing Profile back to its owning user."""
    # A freshly created profile was just populated from the user; only
    # subsequent saves need to flow back.
    if created:
        return
    user = instance.user
    user.first_name = instance.name
    user.username = instance.username
    user.email = instance.email
    user.save()
@receiver(post_delete, sender=Profile)
def delete_profile(sender, instance, **kwargs):
    """Remove the owning user record when its profile is deleted."""
    owner = instance.user
    owner.delete()
|
python
|
"""
Setup file for installation of the dataduct code
"""
from setuptools import find_packages
from setuptools import setup
from dataduct import __version__ as version
# Packaging metadata and dependencies for the dataduct distribution.
setup(
    name='dataduct',
    version=version,
    author='Coursera Inc.',
    # Ship every package except the test trees.
    packages=find_packages(
        exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
    namespace_packages=['dataduct'],
    include_package_data=True,
    url='https://github.com/coursera/dataduct',
    # NOTE(review): README.rst is read at setup time; building from a tree
    # without it will raise — confirm it is always present in the sdist.
    long_description=open('README.rst').read(),
    author_email='[email protected]',
    license='Apache License 2.0',
    description='DataPipeline for Humans',
    install_requires=[
        'boto>=2.38',
        'MySQL-python>=1.2.3',
        'pandas>=0.14',
        'psycopg2>=2.6',
        'pyparsing>=1.5.6',
        'pytimeparse>=1.1.4',
        'PyYAML>=3.11',
        'testfixtures>=4.1.2'
    ],
    # Command-line entry point installed onto the user's PATH.
    scripts=['bin/dataduct'],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        'Operating System :: MacOS',
        'Operating System :: MacOS :: MacOS 9',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Unix',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Unix Shell',
        'Topic :: Database',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Information Analysis',
        'Topic :: Scientific/Engineering :: Visualization',
        'Topic :: Utilities',
    ],
)
|
python
|
import re
f = open("regex.txt", "r")
content = f.readlines()
# s = 'A message from [email protected] to [email protected]'
for i in range(len(content)):
if re.findall('[\w\.]+@[\w\.]+', content[i]):
print(content[i], end='')
|
python
|
"""
Command-line interface implementing synthetic MDS Provider data generation:
- custom geographic area, device inventory, time periods
- generates complete "days" of service
- saves data as JSON files to container volume
All fully customizable through extensive parameterization and configuration options.
"""
import argparse
from datetime import datetime, timedelta
import json
import math
import mds
from mds.fake import geometry
from mds.fake.data import random_string
from mds.fake.provider import ProviderDataGenerator
from mds.json import parse_boundary, CustomJsonEncoder
from mds.schema import ProviderSchema
import os
import random
import time
import uuid
def setup_cli():
    """
    Create the cli argument interface, and parses incoming args.
    Returns a tuple:
    - the argument parser
    - the parsed args
    """
    # The TRIPS schema supplies the valid enumerations quoted in the
    # --vehicle_types / --propulsion_types help text below.
    schema = ProviderSchema(mds.TRIPS)
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--boundary",
        type=str,
        help="Path to a data file with geographic bounds for the generated data. Overrides the MDS_BOUNDARY environment variable."
    )
    parser.add_argument(
        "--close",
        type=int,
        help="The hour of the day (24-hr format) that provider stops operations. Overrides --start and --end."
    )
    parser.add_argument(
        "--date_format",
        type=str,
        help="Format for datetime input (to this CLI) and output (to stdout and files). Options:\
        - 'unix' for Unix timestamps (default)\
        - 'iso8601' for ISO 8601 format\
        - '<python format string>' for custom formats,\
        see https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior"
    )
    parser.add_argument(
        "--devices",
        type=int,
        help="The number of devices to model in the generated data"
    )
    parser.add_argument(
        "--end",
        type=str,
        help="The latest event in the generated data, in --date_format format"
    )
    parser.add_argument(
        "--inactivity",
        type=float,
        help="Describes the portion of the fleet that remains inactive."
    )
    parser.add_argument(
        "--open",
        type=int,
        help="The hour of the day (24-hr format) that provider begins operations. Overrides --start and --end."
    )
    parser.add_argument(
        "--output",
        type=str,
        help="Path to a directory to write the resulting data file(s)"
    )
    parser.add_argument(
        "--propulsion_types",
        type=str,
        nargs="+",
        help="A list of propulsion_types to use for the generated data, e.g. '{}'".format(", ".join(schema.propulsion_types()))
    )
    parser.add_argument(
        "--provider_name",
        type=str,
        help="The name of the fake mobility as a service provider"
    )
    parser.add_argument(
        "--provider_id",
        type=uuid.UUID,
        help="The ID of the fake mobility as a service provider"
    )
    parser.add_argument(
        "--start",
        type=str,
        help="The earliest event in the generated data, in --date_format format"
    )
    parser.add_argument(
        "--speed_mph",
        type=float,
        help="The average speed of devices in miles per hour. Cannot be used with --speed_ms"
    )
    parser.add_argument(
        "--speed_ms",
        type=float,
        help="The average speed of devices in meters per second. Always takes precedence"
    )
    parser.add_argument(
        "--vehicle_types",
        type=str,
        nargs="+",
        help="A list of vehicle_types to use for the generated data, e.g. '{}'".format(", ".join(schema.vehicle_types()))
    )
    # Return the parser too so callers can print usage/help on bad input.
    return parser, parser.parse_args()
if __name__ == "__main__":
    T0 = time.time()
    parser, args = setup_cli()
    print(f"Parsed args: {args}")
    # The boundary file comes from --boundary or the MDS_BOUNDARY env var.
    # Only a missing env var is expected here, so catch KeyError specifically:
    # the original bare `except:` also swallowed KeyboardInterrupt/SystemExit.
    try:
        boundary_file = args.boundary or os.environ["MDS_BOUNDARY"]
    except KeyError:
        print("A boundary file is required")
        raise SystemExit(1)
    # collect the parameters for data generation
    provider_name = args.provider_name or f"Provider {random_string(3)}"
    provider_id = args.provider_id or uuid.uuid4()
    N = args.devices or random.randint(100, 500)
    # Parse the start/end datetimes according to the requested format,
    # defaulting both ends of the range to "today".
    date_format = "unix" if args.date_format is None else args.date_format
    encoder = CustomJsonEncoder(date_format=date_format)
    date_start = datetime.today()
    date_end = date_start
    if date_format == "unix":
        date_start = datetime.fromtimestamp(
            int(args.start)) if args.start else date_start
        date_end = datetime.fromtimestamp(
            int(args.end)) if args.end else date_end
    elif date_format == "iso8601":
        date_start = datetime.fromisoformat(
            args.start) if args.start else date_start
        date_end = datetime.fromisoformat(args.end) if args.end else date_end
    else:
        date_start = datetime.strptime(
            args.start, date_format) if args.start else date_start
        date_end = datetime.strptime(
            args.end, date_format) if args.end else date_end
    hour_open = 7 if args.open is None else args.open
    hour_closed = 19 if args.close is None else args.close
    inactivity = random.uniform(
        0, 0.05) if args.inactivity is None else args.inactivity
    # convert speed to meters/second (--speed_ms takes precedence)
    ONE_MPH_METERSSEC = 0.44704
    if args.speed_ms is not None:
        speed = args.speed_ms
    elif args.speed_mph is not None:
        speed = args.speed_mph * ONE_MPH_METERSSEC
    else:
        speed = random.uniform(8 * ONE_MPH_METERSSEC, 15 * ONE_MPH_METERSSEC)
    # setup a data directory
    outputdir = "data" if args.output is None else args.output
    os.makedirs(outputdir, exist_ok=True)
    print(f"Parsing boundary file: {boundary_file}")
    t1 = time.time()
    boundary = parse_boundary(boundary_file, downloads=outputdir)
    print(f"Valid boundary: {boundary.is_valid} ({time.time() - t1} s)")
    gen = ProviderDataGenerator(
        boundary=boundary,
        speed=speed,
        vehicle_types=args.vehicle_types,
        propulsion_types=args.propulsion_types)
    print(f"Generating {N} devices for '{provider_name}'")
    t1 = time.time()
    devices = gen.devices(N, provider_name, provider_id)
    print(f"Generating devices complete ({time.time() - t1} s)")
    status_changes, trips = [], []
    print(
        f"Generating data from {encoder.encode(date_start)} to {encoder.encode(date_end)}")
    t1 = time.time()
    # Generate one complete service day at a time, inclusive of date_end.
    date = date_start
    while date <= date_end:
        formatted_date = encoder.encode(date)
        print(
            f"Starting day: {formatted_date} (open hours {hour_open} to {hour_closed})")
        t2 = time.time()
        day_status_changes, day_trips = gen.service_day(
            devices, date, hour_open, hour_closed, inactivity)
        status_changes.extend(day_status_changes)
        trips.extend(day_trips)
        date = date + timedelta(days=1)
        print(f"Finished day: {formatted_date} ({time.time() - t2} s)")
    print(f"Finished generating data ({time.time() - t1} s)")
    # Write the accumulated payloads as MDS Provider JSON files.
    if len(status_changes) > 0 or len(trips) > 0:
        print("Generating data files")
        t1 = time.time()
        trips_file = os.path.join(outputdir, "trips.json")
        print("Writing to:", trips_file)
        t2 = time.time()
        with open(trips_file, "w") as f:
            payload = gen.make_payload(trips=trips)
            f.write(encoder.encode(payload))
        print(f"Finished ({time.time() - t2} s)")
        sc_file = os.path.join(outputdir, "status_changes.json")
        print("Writing to:", sc_file)
        t2 = time.time()
        with open(sc_file, "w") as f:
            payload = gen.make_payload(status_changes=status_changes)
            f.write(encoder.encode(payload))
        print(f"Finished ({time.time() - t2} s)")
        print(f"Generating data files complete ({time.time() - t1} s)")
    print(f"Data generation complete ({time.time() - T0} s)")
|
python
|
class Cache:
    """Tiny in-memory key/value store demonstrating Python's name-mangled
    ("private") attributes.

    Quirks preserved from the original demo:
      * Falsy keys (0, "", None, ...) are rejected with "INVALID_KEY".
      * Results are returned as human-readable strings, not raw values.
    """

    # THIS BLOCK demo for PROTECTED if required - Single _
    # _protected_store = {}
    #
    # @property
    # def protected_store(self):
    #     return self._protected_store

    def __init__(self):
        # Per-instance store: the original's class-level `__store = {}` was a
        # mutable class attribute shared by *every* Cache instance.
        self.__store = {}

    def get(self, key):
        """Return a display string for `key`, or NOT_FOUND / INVALID_KEY."""
        if not key:
            return "INVALID_KEY"
        try:
            value_for_key = self.__store[key]
        except KeyError:
            return "NOT_FOUND"
        return f"Get for {key}={value_for_key}"

    def set(self, key, value):
        """Store `value` under `key` and report the assignment."""
        if not key:
            return "INVALID_KEY"
        self.__store[key] = value
        return f"VALUE SET {key}={value}"
# Exercise the cache: misses return "NOT_FOUND", falsy keys are rejected,
# and stored values (including None) round-trip through get().
cache = Cache()
print(cache.get(1))
print(cache.set(1, "This is the first data inserted"))
print(cache.set(2, 567))
print(cache.get(1))
print(cache.get(2))
print(cache.set(1, None))
print(cache.get(1))
# print(cache.__store) # AttributeError: 'Cache' object has no attribute '__store'
# print(cache._protected_store) # Warning to convert to property. After PyCharm add property:
# print(cache.protected_store) # Now works because of the property
|
python
|
# Demonstrate string slicing: take the first five characters ("Hello").
b = "Hello, World!"
first_five = b[0:5]
print(first_five)
|
python
|
#!/usr/bin/python3
import argparse
import os
import random
import logging
import tensorflow as tf
import numpy as np
from PIL import Image
from unet import UNet, Discriminator
from scripts.image_manips import resize
# Base name used for checkpoint files and the default log directory.
model_name = "matting"
logging.basicConfig(level=logging.INFO)
# Parse Arguments
def parse_args():
    """Build and parse the command-line arguments for training.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description="Trains the unet")
    parser.add_argument("data", type=str,
                        help="Path to a folder containing data to train")
    parser.add_argument("--lr", type=float, default=1.0,
                        help="Learning rate used to optimize")
    parser.add_argument("--d_coeff", type=float, default=1.0,
                        help="Discriminator loss coefficient")
    # The three epoch counts drive the three consecutive training phases in
    # main(); the original help text described all of them identically.
    parser.add_argument("--gen_epoch", type=int, default=4,
                        help="Number of generator-only training epochs")
    parser.add_argument("--disc_epoch", type=int, default=1,
                        help="Number of discriminator-only training epochs")
    parser.add_argument("--adv_epoch", type=int, default=5,
                        help="Number of adversarial (joint) training epochs")
    parser.add_argument("--batch_size", dest="batch_size", type=int, default=4,
                        help="Size of the batches used in training")
    parser.add_argument('--checkpoint', type=int, default=None,
                        help='Saved session checkpoint, -1 for latest.')
    parser.add_argument('--logdir', default="log/" + model_name,
                        help='Directory where logs should be written.')
    return parser.parse_args()
def apply_trimap(images, output, alpha):
    """Blend the network output with the input according to the trimap alpha.

    Channels where alpha < 0.25 or alpha > 0.75 are copied straight from the
    input (presumably the trimap's definite background/foreground regions —
    confirm trimap encoding); only the uncertain band keeps the prediction.

    Args:
        images: input batch, shape [N, H, W, 4].
        output: network prediction, same shape as `images`.
        alpha: trimap confidence map broadcast against each channel slice.

    Returns:
        Tensor of shape [N, H, W, 4] with the blended channels.
    """
    masked_output = []
    for channel in range(4):
        blended = output[:, :, :, channel]
        blended = tf.where(alpha < 0.25, images[:, :, :, channel], blended)
        blended = tf.where(alpha > 0.75, images[:, :, :, channel], blended)
        # (the original had a redundant self-assignment here; removed)
        masked_output.append(blended)
    return tf.stack(masked_output, 3)
def main(args):
    """Train the matting UNet in three consecutive phases — generator-only,
    discriminator-only, then adversarial — checkpointing and logging to
    TensorBoard summaries along the way.

    :param args: parsed CLI namespace from parse_args().
    """
    input_path = os.path.join(args.data, "input")
    trimap_path = os.path.join(args.data, "trimap")
    target_path = os.path.join(args.data, "target")
    output_path = os.path.join(args.data, "output")
    # Logging/checkpoint cadence, expressed in samples (multiples of batch_size).
    train_data_update_freq = args.batch_size
    test_data_update_freq = 50*args.batch_size
    sess_save_freq = 100*args.batch_size
    if not os.path.isdir(output_path):
        os.makedirs(output_path)
    if not os.path.isdir(args.logdir):
        os.makedirs(args.logdir)
    # File names look like "<image>_<crop>.jpg"; keep both integers as the id.
    ids = [[int(i) for i in os.path.splitext(filename)[0].split('_')] for filename in os.listdir(input_path)]
    np.random.shuffle(ids)
    split_point = int(round(0.85*len(ids)))  # 85% training / 15% validation split
    # Store the split inside the graph so a restored checkpoint reuses the
    # exact same train/validation partition.
    train_ids = tf.get_variable('train_ids', initializer=ids[0:split_point], trainable=False)
    valid_ids = tf.get_variable('valid_ids', initializer=ids[split_point:len(ids)], trainable=False)
    global_step = tf.get_variable('global_step', initializer=0, trainable=False)
    # Phase lengths in optimizer steps: generator-only, discriminator-only, adversarial.
    g_iter = int(args.gen_epoch * int(train_ids.shape[0]))
    d_iter = int(args.disc_epoch * int(train_ids.shape[0]))
    a_iter = int(args.adv_epoch * int(train_ids.shape[0]))
    n_iter = g_iter+d_iter+a_iter
    # 4 channels: RGB + trimap on the input, RGB + alpha on the target.
    input_images = tf.placeholder(tf.float32, shape=[None, 480, 360, 4])
    target_images = tf.placeholder(tf.float32, shape=[None, 480, 360, 4])
    alpha = target_images[:,:,:,3][..., np.newaxis]
    with tf.variable_scope("Gen"):
        gen = UNet(4,4)
        output = tf.sigmoid(gen(input_images))
        g_loss = tf.losses.mean_squared_error(target_images, output)
    with tf.variable_scope("Disc"):
        disc = Discriminator(4)
        d_real = disc(target_images)
        d_fake = disc(output)
    # Discriminator objective (to be maximized; d_optimizer minimizes -d_loss).
    d_loss = tf.reduce_mean(tf.log(d_real) + tf.log(1-d_fake))
    # Adversarial generator objective: reconstruction + weighted discriminator term.
    a_loss = g_loss + args.d_coeff * d_loss
    g_loss_summary = tf.summary.scalar("g_loss", g_loss)
    d_loss_summary = tf.summary.scalar("d_loss", d_loss)
    a_loss_summary = tf.summary.scalar("a_loss", a_loss)
    summary_op = tf.summary.merge(
        [g_loss_summary, d_loss_summary, a_loss_summary])
    summary_image = tf.summary.image("result", output)
    # Optimize generator and discriminator variables separately.
    g_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Gen')
    d_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Disc')
    g_optimizer = tf.train.AdadeltaOptimizer(args.lr).minimize(g_loss, global_step=global_step, var_list=g_vars)
    a_optimizer = tf.train.AdadeltaOptimizer(args.lr).minimize(a_loss, global_step=global_step, var_list=g_vars)
    d_optimizer = tf.train.AdadeltaOptimizer(args.lr).minimize(-d_loss, global_step=global_step, var_list=d_vars)
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)
    train_writer = tf.summary.FileWriter(args.logdir + '/train')
    test_writer = tf.summary.FileWriter(args.logdir + '/test')
    saver = tf.train.Saver()
    if args.checkpoint is not None and os.path.exists(os.path.join(args.logdir, 'checkpoint')):
        if args.checkpoint == -1:#latest checkpoint
            saver.restore(sess, tf.train.latest_checkpoint(args.logdir))
        else:#Specified checkpoint
            saver.restore(sess, os.path.join(args.logdir, model_name+".ckpt-"+str(args.checkpoint)))
        logging.debug('Model restored to step ' + str(global_step.eval(sess)))
    # Materialize the stored split as plain Python lists for random.sample.
    train_ids = list(train_ids.eval(sess))
    valid_ids = list(valid_ids.eval(sess))
    def load_batch(batch_ids):
        """Load, downscale and normalize one batch of (input+trimap, target) pairs."""
        images, targets = [], []
        for i, j in batch_ids:
            input_filename = os.path.join(input_path, str(i) + '_' + str(j) + '.jpg')
            trimap_filename = os.path.join(trimap_path, str(i) + '_trimap.jpg')
            target_filename = os.path.join(target_path, str(i) + '.png')
            logging.debug(input_filename)
            logging.debug(trimap_filename)
            logging.debug(target_filename)
            image = resize(Image.open(input_filename), 2)
            trimap = resize(Image.open(trimap_filename), 2)
            target = resize(Image.open(target_filename), 2)
            image = np.array(image)
            trimap = np.array(trimap)[..., np.newaxis]
            # Stack the trimap as a 4th input channel; scale pixels to [0, 1].
            image = np.concatenate((image, trimap), axis = 2).astype(np.float32) / 255
            target = np.array(target).astype(np.float32) / 255
            images.append(image)
            targets.append(target)
        return np.asarray(images), np.asarray(targets)
    def test_step(batch_idx, summary_fct):
        """Run one validation-style step and log loss + example images.

        NOTE(review): samples from train_ids, not valid_ids — looks like the
        validation set is never used here; confirm intent.
        """
        batch_range = random.sample(train_ids, args.batch_size)
        images, targets = load_batch(batch_range)
        loss, demo, summary = sess.run([g_loss, summary_image, summary_fct], feed_dict={
            input_images: images,
            target_images: targets,
            })
        test_writer.add_summary(summary, batch_idx)
        test_writer.add_summary(demo, batch_idx)
        logging.info('Validation Loss: {:.8f}'.format(loss))
    try:
        batch_idx = 0
        while batch_idx < n_iter:
            batch_idx = global_step.eval(sess) * args.batch_size
            # Select the loss/optimizer for the current training phase.
            loss_fct = None
            label = None
            optimizers = []
            if batch_idx < g_iter:
                loss_fct = g_loss
                summary_fct = g_loss_summary
                label = 'Gen train'
                optimizers = [g_optimizer]
            elif batch_idx < g_iter+d_iter:
                loss_fct = d_loss
                summary_fct = d_loss_summary
                label = 'Disc train'
                optimizers = [d_optimizer]
            else:
                loss_fct = a_loss
                summary_fct = summary_op
                label = 'Adv train'
                optimizers = [a_optimizer]
            batch_range = random.sample(train_ids, args.batch_size)
            images, targets = load_batch(batch_range)
            loss, summary = sess.run([loss_fct, summary_fct] + optimizers, feed_dict={
                input_images: np.array(images),
                target_images: np.array(targets)})[0:2]
            if batch_idx % train_data_update_freq == 0:
                logging.info('{}: [{}/{} ({:.0f}%)]\tGen Loss: {:.8f}'.format(label, batch_idx, n_iter,
                    100. * (batch_idx+1) / n_iter, loss))
                train_writer.add_summary(summary, batch_idx)
            if batch_idx % test_data_update_freq == 0:
                test_step(batch_idx, summary_fct)
            if batch_idx % sess_save_freq == 0:
                logging.debug('Saving model')
                saver.save(sess, os.path.join(args.logdir, model_name+".ckpt"), global_step=batch_idx)
    except Exception:
        # NOTE(review): the exception is swallowed after saving a crash
        # checkpoint — consider re-raising so the failure is visible.
        saver.save(sess, os.path.join(args.logdir, 'crash_save_'+model_name+".ckpt"), global_step=batch_idx)
        saver.save(sess, os.path.join(args.logdir, model_name+".ckpt"), global_step=batch_idx)
# Script entry point: parse CLI arguments, then run training.
if __name__ == '__main__':
    args = parse_args()
    main(args)
|
python
|
from setuptools import setup
setup(name='socialsent',
version='0.1.2',
description='Algorithms for inducing domain-specific sentiment lexicons from unlabeled corpora.',
url='https://github.com/williamleif/socialsent',
author='William Hamilton',
author_email='[email protected]',
license='Apache Version 2',
packages=['socialsent'],
install_requires = ['numpy',
'keras==0.3',
'sklearn',
'theano'],
package_data= {'socialsent' : ['data/lexicons/*.json']},
zip_safe=False)
|
python
|
########################################################################
# SwarmOps - Heuristic optimization for Python.
# Copyright (C) 2003-2016 Magnus Erik Hvass Pedersen.
# See the file README.md for instructions.
# See the file LICENSE.txt for license details.
# SwarmOps on the internet: http://www.Hvass-Labs.org/
########################################################################
########################################################################
# Provides logging of best solutions found for an optimization problem,
# so we get a list of good solutions rather than just a single solution.
########################################################################
import numpy as np
########################################################################
class _LogElement:
"""
Used for storing parameters and associated fitness in the log.
"""
def __init__(self, x=None, fitness=np.Infinity):
"""
Create object instance.
:param x: Position in the search-space aka. candidate solution.
:param fitness: Associated fitness of the position in the search-space.
:return: Object instance.
"""
# Copy arguments to instance variables.
self.x = x
self.fitness = fitness
########################################################################
class LogSolutions:
    """
    Transparently wraps a Problem-object and provides logging of the
    best solutions found. This allows us to get a list of good
    solutions for a problem rather than just the single best solution.
    """

    def __init__(self, problem, capacity=20):
        """
        Create object instance.

        :param problem: Instance of the Problem-class to be wrapped.
        :param capacity: Capacity of the log, default 20.
        :return: Object instance.
        """
        # Copy all the attributes of the problem-object to self. This essentially
        # wraps the problem-object and makes e.g. self.dim the same as problem.dim, etc.
        # Note that a shallow copy is being made, otherwise self.__dict__
        # would be a reference to problem.__dict__ which would cause the
        # original problem-object to become modified e.g. with self.capacity, etc.
        self.__dict__ = problem.__dict__.copy()

        # Copy arguments to instance variables.
        self.problem = problem
        self.capacity = capacity

        # Initialize log with empty placeholder solutions at fitness np.inf
        # (np.inf replaces np.Infinity, which was removed in NumPy 2.0).
        self.solutions = [_LogElement() for _ in range(capacity)]

    def fitness(self, x, limit=np.inf):
        """
        Wraps the fitness-function of the actual problem, whose results
        are logged if the fitness is an improvement.

        :param x: Candidate solution to evaluate.
        :param limit: Pass-through evaluation limit for the wrapped problem.
        :return: Fitness of x as computed by the wrapped problem.
        """
        # Calculate fitness of the actual problem.
        new_fitness = self.problem.fitness(x=x, limit=limit)

        # If log is desired.
        if self.capacity > 0:
            # If the new fitness is an improvement over the worst-known fitness in the log.
            if new_fitness < self.solutions[-1].fitness:
                # Replace the worst logged solution and restore sort order
                # (ascending fitness, best solution first).
                self.solutions[-1] = _LogElement(x=x, fitness=new_fitness)
                self.solutions = sorted(self.solutions, key=lambda solution: solution.fitness)

        return new_fitness
########################################################################
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Mar 23 11:57 2017
@author: Denis Tome'
"""
# Public API of this constants module. NORMALISATION_COEFFICIENT and
# BATCH_SIZE were defined below but missing from __all__; added for
# consistency with every other constant in the file.
__all__ = [
    'VISIBLE_PART',
    'MIN_NUM_JOINTS',
    'CENTER_TR',
    'SIGMA',
    'STRIDE',
    'SIGMA_CENTER',
    'INPUT_SIZE',
    'OUTPUT_SIZE',
    'NUM_JOINTS',
    'NUM_OUTPUT',
    'H36M_NUM_JOINTS',
    'JOINT_DRAW_SIZE',
    'LIMB_DRAW_SIZE',
    'NORMALISATION_COEFFICIENT',
    'BATCH_SIZE'
]

# threshold
VISIBLE_PART = 1e-3
MIN_NUM_JOINTS = 5
CENTER_TR = 0.4

# net attributes
SIGMA = 7
STRIDE = 8
SIGMA_CENTER = 21
INPUT_SIZE = 368
OUTPUT_SIZE = 46
NUM_JOINTS = 14
# Network outputs one heatmap per joint plus one for the person center.
NUM_OUTPUT = NUM_JOINTS + 1
H36M_NUM_JOINTS = 17

# draw options
JOINT_DRAW_SIZE = 3
LIMB_DRAW_SIZE = 2
# Reference image area (1280x720) used for normalisation.
NORMALISATION_COEFFICIENT = 1280*720

# test options
BATCH_SIZE = 4
|
python
|
# coding: utf-8
"""
Ed-Fi Operational Data Store API
The Ed-Fi ODS / API enables applications to read and write education data stored in an Ed-Fi ODS through a secure REST interface. *** > *Note: Consumers of ODS / API information should sanitize all data for display and storage. The ODS / API provides reasonable safeguards against cross-site scripting attacks and other malicious content, but the platform does not and cannot guarantee that the data it contains is free of all potentially harmful content.* *** # noqa: E501
OpenAPI spec version: 3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class ObjectiveAssessmentsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
    def __init__(self, api_client=None):
        # Fall back to a default-configured ApiClient when none is supplied.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
    def delete_objective_assessment_by_id(self, id, **kwargs):  # noqa: E501
        """Deletes an existing resource using the resource identifier.  # noqa: E501

        The DELETE operation is used to delete an existing resource by identifier. If the resource doesn't exist, an error will result (the resource will not be found). # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_objective_assessment_by_id(id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str id: A resource identifier that uniquely identifies the resource. (required)
        :param str if_match: The ETag header value used to prevent the DELETE from removing a resource modified by another consumer.
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Unwrap the (data, status, headers) tuple to just the data payload.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            # Async path returns the request thread; caller uses .get().
            return self.delete_objective_assessment_by_id_with_http_info(id, **kwargs)  # noqa: E501
        else:
            (data) = self.delete_objective_assessment_by_id_with_http_info(id, **kwargs)  # noqa: E501
            return data
def delete_objective_assessment_by_id_with_http_info(self, id, **kwargs): # noqa: E501
"""Deletes an existing resource using the resource identifier. # noqa: E501
The DELETE operation is used to delete an existing resource by identifier. If the resource doesn't exist, an error will result (the resource will not be found). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_objective_assessment_by_id_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: A resource identifier that uniquely identifies the resource. (required)
:param str if_match: The ETag header value used to prevent the DELETE from removing a resource modified by another consumer.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'if_match'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_objective_assessment_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in params or
params['id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `id` when calling `delete_objective_assessment_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
if 'if_match' in params:
header_params['If-Match'] = params['if_match'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_client_credentials'] # noqa: E501
return self.api_client.call_api(
'/ed-fi/objectiveAssessments/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def deletes_objective_assessments(self, **kwargs): # noqa: E501
"""Retrieves deleted resources based on change version. # noqa: E501
The DELETES operation is used to retrieve deleted resources. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.deletes_objective_assessments(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int offset: Indicates how many items should be skipped before returning results.
:param int limit: Indicates the maximum number of items that should be returned in the results.
:param int min_change_version: Used in synchronization to set sequence minimum ChangeVersion
:param int max_change_version: Used in synchronization to set sequence maximum ChangeVersion
:return: list[EdFiObjectiveAssessment]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.deletes_objective_assessments_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.deletes_objective_assessments_with_http_info(**kwargs) # noqa: E501
return data
def deletes_objective_assessments_with_http_info(self, **kwargs): # noqa: E501
"""Retrieves deleted resources based on change version. # noqa: E501
The DELETES operation is used to retrieve deleted resources. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.deletes_objective_assessments_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int offset: Indicates how many items should be skipped before returning results.
:param int limit: Indicates the maximum number of items that should be returned in the results.
:param int min_change_version: Used in synchronization to set sequence minimum ChangeVersion
:param int max_change_version: Used in synchronization to set sequence maximum ChangeVersion
:return: list[EdFiObjectiveAssessment]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['offset', 'limit', 'min_change_version', 'max_change_version'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method deletes_objective_assessments" % key
)
params[key] = val
del params['kwargs']
if self.api_client.client_side_validation and ('limit' in params and params['limit'] > 500): # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `deletes_objective_assessments`, must be a value less than or equal to `500`") # noqa: E501
if self.api_client.client_side_validation and ('limit' in params and params['limit'] < 0): # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `deletes_objective_assessments`, must be a value greater than or equal to `0`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'min_change_version' in params:
query_params.append(('minChangeVersion', params['min_change_version'])) # noqa: E501
if 'max_change_version' in params:
query_params.append(('maxChangeVersion', params['max_change_version'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_client_credentials'] # noqa: E501
return self.api_client.call_api(
'/ed-fi/objectiveAssessments/deletes', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[EdFiObjectiveAssessment]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_objective_assessments(self, **kwargs): # noqa: E501
"""Retrieves specific resources using the resource's property values (using the \"Get\" pattern). # noqa: E501
This GET operation provides access to resources using the \"Get\" search pattern. The values of any properties of the resource that are specified will be used to return all matching results (if it exists). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_objective_assessments(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int offset: Indicates how many items should be skipped before returning results.
:param int limit: Indicates the maximum number of items that should be returned in the results.
:param int min_change_version: Used in synchronization to set sequence minimum ChangeVersion
:param int max_change_version: Used in synchronization to set sequence maximum ChangeVersion
:param bool total_count: Indicates if the total number of items available should be returned in the 'Total-Count' header of the response. If set to false, 'Total-Count' header will not be provided.
:param str identification_code: A unique number or alphanumeric code assigned to a space, room, site, building, individual, organization, program, or institution by a school, school system, a state, or other agency or entity.
:param str assessment_identifier: A unique number or alphanumeric code assigned to an assessment.
:param str namespace: Namespace for the Assessment.
:param str parent_identification_code: A unique number or alphanumeric code assigned to a space, room, site, building, individual, organization, program, or institution by a school, school system, a state, or other agency or entity.
:param str academic_subject_descriptor: The subject area of the objective assessment.
:param str description: The description of the ObjectiveAssessment (e.g., vocabulary, measurement, or geometry).
:param str id:
:param float max_raw_score: The maximum raw score achievable across all assessment items that are correct and scored at the maximum.
:param str nomenclature: Reflects the specific nomenclature used for this level of ObjectiveAssessment.
:param float percent_of_assessment: The percentage of the Assessment that tests this objective.
:return: list[EdFiObjectiveAssessment]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_objective_assessments_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_objective_assessments_with_http_info(**kwargs) # noqa: E501
return data
def get_objective_assessments_with_http_info(self, **kwargs): # noqa: E501
"""Retrieves specific resources using the resource's property values (using the \"Get\" pattern). # noqa: E501
This GET operation provides access to resources using the \"Get\" search pattern. The values of any properties of the resource that are specified will be used to return all matching results (if it exists). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_objective_assessments_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int offset: Indicates how many items should be skipped before returning results.
:param int limit: Indicates the maximum number of items that should be returned in the results.
:param int min_change_version: Used in synchronization to set sequence minimum ChangeVersion
:param int max_change_version: Used in synchronization to set sequence maximum ChangeVersion
:param bool total_count: Indicates if the total number of items available should be returned in the 'Total-Count' header of the response. If set to false, 'Total-Count' header will not be provided.
:param str identification_code: A unique number or alphanumeric code assigned to a space, room, site, building, individual, organization, program, or institution by a school, school system, a state, or other agency or entity.
:param str assessment_identifier: A unique number or alphanumeric code assigned to an assessment.
:param str namespace: Namespace for the Assessment.
:param str parent_identification_code: A unique number or alphanumeric code assigned to a space, room, site, building, individual, organization, program, or institution by a school, school system, a state, or other agency or entity.
:param str academic_subject_descriptor: The subject area of the objective assessment.
:param str description: The description of the ObjectiveAssessment (e.g., vocabulary, measurement, or geometry).
:param str id:
:param float max_raw_score: The maximum raw score achievable across all assessment items that are correct and scored at the maximum.
:param str nomenclature: Reflects the specific nomenclature used for this level of ObjectiveAssessment.
:param float percent_of_assessment: The percentage of the Assessment that tests this objective.
:return: list[EdFiObjectiveAssessment]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['offset', 'limit', 'min_change_version', 'max_change_version', 'total_count', 'identification_code', 'assessment_identifier', 'namespace', 'parent_identification_code', 'academic_subject_descriptor', 'description', 'id', 'max_raw_score', 'nomenclature', 'percent_of_assessment'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_objective_assessments" % key
)
params[key] = val
del params['kwargs']
if self.api_client.client_side_validation and ('limit' in params and params['limit'] > 500): # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `get_objective_assessments`, must be a value less than or equal to `500`") # noqa: E501
if self.api_client.client_side_validation and ('limit' in params and params['limit'] < 0): # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `get_objective_assessments`, must be a value greater than or equal to `0`") # noqa: E501
if self.api_client.client_side_validation and ('identification_code' in params and
len(params['identification_code']) > 60):
raise ValueError("Invalid value for parameter `identification_code` when calling `get_objective_assessments`, length must be less than or equal to `60`") # noqa: E501
if self.api_client.client_side_validation and ('assessment_identifier' in params and
len(params['assessment_identifier']) > 60):
raise ValueError("Invalid value for parameter `assessment_identifier` when calling `get_objective_assessments`, length must be less than or equal to `60`") # noqa: E501
if self.api_client.client_side_validation and ('namespace' in params and
len(params['namespace']) > 255):
raise ValueError("Invalid value for parameter `namespace` when calling `get_objective_assessments`, length must be less than or equal to `255`") # noqa: E501
if self.api_client.client_side_validation and ('parent_identification_code' in params and
len(params['parent_identification_code']) > 60):
raise ValueError("Invalid value for parameter `parent_identification_code` when calling `get_objective_assessments`, length must be less than or equal to `60`") # noqa: E501
if self.api_client.client_side_validation and ('academic_subject_descriptor' in params and
len(params['academic_subject_descriptor']) > 306):
raise ValueError("Invalid value for parameter `academic_subject_descriptor` when calling `get_objective_assessments`, length must be less than or equal to `306`") # noqa: E501
if self.api_client.client_side_validation and ('description' in params and
len(params['description']) > 1024):
raise ValueError("Invalid value for parameter `description` when calling `get_objective_assessments`, length must be less than or equal to `1024`") # noqa: E501
if self.api_client.client_side_validation and ('nomenclature' in params and
len(params['nomenclature']) > 35):
raise ValueError("Invalid value for parameter `nomenclature` when calling `get_objective_assessments`, length must be less than or equal to `35`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'min_change_version' in params:
query_params.append(('minChangeVersion', params['min_change_version'])) # noqa: E501
if 'max_change_version' in params:
query_params.append(('maxChangeVersion', params['max_change_version'])) # noqa: E501
if 'total_count' in params:
query_params.append(('totalCount', params['total_count'])) # noqa: E501
if 'identification_code' in params:
query_params.append(('identificationCode', params['identification_code'])) # noqa: E501
if 'assessment_identifier' in params:
query_params.append(('assessmentIdentifier', params['assessment_identifier'])) # noqa: E501
if 'namespace' in params:
query_params.append(('namespace', params['namespace'])) # noqa: E501
if 'parent_identification_code' in params:
query_params.append(('parentIdentificationCode', params['parent_identification_code'])) # noqa: E501
if 'academic_subject_descriptor' in params:
query_params.append(('academicSubjectDescriptor', params['academic_subject_descriptor'])) # noqa: E501
if 'description' in params:
query_params.append(('description', params['description'])) # noqa: E501
if 'id' in params:
query_params.append(('id', params['id'])) # noqa: E501
if 'max_raw_score' in params:
query_params.append(('maxRawScore', params['max_raw_score'])) # noqa: E501
if 'nomenclature' in params:
query_params.append(('nomenclature', params['nomenclature'])) # noqa: E501
if 'percent_of_assessment' in params:
query_params.append(('percentOfAssessment', params['percent_of_assessment'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_client_credentials'] # noqa: E501
return self.api_client.call_api(
'/ed-fi/objectiveAssessments', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[EdFiObjectiveAssessment]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_objective_assessments_by_id(self, id, **kwargs): # noqa: E501
"""Retrieves a specific resource using the resource's identifier (using the \"Get By Id\" pattern). # noqa: E501
This GET operation retrieves a resource by the specified resource identifier. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_objective_assessments_by_id(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: A resource identifier that uniquely identifies the resource. (required)
:param str if_none_match: The previously returned ETag header value, used here to prevent the unnecessary data transfer of an unchanged resource.
:return: EdFiObjectiveAssessment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_objective_assessments_by_id_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_objective_assessments_by_id_with_http_info(id, **kwargs) # noqa: E501
return data
def get_objective_assessments_by_id_with_http_info(self, id, **kwargs): # noqa: E501
"""Retrieves a specific resource using the resource's identifier (using the \"Get By Id\" pattern). # noqa: E501
This GET operation retrieves a resource by the specified resource identifier. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_objective_assessments_by_id_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: A resource identifier that uniquely identifies the resource. (required)
:param str if_none_match: The previously returned ETag header value, used here to prevent the unnecessary data transfer of an unchanged resource.
:return: EdFiObjectiveAssessment
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'if_none_match'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_objective_assessments_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in params or
params['id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `id` when calling `get_objective_assessments_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
if 'if_none_match' in params:
header_params['If-None-Match'] = params['if_none_match'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_client_credentials'] # noqa: E501
return self.api_client.call_api(
'/ed-fi/objectiveAssessments/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='EdFiObjectiveAssessment', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def post_objective_assessment(self, objective_assessment, **kwargs): # noqa: E501
"""Creates or updates resources based on the natural key values of the supplied resource. # noqa: E501
The POST operation can be used to create or update resources. In database terms, this is often referred to as an \"upsert\" operation (insert + update). Clients should NOT include the resource \"id\" in the JSON body because it will result in an error (you must use a PUT operation to update a resource by \"id\"). The web service will identify whether the resource already exists based on the natural key values provided, and update or create the resource appropriately. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_objective_assessment(objective_assessment, async_req=True)
>>> result = thread.get()
:param async_req bool
:param EdFiObjectiveAssessment objective_assessment: The JSON representation of the \"objectiveAssessment\" resource to be created or updated. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.post_objective_assessment_with_http_info(objective_assessment, **kwargs) # noqa: E501
else:
(data) = self.post_objective_assessment_with_http_info(objective_assessment, **kwargs) # noqa: E501
return data
def post_objective_assessment_with_http_info(self, objective_assessment, **kwargs): # noqa: E501
"""Creates or updates resources based on the natural key values of the supplied resource. # noqa: E501
The POST operation can be used to create or update resources. In database terms, this is often referred to as an \"upsert\" operation (insert + update). Clients should NOT include the resource \"id\" in the JSON body because it will result in an error (you must use a PUT operation to update a resource by \"id\"). The web service will identify whether the resource already exists based on the natural key values provided, and update or create the resource appropriately. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_objective_assessment_with_http_info(objective_assessment, async_req=True)
>>> result = thread.get()
:param async_req bool
:param EdFiObjectiveAssessment objective_assessment: The JSON representation of the \"objectiveAssessment\" resource to be created or updated. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['objective_assessment'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_objective_assessment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'objective_assessment' is set
if self.api_client.client_side_validation and ('objective_assessment' not in params or
params['objective_assessment'] is None): # noqa: E501
raise ValueError("Missing the required parameter `objective_assessment` when calling `post_objective_assessment`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'objective_assessment' in params:
body_params = params['objective_assessment']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_client_credentials'] # noqa: E501
return self.api_client.call_api(
'/ed-fi/objectiveAssessments', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def put_objective_assessment(self, id, objective_assessment, **kwargs): # noqa: E501
"""Updates or creates a resource based on the resource identifier. # noqa: E501
The PUT operation is used to update or create a resource by identifier. If the resource doesn't exist, the resource will be created using that identifier. Additionally, natural key values cannot be changed using this operation, and will not be modified in the database. If the resource \"id\" is provided in the JSON body, it will be ignored as well. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.put_objective_assessment(id, objective_assessment, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: A resource identifier that uniquely identifies the resource. (required)
:param EdFiObjectiveAssessment objective_assessment: The JSON representation of the \"objectiveAssessment\" resource to be created or updated. (required)
:param str if_match: The ETag header value used to prevent the PUT from updating a resource modified by another consumer.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.put_objective_assessment_with_http_info(id, objective_assessment, **kwargs) # noqa: E501
else:
(data) = self.put_objective_assessment_with_http_info(id, objective_assessment, **kwargs) # noqa: E501
return data
def put_objective_assessment_with_http_info(self, id, objective_assessment, **kwargs): # noqa: E501
"""Updates or creates a resource based on the resource identifier. # noqa: E501
The PUT operation is used to update or create a resource by identifier. If the resource doesn't exist, the resource will be created using that identifier. Additionally, natural key values cannot be changed using this operation, and will not be modified in the database. If the resource \"id\" is provided in the JSON body, it will be ignored as well. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.put_objective_assessment_with_http_info(id, objective_assessment, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: A resource identifier that uniquely identifies the resource. (required)
:param EdFiObjectiveAssessment objective_assessment: The JSON representation of the \"objectiveAssessment\" resource to be created or updated. (required)
:param str if_match: The ETag header value used to prevent the PUT from updating a resource modified by another consumer.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'objective_assessment', 'if_match'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method put_objective_assessment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in params or
params['id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `id` when calling `put_objective_assessment`") # noqa: E501
# verify the required parameter 'objective_assessment' is set
if self.api_client.client_side_validation and ('objective_assessment' not in params or
params['objective_assessment'] is None): # noqa: E501
raise ValueError("Missing the required parameter `objective_assessment` when calling `put_objective_assessment`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
if 'if_match' in params:
header_params['If-Match'] = params['if_match'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'objective_assessment' in params:
body_params = params['objective_assessment']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_client_credentials'] # noqa: E501
return self.api_client.call_api(
'/ed-fi/objectiveAssessments/{id}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
python
|
# -*- coding: utf-8 -*-
__doc__="返回选择物体的类型"
import rpw
from rpw import revit, DB, UI,db,doc
from System.Collections.Generic import List
import json
import subprocess as sp
#from MyLib.Adaptor import BAT_Wall
from Adaptor import BAT_ElementMaterial
from Helper import *
# Filter elements by Assembly Code ("Accessmbly" is the original author's
# spelling, kept because the identifier is reused below) and report price.
title="根据Accessmbly筛选构件价格"
description="根据Accessmbly筛选构件价格"
# TextInput pops a Revit dialog and returns the text the user typed
# (exit_on_close=True makes the script exit if the dialog is dismissed).
value=rpw.ui.forms.TextInput(title, default=None, description=description, sort=True, exit_on_close=True)
# Assembly Code prefix used to select elements below.
Accessmbly_Code=value
class GetElementByAccessmblyCode():
    """Select Revit elements whose Assembly ("Accessmbly") Code starts with
    the given prefix and compute their total cost.

    NOTE(review): relies on the rpw/Revit API and the Helper module's
    CovertToM3/CovertToM2 unit converters; only runs inside Revit.
    """

    def __init__(self, Accessmbly_Code):
        # Assembly Code prefix used by the parameter filter below.
        self.Accessmbly_Code = Accessmbly_Code

    def GetAllElement(self):
        """Return wrapped element instances whose UNIFORMAT (Assembly) Code
        begins with self.Accessmbly_Code."""
        param_id = DB.ElementId(DB.BuiltInParameter.UNIFORMAT_CODE)
        param_id2 = DB.ElementId(DB.BuiltInParameter.ALL_MODEL_TYPE_MARK)
        parameter_filter = rpw.db.ParameterFilter(param_id, begins=self.Accessmbly_Code)
        # NOTE(review): parameter_filter2/collector2 are built but never used;
        # kept so behaviour matches the original script.
        parameter_filter2 = rpw.db.ParameterFilter(param_id2, equals='外墙1')
        collector2 = rpw.db.Collector(parameter_filter=parameter_filter2, is_type=False)
        collector = rpw.db.Collector(parameter_filter=parameter_filter, is_type=False).wrapped_elements
        return collector

    def GetAllElementCost(self):
        """Return the type-level 'Cost' parameter value for every element."""
        cost = []
        for i in self.GetAllElement():
            symbol = i.type
            cost.append(symbol.parameters['Cost'].value)
        return cost

    def GetAllElementquantity(self):
        """Return per-element quantities: volume in m3 when the element has a
        Volume parameter, otherwise its area in m2."""
        quantity = []
        for i in self.GetAllElement():
            try:
                quantity.append(CovertToM3(i.parameters['Volume'].value))
            except:
                # Elements without a Volume parameter fall back to their Area.
                quantity.append(CovertToM2(i.parameters['Area'].value))
        return quantity

    def GetTotalCost(self):
        """Return the total cost in units of 10,000 (万元), rounded to two
        decimals and scaled by 1.07 (presumably tax — TODO confirm)."""
        singlecost = self.GetAllElementCost()
        singleQuantity = self.GetAllElementquantity()

        def multiply(a, b):
            # Renamed from the original's misleading `add`: it multiplies
            # unit cost by quantity.
            return a * b

        # Bug fix: materialize the map. In Python 3 `map` returns a one-shot
        # iterator, so the original's debug `print(sum(_result))` drained it
        # and the second sum() below always saw 0.
        _result = list(map(multiply, singlecost, singleQuantity))
        print(sum(_result))
        result = round((sum(_result) / 10000), 2)
        return result * 1.07
# Build the query object from the user-supplied Assembly Code prefix.
Element=GetElementByAccessmblyCode(Accessmbly_Code)
print(Element.GetAllElement())
# Inspect the first matching element's materials via the Adaptor helper.
OneWall=Element.GetAllElement()[0].unwrap()
MyWall=BAT_ElementMaterial.BAT_ElementMaterial(OneWall)
name=MyWall.GetMaterialAreaWithName()
print(name)
#for i in name:
#    print(i['name'])
print("万元")
|
python
|
#!/usr/bin/env python
'''
This script will generate a convenient <.csv> detailing the nucleotide composition of
sequences in fasta format. This <.csv> may be imported and merged with other <.csv>s to build
a comprehensive data sheet.
'''
#Imports
from collections import Counter
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.Alphabet import IUPAC
import argparse
import glob
#Functions
def read_in_fasta(afasta):
    '''Read a fasta file into a {record_id: sequence_string} dictionary.

    afasta: path to a fasta-format file readable by Bio.SeqIO.
    '''
    fasta_dict = {}
    # Use a context manager so the handle is closed after parsing; the
    # original opened the file and never closed it (and also initialized
    # fasta_dict twice).
    with open(afasta) as handle:
        for fasta in SeqIO.parse(handle, 'fasta'):
            fasta_dict[fasta.id] = str(fasta.seq)
    return fasta_dict
def generate_comp(fasta_dictonary):
    '''Generate composition statistics for a fasta dictionary.

    Returns {name: [name, GC, AT, AC, A, C, G, T, length]} where each
    fraction is computed over the total character count of the sequence.
    Zero-length sequences are skipped (the original raised
    ZeroDivisionError on them).
    '''
    out = {}
    for name, sequence in fasta_dictonary.items():
        nucleotide_count = Counter(sequence)
        total_nucleotides = sum(nucleotide_count.values())
        if total_nucleotides == 0:
            # Guard: empty sequence has no defined composition.
            continue
        total = float(total_nucleotides)
        GC_content = (nucleotide_count['G'] + nucleotide_count['C']) / total
        AT_content = (nucleotide_count['A'] + nucleotide_count['T']) / total
        AC_content = (nucleotide_count['A'] + nucleotide_count['C']) / total
        A_content = nucleotide_count['A'] / total
        C_content = nucleotide_count['C'] / total
        G_content = nucleotide_count['G'] / total
        T_content = nucleotide_count['T'] / total
        out[name] = [name, GC_content, AT_content, AC_content, A_content,
                     C_content, G_content, T_content, len(sequence)]
    return out
def write_out_csv(composition_dictionary, out_fyle='fasta_comp.csv'):
    '''Write the composition statistics out as a <.csv> with a header row.

    The resulting csv may be combined with others on the transcript column.
    '''
    header = ['transcript', 'GC_content', 'AT_content', 'AC_content',
              'A_content', 'C_content', 'G_content', 'T_content', 'length']
    with open(out_fyle, 'w') as g:
        g.write(','.join(header) + '\n')
        for record in composition_dictionary.values():
            g.write(','.join(str(field) for field in record) + '\n')
def main():
    """CLI entry point: build a composition <.csv> for one fasta file
    (-single) or for every <.fa/.fasta/.fas> file in the directory."""
    parser = argparse.ArgumentParser(description='Creates <.csv> of sequence compositions from <.fasta> files in the directory')
    parser.add_argument('-single', default=None, help='Operate on this single file')
    parser.add_argument('-suffix', type=str, default='composition', help='[default = composition] <.csv> file suffix')
    args = parser.parse_args()
    if args.single is not None:
        targets = [args.single]
    else:
        # Collect every fasta-like file in the working directory.
        targets = [hit for pattern in ('*.fa', '*.fasta', '*.fas')
                   for hit in glob.glob(pattern)]
    for fyle in targets:
        composition = generate_comp(read_in_fasta(fyle))
        out_name = fyle.split('.')[0] + '_' + args.suffix + '.csv'
        write_out_csv(composition, out_name)
# Run the CLI only when executed as a script (not on import).
if __name__ == '__main__':
    main()
|
python
|
# 7. Program that determines the area and perimeter of a rectangle,
#    knowing that area = b x h and perimeter = 2 x (b + h).
B = float(input("Ingresa el B: "))
H = float(input("Ingresa el H: "))
rect_area = B * H
rect_perimeter = 2 * (B + H)
print("El Area es : ", rect_area)
print("El perimetro es : ", rect_perimeter)
|
python
|
from typing import List
from webdnn.backend.code_generator.allocator import MemoryLayout
from webdnn.backend.fallback.generator import FallbackDescriptorGenerator
from webdnn.backend.fallback.kernel import Kernel
from webdnn.graph.operators.concat import Concat
# JavaScript source for the fallback backend's `concat` kernel. The string is
# emitted verbatim into the generated JS descriptor; `dot`, `get` and
# `increment` are local helpers for strided addressing over each input shape.
source = """
concat: function(input_arrays, output_arrays, option) {
var xs = input_arrays;
var y = output_arrays[0];
var shapes = option.x_shapes;
var strides = option.x_strides;
var offsets = option.x_offsets;
var x;
var offset;
var stride;
var shape;
var position;
for (var i = 0; i < xs.length; i++) {
x = xs[i];
offset = offsets[i];
stride = strides[i];
shape = shapes[i];
position = [];
for (var j = 0; j < shape.length; j++) {
position[j] = 0;
}
do {
y[offset + dot(stride, position)] = get(x, shape, position);
} while (increment(shape, position))
}
function dot(a, b) {
var sum = 0;
for (var i = 0; i < a.length; i++) {
sum += a[i] * b[i];
}
return sum;
}
function get(x, shape, position) {
var i = 0;
for (var j = 0; j < shape.length; j++) {
i = (i * shape[j]) + position[j];
}
return x[i];
}
function increment(shape, position) {
var d = shape.length - 1;
position[d]++;
while (position[d] === shape[d]) {
if (d == 0) return false;
position[d] -= shape[d];
d--;
position[d]++;
}
return true;
}
},
"""
# noinspection PyUnusedLocal
@FallbackDescriptorGenerator.register_handler(Concat)
def concat(op: Concat, memory_layout: MemoryLayout) -> List[Kernel]:
    """Build the fallback-backend kernel for Concat.

    Computes, for each input, its per-axis strides and base offset inside the
    output buffer, then hands those to the JS `concat` template above.
    """
    xs = [op.inputs[f"x{i}"] for i in range(len(op.inputs))]
    y = op.outputs["y"]
    target_axis = op.axis
    x_shapes = [x.shape for x in xs]
    # Row-major strides of the output tensor (built back-to-front).
    y_strides = []
    stride = 1
    for s in reversed(y.shape):
        y_strides.insert(0, stride)
        stride *= s
    # x_strides[i][j] is stride size of xs[i].order.axes[j] in y
    x_strides = [[] for _ in xs]
    for x, strides in zip(xs, x_strides):
        for axis in x.order.axes:
            strides.append(y_strides[y.order.axes_dict[axis]])
    # x_offsets[i] is memory offset of xs[i]'s data in y.
    # Inputs are laid out consecutively along the concatenation axis.
    x_offsets = []
    target_axis_offset = 0
    for x in xs:
        x_offsets.append(target_axis_offset * y_strides[y.order.axes_dict[target_axis]])
        target_axis_offset += x.shape_dict[target_axis]
    # (destination address of xs[i][d_0, ..., d_n]) = x_offsets[i] + x_strides[i][0] * d_0 + ... + x_strides[i][n] * d_n
    kernel = Kernel(
        {"concat": source},
        "concat",
        inputs=[memory_layout[x] for x in xs],
        outputs=[memory_layout[y]],
        call_option={"x_shapes": x_shapes,
                     "x_strides": x_strides,
                     "x_offsets": x_offsets}
    )
    return [kernel]
|
python
|
import logging
import sys
import signal
import time
import os
from networktables import NetworkTables
import csv
from Adafruit_BNO055 import BNO055
import RPi.GPIO as GPIO
# Publish IMU readings to robot code over NetworkTables.
NetworkTables.initialize()
nt = NetworkTables.getTable("IMU_Data")
# GPIO17 drives a status LED: held high while logging, toggled while the
# sensor is still calibrating (see the main loop below).
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(17, GPIO.OUT)
GPIO.output(17, GPIO.LOW)
# Create and configure the BNO sensor connection.
# Raspberry Pi configuration with serial UART and RST connected to GPIO 18:
bno = BNO055.BNO055(serial_port='/dev/serial0', rst=18)
# Enable verbose debug logging if -v is passed as a parameter.
if len(sys.argv) == 2 and sys.argv[1].lower() == '-v':
    logging.basicConfig(level=logging.DEBUG)
# Initialize the BNO055 and stop if something went wrong.
if not bno.begin():
    raise RuntimeError('Failed to initialize BNO055! Is the sensor connected?')
# Print system status and self test result.
status, self_test, error = bno.get_system_status()
print('System status: {0}'.format(status))
print('Self test result (0x0F is normal): 0x{0:02X}'.format(self_test))
# Print out an error if system status is in error mode.
if status == 0x01:
    print('System error: {0}'.format(error))
    print('See datasheet section 4.3.59 for the meaning.')
# Print BNO055 software revision and other diagnostic data.
sw, bl, accel, mag, gyro = bno.get_revision()
print('Software version: {0}'.format(sw))
print('Bootloader version: {0}'.format(bl))
print('Accelerometer ID: 0x{0:02X}'.format(accel))
print('Magnetometer ID: 0x{0:02X}'.format(mag))
print('Gyroscope ID: 0x{0:02X}\n'.format(gyro))
# CSV log file for calibrated readings (overwritten on each run).
iterations = 0
data_file = open("data.csv", mode='w')
data_writer = csv.writer(data_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
input("Press Enter to continue ...")
print('Reading BNO055 data, press Ctrl-C to quit...')
def power_off(sig, frame):
    """SIGINT handler: turn the status LED off and exit cleanly."""
    GPIO.output(17, GPIO.LOW)
    sys.exit(0)
signal.signal(signal.SIGINT, power_off)
# LED on: logging session is active.
GPIO.output(17, GPIO.HIGH)
start = time.time()
# Blink bookkeeping used for the calibration indicator in the main loop.
onTime = 0
offTime = 0
ledState = GPIO.LOW
prevTime = 0
while True:
    # Time since logging started; first column of every CSV row.
    t = time.time() - start
    # Read the Euler angles for heading, roll, pitch (all in degrees).
    heading, roll, pitch = bno.read_euler()
    # Read the calibration status, 0=uncalibrated and 3=fully calibrated.
    # Bug fix: the original unpacked this into a variable named `sys`,
    # rebinding the `sys` module at global scope — after the first loop
    # iteration the SIGINT handler's sys.exit() call would fail.
    sys_cal, gyro, accel, mag = bno.get_calibration_status()
    # Other values you can optionally read:
    # Orientation as a quaternion:
    q_x, q_y, q_z, q_w = bno.read_quaternion()
    # Sensor temperature in degrees Celsius:
    # temp_c = bno.read_temp()
    # Magnetometer data (in micro-Teslas):
    mag_x, mag_y, mag_z = bno.read_magnetometer()
    # Gyroscope data (in degrees per second):
    # x,y,z = bno.read_gyroscope()
    # Accelerometer data (in meters per second squared):
    accel_x, accel_y, accel_z = bno.read_accelerometer()
    # Linear acceleration data (i.e. acceleration from movement, not gravity--
    # returned in meters per second squared):
    lin_accel_x, lin_accel_y, lin_accel_z = bno.read_linear_acceleration()
    # Gravity acceleration data (i.e. acceleration just from gravity--returned
    # in meters per second squared):
    g_x, g_y, g_z = bno.read_gravity()
    if sys_cal >= 3:
        # Fully calibrated: log to CSV and publish to NetworkTables.
        data_writer.writerow([t, heading, roll, pitch, q_x, q_y, q_z, q_w, g_x, g_y, g_z, mag_x, mag_y, mag_z])
        # Write data to the Network Table
        nt.putNumber('time', t)
        nt.putNumber('mag_x', mag_x)
        nt.putNumber('mag_y', mag_y)
        nt.putNumber('mag_z', mag_z)
        nt.putNumber('g_x', g_x)
        nt.putNumber('g_y', g_y)
        nt.putNumber('g_z', g_z)
        print('Heading={0:0.2F} Roll={1:0.2F} Pitch={2:0.2F} mag_x={3:0.2F} mag_y={4:0.2F} mag_z={5:0.2F}'.format(
            heading, roll, pitch, mag_x, mag_y, mag_z))
    else:
        # Display Calibration Data Continuously until system is Calibrated
        os.system("clear")
        currentTime = time.time()
        print("Calibrating ...")
        print('sys: {0} gyro: {1} accel: {2} mag: {3}'.format(sys_cal, gyro, accel, mag))
        # Update LED state: toggle at most every 0.25 s while calibrating.
        if (currentTime - prevTime) >= 0.25:
            prevTime = time.time()
            if ledState == GPIO.LOW:
                ledState = GPIO.HIGH
            else:
                ledState = GPIO.LOW
            GPIO.output(17, ledState)
|
python
|
# 2. Draw 20 random integers between 1 and 100 into a list. Store the even
#    numbers in PAR (pares) and the odd numbers in IMPAR (impares), then
#    print all three lists.
import random

pares = []
impares = []
todos = []
# Draw exactly 20 numbers (the original tracked this with a manual counter).
for _ in range(20):
    # Bug fix: randrange(1, 100) can never produce 100, contradicting the
    # stated 1..100 range; randint is inclusive on both ends.
    num = random.randint(1, 100)
    todos.append(num)
    print(num)
    # num is always >= 1, so the original's extra (num > 0) checks were
    # redundant; parity alone decides the bucket.
    if num % 2 == 0:
        pares.append(num)
    else:
        impares.append(num)
print("A lista de todos os números: ", todos)
print("\nOs números pares são: ", pares)
print("\nOs números impares são: ", impares)
input("Pressione enter para sair")
exit()  # Exit the interpreter cleanly.
|
python
|
from material.frontend import Module
class Sample(Module):
    """django-material frontend module declaration for the sample app."""
    # Material Design Icons name ("mdi-...") used as this module's icon.
    icon = 'mdi-image-compare'
|
python
|
# -*- coding: utf-8 -*-
"""Submit a test calculation on localhost.
Usage: verdi run submit.py
"""
from __future__ import absolute_import
from __future__ import print_function
import os
from aiida_ce import tests, helpers
from aiida.plugins import DataFactory, CalculationFactory
from aiida.engine import run
# get code
computer = helpers.get_computer()
code = helpers.get_code(entry_point='ce.train', computer=computer)

# Bug fix: the original used these names without importing them (each one
# was a NameError at runtime): ASE's bulk/connect, numpy, and aiida's Str.
import numpy
from ase.build import bulk
from ase.db import connect
from aiida.orm import Str

# Prepare input parameters
ClusterSpaceData = DataFactory('ce.cluster')
StructureSet = DataFactory('ce.structures')
# Primitive Ag cell defining the cluster space (Ag/Pd sublattice).
prim = bulk('Ag')
cs_dict = {
    'cell': prim.cell.tolist(),
    'positions': prim.positions.tolist(),
    'pbc': prim.pbc.tolist(),
    'cutoffs': [13.5, 6.0],
    'chemical_symbols': [['Ag', 'Pd']]
}
cs = ClusterSpaceData(cs_dict)
# Reference structures (<= 8 atoms) and their mixing energies from the
# bundled test database.
db_file = os.path.join(tests.TEST_DIR, 'input_files', 'ref.db')
db = connect(db_file)
structurelist = [row.toatoms() for row in db.select('natoms<=8')]
energies = [row.mixing_energy for row in db.select('natoms<=8')]
structures = StructureSet(structurelist=structurelist)
structures.set_energies(numpy.array(energies))
inputs = {
    # Bug fix: the original referenced an undefined name `ce_train_code`;
    # the code object created above is the one the calculation needs.
    'code': code,
    'structures': structures,
    'cluster_space': cs,
    'fit_method': Str('lasso'),
    'metadata': {
        'options': {
            'max_wallclock_seconds': 300,
        },
    },
}
# Note: in order to submit your calculation to the aiida daemon, do:
# from aiida.engine import submit
# future = submit(CalculationFactory('ce'), **inputs)
result = run(CalculationFactory('ce'), **inputs)
|
python
|
# pylint: disable=C0103,R0201
from typing import Tuple, Union, Callable, Optional, Any
from django.http import HttpResponse
from django.urls import re_path, path, URLPattern, URLResolver
# in case of include we get a tuple
ViewType = Optional[Union[Callable[..., HttpResponse], Tuple[Any, Any, Any]]]


class url:
    """
    An elegant and DRY way to define urlpatterns with so many nested levels and syntax sugars.
    It is just a wrapper behind the standard re_path/path functions.
    Usage:
    ### urls.py ###
    urlpatterns = list(
        url('editor')[
            url.int('doc_pk')[
                url('edit', DocEditorView.as_view(), 'edit-doc'),
                url('export', DocExporterView.as_view(), 'export-doc'),
            ]
        ]
        + url('docs', Docs.as_view(), 'student-documents')
        + url('publish', DeleteOrPublistDocument.as_view(), 'publish_document', action='publish')
        + url('delete', DeleteOrPublistDocument.as_view(), 'delete_document')
    )
    see tests/test_router.py for more use cases
    """

    def __init__(self, prefix: str, view: ViewType = None, name=None, is_regex=False, **kwargs):
        # Normalize the prefix to end with exactly one trailing slash
        # (empty prefixes are left as-is).
        slash = "" if (not prefix or prefix.endswith("/")) else "/"
        self.prefix = f"{prefix}{slash}"
        self.view = view
        self.name = name
        # Extra kwargs are forwarded to path()/re_path() as view kwargs.
        self.kwargs = kwargs
        self.is_regex = is_regex  # regex path
        # Flattened tuple of descendant url objects; their prefixes are made
        # absolute as they are attached (see __getitem__/_prefix).
        self.others: Tuple["url", ...] = ()

    def __repr__(self):
        return f"url`{self.prefix}`" + (f".({len(self.others)})" if self.others else "")

    def _prefix(self, other: "url"):
        """Mutate `other` so its prefix is prepended with self's prefix,
        keeping exactly one slash at each join point. Returns `other`."""
        slash = "" if not self.prefix or self.prefix.endswith("/") or other.prefix.startswith("/") else "/"
        end_slash = "" if not other.prefix or other.prefix.endswith("/") else "/"
        other.prefix = f"{self.prefix}{slash}{other.prefix}{end_slash}"
        return other

    def __getitem__(self, uobjs: Union['url', Tuple['url', ...]]) -> 'url':
        """Nest child urls under self: url('a')[url('b'), ...].

        Each child (and the child's already-flattened descendants) is
        re-prefixed with self's prefix and collected into self.others.
        """
        if not isinstance(uobjs, tuple):
            uobjs = (uobjs,)
        for obj in uobjs:
            self.others += (self._prefix(obj),) + tuple(self._prefix(innu) for innu in obj.others)
        return self

    def __add__(self, other: "url") -> 'url':
        # Sibling concatenation: no prefixing, just merge the flat lists.
        self.others += (other,) + other.others
        return self

    def _path(self) -> Union[URLPattern, URLResolver]:
        """Materialize this single url into a django URLPattern."""
        if self.is_regex:
            func = re_path
        else:
            func = path
        return func(self.prefix, self.view, kwargs=self.kwargs, name=self.name)

    def __iter__(self):
        # Yield a django pattern for self and every descendant that actually
        # has a view (pure prefix nodes are skipped).
        uobjs = (self,) + self.others
        for obj in uobjs:
            if obj.view:
                yield obj._path()

    @classmethod
    def re(cls, var_name: str, regex: str, view: ViewType = None, name=None, **kwargs) -> "url":
        """implements urls.re_path"""
        return cls(rf'(?P<{var_name}>{regex})', view, name=name, is_regex=True, **kwargs)

    @classmethod
    def var(cls, var_name, view=None, name=None, dtype=None, **kwargs) -> "url":
        """Implements having url-arguments. dtype is the casting argument.
        the default cast-type is str as Django."""
        route = f"{dtype}:{var_name}" if dtype else str(var_name)
        return cls(f"<{route}>", view, name, **kwargs)

    @classmethod
    def int(cls, var_name, view=None, name=None, **kwargs) -> "url":
        """Implements
        ..
            path("<int:var_name>", view, )
        """
        return cls.var(var_name, view, name, dtype="int", **kwargs)

    @classmethod
    def slug(cls, view=None, name=None, var_name="slug", **kwargs) -> "url":
        """Implements
        ..
            path("<slug:slug>", view, )
        """
        return cls.var(var_name=var_name, view=view, name=name, dtype='slug', **kwargs)

    @classmethod
    def uuid(cls, view=None, name=None, var_name="uuid", **kwargs) -> "url":
        """Implements
        ..
            path("<uuid:uuid>", view, )
        """
        return cls.var(var_name, view, name, dtype="uuid", **kwargs)

    @classmethod
    def path(cls, var_name, view=None, name=None, **kwargs) -> "url":
        """Implements path("<path:var_name>", view, ) — greedy path capture."""
        return cls.var(var_name, view, name, dtype="path", **kwargs)

    @classmethod
    def pk(cls, view=None, name=None, dtype="int", **kwargs) -> "url":
        """Shorthand for the ubiquitous "<int:pk>" segment."""
        return cls.var('pk', view=view, name=name, dtype=dtype, **kwargs)


# Public API of this module.
__all__ = ['url', ]
|
python
|
import paho.mqtt.client as mqtt
import sqlite3
import datetime
# MQTT broker location (local mosquitto instance).
broker_address = "127.0.0.1"
# Latest readings per bed key, waiting to be flushed to the database.
dataToPush = {}
# Bed keys whose readings changed since the last flush.
changed = []
# "<WardNo>_<BedNo>" keys of all known beds.
bedToSub = []
bedToId = {}  # lookup dict: bed key -> patient id (memoized)
def getBeds(db):
    """
    Populate the global bedToSub list with "<WardNo>_<BedNo>" keys for
    every patient row in the db (duplicates removed, order preserved).
    """
    global bedToSub
    cur = db.cursor()
    cur.execute("SELECT WardNo, BedNo FROM patient;")
    for ward, bed in cur.fetchall():
        bedToSub.append(str(ward) + "_" + str(bed))
    # dict.fromkeys preserves insertion order while deduplicating.
    bedToSub = list(dict.fromkeys(bedToSub))
    print(bedToSub)
def getPatientID(db, key):
    """
    Return (and memoize in the global bedToId dict) the latest patient id
    for a "<WardNo>_<BedNo>" key.
    """
    global bedToId
    if bedToId.get(key, None) is None:
        cursorDb = db.cursor()
        # Parameterized query: the original interpolated the MQTT-supplied
        # key fragments straight into the SQL string (injection-prone).
        cursorDb.execute(
            "SELECT id FROM patient WHERE BedNo = ? AND WardNo = ?;",
            (key.split("_")[1], key.split("_")[0]))
        # First row, last (only) column.
        bedToId[key] = cursorDb.fetchall()[0][-1]
    return bedToId[key]
def subToChannel(id):
    """Subscribe the global MQTT client to every data topic for one bed id."""
    print("Sub to", id)
    for topic in ("Details", "Temp", "HeartRate", "BP", "SpO2"):
        client.subscribe(id + "/" + topic)
def subToChannels(channels):
    """
    Subscribe to the MQTT topics of every bed key via subToChannel.
    """
    for bed_key in channels:
        subToChannel(bed_key)
def createTable(db):
    """
    Initialize the DB: create the patient and medicData tables if missing.
    """
    cur = db.cursor()
    for ddl in (
            "CREATE TABLE IF NOT EXISTS patient(id integer PRIMARY KEY,Name text NOT NULL,Age integer,Sex text,BedNo integer,WardNo integer,Time text);",
            "CREATE TABLE IF NOT EXISTS medicData(id integer ,Time text,tempCurrent real,tempAvg real,bpmCurrent real,bpmAvg real,bpCurrent real,bpAvg real,spO2Current real,spO2Avg real);"):
        cur.execute(ddl)
    db.commit()
    cur.close()
def newPatient(db, details):
    """
    Insert (or replace) a patient row and subscribe to the bed's MQTT
    topics.

    details = [id, Name, Age, Sex, BedNo, WardNo] as received over MQTT.
    """
    print("Adding patient", details, datetime.datetime.now())
    cursorDb = db.cursor()
    # Parameterized insert: the original formatted MQTT-supplied values
    # directly into the SQL string (injection-prone and quote-fragile).
    cursorDb.execute(
        "INSERT OR REPLACE INTO patient(id, Name, Age, Sex, BedNo,WardNo,Time) VALUES(?,?,?,?,?,?,?);",
        (details[0], details[1], details[2], details[3], details[4],
         details[5], str(datetime.datetime.now())))
    db.commit()
    cursorDb.close()
    # Register the new bed and start listening to its topics.
    global bedToSub
    out = str(details[5]) + "_" + str(details[4])
    bedToSub.append(out)
    bedToSub = list(dict.fromkeys(bedToSub))
    subToChannel(out)
    print(bedToSub)
def pushData(db):
    """
    Push every changed bed's latest readings into the medicData table.

    Raises KeyError (caught by the caller's loop) when a bed's readings
    are still incomplete.
    """
    global changed
    global dataToPush
    cursorDb = db.cursor()
    # Iterate over a snapshot: the original removed entries from `changed`
    # while iterating it, which silently skipped every other bed.
    for changedId in list(changed):
        reading = dataToPush[changedId]
        row = (getPatientID(db, changedId), str(datetime.datetime.now()),
               reading["temp"][0], reading["temp"][1],
               reading["heartRate"][0], reading["heartRate"][1],
               reading["BP"][0], reading["BP"][1],
               reading["SpO2"][0], reading["SpO2"][1])
        print("SQLITE COMMAND", row)
        # Parameterized insert instead of the original string formatting.
        cursorDb.execute(
            """INSERT INTO medicData(id, Time, tempCurrent, tempAvg,
            bpmCurrent, bpmAvg, bpCurrent, bpAvg, spO2Current, spO2Avg)
            VALUES(?,?,?,?,?,?,?,?,?,?);""", row)
        dataToPush[changedId] = None
        changed.remove(changedId)
    db.commit()
    cursorDb.close()
def pullRecord(db, searchParam):
    """Return the most recent medicData row for a "<WardNo>_<BedNo>" key."""
    cursorDb = db.cursor()
    # Parameterized query (the original used str.format into the SQL text).
    cursorDb.execute(
        "SELECT * FROM (SELECT * FROM medicData ORDER BY Time DESC) WHERE id = ? LIMIT 1;",
        (getPatientID(db, searchParam),))
    return cursorDb.fetchall()[0]
def addToDict(payload, changed, data, db):
    """
    Merge one MQTT message's readings into `data`, keyed by "<Ward>_<Bed>".

    "Details" messages register a new patient in the db; the other topics
    store a [current, average] float pair. Ids seen for the first time
    (or reset after a push) are appended to `changed`.
    """
    deviceID = str(payload.topic).split("/")[0]
    parameter = str(payload.topic).split("/")[1]
    message = str(payload.payload.decode("utf-8")).split(",")
    if data.get(deviceID) is None:
        data[deviceID] = {}
        changed.append(deviceID)
    if parameter == "Details":
        # Payload order: id, Name, Age, Sex; bed/ward come from the topic.
        newPatient(db, [message[0], message[1], message[2], message[3],
                        deviceID.split("_")[1], deviceID.split("_")[0]])
    elif parameter == "Temp":
        # Each reading is a [current, average] pair.
        data[deviceID]["temp"] = [float(message[0]), float(message[1])]
    elif parameter == "HeartRate":
        data[deviceID]["heartRate"] = [float(message[0]), float(message[1])]
    elif parameter == "BP":
        data[deviceID]["BP"] = [float(message[0]), float(message[1])]
    elif parameter == "SpO2":
        data[deviceID]["SpO2"] = [float(message[0]), float(message[1])]
def on_message(client, userdata, message):
    """paho-mqtt callback: fold an incoming message into the global state."""
    global changed
    addToDict(message, changed, dataToPush, dataBase)
    print("Changed: ", changed)
    print("DataFrame: ", dataToPush)
# Open (or create) the local patient database and make sure tables exist.
dataBase = sqlite3.connect('patientInfo.db')
createTable(dataBase)
# Connect to the broker and subscribe to every known bed's topics.
client = mqtt.Client("baseStation")
client.connect(broker_address, 1883, 60)
getBeds(dataBase)
subToChannels(bedToSub)
client.on_message = on_message
while(1):
    client.loop()
    # print(pullRecord(dataBase, "1_2"))
    try:
        pushData(dataBase)
    except KeyError as e:
        # A bed's readings are still incomplete; try again next iteration.
        pass
        # print(e)
        # print("DataIncomplete")
    # time.sleep(5)
|
python
|
import copy
import time
from .core import ProgramLearningAlgorithm
from program_graph import ProgramGraph
from utils.logging import log_and_print, print_program, print_program_dict
from utils.training import execute_and_train
class ENUMERATION(ProgramLearningAlgorithm):
    """Program synthesis by exhaustive enumeration: generate all fully
    symbolic programs up to an increasing depth, then train and evaluate
    each candidate, tracking the best total cost found."""

    def __init__(self, max_num_programs=100):
        # Upper bound on how many enumerated programs are trained/evaluated.
        self.max_num_programs = max_num_programs

    def run(self, graph, trainset, validset, train_config, device, verbose=False):
        """Enumerate candidates from `graph`, train each with
        execute_and_train, and return the list of successive best program
        dicts (in the order they were discovered)."""
        assert isinstance(graph, ProgramGraph)
        symbolic_programs = []
        enum_depth = 1
        # Deepen the enumeration until enough programs exist or the graph's
        # max depth is hit; each pass re-enumerates from scratch.
        while len(symbolic_programs) < self.max_num_programs:
            print("DEBUG: starting enumerative synthesis with depth {}".format(enum_depth))
            symbolic_programs = self.enumerate2depth(graph, enum_depth)
            print("DEBUG: {} programs found".format(len(symbolic_programs)))
            enum_depth += 1
            if enum_depth > graph.max_depth:
                break
        log_and_print("Symbolic Synthesis: generated {}/{} symbolic programs from candidate program.".format(
            len(symbolic_programs), self.max_num_programs))
        # Keep only the cheapest (by structural cost) candidates.
        total_eval = min(self.max_num_programs, len(symbolic_programs))
        symbolic_programs.sort(key=lambda x: x["struct_cost"])
        symbolic_programs = symbolic_programs[:total_eval]
        best_program = None
        best_total_cost = float('inf')
        best_programs_list = []
        start_time = time.time()
        num_programs_trained = 1
        for prog_dict in symbolic_programs:
            child_start_time = time.time()
            candidate = prog_dict["program"]
            log_and_print("Training candidate program ({}/{}) {}".format(
                num_programs_trained, total_eval, print_program(candidate, ignore_constants=(not verbose))))
            num_programs_trained += 1
            score = execute_and_train(candidate, validset, trainset, train_config,
                                      graph.output_type, graph.output_size, neural=False, device=device)
            # Total cost = validation score + structural penalty.
            total_cost = score + prog_dict["struct_cost"]
            log_and_print("Structural cost is {} with structural penalty {}".format(prog_dict["struct_cost"], graph.penalty))
            log_and_print("Time to train child {:.3f}".format(time.time()-child_start_time))
            log_and_print("Total time elapsed is: {:.3f}".format(time.time()-start_time))
            if total_cost < best_total_cost:
                best_program = copy.deepcopy(prog_dict["program"])
                best_total_cost = total_cost
                prog_dict["score"] = score
                prog_dict["path_cost"] = total_cost
                prog_dict["time"] = time.time()-start_time
                best_programs_list.append(prog_dict)
                log_and_print("New BEST program found:")
                print_program_dict(best_programs_list[-1])
        return best_programs_list

    def enumerate2depth(self, graph, enumeration_depth):
        """Return all fully symbolic programs of depth <= enumeration_depth
        as {"program", "struct_cost", "depth"} dicts.

        Temporarily overrides graph.max_depth for the duration of the
        enumeration and restores it before returning.
        """
        max_depth_copy = graph.max_depth
        graph.max_depth = enumeration_depth
        all_programs = []
        # Programs already visited, keyed by their printed (constant-free)
        # representation to avoid re-expanding duplicates.
        enumerated = {}
        root = copy.deepcopy(graph.root_node)

        def enumerate_helper(program_node):
            program_name = print_program(program_node.program, ignore_constants=True)
            assert not enumerated.get(program_name)
            enumerated[program_name] = True
            if graph.is_fully_symbolic(program_node.program):
                all_programs.append({
                    "program" : copy.deepcopy(program_node.program),
                    "struct_cost" : program_node.cost,
                    "depth" : program_node.depth
                })
            elif program_node.depth < enumeration_depth:
                all_children = graph.get_all_children(program_node, in_enumeration=True)
                for childnode in all_children:
                    if not enumerated.get(print_program(childnode.program, ignore_constants=True)):
                        enumerate_helper(childnode)

        enumerate_helper(root)
        # Restore the caller's depth limit.
        graph.max_depth = max_depth_copy
        return all_programs
|
python
|
from __future__ import division
import sys, glob
sys.path.append('/home/richard/scicore/GROUP/data/2017_Karolinska_EV-D68/SVVC/src')
sys.path.append('/scicore/home/neher/GROUP/data/2017_Karolinska_EV-D68/SVVC/src')
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict
#import seaborn as sns
from create_allele_counts import load_allele_counts
from coverage_consensus_diversity import coverage, consensus
from minor_variant import trim_ac
def peptide_to_nuc(pos):
    """Map a 1-based peptide position to its nucleotide coordinate:
    the ORF starts at nucleotide 699, three nucleotides per residue."""
    return (pos - 1) * 3 + 699


def load_ctl_prediction(ctl_fname):
    """Read a CTL epitope prediction <.csv> and return a list of
    [allele, nuc_start, nuc_end, peptide, percentile_rank] regions."""
    import pandas as pd
    pred = pd.read_csv(ctl_fname)
    return [[row.allele,
             peptide_to_nuc(row.start),
             peptide_to_nuc(row.end + 1),
             row.peptide,
             row.percentile_rank]
            for _, row in pred.iterrows()]
def div_func(a):
    """Summarize an array of per-site diversities as their mean.

    The commented alternative would instead report the fraction of sites
    with diversity above 1%.
    """
    return a.mean()
    # return (a>0.01).mean()
# Interactive plotting mode (figures do not block).
plt.ion()
if __name__ == '__main__':
    freqs = {}
    diversity = {}
    cov = {}
    samples = glob.glob('mapped_data/1*')
    ctl_predictions = glob.glob('CTL_predictions/1*csv')
    # Group prediction csvs by sample name (first underscore-separated
    # token of the file name).
    predictions_by_sample = defaultdict(list)
    for ctl_fname in ctl_predictions:
        sname = ctl_fname.rstrip('/').split('/')[-1].split('_')[0]
        predictions_by_sample[sname].append(ctl_fname)
    ref='KX675261.1'
    # One (epitope, non-epitope) diversity list per reading-frame offset.
    comparison = [[], [],[]]
    for sname in predictions_by_sample:
        ctls = []
        for ctl_fname in predictions_by_sample[sname]:
            ctls.extend(load_ctl_prediction(ctl_fname))
        # Sort predicted epitopes by percentile rank (best first).
        ctls.sort(key=lambda x:x[-1])
        ac,ins = load_allele_counts('mapped_data/'+sname)
        cov[sname] = coverage(ac[0][1] )
        freqs[sname] = trim_ac(ac, n_states=5)
        # Per-site nucleotide diversity 1 - sum(p_i^2).
        # NOTE(review): the comprehension variable `ref` shadows the outer
        # `ref` — it works because the last key iterated re-binds it, but
        # this looks fragile; confirm intent.
        diversity[sname] = {ref:1 - np.sum(x**2, axis=0) for ref, x in freqs[sname].items()}
        CTL_mask = np.zeros_like(diversity[sname][ref], dtype=bool)
        # Only positions with >1000x coverage are trusted.
        good_ind = cov[sname]>1000
        all_pos = np.arange(good_ind.shape[0])
        for cutoff in [50]:
            # Mask the top-`cutoff` predicted epitope regions.
            for region in ctls[:cutoff]:
                CTL_mask[region[1]:region[2]]=True
            # Compare epitope vs non-epitope diversity per codon position
            # within the ORF (nucleotides 699..7266).
            for i in range(3):
                rf = (all_pos>699)&(all_pos<7266)&((all_pos-699)%3==i)
                epi_div = div_func(diversity[sname][ref][CTL_mask&good_ind&rf])
                nonepi_div = div_func(diversity[sname][ref][(~CTL_mask)&good_ind&rf])
                print(epi_div, nonepi_div)
                if np.isnan(epi_div):
                    continue
                comparison[i].append((epi_div, nonepi_div))
    comparison = np.array(comparison)
    # Positive values mean non-epitope sites are more diverse.
    dcomp = comparison[:,:,1]-comparison[:,:,0]
    print(np.median(dcomp, axis=1))
    print(comparison.mean(axis=1))
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import sys
import os
from setuptools import setup, find_packages
from setuptools.command.install import install
class Install_osm_im(install):
    """Generation of .py files from yang models"""
    # Source yang models and destination package for generated modules.
    model_dir = "models/yang"
    im_dir = "osm_im"

    def pipinstall(self, package):
        """pip install for executable dependencies"""
        # pyang/pyangbind must be importable in this interpreter at build
        # time, so install them before running the generator.
        subprocess.call([sys.executable, "-m", "pip", "install", package])

    def run(self):
        """Generate vnfd/nsd/nst python bindings with pyang, copy them into
        the install tree, then run the standard install step."""
        self.pipinstall('pyang')
        self.pipinstall('pyangbind')
        import pyangbind
        print("Using dir {}/{} for python artifacts".format(os.getcwd(), self.im_dir))
        path = "{}/{}".format(os.getcwd(), self.im_dir)
        for files_item in ['vnfd', 'nsd', 'nst']:
            # Invoke pyang with pyangbind's "pybind" output plugin to emit a
            # python module per yang model; -Werror makes warnings fatal.
            protoc_command = ["pyang",
                              "-Werror",
                              "--plugindir",
                              "{}/plugin".format(os.path.dirname(pyangbind.__file__)),
                              "--path",
                              self.model_dir,
                              "-f", "pybind",
                              "-o",
                              "{}/{}.py".format(self.im_dir, files_item),
                              "{}/{}.yang".format(self.model_dir, files_item)]
            print("Generating {}.py from {}.yang".format(files_item, files_item))
            if subprocess.call(protoc_command) != 0:
                sys.exit(-1)
        # To ensure generated files are copied to the python installation folder
        self.copy_tree(self.im_dir, "{}{}".format(self.install_lib, self.im_dir))
        install.run(self)
# Package metadata; the custom cmdclass generates the IM modules at install.
setup(
    name='osm_im',
    description='OSM Information Model',
    long_description=open('README.rst').read(),
    #version_command=('git describe --tags --long --dirty --match v*', 'pep440-git-full'),
    # author='Mike Marchetti',
    # author_email='[email protected]',
    packages=find_packages(),
    include_package_data=True,
    setup_requires=['setuptools-version-command'],
    install_requires=['pyang', 'pyangbind'],
    test_suite='nose.collector',
    url='https://github.com/nikosPsar/IM.git',
    license='Apache 2.0',
    cmdclass={'install': Install_osm_im},
)
|
python
|
# kakao api 사용=>계정등록=>앱생성=>인증키 발급(restful key)
from urllib.request import urlopen
import xmltodict
import pandas as pd
import datetime
from sqlalchemy import create_engine
def upload_data():
    """Fetch the top-20 Google Play game rankings from the gevolution API
    and append them to the MySQL `game_info` table.

    NOTE(review): the API key and the DB credentials are hard-coded below;
    move them into configuration/environment before real use.
    """
    GEV_API = 'STUV378914'
    url= 'http://api.gevolution.co.kr/rank/xml/?aCode={GEV_API}&market=g&appType=game&rankType=1&rank=20'.format(GEV_API=GEV_API)
    # Parse the XML response into nested dicts.
    doc = xmltodict.parse(urlopen(url).read())
    # Template csv provides the column layout; transposed so that each
    # ranked game can be inserted as a column, then transposed back.
    gevo_df = pd.read_csv('./data/game.csv', encoding='utf-8')
    gevo_df_beta = gevo_df.T
    gevo_df_beta
    for i in range(20):
        info_i = []
        # Collection date (YYYY-MM-DD).
        now = '%s'% str(datetime.datetime.now())[:10]
        Base = doc['response']['items']['item'][i]
        info_i.append(Base['gameName'])
        info_i.append(Base['ranking'])
        info_i.append(Base['lastWeek'])
        info_i.append(Base['publisher'])
        info_i.append(Base['gevolUrl'])
        info_i.append(Base['rating'])
        info_i.append(Base['movieUrl'])
        info_i.append(Base['cafeUrl'])
        info_i.append(now)
        gevo_df_beta.insert(loc=i, column=i, value=info_i)
    gevo_df_beta2 = gevo_df_beta.T
    gevo_df_final = gevo_df_beta2.set_index('gameName')
    gevo_df_final
    # Connect to the local MySQL database.
    engine = create_engine('mysql+pymysql://root:sb0515@localhost:3306/pythondb', encoding = 'utf8')
    conn = engine.connect()
    # Append the collected rows to the game_info table.
    gevo_df_final.to_sql( name='game_info',
                          con=conn,
                          if_exists='append')
    # Close the connection.
    conn.close()
    print('수집완료')
    return None
# Run the collector when executed directly; importing does nothing.
if __name__ == '__main__':
    upload_data()
else:
    pass
# Reference: the Windows batch file used to schedule this script.
"""
upload_data.bat
@echo off
"C:\ProgramData\Anaconda3\python.exe" "C:\Users\ADMIN\Desktop\LiveShare\GEV_API\upload_data.py"
"""
|
python
|
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter, AutoMinorLocator
import matplotlib as mpl
# Make some style choices for plotting
# Color cycle used across figures ('b' is matplotlib's shorthand for blue).
color_wheel = ['#329932',
               '#ff6961',
               'b',
               '#6a3d9a',
               '#fb9a99',
               '#e31a1c',
               '#fdbf6f',
               '#ff7f00',
               '#cab2d6',
               '#6a3d9a',
               '#ffff99',
               '#b15928',
               '#67001f',
               '#b2182b',
               '#d6604d',
               '#f4a582',
               '#fddbc7',
               '#f7f7f7',
               '#d1e5f0',
               '#92c5de',
               '#4393c3',
               '#2166ac',
               '#053061']
# Dash patterns as on/off lengths in points; [1000, 1] is effectively solid.
dashes_styles = [[3, 1],
                 [1000, 1],
                 [2, 1, 10, 1],
                 [4, 1, 1, 1, 1, 1]]
# ===========================================================
# Directory and filename; style file open
# ===========================================================
def configure_plot(fig):
    """Apply the shared publication Matplotlib style sheet.

    Parameters
    ----------
    fig : matplotlib.figure.Figure
        Unused; kept so existing call sites remain valid.
    """
    dir_file = os.path.dirname(os.path.realpath(__file__))
    # Load style file.
    # BUG FIX: Figure objects have no ``style`` attribute — style sheets are
    # applied through the matplotlib style machinery (plt.style.use), so the
    # original ``fig.style.use(...)`` raised AttributeError.
    plt.style.use(os.path.join(dir_file, 'PaperDoubleFig.mplstyle'))
|
python
|
# A small customer record used to demonstrate dictionary access.
customer = dict(name="Fahad Hafeez", age=30, is_verified=True)
# Assigning to an existing key (here with the identical value) simply
# overwrites it — no error is raised.
customer["name"] = "Fahad Hafeez"
# dict.get returns the fallback because "birthdate" is not present.
print(customer.get("birthdate", "Oct 18 2005"))
|
python
|
#
# Turn off logging in extensions (too loud!)
import vb2py.extensions
import unittest
vb2py.extensions.disableLogging()
import vb2py.vbparser
# Silence the parser's own logger as well (level 0 = NOTSET here, used by the
# project to suppress output). Don't print all logging stuff.
vb2py.vbparser.log.setLevel(0)
from vb2py.plugins.attributenames import TranslateAttributes
class TestAttributeNames(unittest.TestCase):
    """Exercises TranslateAttributes' rewriting of VB attribute names."""

    def setUp(self):
        """Create a fresh translator for each test."""
        self.p = TranslateAttributes()

    # << Tests >>
    def testAll(self):
        """Exact attribute names are lower-cased; longer identifiers are left alone."""
        names = (("Text", "text"),
                 ("Visible", "visible"))
        # Patterns where the attribute appears as a whole component and must
        # therefore be translated.
        matching = ("a.%s=b", ".%s=b", "b=a.%s", "b=.%s",
                    "a.%s.b=c", ".%s.c=b", "b=a.%s.c", "b=.%s.c",
                    "a.%s.b+10=c", ".%s.c+10=b", "b=a.%s.c+10", "b=.%s.c+10")
        # Patterns where the attribute is only a prefix of a longer identifier
        # and must be left untouched.
        non_matching = ("a.%slkjlk=b", ".%slkjlk=b", "b=a.%slkjl", "b=.%slkjl",
                        "a.%slkj.b=c", ".%slkj.c=b", "b=a.%slkj.c", "b=.%slkj.c",
                        "a.%slkj.b+10=c", ".%slkj.c+10=b", "b=a.%slkj.c+10", "b=.%slkj.c+10")
        for attribute, replaced in names:
            for pattern in matching:
                translated = self.p.postProcessPythonText(pattern % attribute)
                self.assertEqual(translated, pattern % replaced)
        for attribute, replaced in names:
            for pattern in non_matching:
                translated = self.p.postProcessPythonText(pattern % attribute)
                self.assertNotEqual(translated, pattern % replaced)
    # -- end -- << Tests >>
# -- end -- << Tests >>
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
|
python
|
"""
=============================================================
Bisecting K-Means and Regular K-Means Performance Comparison
=============================================================
This example shows differences between Regular K-Means algorithm and Bisecting K-Means.
While K-Means clusterings are different when with increasing n_clusters,
Bisecting K-Means clustering build on top of the previous ones.
This difference can visually be observed.
"""
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.cluster import BisectingKMeans, KMeans
print(__doc__)

# Generate sample data
n_samples = 1000
random_state = 0
X, _ = make_blobs(n_samples=n_samples, centers=2, random_state=random_state)
# Number of cluster centers for KMeans and BisectingKMeans
n_clusters_list = [2, 3, 4, 5]
# Algorithms to compare
clustering_algorithms = {
    "Bisecting K-Means": BisectingKMeans,
    "K-Means": KMeans,
}
# Make subplots for each variant
fig, axs = plt.subplots(
    len(clustering_algorithms), len(n_clusters_list), figsize=(15, 5)
)
# Transpose so axs[j, i] indexes (n_clusters row, algorithm column).
axs = axs.T
for i, (algorithm_name, Algorithm) in enumerate(clustering_algorithms.items()):
    for j, n_clusters in enumerate(n_clusters_list):
        algo = Algorithm(n_clusters=n_clusters, random_state=random_state)
        algo.fit(X)
        centers = algo.cluster_centers_
        # Points colored by assigned cluster label; centers overdrawn in red.
        axs[j, i].scatter(X[:, 0], X[:, 1], s=10, c=algo.labels_)
        axs[j, i].scatter(centers[:, 0], centers[:, 1], c="r", s=20)
        axs[j, i].set_title(f"{algorithm_name} : {n_clusters} clusters")
# Hide x labels and tick labels for top plots and y ticks for right plots.
for ax in axs.flat:
    ax.label_outer()
    ax.set_xticks([])
    ax.set_yticks([])
plt.show()
|
python
|
#!/usr/bin/env python2.7
'''
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from __future__ import print_function
import numpy as np
import os
import random
from builtins import range
from functools import partial
import grpc
from tensorrtserver.api import api_pb2
from tensorrtserver.api import grpc_service_pb2
from tensorrtserver.api import grpc_service_pb2_grpc
import tensorrtserver.api.model_config_pb2 as model_config
from PIL import Image
def model_dtype_to_np(model_dtype):
    """Translate a model-config datatype enum value to its numpy dtype.

    Returns:
        The matching numpy type, or None when the datatype has no numpy
        equivalent.
    """
    # np.bool was a deprecated alias for the builtin bool and was removed in
    # NumPy 1.24; np.bool_ is the actual scalar type on every NumPy release.
    mapping = {
        model_config.TYPE_BOOL: np.bool_,
        model_config.TYPE_INT8: np.int8,
        model_config.TYPE_INT16: np.int16,
        model_config.TYPE_INT32: np.int32,
        model_config.TYPE_INT64: np.int64,
        model_config.TYPE_UINT8: np.uint8,
        model_config.TYPE_UINT16: np.uint16,
        model_config.TYPE_FP16: np.float16,
        model_config.TYPE_FP32: np.float32,
        model_config.TYPE_FP64: np.float64,
        model_config.TYPE_STRING: np.dtype(object),
    }
    return mapping.get(model_dtype)
def parse_model(status, model_name, batch_size, verbose=False):
    """
    Check the configuration of a model to make sure it meets the
    requirements for an image classification network (as expected by
    this client).

    Returns a tuple of (input name, output name, channels, height, width,
    input format, numpy dtype). Raises Exception on any mismatch.
    """
    server_status = status.server_status
    if model_name not in server_status.model_status.keys():
        raise Exception("unable to get status for '" + model_name + "'")
    # Re-bind `status` to this model's own status entry.
    status = server_status.model_status[model_name]
    config = status.config
    # Exactly one input and one output tensor are expected.
    if len(config.input) != 1:
        raise Exception("expecting 1 input, got {}".format(len(config.input)))
    if len(config.output) != 1:
        raise Exception("expecting 1 output, got {}".format(len(config.output)))
    # NOTE: `input` shadows the builtin of the same name within this function.
    input = config.input[0]
    output = config.output[0]
    if output.data_type != model_config.TYPE_FP32:
        raise Exception("expecting output datatype to be TYPE_FP32, model '" +
                        model_name + "' output type is " +
                        model_config.DataType.Name(output.data_type))
    # Output is expected to be a vector. But allow any number of
    # dimensions as long as all but 1 is size 1 (e.g. { 10 }, { 1, 10
    # }, { 10, 1, 1 } are all ok).
    non_one_cnt = 0
    for dim in output.dims:
        if dim > 1:
            non_one_cnt += 1
            if non_one_cnt > 1:
                raise Exception("expecting model output to be a vector")
    # Model specifying maximum batch size of 0 indicates that batching
    # is not supported and so the input tensors do not expect an "N"
    # dimension (and 'batch_size' should be 1 so that only a single
    # image instance is inferred at a time).
    max_batch_size = config.max_batch_size
    if max_batch_size == 0:
        if batch_size != 1:
            raise Exception("batching not supported for model '" + model_name + "'")
    else:  # max_batch_size > 0
        if batch_size > max_batch_size:
            raise Exception(
                "expecting batch size <= {} for model '{}'".format(max_batch_size, model_name))
    # Model input must have 3 dims, either CHW or HWC
    if len(input.dims) != 3:
        raise Exception(
            "expecting input to have 3 dimensions, model '{}' input has {}".format(
                model_name, len(input.dims)))
    if ((input.format != model_config.ModelInput.FORMAT_NCHW) and
            (input.format != model_config.ModelInput.FORMAT_NHWC)):
        raise Exception("unexpected input format " + model_config.ModelInput.Format.Name(input.format) +
                        ", expecting " +
                        model_config.ModelInput.Format.Name(model_config.ModelInput.FORMAT_NCHW) +
                        " or " +
                        model_config.ModelInput.Format.Name(model_config.ModelInput.FORMAT_NHWC))
    # Unpack c/h/w according to the declared layout.
    if input.format == model_config.ModelInput.FORMAT_NHWC:
        h = input.dims[0]
        w = input.dims[1]
        c = input.dims[2]
    else:
        c = input.dims[0]
        h = input.dims[1]
        w = input.dims[2]
    return (input.name, output.name, c, h, w, input.format, model_dtype_to_np(input.data_type))
def preprocess(img, format, dtype, c, h, w):
    """
    Pre-process an image to meet the size, type and format
    requirements specified by the parameters.

    Returns an array of shape (h, w, c) with values scaled to [-0.5, 0.5).
    """
    # np.set_printoptions(threshold='nan')
    # Grayscale for single-channel models, RGB otherwise.
    mode = 'L' if c == 1 else 'RGB'
    converted = img.convert(mode)
    arr = np.array(converted.resize((w, h), Image.BILINEAR))
    # Grayscale images come back 2-D; add the trailing channel axis.
    if arr.ndim == 2:
        arr = arr[:, :, np.newaxis]
    # Channels are in RGB order. Currently model configuration data
    # doesn't provide any information as to other channel orderings
    # (like BGR) so we just assume RGB.
    return (arr.astype(dtype) / 255) - 0.5
def postprocess(results, filenames, batch_size):
    """
    Post-process results to show classifications.

    Returns (label of the first classification, list of {"index", "val"}
    dicts for every classification in the batch).
    """
    if len(results) != 1:
        raise Exception("expected 1 result, got {}".format(len(results)))
    batched = results[0].batch_classes
    if len(batched) != batch_size:
        raise Exception("expected {} results, got {}".format(batch_size, len(batched)))
    if len(filenames) != batch_size:
        raise Exception("expected {} filenames, got {}".format(batch_size, len(filenames)))
    labels = []
    scores = []
    # batch size is always 1 here, need to modify if were to larger batch_size
    for index, result in enumerate(batched):
        print("Image '{}':".format(filenames[index]))
        for cls in result.cls:
            labels.append(cls.label)
            scores.append({"index": cls.label, "val": cls.value})
            print("    {} ({}) = {}".format(cls.idx, cls.label, cls.value))
    return labels[0], scores
def requestGenerator(input_name, output_name, c, h, w, format, dtype, model_name, model_version, image_filename,
                     result_filenames):
    """Yield InferRequest messages for the image(s) at *image_filename*.

    *image_filename* may be a single file or a directory of files. The
    filenames used for each yielded batch are appended (as a list) to
    *result_filenames* so the caller can match responses back to inputs.
    """
    # Prepare request for Infer gRPC
    # The meta data part can be reused across requests
    request = grpc_service_pb2.InferRequest()
    request.model_name = model_name
    if model_version is None:
        # -1 asks the server for the latest available version.
        request.model_version = -1
    else:
        request.model_version = model_version
    # optional pass in a batch size for generate requester over a set of image files, need to refactor
    batch_size = 1
    request.meta_data.batch_size = batch_size
    output_message = api_pb2.InferRequestHeader.Output()
    output_message.name = output_name
    # Number of class results to report. Default is 10 to match with demo.
    output_message.cls.count = 10
    request.meta_data.output.extend([output_message])
    # Collect the input file list (all files in a directory, or the one file).
    filenames = []
    if os.path.isdir(image_filename):
        filenames = [os.path.join(image_filename, f)
                     for f in os.listdir(image_filename)
                     if os.path.isfile(os.path.join(image_filename, f))]
    else:
        filenames = [image_filename, ]
    filenames.sort()
    # Preprocess the images into input data according to model
    # requirements
    image_data = []
    for filename in filenames:
        img = Image.open(filename)
        image_data.append(preprocess(img, format, dtype, c, h, w))
    request.meta_data.input.add(name=input_name)
    # Send requests of batch_size images. If the number of
    # images isn't an exact multiple of batch_size then just
    # start over with the first images until the batch is filled.
    image_idx = 0
    last_request = False
    while not last_request:
        input_bytes = None
        input_filenames = []
        # Reset the raw input payload for this batch.
        del request.raw_input[:]
        for idx in range(batch_size):
            input_filenames.append(filenames[image_idx])
            if input_bytes is None:
                input_bytes = image_data[image_idx].tobytes()
            else:
                input_bytes += image_data[image_idx].tobytes()
            # Wrap around; completing a full cycle ends the generator.
            image_idx = (image_idx + 1) % len(image_data)
            if image_idx == 0:
                last_request = True
        request.raw_input.extend([input_bytes])
        result_filenames.append(input_filenames)
        yield request
def get_prediction(image_filename, server_host='localhost', server_port=8001,
                   model_name="bolt", model_version=None):
    """
    Retrieve a prediction from a TensorFlow model server
    :param image_filename: path to a bolt image (or a directory of images)
    :param server_host: the address of the TensorRT inference server
    :param server_port: the port used by the server
    :param model_name: the name of the model
    :param model_version: specific model version, or None for the latest
    :return 0: the label predicted for the bolt image
    :return 1: the confidence scores for all classes
    """
    channel = grpc.insecure_channel(server_host + ':' + str(server_port))
    grpc_stub = grpc_service_pb2_grpc.GRPCServiceStub(channel)
    # Prepare request for Status gRPC
    request = grpc_service_pb2.StatusRequest(model_name=model_name)
    # Call and receive response from Status gRPC
    response = grpc_stub.Status(request)
    # Make sure the model matches our requirements, and get some
    # properties of the model that we need for preprocessing
    batch_size = 1
    verbose = False
    input_name, output_name, c, h, w, format, dtype = parse_model(
        response, model_name, batch_size, verbose)
    filledRequestGenerator = partial(requestGenerator, input_name, output_name, c, h, w, format, dtype, model_name,
                                     model_version, image_filename)
    # Send requests of batch_size images. If the number of
    # images isn't an exact multiple of batch_size then just
    # start over with the first images until the batch is filled.
    result_filenames = []
    requests = []
    responses = []
    # Send request (synchronous Infer per generated request).
    for request in filledRequestGenerator(result_filenames):
        responses.append(grpc_stub.Infer(request))
    # For async, retrieve results according to the send order.
    # NOTE(review): `requests` is never populated above, so this loop is a
    # no-op — presumably vestigial async support; confirm before removing.
    for request in requests:
        responses.append(request.result())
    idx = 0
    for response in responses:
        print("Request {}, batch size {}".format(idx, batch_size))
        # `label`/`score` keep the values of the last response when several
        # responses are received.
        label, score = postprocess(response.meta_data.output, result_filenames[idx], batch_size)
        idx += 1
    return label, score
def random_bolt(img_path='/workspace/web_server/static/small_bolt'):
    """
    Pull a random image out of the small bolt dataset.

    :param img_path: dataset root; one sub-directory per class label
    :return 0: full path of the selected file
    :return 1: label (sub-directory name) of the selected file
    :return 2: web-relative 'static/small_bolt/...' path of the selected file
    """
    random_dir = random.choice(os.listdir(img_path))
    random_file = random.choice(os.listdir(img_path + '/' + random_dir))
    # Forward slashes are intentional (the last value is served as a URL
    # path), so os.path.join with OS-specific separators is not used.
    file_path = img_path + '/' + random_dir + '/' + random_file
    static_path = 'static/small_bolt' + '/' + random_dir + '/' + random_file
    return file_path, random_dir, static_path
|
python
|
# python src/chapter11/chapter11note.py
# python3 src/chapter11/chapter11note.py
'''
Class Chapter11_1
Class Chapter11_2
Class Chapter11_3
Class Chapter11_4
Class Chapter11_5
'''
from __future__ import division, absolute_import, print_function
import sys as _sys
import math as _math
import random as _random
import time as _time
from random import randint as _randint
from copy import copy as _copy, deepcopy as _deepcopy
from numpy import arange as _arange
import numpy as np
class Chapter11_1:
    '''
    Chapter 11.1 (hash tables: direct addressing) notes and helpers.
    '''
    def ELFhash(self, key : str, mod):
        '''
        Compute the classic ELF hash of *key*, reduced by *mod*.

        NOTE(review): the final step uses integer division (``// mod``);
        a hash-table index would normally use ``% mod`` — confirm intent.
        '''
        h = 0
        for c in key:
            # Shift in 4 bits of each character, folding overflow bits back.
            h = (h << 4) + ord(c)
            g = h & 0xF0000000
            if g != 0:
                h ^= g >> 24;
                h &= ~g;
        return h // mod
    def note(self):
        '''
        Print the chapter 11.1 study notes.

        Example
        ====
        ```python
        Chapter11_1().note()
        ```
        '''
        print('chapter11.1 note as follow')
        print('第11章 散列表')
        print('散列表(hash table 哈希表),是根据关键码值(key value)而直接进行访问的数据结构')
        print(' 通过把关键码值映射到表中一个位置来访问记录,以加快查找的速度')
        print(' 这个函数叫做散列函数,存放记录的数组叫散列表')
        print('对不同的关键字可能得到同一散列地址,即k1≠k2,而f(k1)=f(k2),这种现象称为碰撞',
              '(英语:Collision)。具有相同函数值的关键字对该散列函数来说称做同义词。',
              '综上所述,根据散列函数f(k)和处理碰撞的方法将一组关键字映射到一个有限的连续的地址集',
              '(区间)上,并以关键字在地址集中的“像”作为记录在表中的存储位置,这种表便称为散列表,',
              '这一映射过程称为散列造表或散列,所得的存储位置称散列地址。')
        print('字符串dugu的一个哈希值为:', self.ELFhash('dugu', 31))
        print('在很多应用中,都要用到一种动态集合结构,它仅支持INSERT,SEARCH的DELETE字典操作')
        print('实现字典的一种有效数据结构为散列表(HashTable)')
        print('在最坏情况下,在散列表中,查找一个元素的时间在与链表中查找一个元素的时间相同')
        print('在最坏情况下都是Θ(n)')
        print('但在实践中,散列技术的效率是很高的。在一些合理的假设下,在散列表中查找一个元素的期望时间为O(1)')
        print('散列表是普通数组概念的推广,因为可以对数组进行直接寻址,故可以在O(1)时间内访问数组的任意元素')
        print('如果存储空间允许,可以提供一个数组,为每个可能的关键字保留一个位置,就可以应用直接寻址技术')
        print('当实际存储的关键字数比可能的关键字总数较小时,这是采用散列表就会较直接数组寻址更为有效')
        print('在散列表中,不是直接把关键字用作数组下标,而是根据关键字计算出下标。')
        print('11.2着重介绍解决碰撞的链接技术。')
        print('所谓碰撞,就是指多个关键字映射到同一个数组下标位置')
        print('11.3介绍如何利用散列函数,根据关键字计算出数组的下标。')
        print('11.4介绍开放寻址法,它是处理碰撞的另一种方法。散列是一种极其有效和实用的技术,基本的字典操作只需要O(1)的平均时间')
        print('11.5解释当待排序的关键字集合是静态的,\"完全散列\"如何能够在O(1)最坏情况时间内支持关键字查找')
        print('11.1 直接寻址表')
        print('当关键字的全域U比较小时,直接寻址是一种简单而有效的技术。假设某应用要用到一个动态集合')
        print('其中每个元素都有一个取自全域U的关键字,此处m是一个不很大的数。另外假设没有两个元素具有相同的关键字')
        print('为表示动态集合,用一个数组(或直接寻址表)T[0...m-1],其中每个位置(或称槽)对应全域U中的一个关键字')
        print('对于某些应用,动态集合中的元素可以放在直接寻址表中。亦即不把每个元素的关键字及其卫星数据都放在直接寻址表外部的一个对象中')
        print('但是,如果不存储关键字,就必须有某种办法来确定某个槽是否为空')
        print('练习11.1-1: 考虑一个由长度为m的直接寻址表T表示的动态集合S。给出一个查找S的最大元素的算法过程')
        print(' 所给的过程在最坏情况下的运行时间是O(m)')
        print('练习11.1-2: 位向量(bit vector)是一种仅包含0和1的数组。长度为m的位向量所占空间要比包含m个指针的数组少得多')
        print(' 请说明如何用一个位向量来表示一个包含不同元素的动态集合。字典操作的运行时间应该是O(1)')
        print('练习11.1-3: 说明如何实现一个直接寻址表,使各元素的关键字不必都相同,且各元素可以有卫星数据。')
        print('练习11.1-4: 希望通过利用一个非常大的数组上直接寻址的方式来实现字典')
        print(' 开始时,该数组中可能包含废料,但要对整个数组进行初始化是不实际的,因为该组的规模太大')
        print(' 请给出在大数组上实现直接寻址字典的方案。每个存储的对象占用O(1)空间')
        print(' 操作SEARCH,INSERT和DELETE的时间为O(1),对数据结构初始化的时间O(1)')
        print(' 可以利用另外一个栈,其大小等于实际存储在字典中的关键字数目,以帮助确定大型数组中某个给定的项是否是有效的')
    # python src/chapter11/chapter11note.py
    # python3 src/chapter11/chapter11note.py
# python src/chapter11/chapter11note.py
# python3 src/chapter11/chapter11note.py
class Chapter11_2:
    '''
    Chapter 11.2 (hash tables: chaining) notes and helpers.
    '''
    def note(self):
        '''
        Print the chapter 11.2 study notes.

        Example
        ====
        ```python
        Chapter11_2().note()
        ```
        '''
        print('chapter11.2 note as follow')
        print('11.2 散列表')
        print('直接寻址技术存在着一个明显的问题:如果域U很大,',
              '在一台典型计算机的可用内存容量限制下,要在机器中存储大小为U的一张表T就有点不实际甚至是不可能的了')
        print('实际要存储的关键字集合K相对于U来说可能很小,因而分配给T的大部分空间都要浪费掉')
        print('当存储在字典中的关键字集合K比所有可能的关键字域U要小的多时,散列表需要的存储空间要比直接寻址少很多')
        print('特别地,在保持仅需O(1)时间即可在散列表中查找一个元素的好处情况下,存储要求可以降至Θ(|K|)')
        print('在直接寻址方式下,具有关键字k的元素被存放在槽k中。在散列方式下,该元素处于h(k)中')
        print('亦即,利用散列函数h,根据关键字k计算出槽的位置。函数h将关键字域U映射懂啊散列表T[0..m-1]的槽位上:')
        print('这时,可以说一个具有关键字k到元素是被散列在槽h(k)上,或说h(k)是关键字k的散列值')
        print('两个关键字可能映射到同一个槽上。将这种情形称为发生了碰撞')
        print('当然,最理想的解决方法是完全避免碰撞')
        print('可以考虑选用合适的散列函数h。在选择时有一个主导思想,就是使h尽可能地\"随机\",从而避免或至少最小化碰撞')
        print('当然,一个散列函数h必须是确定的,即某一给定的输入k应始终产生相同的结果h(k),')
        print('通过链接法解决碰撞')
        print(' 在链接法中,把散列到同一槽中的所有元素都放在同一个链表中,槽j中有一个指针,它指向由所有散列到j的元素构成的链表的头')
        print(' 插入操作的最坏情况运行时间为O(1).插入过程要快一些,因为假设要插入的元素x没有出现在表中;如果需要,在插入前执行搜索,可以检查这个假设(付出额外代价)')
        print('CHAINED-HASH-INSERT(T, x)')
        print(' insert x at the head of list T[h(key[x])]')
        print('CHAINED-HASH-SEARCH(T, x)')
        print(' search for an element with k in list T[h(k)]')
        print('CHAINED-HASH-DELETE(T, x)')
        print(' delete x from the list T[h(key[x])]')
        print('对用链接法散列的分析')
        print(' 采用链接法后散列的性能怎样呢?特别地,要查找一个具有给定关键字的原宿需要多长时间呢?')
        print(' 给定一个能存放n个元素的,具有m个槽位的散列表T,定义T的装载因子a为n/m,即一个链表中平均存储的元素数')
        print(' 分析以a来表达,a可以小于、等于或者大于1')
        print('用链接法散列的最坏情况性能很差;所有的n个关键字都散列到同一个槽中,从而产生出一个长度为n的链表')
        print('最坏情况下查找的时间为Θ(n),再加上计算散列函数的时间,这么一来就和用一个链表来来链接所有的元素差不多了。显然,')
        print('散列方法的平均性态依赖于所选取的散列函数h在一般情况下,将所有的关键字分布在m个槽位上的均匀程度')
        print('先假定任何元素散列到m个槽中每一个的可能性是相同的,且与其他元素已被散列到什么位置上是独立无关的')
        print('称这个假设为简单一致散列')
        print('假定可以在O(1)时间内计算出散列值h(k),从而查找具有关键字为k的元素的时间线性地依赖于表T[h(k)]的长度为n')
        print('先不考虑计算散列函数和寻址槽h(k)的O(1)时间')
        print('定理11.1 对一个用链接技术来解决碰撞的散列表,在简单一致散列的假设下,一次不成功查找期望时间为Θ(1+a)')
        print('定理11.2 在简单一致散列的假设下,对于用链接技术解决碰撞的散列表,平均情况下一次成功的查找需要Θ(1+a)时间')
        print('练习11.2-1: 假设用一个散列函数h,将n个不同的关键字散列到一个长度为m的数组T中。')
        print(' 假定采用的是简单一致散列法,那么期望的碰撞数是多少?')
        print('练习11.2-2: 对于一个利用链接法解决碰撞的散列表,说明将关键字5,28,19,15,20,33,12,17,10')
        print(' 设该表中有9个槽位,并设散列函数为h(k)=k mod 9')
        print('练习11.2-3: 如果将链接模式改动一下,使得每个链表都能保持已排序顺序,散列的性能就可以有很大的提高。')
        print(' 这样的改动对成功查找、不成功查找、插入和删除操作的运行时间有什么影响')
        print('练习11.2-4: 在散列表内部,如何通过将所有未占用的槽位链接成一个自由链表,来分配和去分配元素的存储空间')
        print(' 假定一个槽位可以存储一个标志、一个元素加上一个或两个指针')
        print(' 所有的字典和自由链表操作应具有O(1)的期望运行时间')
        print('练习11.2-5: 有一个U的大小为n的子集,它包含了均散列到同一个槽位中的关键字,这样对于带链接的散列表,最坏情况下查找时间为Θ(n)')
    # python src/chapter11/chapter11note.py
    # python3 src/chapter11/chapter11note.py
# python src/chapter11/chapter11note.py
# python3 src/chapter11/chapter11note.py
class Chapter11_3:
    '''
    Chapter 11.3 (hash functions) notes and helpers.
    '''
    def note(self):
        '''
        Print the chapter 11.3 study notes.

        Example
        ====
        ```python
        Chapter11_3().note()
        ```
        '''
        print('chapter11.3 note as follow')
        print('11.3 散列函数')
        print('好的散列函数的特点')
        print('一个好的散列函数应近似地满足简单一致散列的假设:每个关键字都等可能地散列到m个槽位的任何一个之中去')
        print('并与其它的关键字已被散列到哪一个槽位中无关')
        print('不幸的是:一般情况下不太可能检查这一条件是否成立,因为人们很少可能知道关键字所符合的概率分布,而各关键字可能并不是完全互相独立的')
        print('有时也能知道关键字的概率分布。例如:已知各关键字都是随机的实数k,独立地、一致地分布于范围[0,1)')
        print('在实践中,常常可以运用启发式技术来构造好的散列函数')
        print('例如,在一个编译器的符号表中,关键字都是字符串,表示程序中的标识符')
        print('同一个程序中,经常会出现一些很相近的符号,如pt和pts。')
        print('一个好的散列函数应能最小化将这些相近符号散列到同一个槽中的可能性')
        print('\"除法散列\"用一个特定的质数来除所给的关键字,所得的余数即为该关键字的散列值')
        print('假定所选择的质数与关键字分布中的任何模式都是无关的,这种方法常常可以给出很好的结果')
        print('散列函数的某些应用可能会要求比简单一致散列更强的性质,例如可能希望某些很近似的关键字具有截然不同的散列值')
        print('将关键字解释为自然数')
        print(' 如果所给关键字不是自然数,则必须有一种方法来将它们解释为自然数')
        print(' 标识符pt可以被解释为十进制整数对(112,116),pt即为(112*128)+116=14452')
        print('11.3.1 除法散列法')
        print(' 通过取k除以m的余数,来将关键字k映射到m个槽的摸一个中去,亦即散列函数为h(k) = k mod m')
        print(' 例如,如果散列表的大小为m=12,所给关键字为k=100,则h(k)=4。这种方法只要一次除法操作,所以比较快')
        print(' 应用除法散列时,要注意m的选择,m不应是2的幂;可以选作m的值常常是与2的整数幂不太接近的质数')
        print(' 例如,假设我们要分配一张散列表,并用链接法解决碰撞,表中大约要存放n=2000个字符串,每个字符有8位')
        print(' 一次不成功的查找大约要检查3个元素,但我们并不在意,故分配散列表的大小为m=701.')
        print(' 之所以选择701这个数,是因为它是个接近a=2000/3,但又不接近2的任何幂次的质数。把每个关键字k视为一个整数')
        print(' 则有散列函数h(k) = k mod 701')
        print('11.3.2 乘法散列法')
        print(' 构造散列函数的乘法方法包含两个步骤。第一步,用关键字k乘上常数A(0<A<1),',
              '并抽出kA的小数部分。然后,用m乘以这个值,再取结果的底(floor)。总之,散列函数为h(k)=[m(kA mod 1)]')
        print(' 其中kA mod 1 即kA的小数部分,亦即kA-[kA]')
        print(' 乘法方法的一个优点是对m的选择没有特别的要求,一般选择它为2的某个幂次m=2^p')
        print(' 虽然这个方法对任何的A值都适用,但对某些值效果更好。Knuth认为最佳的选择与待散列的数据的特征有关A=(sqrt(5)-1)/2=0.6180339887...就是一个比较理想的值')
        print(' 例子:假设有k=123456,p=14,m=2^14=16384,w=32,根据Knuth的建议')
        print(' 取A为形如s/2^32的分数,它与(sqrt(5)-1)/2最为接近,于是A=2654435769/2^32')
        print(' k*s=32770622297664=(76300*2^32)+17612864,从而有r1=76300,r0=17612864,r0的14个最高有效位产生了散列值h(k)=67')
        print('11.3.3 全域散列')
        print('如果让某个与你作对的人来选择要散列的关键字,那么他会选择全部散列到同一槽中的n个关键字,使得平均检索值为Θ(n)')
        print('任何一个特定的散列函数都可能出现这种最坏情况性态:唯一有效的改进方法是随机地选择散列函数,使之独立要存储的关键字。')
        print('这种方法称作全域散列(universal hashing),不管对手选择了怎样的关键字,其平均性态都很好')
        print('全域散列的基本思想是在执行开始时,就从一族仔细设计的函数中,随机地选择一个座位散列函数')
        print('就像在快速排序中一样,随机化保证了没有哪一种输入会导致最坏情况性态。')
        print('同时,随机化使得即使对同一个输入,算法在每一次执行时的性态也都不一样')
        print('这样就可以确保对于任何输入,算法都具有较好的平均情况性态')
        print('设H为有限的一组散列函数,它将给定的关键字域U映射到{0,1,..,m-1}中。这样的一个函数组称为是全域的')
        print('定理11.3 如果h选自一组全域的散列函数,并用于将n个关键字散列到一个大小为m的、用链接法解决碰撞的表T中')
        print('对于每一个关键字k,定义一个随机变量Yk,它等于非k的、与k散列到同一槽位中的其他关键字的数目')
        print('推论11.4 对于一个具有m个槽位的表,利用全域散列和链接法解决碰撞,需要Θ(n)的期望时间来处理任何包含了n')
        print(' 个INSERT,SEARCH和DELETE操作的操作序列,该序列中包含了O(m)个INSERT操作')
        print('证明:由于插入操作的数目为O(m),有n=O(m),从而a=O(1)。')
        print('INSERT操作和DELETE操作需要常量时间,根据定理11.3,每一个INSERT操作的期望时间为O(1)')
        print('于是,根据期望值的线性性质,整个操作序列的期望时间为O(n)')
        print('很容易设计出一个全域散列函数类,这一点只需一点点数论方面的知识即可加以证明')
        print('首先选择一个足够大的质数p,使得每一个可能的关键字k都落在0到p-1的范围内')
        print('由于p是一个质数,解决模p的方程。假定了关键字域的大小大于散列表中的槽位数,故有p>m')
        print('定义散列函数h,利用一次线性变换,后跟模p、再模m的归纳,有h=((ak+b) mod p) mod m')
        print('定理11.5 由上述公式定义的散列函数类是全域的')
        print('练习11.3-1 假设希望查找一个长度为n的链表,其中每一个元素都包含一个关键字k和一个散列值h。每一个关键字都是长字符串')
        print('练习11.3-2 假设一个长度为r的字符串被散列到m个槽中,方法是将其视为一个以128为基数的数,然后应用除法方法')
        print('练习11.3-3 考虑除法方法的另一种版本,其中h(k)=k mod n,m=2^p-1,k为按基数2^p解释的字符串')
        print(' 证明:如果串x可由串y通过其自身的置换排列导出,则x和y具有相同的散列')
        print('练习11.3-4 考虑一个大小为m=1000的散列表和对应一个散列函数h(k)=m(kA mod 1)')
        print(' A=(sqrt(5)-1)/5,计算61,62,63,64,65被映射到的位置')
        print('练习11.3-5 定义一个从有限集合U到有限集合B上的散列函数簇H为全域的')
        print('练习11.3-6 略')
    # python src/chapter11/chapter11note.py
    # python3 src/chapter11/chapter11note.py
# python src/chapter11/chapter11note.py
# python3 src/chapter11/chapter11note.py
class Chapter11_4:
    '''
    Chapter 11.4 (open addressing) notes and helpers.
    '''
    def note(self):
        '''
        Print the chapter 11.4 study notes.

        Example
        ====
        ```python
        Chapter11_4().note()
        ```
        '''
        print('chapter11.4 note as follow')
        print('11.4 开放寻址法')
        print('在开放寻址法中,所有的元素都存放在散列表里面,即每个表项或包含一个动态元素的集合,或包含nil')
        print('当检查一个元素时,要检查所有的表项,直到找到所有的元素,或者最终发现该元素不在表中')
        print('不像在链接法中,没有链表,也没有元素存放在散列表之外。')
        print('这种方法中,散列表可能存满,以至于不能插入新的元素。但是装载因子a是不可能超过1的')
        print('当然,也可以将用作链接的链表存放在散列表未用的槽中。但开放寻址法的好处是它根本不需要指针。')
        print('不用存储指针而节省空间,从而可以用同样的空间提供更多的槽,减小碰撞,提高查找速度')
        print('在开放寻址法中,当要插入一个元素时,可以连续地检查散列表的各项')
        print('直到找到一个空槽来存放插入的关键字为止。检查的顺序不一定是0,1,2...,m-1(这种顺序下查找时间为Θ(n))')
        print('开放寻址法中,对散列表元素的删除操作执行起来会比较困难')
        print('当从槽i中删除关键字时,不能仅将NIL置于其中标识它为空')
        print('有三种技术常用来计算开放寻址法中探查序列,线性探查,二次探查以及双重探查。')
        print('但是,这三种技术都不能实现一致散列的假设。')
        print('在这三种技术中,双重散列能产生的探查序列最多,因而能给出最好的结果')
        print('线性探查存在着一次群集问题,随着时间的推移,连续被占用的槽不断增加,平均查找的时间也不断增加')
        print('定理11.6 给定一个装载因子为a=n/m<1的开放寻址散列表,在一次不成功的查找中,期望的探查次数至多为1/(1-a),假设散列是一致的')
        print('推论11.7 平均情况下,向一个装载因子为a的开放寻址散列表插入一个元素时,至多只需要做1/(1-a)次探查,假设采用的是一次散列')
        print('定理11.8 给定一个装载因子为a<1的开放寻址散列表,一次成功查找中的期望探查数至多为1/aln1/(1-a)')
        print('假定散列是一致的,且表中的每个关键字被查找的可能性是相同的')
        print('练习11.4-1 考虑将关键字10、22、31、4、15、28、17、88、59用开放寻址法插入到一个长度为m=11的散列表中')
        print(' 主散列函数为h(k)=k mod m,说明用线性探查、二次探查以及双重散列h2(k)=1+(k mod (m-1))将这些关键字插入散列表的结果')
        print('练习11.4-2 请写出HASH-DELETE的伪代码;修改HASH-INSERT,使之能处理特殊值DELETED。')
        print('练习11.4-3 假设采用双重散列来解决碰撞;亦即,所用的散列函数为h(k,i)=(h1(k)+ih2(k)) mod m')
        print(' 证明如果对某个关键字k,m和h2(k)有最大公约数d>=1,则在对关键字k的一次不成功的查找中,在回到槽h1(k)之前,要检查散列表的1/d。')
        print(' 于是,当d=1时,m与h2(k)互质,查找操作可能要检查整个散列表。')
        print('练习11.4-4 考虑一个采用了一致散列的开放寻址散列表。给出当装载因子为3/4和7/8时')
        print(' 在一次不成功查找中期望探查数的上界,以及一次成功查找中期望探查数的上界')
        print('练习11.4-5 考虑一个装载因子为a的开放寻址散列表。给出一个非0值a,使得在一次不成功的查找中')
        print(' 期望的探查数等于成功查找中期望探查数的两倍。此处的两个期望探查数上界可以根据定理11.6和定理11.8得出')
        print('')
    # python src/chapter11/chapter11note.py
    # python3 src/chapter11/chapter11note.py
# python src/chapter11/chapter11note.py
# python3 src/chapter11/chapter11note.py
class Chapter11_5:
    '''
    Chapter 11.5 (perfect hashing) notes and helpers.
    '''
    def note(self):
        '''
        Print the chapter 11.5 study notes.

        Example
        ====
        ```python
        Chapter11_5().note()
        ```
        '''
        print('chapter11.5 note as follow')
        print('11.5 完全散列')
        print('人们之所以使用散列技术,主要是因为它有出色的性能,其实,当关键字集合是静态的时,散列技术还可以用来获得出色的最坏情况性能')
        print('所谓静态就是指一旦各关键字存入表中后,关键字集合就不再变化了')
        print('如果某一种散列技术在进行查找时,其最坏情况内存访问次数为O(1)的话,则称其为完全散列(perfect hashing)')
        print('设计完全散列方案的基本想法是比较简单的。利用一种两级的散列方案,每一级上都采用全域散列')
        print('第一级与带链接的散列基本上是一样的:利用从某一全域散列函数簇中仔细选出的一个散列函数h,将n个关键字散列到m个槽中')
        print('然而,我们不是对散列到槽j中的所有关键字建立一个链表,而是采用了一个较小的二次散列表Sj,与其相关的散列函数为hj')
        print('通过仔细地选取散列函数hj,可以确保在第二级上不出现碰撞')
        print('但是,为了能真正确保在第二级上不出现碰撞,需要让散列表Sj的大小mj为散列到槽j中的关键字数nj的平方')
        print('mj对nj的这种二次依赖关系看上去可能使得总体存储需求很大')
        print('后面会说明,通过适当地选择第一次散列函数,预期使用的总存储空间仍然为O(n)')
        print('定理11.9 如果利用从一个全域散列函数类中随机选出的散列函数h,将n个关键字存储在一个大小为m=n^2的散列表中,那么出现碰撞的概率小于1/2')
        print('证明:共有(n 2)对关键字可能发生碰撞,如果h是从一个全域散列函数类H中随机选出的话,每一对关键字碰撞的概率为1/m。')
        print('设X为一个随机变量,它统计了碰撞的次数,当m=n^2时,期望的碰撞次数为E[X]<1/2')
        print('定理11.10 如果利用从某一全域散列函数类中随机选出的散列函数h,来将n个关键字存储到一个大小为m=n的散列表中')
        print('推论11.11 如果利用从某一全域散列函数类中随机选出的散列函数h,来将n个关键字存储到一个大小为m=n的散列表中,并将每个二次散列表的大小置为m=n^2')
        print(' 则在一个完全散列方案中,存储所有二次散列表所需的存储总量的期望值小于2n')
        print('推论11.12 如果利用从某一全域散列函数类中随机选出的散列函数h,来将n个关键字存储到一个大小为m=n的散列表中,并将每个二次散列表的大小置为m=n^2')
        print(' 则用于存储所有二次散列表的存储总量超过4n的概率小于1/2')
        print('练习11.5-1 假设要将n个关键字插入到一个大小为m,采用了开放寻址法和一致散列技术的散列表中。')
        print(' 设p(n,m)为没有碰撞发生的概率。证明:p(n,m)<=e^(-n(n-1)/2m)')
        print(' 论证当n超过sqrt(m)时,不发生碰撞的概率迅速趋于0')
        print('思考题11-1 最长探查的界:用一个大小为m的散列表来存储n个数据项目,并且有n<=m/2。采用开放寻址法来解决碰撞问题')
        print(' a) 假设采用了一致散列,证明对于i=1,2,...,n,第i次插入需要严格多余k次探查的概率至多为2^-k')
        print( 'b) 证明:对于i=1,2,...,n, 第i次插入需要多于2lgn次探查的概率至多是1/n^2')
        print(' 设随机变量Xi表示第i次插入所需要的探查数。在上面b)已证明Pr{Xi>2lgn}<=1/n^2')
        print(' 设随机变量X=maxXi表示插入中所需探查数的最大值')
        print( 'c) 证明:Pr{X>2lgn}<=1/n')
        print( 'd) 证明:最长探查序列的期望长度为E[x]=O(lgn)')
        print('思考题11-2 链接法中槽大小的界')
        print(' 假设有一个含有n个槽的散列表,并用链接法来解决碰撞问题。另假设向表中插入n个关键字。')
        print(' 每个关键字被等可能地散列到每个槽中。设在所有关键字被插入后,M是各槽中所含关键字数的最大值')
        print('思考题11-3 二次探查')
        print(' 假设要在一个散列表(表中的各个位置为0,1,...,m-1)中查找关键字k,并假设有一个散列函数h将关键字空间映射到集合{0,1,...,m-1},查找方法如下')
        print(' 1) 计算值i<-h(k),置j<-0')
        print(' 2) 探查位置i,如果找到了所需的关键字k,或如果这个位置是空的,则结束查找')
        print(' 3) 置j<-(j+1) mod m,i<-(i+j) mod m,则返回步骤2)')
        print(' 设m是2的幂:')
        print(' a) 通过给出c1和c2的适当值,来证明这个方案是一般的\"二次探查\"法的一个实例')
        print(' b) 证明:在最坏情况下,这个算法要检查表中的每一个位置')
        print('思考题11-4 k全域散列和认证')
        print(' 设H={h}为一个散列函数类,其中每个h将关键字域U映射到{0,1,...,m-1}上。称H是k全域的')
        print(' 如果对每个由k个不同的关键字<x(1),x(2),...,x(k)>构成的固定序列,以及从H中随机选出的任意h')
        print(' a) 证明:如果H是2全域的,则它是全域的。')
        print(' b) 设U为取自Zp中数值的n元组集合,并设B=Zp,此处p为质数')
    # python src/chapter11/chapter11note.py
    # python3 src/chapter11/chapter11note.py
# python src/chapter11/chapter11note.py
# python3 src/chapter11/chapter11note.py
# Module-level singletons, one per chapter section.
chapter11_1 = Chapter11_1()
chapter11_2 = Chapter11_2()
chapter11_3 = Chapter11_3()
chapter11_4 = Chapter11_4()
chapter11_5 = Chapter11_5()
def printchapter11note():
    '''
    Print the notes of every section of chapter 11, in order.
    '''
    print('Run main : single chapter eleven!')
    chapter11_1.note()
    chapter11_2.note()
    chapter11_3.note()
    chapter11_4.note()
    chapter11_5.note()
# python src/chapter11/chapter11note.py
# python3 src/chapter11/chapter11note.py
if __name__ == '__main__':
    printchapter11note()
else:
    pass
|
python
|
from dataclasses import dataclass, field
from itertools import product as product_
from typing import (Callable, Dict, Iterable, Iterator, List, Optional, Tuple,
Union)
import pandas as pd
from ivory.common.context import np
from ivory.common.dataset import Dataset
from ivory.core.model import Model
from ivory.core.model import sequential as sequence_
from ivory.core.optimizer import Optimizer, get_optimizer
from ivory.core.variable import Data
# Per-epoch evaluation data: either one tuple of arrays, or a mapping from a
# dataset name to such a tuple (yielding one result row per name per epoch).
EpochData = Union[Tuple[Data, ...], Dict[str, Tuple[Data, ...]]]
@dataclass(eq=False)
class Trainer:
    """Drives the optimization loop of a ``Model`` over a ``Dataset``.

    Attributes:
        model: the model to train.
        optimizer: update rule; bound to ``model`` in ``__post_init__``.
        metrics: metric names reported by :meth:`evaluate`.
        dataset: mini-batch iterable, attached via :meth:`fit`.
        epoch_data: evaluation data reported once per epoch (see ``EpochData``).
        max_grad: gradient-clipping threshold; no clipping when ``None``.
    """

    model: Model
    optimizer: Optimizer
    metrics: List[str] = field(default_factory=list)
    dataset: Optional[Dataset] = None
    epoch_data: EpochData = field(default_factory=tuple)
    max_grad: Optional[float] = None

    def __post_init__(self):
        # Bind the optimizer to the model as soon as the trainer exists.
        self.optimizer.set_model(self.model)

    def __repr__(self):
        cls = self.__class__.__name__
        inputs = [v.shape for v in self.model.data_input_variables]
        r = f"{cls}(inputs={inputs}, optimizer='{self.optimizer.name}', "
        return r + f"metrics={self.metrics})"

    def init(self, **kwargs) -> "Trainer":
        """Initialize the model's parameters; returns self for chaining."""
        self.model.init(**kwargs)
        return self

    def build(self):
        """(Re)build the model graph and rebind the optimizer."""
        self.model.build()
        self.optimizer.set_model(self.model)

    def set_model(self, model: Model) -> "Trainer":
        """Install a new model and rebind the optimizer; returns self."""
        self.model = model
        self.optimizer.set_model(self.model)
        return self

    def set_net(self, net: Iterable) -> "Trainer":
        """Build a sequential model from *net* and install it."""
        return self.set_model(sequence_(net))

    def set_data(self, *data: Data) -> None:
        """Feed one mini-batch into the model's input variables."""
        self.model.set_data(*data)

    def get_metrics(self, metrics: str) -> float:
        """Return the named metric from the model's last forward pass.

        Raises:
            ValueError: for an unrecognized metric name.
        """
        if metrics == "loss":
            return self.model.loss
        elif metrics in ["acc", "accuracy"]:
            return self.model.accuracy
        elif metrics in ["ppl", "perplexity"]:
            # Perplexity is the exponential of the (cross-entropy) loss.
            return np.exp(self.model.loss)
        else:
            raise ValueError(f"Unknown metrics: {metrics}.")

    def evaluate(self, *data: Data) -> Tuple[float, ...]:
        """Run a forward pass (optionally on *data*) and collect the metrics."""
        if data:
            self.set_data(*data)
        self.model.forward()
        return tuple(self.get_metrics(metrics) for metrics in self.metrics)

    def predict(self, *data: Data):
        return self.model.predict(*data)

    def fit(self, dataset: Dataset, epoch_data: EpochData = ()) -> "Trainer":
        """Attach a dataset (and optional per-epoch eval data); returns self."""
        self.dataset = dataset
        self.epoch_data = epoch_data
        return self

    def __iter__(self) -> Iterator[Tuple]:
        """Train the model, yielding one evaluation tuple per iteration — or
        per epoch (per epoch and key) when ``epoch_data`` is set."""
        if self.dataset is None:
            # BUG FIX: the original raised StopIteration here; inside a
            # generator that is converted to RuntimeError since Python 3.7
            # (PEP 479). A bare return ends the generator cleanly.
            return
        epoch = -1
        for data in self.dataset:
            self.set_data(*data)
            self.model.forward()
            self.model.backward()
            if self.max_grad is not None:
                self.model.clip_grads(self.max_grad)
            self.optimizer.update()
            if not self.epoch_data:
                yield (self.dataset.iteration,) + self.evaluate()  # type:ignore
            elif self.dataset.epoch != epoch:
                # New epoch: evaluate in inference mode on the held-out data,
                # then switch back to train mode.
                self.model.set_train(False)
                epoch = self.dataset.epoch
                if isinstance(self.epoch_data, tuple):
                    yield (epoch,) + self.evaluate(*self.epoch_data)  # type:ignore
                else:
                    for key, value in self.epoch_data.items():
                        yield (epoch, key) + self.evaluate(*value)  # type:ignore
                self.model.set_train(True)

    def to_frame(self, factory=list) -> pd.DataFrame:
        """Run the training loop and materialize its log as a DataFrame."""
        columns = to_columns(self.epoch_data) + self.metrics
        return pd.DataFrame(factory(iter(self)), columns=columns)
def to_columns(epoch_data: EpochData) -> List[str]:
    """Return the index column names matching the shape of *epoch_data*."""
    if not epoch_data:
        return ["iteration"]
    if isinstance(epoch_data, tuple):
        return ["epoch"]
    return ["epoch", "data"]
def sequential(
    net: Iterable,
    optimizer: Union[str, Optimizer] = "sgd",
    dataset: Optional[Dataset] = None,
    metrics: Optional[List[str]] = None,
) -> Trainer:
    """Build a Trainer around a sequential model made from *net* layers."""
    opt = get_optimizer(optimizer) if isinstance(optimizer, str) else optimizer
    # Default to reporting the loss only.
    chosen_metrics = ["loss"] if metrics is None else metrics
    return Trainer(sequence_(net), opt, chosen_metrics, dataset=dataset)
@dataclass(eq=False)
class Product:
    """Trains one Trainer per element of the Cartesian product of *iterables*.

    Attributes:
        trainer_factory: called with one combination of arguments to build
            each Trainer.
        iterables: the per-argument value sequences to sweep over.
        trainer: the most recently built Trainer (None before iteration).
        dataset: shared dataset, attached via :meth:`fit`.
        epoch_data: per-epoch evaluation data forwarded to each Trainer.
    """

    trainer_factory: Callable[..., Trainer]
    iterables: Tuple
    trainer: Optional[Trainer] = None
    dataset: Optional[Dataset] = None
    epoch_data: EpochData = ()

    def __repr__(self):
        cls = self.__class__.__name__
        iterables = ", ".join(f"<{len(x)}>" for x in self.iterables)
        r = f"{cls}(iterables=({iterables})"
        if self.trainer:
            r += f", trainer={self.trainer}"
        return r + ")"

    def fit(self, dataset: Dataset, epoch_data: EpochData = ()) -> "Product":
        """Attach the dataset shared by every grid point; returns self."""
        self.dataset = dataset
        self.epoch_data = epoch_data
        return self

    def __iter__(self) -> Iterator[Tuple]:
        """Yield (args..., training results...) for every combination."""
        if self.dataset is None:
            # BUG FIX: the original raised StopIteration here; inside a
            # generator that is converted to RuntimeError since Python 3.7
            # (PEP 479). A bare return ends the generator cleanly.
            return
        for args in product_(*self.iterables):
            self.trainer = self.trainer_factory(*args)
            for result in self.trainer.fit(self.dataset, epoch_data=self.epoch_data):
                yield args + result

    def to_frame(self, columns: Optional[List[str]] = None, factory=list):
        """Run the sweep and materialize its log as a DataFrame."""
        if columns is None:
            # NOTE(review): the original's indentation was ambiguous; the
            # default columns (var_*, index columns, metrics) are built only
            # when the caller did not supply them — confirm against callers.
            columns = [f"var_{k+1}" for k in range(len(self.iterables))]
            columns += to_columns(self.epoch_data)
            columns += self.trainer.metrics  # type:ignore
        return pd.DataFrame(factory(iter(self)), columns=columns)
def product(trainer_factory: Callable[..., Trainer], *iterables) -> Product:
    """Create a Product sweeping *trainer_factory* over the Cartesian product
    of *iterables*."""
    return Product(trainer_factory=trainer_factory, iterables=iterables)
|
python
|
from __future__ import annotations
import pytest
import resist
class TestUser:
    """Unit tests for ``resist.User`` attribute mapping."""

    @pytest.fixture()
    def client(self) -> resist.WebSocketClient:
        # Fresh client per test. NOTE(review): assumes constructing the
        # client does not open a connection — confirm in resist.
        return resist.WebSocketClient("REVOLT_TOKEN")

    def test_attributes(self, client: resist.WebSocketClient) -> None:
        # The payload's "_id" is exposed as ``unique``; fetch/cache must be
        # part of the public surface.
        user = resist.User(client, {"_id": "foo", "username": "bar"})
        assert user.unique == "foo"
        assert user.username == "bar"
        assert hasattr(user, "fetch")
        assert hasattr(user, "cache")
|
python
|
# -*- coding: utf-8 -*-
# @Time : 18/12/10 上午10:35
# @Author : Edward
# @Site :
# @File : DynamicPool.py
# @Software: PyCharm Community Edition
import time
from threading import Thread
from multiprocessing import Process
class Pool(object):
    '''
    Blocking dynamic worker pool (threads or processes).

    After the pool is created, tasks can be submitted one at a time; there
    is no need to build the whole task list up front — tasks may be added
    while others are still running. `add_task` blocks while the pool is full.
    '''

    def __init__(self, size=10, check_interval=0.1, multiprocess=False):
        # Currently running workers (Thread or Process objects).
        self.workers = []
        self.worker_size = size
        # Polling interval (seconds) used while waiting for a free slot.
        self.check_interval = check_interval
        # Choose the worker class once, up front.
        self.worker_type = Process if multiprocess else Thread

    def add_task(self, func, args):
        """Start func(*args) in a new worker, blocking while the pool is full."""
        while True:
            self.check_thread()
            # BUG FIX: use >= rather than == so the wait loop can never be
            # skipped if the worker list somehow exceeds the limit.
            if len(self.workers) >= self.worker_size:
                time.sleep(self.check_interval)
            else:
                break
        new_thread = self.worker_type(target=func, args=args)
        self.workers.append(new_thread)
        new_thread.start()

    def wait(self):
        """Block until every submitted task has finished."""
        while self.workers:
            self.check_thread()
            time.sleep(self.check_interval)

    def check_thread(self):
        """Drop finished workers from the pool.

        BUG FIX: the original removed items from self.workers with
        `del self.workers[index]` while iterating it via enumerate(), which
        skips the element following every deletion (finished workers could
        linger for extra polling rounds). Rebuild the list instead.
        """
        alive = []
        for worker in self.workers:
            if worker.is_alive():
                alive.append(worker)
            else:
                print('线程 [ %s ] 已结束' % worker.name)
        self.workers = alive
if __name__ == '__main__':
    # Demo: run 10 two-second sleep-and-print tasks through a pool of 5.
    # pool = Pool(5)
    pool = Pool(5, multiprocess=True)
    # NOTE(review): a lambda cannot be pickled, so with multiprocess=True this
    # demo only works where the 'fork' start method is the default (Linux);
    # it will fail under 'spawn' (Windows, recent macOS) — confirm targets.
    pp = lambda x: time.sleep(2) or print(x)
    for i in range(10):
        pool.add_task(pp, [i])
    pool.wait()
|
python
|
try:
import json
except ImportError:
import simplejson as json
from createsend import CreateSendBase, BadRequest
from utils import json_to_py
class Administrator(CreateSendBase):
    """Represents an administrator and associated functionality."""

    def __init__(self, email_address=None):
        self.email_address = email_address
        super(Administrator, self).__init__()

    def get(self, email_address):
        """Gets an administrator by email address."""
        response = self._get("/admins.json", params={"email": email_address})
        return json_to_py(response)

    def add(self, email_address, name):
        """Adds an administrator to an account."""
        payload = json.dumps({
            "EmailAddress": email_address,
            "Name": name})
        return json_to_py(self._post("/admins.json", payload))

    def update(self, new_email_address, name):
        """Updates the details for an administrator."""
        payload = json.dumps({
            "EmailAddress": new_email_address,
            "Name": name})
        self._put("/admins.json",
                  body=payload, params={"email": self.email_address})
        # Keep local state in sync so this object can continue to be used
        # reliably after the address change.
        self.email_address = new_email_address

    def delete(self):
        """Deletes the administrator from the account."""
        self._delete("/admins.json", params={"email": self.email_address})
|
python
|
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import os
import json
import random
import logging
from datetime import datetime
# Raise the log level of the noisier indra components before the indra
# imports below — presumably to keep test output readable; NOTE(review):
# confirm these logger names still match the current indra modules.
gm_logger = logging.getLogger('grounding_mapper')
gm_logger.setLevel(logging.WARNING)
sm_logger = logging.getLogger('sitemapper')
sm_logger.setLevel(logging.WARNING)
ps_logger = logging.getLogger('phosphosite')
ps_logger.setLevel(logging.WARNING)
pa_logger = logging.getLogger('preassembler')
pa_logger.setLevel(logging.WARNING)
from indra.db import util as db_util
from indra.db import client as db_client
from indra.db import preassembly_manager as pm
from indra.db.preassembly_manager import shash
from indra.statements import Statement
from indra.tools import assemble_corpus as ac
from nose.plugins.attrib import attr
from .util import needs_py3
from .test_db_client import _PrePaDatabaseTestSetup
from indra.statements import Phosphorylation, Agent, Evidence
from indra.db.util import NestedDict
from indra.db.util import reader_versions as rv_dict
# Suite-wide constants: this file's directory plus the default statement
# count and batch size used by the "large" tests below.
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
MAX_NUM_STMTS = 11721
BATCH_SIZE = 2017
# Module-level cache slot; not populated anywhere in this chunk —
# NOTE(review): confirm it is still used.
STMTS = None
# ==============================================================================
# Support clases and functions
# ==============================================================================
def make_raw_statement_set_for_distillation():
    """Build a curated nested raw-statement structure for distillation tests.

    Returns a tuple (d, stmts, target_sets, bettered_sids):
    - d: NestedDict provenance tree
      text ref -> source -> text content -> reader -> reader version ->
      reading -> statement hash -> {(index, statement), ...}
    - stmts: flat list of all created statements
    - target_sets: every acceptable (statement set, duplicate-id set) outcome
    - bettered_sids: statement ids superseded by a newer reader version
    """
    d = NestedDict()
    stmts = []
    target_sets = []
    bettered_sids = set()

    # Create a function which will update all possible outcome scenarios given
    # a set of some_stmts.
    def add_stmts_to_target_set(some_stmts):
        # If we don't have any target sets of statements, initialize with the
        # input statements.
        if not target_sets:
            for stmt in some_stmts:
                target_sets.append(({stmt},
                                    {stmts.index(s) for s in some_stmts
                                     if s is not stmt}))
        else:
            # Make a copy and empty the current list.
            old_target_sets = target_sets[:]
            try:  # Python 3
                target_sets.clear()
            except AttributeError:  # Python 2
                del target_sets[:]

            # Now for every previous scenario, pick a random possible "good"
            # statement, update the corresponding duplicate trace.
            for stmt_set, dup_set in old_target_sets:
                for stmt in some_stmts:
                    # Here we consider the possibility that each of the
                    # potential valid statements may be chosen, and record that
                    # possible alteration to the set of possible histories.
                    new_set = stmt_set.copy()
                    new_set.add(stmt)
                    new_dups = dup_set.copy()
                    new_dups |= {stmts.index(s) for s in some_stmts
                                 if s is not stmt}
                    target_sets.append((new_set, new_dups))
        return target_sets

    # Create a function to handle the creation of the metadata.
    def add_content(trid, src, tcid, reader, rv_idx, rid, a, b, ev_num, copies,
                    is_target=False):
        # BUG FIX: the original declared `global target_sets`, which binds a
        # *module-level* name instead of the enclosing function's local (it
        # only worked because the list is mutated in place). `nonlocal` is
        # what was intended and avoids polluting module state.
        nonlocal target_sets
        # Add the new statements to the over-all list.
        stmts.extend(__make_test_statements(a, b, reader, ev_num, copies))

        # If we are making multiple copies, the latest copies should have the
        # same overall hash. If it's not a copy, the hashes should be
        # different.
        if copies > 1:
            # The above only applies if the evidence was specified to be the
            # same, otherwise it assumed the evidence, and therefore the hash,
            # is different.
            if ev_num is not None:
                assert stmts[-1].get_hash() == stmts[-2].get_hash()
            else:
                assert stmts[-1].get_hash() != stmts[-2].get_hash()

        # Populate the provenance for the dict.
        rv = rv_dict[reader][rv_idx]
        r_dict = d[trid][src][tcid][reader][rv][rid]

        # If the evidence variation was specified, the evidence in any copies
        # is identical, and they will all have the same hash. Else, the hash
        # is different and the statements need to be iterated over.
        if ev_num is not None:
            s_hash = stmts[-1].get_hash()
            # Check to see if we have a matching statement yet.
            if r_dict.get(s_hash) is None:
                r_dict[s_hash] = set()
            # Set the value
            d[trid][src][tcid][reader][rv][rid][stmts[-1].get_hash()] |= \
                {(stmts.index(s), s) for s in stmts[-copies:]}
        else:
            for s in stmts[-copies:]:
                s_hash = s.get_hash()
                if r_dict.get(s_hash) is None:
                    r_dict[s_hash] = set()
                d[trid][src][tcid][reader][rv][rid][s_hash].add(
                    (stmts.index(s), s)
                )

        # If this/these statement/s is intended to be picked up, add it/them
        # to the target sets.
        if is_target:
            target_sets = add_stmts_to_target_set(stmts[-copies:])
        return

    # We produce statements a couple of times with an old reader version.
    #            trid         tcid      reader vrsn idx  distinct evidence id
    #            |  source    |  reader |  reading id |  number of copies
    #            |  |         |  |      |  |  Agents  |  |  Is it a target?
    add_content(1, 'pubmed', 1, 'reach', 0, 1, 'A1', 'B1', 1, 2, False)
    add_content(1, 'pubmed', 1, 'reach', 0, 1, 'A1', 'B1', 2, 1)
    add_content(1, 'pubmed', 1, 'reach', 0, 1, 'A2', 'B2', 1, 1)

    # Do it again for a new reader version.
    add_content(1, 'pubmed', 1, 'reach', 1, 2, 'A1', 'B1', 1, 2)
    add_content(1, 'pubmed', 1, 'reach', 1, 2, 'A1', 'B1', 2, 1)

    # Add some for sparser.
    add_content(1, 'pubmed', 1, 'sparser', 1, 3, 'A1', 'B1', 1, 2)
    add_content(1, 'pubmed', 1, 'sparser', 1, 3, 'A2', 'B2', 1, 1)

    # Now add statements from another source.
    add_content(1, 'pmc_oa', 2, 'reach', 0, 4, 'A1', 'B1', 1, 2)
    add_content(1, 'pmc_oa', 2, 'reach', 0, 4, 'A1', 'B1', 2, 1)
    add_content(1, 'pmc_oa', 2, 'reach', 0, 4, 'A2', 'B2', 1, 1)

    # All the statements up until now will be skipped, if all goes well.
    bettered_sids |= set(range(len(stmts)))

    # ...and again for a new reader version.
    add_content(1, 'pmc_oa', 2, 'reach', 1, 4, 'A1', 'B1', 1, 2, True)
    add_content(1, 'pmc_oa', 2, 'reach', 1, 4, 'A1', 'B1', 2, 1, True)
    add_content(1, 'pmc_oa', 2, 'reach', 1, 4, 'A2', 'B2', 1, 1, True)
    add_content(1, 'pmc_oa', 2, 'reach', 1, 4, 'A3', 'B3', 1, 1, True)

    # Add some results from sparser
    add_content(1, 'pmc_oa', 2, 'sparser', 1, 5, 'A1', 'B1', 1, 2, True)
    add_content(1, 'pmc_oa', 2, 'sparser', 1, 5, 'A2', 'B2', 1, 1, True)

    # Add some content for another text ref.
    add_content(2, 'pmc_oa', 3, 'sparser', 1, 6, 'A3', 'B3', 1, 1, True)
    add_content(2, 'manuscripts', 4, 'sparser', 1, 7, 'A3', 'B3', 1, 1)

    # This last statement should also be skipped, if all goes well.
    bettered_sids.add(len(stmts) - 1)

    return d, stmts, target_sets, bettered_sids
def __make_test_statements(a, b, source_api, ev_num=None, copies=1):
    """Create `copies` Phosphorylation statements between agents a and b.

    If ev_num is given, every copy carries the same evidence text. If None,
    the first loop iteration binds ev_num to 0 and it is never reset, so all
    later copies reuse 0 as well — NOTE(review): this looks like it was meant
    to vary per copy (ev_num = i each iteration); confirm intended behavior.
    """
    stmts = []
    A = Agent(a)
    B = Agent(b)
    for i in range(copies):
        if ev_num is None:
            ev_num = i
        ev_text = "Evidence %d for %s phosphorylates %s." % (ev_num, a, b)
        ev_list = [Evidence(text=ev_text, source_api=source_api)]
        # NOTE(review): A and B are already Agent objects; wrapping them in
        # Agent(...) again passes an Agent where a name is expected — confirm
        # this is deliberate.
        stmts.append(Phosphorylation(Agent(A), Agent(B), evidence=ev_list))
    return stmts
class _DatabaseTestSetup(_PrePaDatabaseTestSetup):
    """This object is used to setup the test database into various configs."""

    def add_statements(self, fraction=1, pam=None):
        """Add statements and agents to the database.

        Parameters
        ----------
        fraction : float between 0 and 1
            Default is 1. The fraction of remaining statements to be added.
        pam : PreassemblyManager or None
            Default None. If given, run preassembly (or incremental
            preassembly when statements were added previously) on the
            newly added statements.
        """
        available_tuples = self.get_available_stmt_tuples()
        # BUG FIX: the original used `fraction is not 1`, an identity test
        # that only works by accident for the cached int literal 1 (and is
        # always True for the float 1.0); compare by value instead.
        if fraction != 1:
            num_stmts = int(fraction*len(available_tuples))
            input_tuples = random.sample(available_tuples, num_stmts)
        else:
            input_tuples = available_tuples

        self.insert_the_statements(input_tuples)

        if pam:
            print("Preassembling new statements...")
            if self.used_stmt_tuples:
                # Statements were added before: incremental update.
                pam.supplement_corpus(self.test_db)
            else:
                pam.create_corpus(self.test_db)

        self.used_stmt_tuples |= set(input_tuples)
        return
def _get_loaded_db(num_stmts, split=None, pam=None):
    """Create and return a test database loaded with `num_stmts` statements.

    If `split` is given, that fraction is inserted first (running preassembly
    via `pam` when provided), then the remainder is added without preassembly
    — setting up the incremental (supplement) scenario.
    """
    print("Creating and filling a test database:")
    dts = _DatabaseTestSetup(num_stmts)
    dts.load_background()
    if split is None:
        dts.add_statements(pam=pam)
    else:
        dts.add_statements(split, pam=pam)
        dts.add_statements()
    return dts.test_db
def _str_large_set(s, max_num):
if len(s) > max_num:
values = list(s)[:max_num]
ret_str = '{' + ', '.join([str(v) for v in values]) + ' ...}'
ret_str += ' [length: %d]' % len(s)
else:
ret_str = str(s)
return ret_str
def _do_old_fashioned_preassembly(stmts):
    """Run classic in-memory preassembly: ground, map sites, preassemble.

    Returns all preassembled statements (return_toplevel=False includes the
    supported, non-toplevel statements too).
    """
    mapped = ac.map_sequence(ac.map_grounding(stmts))
    return ac.run_preassembly(mapped, return_toplevel=False)
def _get_opa_input_stmts(db):
    """Collect the statement set that old-fashioned preassembly should see.

    Combines distilled reading statements with raw statements not derived
    from a reading (reading_id is NULL, i.e. database sources).
    """
    stmt_nd = db_util._get_reading_statement_dict(db, get_full_stmts=True)
    reading_stmts, _, _ =\
        db_util._get_filtered_rdg_statements(stmt_nd, get_full_stmts=True,
                                             ignore_duplicates=True)
    # `== None` (not `is None`) is deliberate here: it builds an ORM filter
    # expression rather than performing a Python identity test.
    db_stmts = db_client.get_statements([db.RawStatements.reading_id == None],
                                        preassembled=False, db=db)
    stmts = reading_stmts | set(db_stmts)
    print("Got %d statements for opa." % len(stmts))
    return stmts
def _check_against_opa_stmts(db, raw_stmts, pa_stmts):
    """Assert that db preassembly (`pa_stmts`) matches classic preassembly.

    Runs old-fashioned preassembly (opa) on `raw_stmts`, then compares the
    two result sets by shallow hash, evidence, and support links, printing
    details for any divergence and asserting with a report at the end.
    """
    def _compare_list_elements(label, list_func, comp_func, **stmts):
        # Compare two statements' list attributes as multisets of
        # comp_func values. Relies on **kwargs preserving insertion order
        # (guaranteed since Python 3.6, PEP 468). Returns a diff record,
        # or None when the lists match.
        (stmt_1_name, stmt_1), (stmt_2_name, stmt_2) = list(stmts.items())
        vals_1 = [comp_func(elem) for elem in list_func(stmt_1)]
        vals_2 = []
        for element in list_func(stmt_2):
            val = comp_func(element)
            if val in vals_1:
                vals_1.remove(val)
            else:
                vals_2.append(val)
        if len(vals_1) or len(vals_2):
            print("Found mismatched %s for hash %s:\n\t%s=%s\n\t%s=%s"
                  % (label, shash(stmt_1), stmt_1_name, vals_1, stmt_2_name,
                     vals_2))
            return {'diffs': {stmt_1_name: vals_1, stmt_2_name: vals_2},
                    'stmts': {stmt_1_name: stmt_1, stmt_2_name: stmt_2}}
        return None
    # Index both result sets by shallow hash and find one-sided extras.
    opa_stmts = _do_old_fashioned_preassembly(raw_stmts)
    old_stmt_dict = {shash(s): s for s in opa_stmts}
    new_stmt_dict = {shash(s): s for s in pa_stmts}
    new_hash_set = set(new_stmt_dict.keys())
    old_hash_set = set(old_stmt_dict.keys())
    hash_diffs = {'extra_new': [new_stmt_dict[h]
                                for h in new_hash_set - old_hash_set],
                  'extra_old': [old_stmt_dict[h]
                                for h in old_hash_set - new_hash_set]}
    if hash_diffs['extra_new']:
        elaborate_on_hash_diffs(db, 'new', hash_diffs['extra_new'],
                                old_stmt_dict.keys())
    if hash_diffs['extra_old']:
        elaborate_on_hash_diffs(db, 'old', hash_diffs['extra_old'],
                                new_stmt_dict.keys())
    print(hash_diffs)
    # Per-statement comparisons to run on every hash present on both sides.
    tests = [{'funcs': {'list': lambda s: s.evidence[:],
                        'comp': lambda ev: ev.matches_key()},
              'label': 'evidence text',
              'results': []},
             {'funcs': {'list': lambda s: s.supports[:],
                        'comp': lambda s: shash(s)},
              'label': 'supports matches keys',
              'results': []},
             {'funcs': {'list': lambda s: s.supported_by[:],
                        'comp': lambda s: shash(s)},
              'label': 'supported-by matches keys',
              'results': []}]
    comp_hashes = new_hash_set & old_hash_set
    for mk_hash in comp_hashes:
        for test_dict in tests:
            res = _compare_list_elements(test_dict['label'],
                                         test_dict['funcs']['list'],
                                         test_dict['funcs']['comp'],
                                         new_stmt=new_stmt_dict[mk_hash],
                                         old_stmt=old_stmt_dict[mk_hash])
            if res is not None:
                test_dict['results'].append(res)
    def all_tests_passed():
        # True only if there were no extras and no per-statement mismatches.
        test_results = [not any(hash_diffs.values())]
        for td in tests:
            test_results.append(len(td['results']) == 0)
        print("%d/%d tests passed." % (sum(test_results), len(test_results)))
        return all(test_results)
    def write_report(num_comps):
        # Human-readable failure summary used as the assertion message.
        ret_str = "Some tests failed:\n"
        ret_str += ('Found %d/%d extra old stmts and %d/%d extra new stmts.\n'
                    % (len(hash_diffs['extra_old']), len(old_hash_set),
                       len(hash_diffs['extra_new']), len(new_hash_set)))
        for td in tests:
            ret_str += ('Found %d/%d mismatches in %s.\n'
                        % (len(td['results']), num_comps, td['label']))
        return ret_str
    # Now evaluate the results for exceptions
    assert all_tests_passed(), write_report(len(comp_hashes))
def str_imp(o, uuid=None, other_stmt_keys=None):
    """Render a one-line debug description of a DB ORM row.

    RawStatements rows are prefixed with '*' when their uuid matches `uuid`
    and '+' when their shallow hash is in `other_stmt_keys`. Returns '~' for
    None; other (unknown) types fall through and return None.
    """
    if o is None:
        return '~'

    cname = o.__class__.__name__
    if cname == 'TextRef':
        return ('<TextRef: trid: %s, pmid: %s, pmcid: %s>'
                % (o.id, o.pmid, o.pmcid))
    if cname == 'TextContent':
        return ('<TextContent: tcid: %s, trid: %s, src: %s>'
                % (o.id, o.text_ref_id, o.source))
    if cname == 'Reading':
        return ('<Reading: rid: %s, tcid: %s, reader: %s, rv: %s>'
                % (o.id, o.text_content_id, o.reader, o.reader_version))
    if cname == 'RawStatements':
        stmt = Statement._from_json(json.loads(o.json.decode()))
        base = ('<RawStmt: %s sid: %s, uuid: %s, type: %s, iv: %s, hash: %s>'
                % (str(stmt), o.id, o.uuid[:8] + '...', o.type,
                   o.indra_version[:14] + '...', o.mk_hash))
        markers = ''
        if stmt.uuid == uuid:
            markers += '*'
        if other_stmt_keys and shash(stmt) in other_stmt_keys:
            markers += '+'
        return markers + base
def elaborate_on_hash_diffs(db, lbl, stmt_list, other_stmt_keys):
    """Print a detailed provenance dump for statements whose hashes appear
    on only one side ('new' or 'old') of a preassembly comparison."""
    print("#"*100)
    print("Elaboration on extra %s statements:" % lbl)
    print("#"*100)
    for s in stmt_list:
        print(s)
        uuid = s.uuid
        print('-'*100)
        print('uuid: %s\nhash: %s\nshallow hash: %s'
              % (s.uuid, s.get_hash(), shash(s)))
        print('-'*100)
        db_pas = db.select_one(db.PAStatements,
                               db.PAStatements.mk_hash == shash(s))
        print('\tPA statement:', db_pas.__dict__ if db_pas else '~')
        print('-'*100)
        db_s = db.select_one(db.RawStatements, db.RawStatements.uuid == s.uuid)
        print('\tRaw statement:', str_imp(db_s, uuid, other_stmt_keys))
        if db_s is None:
            continue
        print('-'*100)
        if db_s.reading_id is None:
            # No reading: the raw statement came from a database source.
            print("Statement was from a database: %s" % db_s.db_info_id)
            continue
        # Walk up the provenance chain: reading -> content -> text ref.
        db_r = db.select_one(db.Reading, db.Reading.id == db_s.reading_id)
        print('\tReading:', str_imp(db_r))
        tc = db.select_one(db.TextContent,
                           db.TextContent.id == db_r.text_content_id)
        print('\tText Content:', str_imp(tc))
        tr = db.select_one(db.TextRef, db.TextRef.id == tc.text_ref_id)
        print('\tText ref:', str_imp(tr))
        print('-'*100)
        # Dump every sibling content/reading/statement under the text ref.
        # (The innermost loop variable shadows the outer `s`; harmless since
        # the outer `s` is rebound by its own for loop on each iteration.)
        for tc in db.select_all(db.TextContent,
                                db.TextContent.text_ref_id == tr.id):
            print('\t', str_imp(tc))
            for r in db.select_all(db.Reading,
                                   db.Reading.text_content_id == tc.id):
                print('\t\t', str_imp(r))
                for s in db.select_all(db.RawStatements,
                                       db.RawStatements.reading_id == r.id):
                    print('\t\t\t', str_imp(s, uuid, other_stmt_keys))
        print('='*100)
# ==============================================================================
# Generic test definitions
# ==============================================================================
@needs_py3
def _check_statement_distillation(num_stmts):
    """Generic check of distill_stmts on a db with `num_stmts` statements.

    Full-statement, id-only, and multi-process runs must all agree in size,
    and the parallel id run must reproduce the serial one exactly.
    """
    db = _get_loaded_db(num_stmts)
    assert db is not None, "Test was broken. Got None instead of db insance."
    stmts = db_util.distill_stmts(db, get_full_stmts=True)
    assert len(stmts), "Got zero statements."
    assert isinstance(list(stmts)[0], Statement), type(list(stmts)[0])
    stmt_ids = db_util.distill_stmts(db)
    assert len(stmts) == len(stmt_ids), \
        "stmts: %d, stmt_ids: %d" % (len(stmts), len(stmt_ids))
    assert isinstance(list(stmt_ids)[0], int), type(list(stmt_ids)[0])
    # Re-run with two processes; results must match the serial runs.
    stmts_p = db_util.distill_stmts(db, num_procs=2)
    assert len(stmts_p) == len(stmt_ids)
    stmt_ids_p = db_util.distill_stmts(db, num_procs=2)
    assert stmt_ids_p == stmt_ids
@needs_py3
def _check_preassembly_with_database(num_stmts, batch_size, n_proc=1):
    """Generic check that PreassemblyManager.create_corpus reproduces
    old-fashioned in-memory preassembly.

    Loads `num_stmts` raw statements, preassembles them with the given
    batch_size/n_proc, validates the link tables, then compares against
    the opa result statement by statement.
    """
    db = _get_loaded_db(num_stmts)

    # Now test the set of preassembled (pa) statements from the database
    # against what we get from old-fashioned preassembly (opa).
    opa_inp_stmts = _get_opa_input_stmts(db)

    # Get the set of raw statements.
    raw_stmt_list = db.select_all(db.RawStatements)
    all_raw_ids = {raw_stmt.id for raw_stmt in raw_stmt_list}
    assert len(raw_stmt_list)

    # Run the preassembly initialization.
    start = datetime.now()
    pa_manager = pm.PreassemblyManager(batch_size=batch_size, n_proc=n_proc,
                                       print_logs=True)
    pa_manager.create_corpus(db)
    end = datetime.now()
    print("Duration:", end-start)

    # Make sure the number of pa statements is within reasonable bounds.
    pa_stmt_list = db.select_all(db.PAStatements)
    assert 0 < len(pa_stmt_list) < len(raw_stmt_list)

    # Check the evidence links.
    raw_unique_link_list = db.select_all(db.RawUniqueLinks)
    assert len(raw_unique_link_list)
    all_link_ids = {ru.raw_stmt_id for ru in raw_unique_link_list}
    all_link_mk_hashes = {ru.pa_stmt_mk_hash for ru in raw_unique_link_list}
    # BUG FIX: the original used `is 0`, an identity test that relies on
    # CPython's small-int caching; compare the length by value instead.
    assert len(all_link_ids - all_raw_ids) == 0
    assert all([pa_stmt.mk_hash in all_link_mk_hashes
                for pa_stmt in pa_stmt_list])

    # Check the support links.
    sup_links = db.select_all([db.PASupportLinks.supporting_mk_hash,
                               db.PASupportLinks.supported_mk_hash])
    assert sup_links
    assert not any([l[0] == l[1] for l in sup_links]),\
        "Found self-support in the database."

    # Try to get all the preassembled statements from the table.
    pa_stmts = db_client.get_statements([], preassembled=True, db=db,
                                        with_support=True)
    assert len(pa_stmts) == len(pa_stmt_list), (len(pa_stmts),
                                                len(pa_stmt_list))
    self_supports = {
        shash(s): shash(s) in {shash(s_) for s_ in s.supported_by + s.supports}
        for s in pa_stmts
    }
    if any(self_supports.values()):
        assert False, "Found self-support in constructed pa statement objects."
    _check_against_opa_stmts(db, opa_inp_stmts, pa_stmts)
    return
@needs_py3
def _check_db_pa_supplement(num_stmts, batch_size, split=0.8, n_proc=1):
    """Generic check of incremental preassembly.

    A `split` fraction of statements is preassembled up front, the remainder
    is inserted afterwards, and supplement_corpus must then yield the same
    result as one-shot old-fashioned preassembly on everything.
    """
    pa_manager = pm.PreassemblyManager(batch_size=batch_size, n_proc=n_proc,
                                       print_logs=True)
    db = _get_loaded_db(num_stmts, split=split, pam=pa_manager)
    opa_inp_stmts = _get_opa_input_stmts(db)
    start = datetime.now()
    print("Beginning supplement...")
    pa_manager.supplement_corpus(db)
    end = datetime.now()
    print("Duration of incremental update:", end-start)
    pa_stmts = db_client.get_statements([], preassembled=True, db=db,
                                        with_support=True)
    _check_against_opa_stmts(db, opa_inp_stmts, pa_stmts)
    return
# ==============================================================================
# Specific Tests
# ==============================================================================
def test_distillation_on_curated_set():
    """Check _get_filtered_rdg_statements against the hand-curated raw set."""
    stmt_dict, stmt_list, target_sets, target_bettered_ids = \
        make_raw_statement_set_for_distillation()
    filtered_set, duplicate_ids, bettered_ids = \
        db_util._get_filtered_rdg_statements(stmt_dict, get_full_stmts=True)
    # Any one of the enumerated target outcomes is acceptable; the for/else
    # fires the failure only when no candidate matched.
    for stmt_set, dup_set in target_sets:
        if stmt_set == filtered_set:
            break
    else:
        assert False, "Filtered set does not match any valid possibilities."
    assert bettered_ids == target_bettered_ids
    # dup_set is the duplicate trace of the matching iteration above.
    assert dup_set == duplicate_ids, (dup_set - duplicate_ids,
                                      duplicate_ids - dup_set)
    # Repeat with ids only; the count must agree with the full-statement run.
    stmt_dict, stmt_list, target_sets, target_bettered_ids = \
        make_raw_statement_set_for_distillation()
    filtered_id_set, duplicate_ids, bettered_ids = \
        db_util._get_filtered_rdg_statements(stmt_dict, get_full_stmts=False)
    assert len(filtered_id_set) == len(filtered_set), \
        (len(filtered_set), len(filtered_id_set))
# Concrete test entry points: thin wrappers that choose the dataset size,
# batch size, and (sometimes) process count for the generic checks above.
@attr('nonpublic')
def test_statement_distillation_small():
    _check_statement_distillation(1000)
@attr('nonpublic', 'slow')
def test_statement_distillation_large():
    _check_statement_distillation(11721)
@attr('nonpublic', 'slow')
def test_statement_distillation_extra_large():
    _check_statement_distillation(1001721)
@attr('nonpublic')
def test_db_preassembly_small():
    _check_preassembly_with_database(200, 40)
@attr('nonpublic', 'slow')
def test_db_preassembly_large():
    _check_preassembly_with_database(11721, 2017)
@attr('nonpublic', 'slow')
def test_db_preassembly_extra_large():
    _check_preassembly_with_database(101721, 20017)
@attr('nonpublic', 'slow')
def test_db_preassembly_supremely_large():
    _check_preassembly_with_database(1001721, 200017)
@attr('nonpublic')
def test_db_incremental_preassembly_small():
    _check_db_pa_supplement(200, 40)
@attr('nonpublic', 'slow')
def test_db_incremental_preassembly_large():
    _check_db_pa_supplement(11721, 2017)
@attr('nonpublic', 'slow')
def test_db_incremental_preassembly_very_large():
    _check_db_pa_supplement(100000, 20000, n_proc=2)
@attr('nonpublic', 'slow')
def test_db_incremental_preassembly_1M():
    _check_db_pa_supplement(1000000, 200000, n_proc=6)
|
python
|
from pandac.PandaModules import *
from direct.directnotify import DirectNotifyGlobal
from direct.showbase.DirectObject import DirectObject
from toontown.minigame import ToonBlitzGlobals
from toontown.minigame import TwoDSection
from toontown.minigame import TwoDSpawnPointMgr
from toontown.minigame import TwoDBlock
from direct.gui import DirectGui
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownGlobals
class TwoDSectionMgr(DirectObject):
    """Builds and manages the 2D level sections (start, selected, end) of
    the Toon Blitz minigame. Python 2 code (uses xrange)."""
    notify = DirectNotifyGlobal.directNotify.newCategory('TwoDSectionMgr')
    def __init__(self, game, sectionsSelected):
        self.game = game
        self.sectionsPool = []
        self.sectionsSelected = []
        self.sections = []
        self.sectionNPList = []
        # Index of the section the toon currently occupies.
        self.activeSection = 0
        # Call order matters: setupSections() initializes self.incrementX,
        # which setupEndSection() reads.
        self.setupStartSection()
        self.setupSections(sectionsSelected)
        self.setupEndSection(len(sectionsSelected))
    def destroy(self):
        # Destroy each section before dropping our references.
        while len(self.sections):
            section = self.sections[0]
            section.destroy()
            self.sections.remove(section)
        self.sections = []
        self.sectionsPool = []
        self.sectionsSelected = []
        self.sectionNPList = []
        self.startWall.removeNode()
        del self.startWall
        self.startPipe.removeNode()
        del self.startPipe
        self.startArrow.removeNode()
        del self.startArrow
        self.endArrow.removeNode()
        del self.endArrow
        # NOTE(review): endWall, exitElevator and the section NodePaths are
        # not removed here — confirm they are cleaned up elsewhere.
        self.game = None
        self.activeSection = 0
        return
    def setupStartSection(self):
        # Static geometry before the first gameplay section: wall, pipe,
        # arrow, and the fixed starting block layout.
        self.startSectionNP = NodePath('StartSection')
        self.startSectionNP.reparentTo(self.game.assetMgr.world)
        self.startSectionNP.setX(-48)
        self.startWall = self.game.assetMgr.startingWall.copyTo(self.startSectionNP)
        self.startWall.setPos(-28, 0, 4)
        self.startWall.setScale(0.8)
        self.startPipe = self.game.assetMgr.startingPipe.copyTo(self.startSectionNP)
        self.startPipe.setPos(12, 0, 44)
        self.startArrow = self.game.assetMgr.arrow.copyTo(self.startSectionNP)
        self.startArrow.setPos(23, 1.5, 12.76)
        for index in xrange(len(ToonBlitzGlobals.BlockListStart)):
            blockAttribs = ToonBlitzGlobals.BlockListStart[index]
            fileName = ToonBlitzGlobals.BlockTypes[blockAttribs[0]][0]
            # The block type index is encoded in the last character of the
            # model file name.
            blockIndex = int(fileName[-1])
            blockType = self.game.assetMgr.blockTypes[blockIndex]
            sectionizedId = 'start-' + str(index)
            newBlock = TwoDBlock.TwoDBlock(blockType, sectionizedId, blockAttribs)
            newBlock.model.reparentTo(self.startSectionNP)
    def setupEndSection(self, index):
        # Final section: wall, arrow, exit elevator and its cog signage.
        # Requires self.incrementX from setupSections(); `index` is the
        # count of selected sections (used as this section's index).
        aspectSF = 0.7227
        self.endSectionNP = NodePath('EndSection')
        self.endSectionNP.reparentTo(self.game.assetMgr.world)
        self.endSectionNP.setX(self.incrementX)
        self.endWall = self.game.assetMgr.startingWall.copyTo(self.endSectionNP)
        self.endWall.setPos(100, 0, 4)
        self.endWall.setScale(0.8)
        self.endArrow = self.game.assetMgr.arrow.copyTo(self.endSectionNP)
        self.endArrow.setPos(6, 1.5, 12.76)
        self.exitElevator = self.game.assetMgr.exitElevator.copyTo(self.endSectionNP)
        self.exitElevator.setPos(52, -2, 12.7)
        cogSignModel = loader.loadModel('phase_4/models/props/sign_sellBotHeadHQ')
        cogSign = cogSignModel.find('**/sign_sellBotHeadHQ')
        cogSignSF = 23
        elevatorSignSF = 15
        sideDoor = self.exitElevator.find('**/doorway2')
        sdSign = cogSign.copyTo(sideDoor)
        sdSign.setPosHprScale(0, 1.9, 15, 0, 0, 0, elevatorSignSF, elevatorSignSF, elevatorSignSF * aspectSF)
        sdSign.node().setEffect(DecalEffect.make())
        sdText = DirectGui.OnscreenText(text=TTLocalizer.TwoDGameElevatorExit, font=ToontownGlobals.getSuitFont(), pos=(0, -0.34), scale=0.15, mayChange=False, parent=sdSign)
        sdText.setDepthWrite(0)
        self.sectionNPList.append(self.endSectionNP)
        # Minimal section descriptor: id plus empty attrib lists.
        endSectionInfo = ('end',
         [],
         [],
         [0],
         [])
        endSection = TwoDSection.TwoDSection(index, endSectionInfo, self.endSectionNP, self)
        self.sections.append(endSection)
        self.incrementX += endSection.length
    def setupSections(self, sectionsSelected):
        # Lay the selected sections out left to right, accumulating their
        # lengths in self.incrementX (also consumed by setupEndSection).
        self.incrementX = -24
        for index in xrange(0, len(sectionsSelected)):
            sectionNP = NodePath('Section' + str(index))
            sectionNP.reparentTo(self.game.assetMgr.world)
            sectionNP.setX(self.incrementX)
            self.sectionNPList.append(sectionNP)
            section = TwoDSection.TwoDSection(index, sectionsSelected[index], sectionNP, self)
            self.sections.append(section)
            self.incrementX += section.length
    def enterPlay(self, elapsedTime):
        # Forward play/pause state changes to every section.
        for section in self.sections:
            section.enterPlay(elapsedTime)
    def exitPlay(self):
        pass
    def enterPause(self):
        for section in self.sections:
            section.enterPause()
    def exitPause(self):
        for section in self.sections:
            section.exitPause()
    def updateActiveSection(self, sectionIndex):
        # Track which section the toon is in (used for respawning).
        if self.activeSection != sectionIndex:
            self.activeSection = sectionIndex
            self.notify.debug('Toon is in section %s.' % sectionIndex)
    def getLastSpawnPoint(self):
        # Convert the active section's spawn point into world-relative X.
        relativePoint = Point3(self.sections[self.activeSection].spawnPointMgr.getSpawnPoint())
        relativePoint.setX(relativePoint.getX() + self.sectionNPList[self.activeSection].getX())
        return relativePoint
|
python
|
import numpy as np
"""
Solve the standard linear programming problem:
maximize c^{T}x
s. t. Ax <= b and x >= 0
by the simplex method.
Note: enter A and b as usual, but store -c in the variable c.
For example, try to minimize
x_{1} + x_{2}
s. t. ([1, 2], [3, 4])x <= (5, 6) and x >= 0
then
A = np.array([
[1, 2],
[3, 4],
])
b = np.array([5, 6])
c = np.array([-1, -1])
"""
def simplex(A, b, c, r, t, v):
    """
    The Simplex Method with Regard to a Standard Problem
    - Initialize the Optimal Value v as 0
    - Determine the Current Case
    - Pivot until It Reaches the Base Case
    - Conclude the Final Results
    """
    M = np.shape(A)[0]
    N = np.shape(A)[1]
    """Determine Cases"""
    case = case_determination(b, c)
    if case == 1: # Base Case
        solution(M, N, b, c, r, t, v)
        return
    else: # Recursive Case
        h, k = pivot_index(A, b, c, M, N, case)
        if h < 0 or k < 0:
            # Protection
            # The problem can be infeasible or unbounded.
            return
        else:
            A, b, c, r, t, v = pivot(A, b, c, r, t, M, N, v, h, k)
            # Recurse on the pivoted tableau. NOTE(review): recursion depth
            # equals the number of pivots — could hit the recursion limit on
            # very large problems; confirm acceptable for expected sizes.
            return simplex(A, b, c, r, t, v)
def case_determination(b, c):
    """Classify the current tableau.

    Returns 1 (base case: optimal, b >= 0 and c >= 0), 2 (b >= 0 but some
    cost coefficient is negative), or 3 (some b entry is negative).
    """
    if b.min() < 0:
        return 3
    return 1 if c.min() >= 0 else 2
def pivot_index(A, b, c, M, N, case):
    """Find the pivot element A[h, k] in cases 2 or 3.

    Returns the (row, column) pair (h, k), or (-1, -1) when the problem is
    detected to be unbounded (case 2) or infeasible (case 3).
    """
    """Case 2"""
    if case == 2:
        """Find the Column Index k"""
        # First column with a negative cost coefficient.
        for j in range(N):
            if c[j] < 0:
                k = j
                break
        """Find the Row Index h"""
        # Min-ratio rule: among rows with A[i, k] > 0, minimize b[i]/A[i, k].
        h_value = []
        h_index = []
        for i in range(M):
            if A[i, k] > 0:
                h_value.append(b[i] / A[i, k])
                h_index.append(i)
        if h_value:
            h = h_index[h_value.index(min(h_value))]
        else:
            # Protection: no positive entry bounds growth along column k.
            print("The standard problem is unbounded feasible.")
            return -1, -1
        return h, k
    """Case 3"""
    if case == 3:
        """Find the First Negative Row in b"""
        for i in range(M):
            if b[i] < 0:
                f = i
                break
        """Find the Column Index k"""
        # First negative entry in row f; if none, the problem is infeasible.
        k = -1
        for j in range(N):
            if A[f, j] < 0:
                k = j
                break
        # Protection
        if k == -1:
            print("The standard problem is infeasible.")
            return -1, -1
        """Find the Row Index h"""
        # Compare row f's ratio against the usual min-ratio candidates.
        f_value = b[f] / A[f, k]
        h_value = []
        h_index = []
        # BUG FIX: iterate the M rows of b and A — the original used
        # range(N), confusing row and column counts for non-square A.
        for i in range(M):
            if b[i] >= 0 and A[i, k] > 0:
                h_value.append(b[i] / A[i, k])
                h_index.append(i)
        if not h_value:
            h = f
        else:
            if f_value < min(h_value):
                h = f
            else:
                h = h_index[h_value.index(min(h_value))]
        return h, k
def pivot(A, b, c, r, t, M, N, v, h, k):
    """Perform one simplex pivot about element A[h, k].

    The tableau S = [[A, b], [c, v]] is transformed by the standard pivot
    rules, and the k-th independent label row in `r` is exchanged with the
    h-th dependent label row in `t` (both arrays are modified in place).
    Returns the updated (A, b, c, r, t, v).
    """
    """Combine Matrices"""
    S = np.empty((M + 1, N + 1))
    S[:M, :N] = A
    S[:M, N] = b
    S[M, :N] = c
    S[M, N] = v
    """Pivot Operation"""
    S_hat = np.empty((M + 1, N + 1))
    p = S[h, k]
    for i in range(M + 1):
        for j in range(N + 1):
            if i == h and j == k:
                S_hat[i, j] = 1 / p
            if i == h and j != k:
                S_hat[i, j] = S[h, j] / p
            if i != h and j == k:
                S_hat[i, j] = -S[i, k] / p
            if i != h and j != k:
                S_hat[i, j] = S[i, j] - S[h, j] * S[i, k] / p
    """Decompose Matrix"""
    A = S_hat[:M, :N]
    b = S_hat[:M, N]
    c = S_hat[M, :N]
    v = S_hat[M, N]
    """Pivot r and t"""
    # IDIOM FIX: swap the label rows by tuple assignment with explicit
    # copies instead of the original scratch (1, 2) array, which relied on
    # implicit broadcasting into a (2,)-shaped row.
    r[k], t[h] = t[h].copy(), r[k].copy()
    return A, b, c, r, t, v
def solution(M, N, b, c, r, t, v):
    """Print the optimal vectors of the primal and dual, plus the value.

    Label convention in r/t: column 0 is 0 for an x-variable and 1 for a
    y-variable; column 1 holds the 1-based subscript. Dependent x's read
    their value from b, dependent y's from c; independent variables are 0.
    """
    print("Optimal Vector x for the Primal:")
    for row in range(M):
        if t[row, 0] == 0:
            print("x_" + str(int(t[row, 1])) + " = " + str(b[row]))
    for col in range(N):
        if r[col, 0] == 0:
            print("x_" + str(int(r[col, 1])) + " = 0")
    print("Optimal Vector y for the Dual:")
    for row in range(M):
        if t[row, 0] == 1:
            print("y_" + str(int(t[row, 1])) + " = 0")
    for col in range(N):
        if r[col, 0] == 1:
            print("y_" + str(int(r[col, 1])) + " = " + str(c[col]))
    print("The Optimal Value: " + str(v))
    return
def initialization(A):
    """Initialize the label arrays r, t and the optimal value v.

    r & t layout:
    - Column 0: 0 denotes an x-variable; 1 denotes a y-variable.
    - Column 1: the 1-based subscript of the variable.
    Independent variables (r) start as x_1..x_N; dependent ones (t) as
    y_1..y_M. The optimal value starts at 0.
    """
    M, N = np.shape(A)
    r = np.column_stack((np.zeros(N), np.arange(1, N + 1)))
    t = np.column_stack((np.ones(M), np.arange(1, M + 1)))
    return r, t, 0
"""Test Samples"""
if __name__ == '__main__':
# Sample 1
A = np.array([
[2, 1, -7],
[-1, 0, 4],
[1, 2, -6],
])
b = np.array([3, -1, 2])
c = np.array([1, -2, -1])
# Sample 2
A = np.array([
[1, -1, -2, -1],
[2, 0, 1, -4],
[-2, 1, 0, 1],
])
b = np.array([4, 2, 1])
c = np.array([-1, 2, 3, 1])
# Sample 3
A = np.array([
[-3, 3, 1],
[2, -1, -2],
[-1, 0, 1],
])
b = np.array([3, 1, 1])
c = np.array([1, 1, -2])
# Implementation
r, t, v = initialization(A)
simplex(A, b, c, r, t, v)
|
python
|
# -*- coding: utf-8 -*-
# Copyright 2018 Whitestack, LLC
# *************************************************************
# This file is part of OSM Monitoring module
# All Rights Reserved to Whitestack, LLC
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# For those usages not covered by the Apache License, Version 2.0 please
# contact: [email protected] or [email protected]
##
import logging
import time
import socket
import peewee
from osm_mon.dashboarder.service import DashboarderService
from osm_mon.core.config import Config
log = logging.getLogger(__name__)
class Dashboarder:
    """Periodically (re)creates dashboards through DashboarderService."""
    def __init__(self, config: Config):
        self.conf = config
        self.service = DashboarderService(config)
    def dashboard_forever(self):
        """Loop forever, creating dashboards every configured interval.

        Each cycle first checks that the ``grafana`` host resolves; while
        it does not, the loop just sleeps and retries.
        """
        log.debug('dashboard_forever')
        while True:
            try:
                # DNS resolution doubles as a cheap liveness probe for
                # the dashboard backend.
                socket.gethostbyname("grafana")
                log.debug("Dashboard backend is running")
            except socket.error:
                log.debug("Dashboard backend is not available")
                time.sleep(int(self.conf.get('dashboarder', 'interval')))
                continue
            try:
                self.create_dashboards()
                time.sleep(int(self.conf.get('dashboarder', 'interval')))
            except peewee.PeeweeException:
                # Database errors are fatal: re-raise after logging.
                log.exception("Database error consuming message: ")
                raise
            except Exception:
                # Anything else is logged and the loop keeps running.
                log.exception("Error creating dashboards")
    def create_dashboards(self):
        """Delegate dashboard creation to the service layer."""
        self.service.create_dashboards()
        log.debug('I just called the dashboarder service!')
|
python
|
from dataclasses import dataclass
from .flipper_output import FlipperOutput
@dataclass
class Paddle:
    """Tracks a paddle's position and the direction of its last move."""

    x: int = 0
    y: int = 0
    direction: int = 0

    def update(self, output: FlipperOutput):
        """Record the new position and derive the movement direction.

        ``direction`` becomes 1 when x increased, -1 when it decreased,
        and 0 when it stayed put.
        """
        new_x = output.x
        new_y = output.y
        if new_x > self.x:
            self.direction = 1
        elif new_x < self.x:
            self.direction = -1
        else:
            self.direction = 0
        self.x = new_x
        self.y = new_y
|
python
|
"""
Zendesk Proxy Configuration
"""
from django.apps import AppConfig
from edx_django_utils.plugins import PluginURLs, PluginSettings
from openedx.core.djangoapps.plugins.constants import ProjectType, SettingsType
class ZendeskProxyConfig(AppConfig):
    """
    AppConfig for zendesk proxy app.

    Registers the app's URLs (under ``zendesk_proxy/``) and its settings
    modules with both the LMS and CMS via the edX plugin framework.
    """
    name = 'openedx.core.djangoapps.zendesk_proxy'
    plugin_app = {
        # URL registration: same regex and urls module for both projects,
        # with no namespace.
        PluginURLs.CONFIG: {
            ProjectType.CMS: {
                PluginURLs.NAMESPACE: '',
                PluginURLs.REGEX: r'^zendesk_proxy/',
                PluginURLs.RELATIVE_PATH: 'urls',
            },
            ProjectType.LMS: {
                PluginURLs.NAMESPACE: '',
                PluginURLs.REGEX: r'^zendesk_proxy/',
                PluginURLs.RELATIVE_PATH: 'urls',
            }
        },
        # Settings registration: common and production settings modules
        # for both projects.
        PluginSettings.CONFIG: {
            ProjectType.CMS: {
                SettingsType.COMMON: {PluginSettings.RELATIVE_PATH: 'settings.common'},
                SettingsType.PRODUCTION: {PluginSettings.RELATIVE_PATH: 'settings.production'},
            },
            ProjectType.LMS: {
                SettingsType.COMMON: {PluginSettings.RELATIVE_PATH: 'settings.common'},
                SettingsType.PRODUCTION: {PluginSettings.RELATIVE_PATH: 'settings.production'},
            }
        }
    }
|
python
|
# Copyright 2014 Emmanuele Bassi
#
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
import re
NUMBER_REGEX = re.compile(r'([0-9])([a-z])')


def to_camel_case(text):
    """Convert a Graphene C type name into its CamelCase counterpart.

    Only identifiers of the form ``graphene_*_t`` are transformed, e.g.
    ``graphene_point3d_t`` -> ``GraphenePoint3D``; any other input is
    returned unchanged.
    """
    # We only care about Graphene types: require BOTH the prefix and the
    # ``_t`` suffix.  (Bug fix: the original used ``and``, which let
    # names like ``foo_t`` fall through and be transformed.)
    if not text.startswith('graphene_') or not text.endswith('_t'):
        return text
    res = []
    for token in text[:-2].split('_'):
        uc_token = token.title()
        # We need to do this for types like graphene_point3d_t, which
        # need to be transformed into GraphenePoint3D, not GraphenePoint3d
        matches = NUMBER_REGEX.match(uc_token)
        if matches and matches.group(2):
            # Bug fix: ``.title`` was missing its call parentheses, so
            # ``join`` received a bound method and raised TypeError.
            uc_token = ''.join([matches.group(1), matches.group(2).title()])
        res.append(uc_token)
    return ''.join(res)
if __name__ == '__main__':
    # Filter mode: read an identifier from stdin and write the converted
    # name to stdout.
    in_text = sys.stdin.read()
    sys.stdout.write(to_camel_case(in_text))
|
python
|
#!/usr/bin/python2.5
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Unit tests for transitfeed/util.py
try:
import mock
except ImportError:
import unittest.mock as mock
import datetime
import re
from six import StringIO
import tests.util as test_util
from transitfeed import problems
from transitfeed.problems import ProblemReporter
from transitfeed import stop
from transitfeed import util
from transitfeed import version
import unittest
try:
from urllib.request import urlopen, Request
from urllib.error import HTTPError, URLError
except ImportError:
from urllib2 import urlopen, Request, HTTPError, URLError
class ColorLuminanceTestCase(test_util.TestCase):
    """Checks util.ColorLuminance against hand-computed luminance values."""
    def runTest(self):
        self.assertEqual(util.ColorLuminance('000000'), 0,
                         "ColorLuminance('000000') should be zero")
        self.assertEqual(util.ColorLuminance('FFFFFF'), 255,
                         "ColorLuminance('FFFFFF') should be 255")
        RGBmsg = ("ColorLuminance('RRGGBB') should be "
                  "0.299*<Red> + 0.587*<Green> + 0.114*<Blue>")
        decimal_places_tested = 8
        # Each primary tested in isolation (0x64 == 100 decimal).
        self.assertAlmostEqual(util.ColorLuminance('640000'), 29.9,
                               decimal_places_tested, RGBmsg)
        self.assertAlmostEqual(util.ColorLuminance('006400'), 58.7,
                               decimal_places_tested, RGBmsg)
        self.assertAlmostEqual(util.ColorLuminance('000064'), 11.4,
                               decimal_places_tested, RGBmsg)
        # Mixed-channel value.
        self.assertAlmostEqual(util.ColorLuminance('1171B3'),
                               0.299*17 + 0.587*113 + 0.114*179,
                               decimal_places_tested, RGBmsg)
class FindUniqueIdTestCase(test_util.TestCase):
    """Tests util.FindUniqueId, which generates keys unused in a dict."""
    def test_simple(self):
        d = {}
        # Five successive calls on an initially empty dict yield '0'..'4'.
        for i in range(0, 5):
            d[util.FindUniqueId(d)] = 1
        k = list(d.keys())
        k.sort()
        self.assertEqual(('0', '1', '2', '3', '4'), tuple(k))
    def test_AvoidCollision(self):
        # Keys already present must never be returned again.
        d = {'1': 1}
        d[util.FindUniqueId(d)] = 1
        self.assertEqual(2, len(d))
        self.assertFalse('3' in d, "Ops, next statement should add something to d")
        d['3'] = None
        d[util.FindUniqueId(d)] = 1
        self.assertEqual(4, len(d))
class ApproximateDistanceBetweenStopsTestCase(test_util.TestCase):
    """Sanity-checks the approximate stop-to-stop distance calculation."""
    def testEquator(self):
        stop1 = stop.Stop(lat=0, lng=100, name='Stop one', stop_id='1')
        stop2 = stop.Stop(lat=0.01, lng=100.01, name='Stop two', stop_id='2')
        self.assertAlmostEqual(
            util.ApproximateDistanceBetweenStops(stop1, stop2),
            1570, -1)  # Compare first 3 digits
    def testWhati(self):
        # High-latitude case, where a degree of longitude is much shorter.
        stop1 = stop.Stop(lat=63.1, lng=-117.2, name='whati one', stop_id='1')
        stop2 = stop.Stop(lat=63.102, lng=-117.201, name='whati two', stop_id='2')
        self.assertAlmostEqual(
            util.ApproximateDistanceBetweenStops(stop1, stop2),
            228, 0)
class TimeConversionHelpersTestCase(test_util.TestCase):
    """Tests the HH:MM:SS / seconds-since-midnight / date helpers."""
    def testTimeToSecondsSinceMidnight(self):
        self.assertEqual(util.TimeToSecondsSinceMidnight("01:02:03"), 3723)
        self.assertEqual(util.TimeToSecondsSinceMidnight("00:00:00"), 0)
        # Hours past 24 are accepted (service running past midnight).
        self.assertEqual(util.TimeToSecondsSinceMidnight("25:24:23"), 91463)
        try:
            util.TimeToSecondsSinceMidnight("10:15:00am")
        except problems.Error:
            pass  # expected
        else:
            self.fail("Should have thrown Error")
    def testFormatSecondsSinceMidnight(self):
        self.assertEqual(util.FormatSecondsSinceMidnight(3723), "01:02:03")
        self.assertEqual(util.FormatSecondsSinceMidnight(0), "00:00:00")
        self.assertEqual(util.FormatSecondsSinceMidnight(91463), "25:24:23")
    def testDateStringToDateObject(self):
        self.assertEqual(util.DateStringToDateObject("20080901"),
                         datetime.date(2008, 9, 1))
        # Invalid calendar dates come back as None rather than raising.
        self.assertEqual(util.DateStringToDateObject("20080841"), None)
class ValidationUtilsTestCase(test_util.TestCase):
    """Tests the Is*/Validate* field validators in transitfeed.util.

    Each ``Validate*`` helper accepts the empty string (optional field),
    accepts a well-formed value, and reports an InvalidValue problem for
    a malformed one.
    """
    def testIsValidURL(self):
        self.assertTrue(util.IsValidURL("http://www.example.com"))
        self.assertFalse(util.IsValidURL("ftp://www.example.com"))
        self.assertFalse(util.IsValidURL(""))
    def testValidateURL(self):
        accumulator = test_util.RecordingProblemAccumulator(self)
        problems = ProblemReporter(accumulator)
        self.assertTrue(util.ValidateURL("", "col", problems))
        accumulator.AssertNoMoreExceptions()
        self.assertTrue(util.ValidateURL("http://www.example.com", "col",
                                         problems))
        accumulator.AssertNoMoreExceptions()
        self.assertFalse(util.ValidateURL("ftp://www.example.com", "col",
                                          problems))
        e = accumulator.PopInvalidValue("col")
        accumulator.AssertNoMoreExceptions()
    def testIsValidHexColor(self):
        self.assertTrue(util.IsValidHexColor("33FF00"))
        self.assertFalse(util.IsValidHexColor("blue"))
        self.assertFalse(util.IsValidHexColor(""))
    def testIsValidLanguageCode(self):
        self.assertTrue(util.IsValidLanguageCode("de"))
        self.assertFalse(util.IsValidLanguageCode("Swiss German"))
        self.assertFalse(util.IsValidLanguageCode(""))
    def testValidateLanguageCode(self):
        # Fix: normalized the malformed spacing around the assignment
        # ("accumulator =test_util. RecordingProblemAccumulator(self)")
        # to match every other test in this file.
        accumulator = test_util.RecordingProblemAccumulator(self)
        problems = ProblemReporter(accumulator)
        self.assertTrue(util.ValidateLanguageCode("", "col", problems))
        accumulator.AssertNoMoreExceptions()
        self.assertTrue(util.ValidateLanguageCode("de", "col", problems))
        accumulator.AssertNoMoreExceptions()
        self.assertFalse(util.ValidateLanguageCode("Swiss German", "col",
                                                   problems))
        e = accumulator.PopInvalidValue("col")
        accumulator.AssertNoMoreExceptions()
    def testIsValidTimezone(self):
        self.assertTrue(util.IsValidTimezone("America/Los_Angeles"))
        self.assertFalse(util.IsValidTimezone("Switzerland/Wil"))
        self.assertFalse(util.IsValidTimezone(""))
    def testValidateTimezone(self):
        accumulator = test_util.RecordingProblemAccumulator(self)
        problems = ProblemReporter(accumulator)
        self.assertTrue(util.ValidateTimezone("", "col", problems))
        accumulator.AssertNoMoreExceptions()
        self.assertTrue(util.ValidateTimezone("America/Los_Angeles", "col",
                                              problems))
        accumulator.AssertNoMoreExceptions()
        self.assertFalse(util.ValidateTimezone("Switzerland/Wil", "col",
                                               problems))
        e = accumulator.PopInvalidValue("col")
        accumulator.AssertNoMoreExceptions()
    def testIsValidDate(self):
        self.assertTrue(util.IsValidDate("20100801"))
        self.assertFalse(util.IsValidDate("20100732"))
        self.assertFalse(util.IsValidDate(""))
    def testValidateDate(self):
        accumulator = test_util.RecordingProblemAccumulator(self)
        problems = ProblemReporter(accumulator)
        self.assertTrue(util.ValidateDate("", "col", problems))
        accumulator.AssertNoMoreExceptions()
        self.assertTrue(util.ValidateDate("20100801", "col", problems))
        accumulator.AssertNoMoreExceptions()
        self.assertFalse(util.ValidateDate("20100732", "col", problems))
        e = accumulator.PopInvalidValue("col")
        accumulator.AssertNoMoreExceptions()
class FloatStringToFloatTestCase(test_util.TestCase):
    """Tests util.FloatStringToFloat's accept / warn / reject behaviour."""
    def runTest(self):
        accumulator = test_util.RecordingProblemAccumulator(self)
        problems = ProblemReporter(accumulator)
        # Canonical decimal notations parse cleanly.
        self.assertAlmostEqual(0, util.FloatStringToFloat("0", problems))
        self.assertAlmostEqual(0, util.FloatStringToFloat(u"0", problems))
        self.assertAlmostEqual(1, util.FloatStringToFloat("1", problems))
        self.assertAlmostEqual(1, util.FloatStringToFloat("1.00000", problems))
        self.assertAlmostEqual(1.5, util.FloatStringToFloat("1.500", problems))
        self.assertAlmostEqual(-2, util.FloatStringToFloat("-2.0", problems))
        self.assertAlmostEqual(-2.5, util.FloatStringToFloat("-2.5", problems))
        # Non-decimal notations are rejected with ValueError.
        self.assertRaises(ValueError, util.FloatStringToFloat, ".", problems)
        self.assertRaises(ValueError, util.FloatStringToFloat, "0x20", problems)
        self.assertRaises(ValueError, util.FloatStringToFloat, "-0x20", problems)
        self.assertRaises(ValueError, util.FloatStringToFloat, "0b10", problems)
        # These should issue a warning, but otherwise parse successfully
        self.assertAlmostEqual(0.001, util.FloatStringToFloat("1E-3", problems))
        e = accumulator.PopException("InvalidFloatValue")
        self.assertAlmostEqual(0.001, util.FloatStringToFloat(".001", problems))
        e = accumulator.PopException("InvalidFloatValue")
        self.assertAlmostEqual(-0.001, util.FloatStringToFloat("-.001", problems))
        e = accumulator.PopException("InvalidFloatValue")
        self.assertAlmostEqual(0, util.FloatStringToFloat("0.", problems))
        e = accumulator.PopException("InvalidFloatValue")
        accumulator.AssertNoMoreExceptions()
class NonNegIntStringToIntTestCase(test_util.TestCase):
    """Tests util.NonNegIntStringToInt's accept / warn / reject behaviour."""
    def runTest(self):
        accumulator = test_util.RecordingProblemAccumulator(self)
        problems = ProblemReporter(accumulator)
        # Canonical non-negative decimal strings parse cleanly.
        self.assertEqual(0, util.NonNegIntStringToInt("0", problems))
        self.assertEqual(0, util.NonNegIntStringToInt(u"0", problems))
        self.assertEqual(1, util.NonNegIntStringToInt("1", problems))
        self.assertEqual(2, util.NonNegIntStringToInt("2", problems))
        self.assertEqual(10, util.NonNegIntStringToInt("10", problems))
        self.assertEqual(1234567890123456789,
                         util.NonNegIntStringToInt("1234567890123456789",
                                                   problems))
        # Negative, non-decimal and non-integer strings raise ValueError;
        # non-string inputs raise TypeError.
        self.assertRaises(ValueError, util.NonNegIntStringToInt, "", problems)
        self.assertRaises(ValueError, util.NonNegIntStringToInt, "-1", problems)
        self.assertRaises(ValueError, util.NonNegIntStringToInt, "0x1", problems)
        self.assertRaises(ValueError, util.NonNegIntStringToInt, "1.0", problems)
        self.assertRaises(ValueError, util.NonNegIntStringToInt, "1e1", problems)
        self.assertRaises(ValueError, util.NonNegIntStringToInt, "0x20", problems)
        self.assertRaises(ValueError, util.NonNegIntStringToInt, "0b10", problems)
        self.assertRaises(TypeError, util.NonNegIntStringToInt, 1, problems)
        self.assertRaises(TypeError, util.NonNegIntStringToInt, None, problems)
        # These should issue a warning, but otherwise parse successfully
        self.assertEqual(1, util.NonNegIntStringToInt("+1", problems))
        e = accumulator.PopException("InvalidNonNegativeIntegerValue")
        self.assertEqual(1, util.NonNegIntStringToInt("01", problems))
        e = accumulator.PopException("InvalidNonNegativeIntegerValue")
        self.assertEqual(0, util.NonNegIntStringToInt("00", problems))
        e = accumulator.PopException("InvalidNonNegativeIntegerValue")
        accumulator.AssertNoMoreExceptions()
class CheckVersionTestCase(test_util.TempDirTestCaseBase):
    """Tests util.CheckVersion under simulated network outcomes.

    Network access is faked per-test by patching
    ``transitfeed.util.urlopen`` with the canned behaviours from
    MockURLOpen.
    """
    def setUp(self):
        # Bug fix: the previous setUp stashed the module-level ``urlopen``
        # in ``self.orig_urlopen`` and tearDown "restored" it by assigning
        # to a *local* variable — a silent no-op.  Patching is done per
        # test via @mock.patch, so the manual save/restore was dead code
        # and has been removed.
        self.mock = MockURLOpen()
        self.accumulator = test_util.RecordingProblemAccumulator(self)
        self.problems = ProblemReporter(self.accumulator)
    def tearDown(self):
        self.mock = None
    def testAssignedDifferentVersion(self):
        """An explicitly newer version reports NewVersionAvailable."""
        util.CheckVersion(self.problems, '100.100.100')
        e = self.accumulator.PopException('NewVersionAvailable')
        self.assertEqual(e.version, '100.100.100')
        self.assertEqual(e.url, 'https://github.com/google/transitfeed')
        self.accumulator.AssertNoMoreExceptions()
    def testAssignedSameVersion(self):
        """The current version must not report anything."""
        util.CheckVersion(self.problems, version.__version__)
        self.accumulator.AssertNoMoreExceptions()
    @mock.patch('transitfeed.util.urlopen')
    def testGetCorrectReturns(self, mock_urlopen):
        """A fetched newer latest_version reports NewVersionAvailable."""
        mock_urlopen.return_value = StringIO('latest_version=100.0.1')
        util.CheckVersion(self.problems)
        self.accumulator.PopException('NewVersionAvailable')
    @mock.patch('transitfeed.util.urlopen')
    def testPageNotFound(self, mock_urlopen):
        """HTTP 404 is reported as an OtherProblem with the reason."""
        mock_urlopen.side_effect = self.mock.mockedPageNotFound
        util.CheckVersion(self.problems)
        e = self.accumulator.PopException('OtherProblem')
        self.assertTrue(re.search(r'we failed to reach', e.description))
        self.assertTrue(re.search(r'Reason: Not Found \[404\]', e.description))
    @mock.patch('transitfeed.util.urlopen')
    def testConnectionTimeOut(self, mock_urlopen):
        """A network timeout is reported as an OtherProblem."""
        mock_urlopen.side_effect = self.mock.mockedConnectionTimeOut
        util.CheckVersion(self.problems)
        e = self.accumulator.PopException('OtherProblem')
        self.assertTrue(re.search(r'we failed to reach', e.description))
        self.assertTrue(re.search(r'Reason: Connection timed', e.description))
    @mock.patch('transitfeed.util.urlopen')
    def testGetAddrInfoFailed(self, mock_urlopen):
        """A DNS failure is reported as an OtherProblem."""
        mock_urlopen.side_effect = self.mock.mockedGetAddrInfoFailed
        util.CheckVersion(self.problems)
        e = self.accumulator.PopException('OtherProblem')
        self.assertTrue(re.search(r'we failed to reach', e.description))
        self.assertTrue(re.search(r'Reason: Getaddrinfo failed', e.description))
    @mock.patch('transitfeed.util.urlopen', autospec=True)
    def testEmptyIsReturned(self, mock_urlopen):
        """An empty body is reported as a parse problem."""
        mock_urlopen.side_effect = self.mock.mockedEmptyIsReturned
        util.CheckVersion(self.problems)
        e = self.accumulator.PopException('OtherProblem')
        self.assertTrue(re.search(r'we had trouble parsing', e.description))
class MockURLOpen:
    """Pretend to be a urlopen suitable for testing.

    Each method mimics one network outcome: success, HTTP 404, timeout,
    DNS failure, or an empty response body.
    """

    def mockedConnectSuccess(self, request):
        """Successful fetch carrying a latest-version payload."""
        return StringIO('latest_version=100.0.1')

    def mockedPageNotFound(self, request):
        """Simulate the server answering with HTTP 404."""
        raise HTTPError(
            request.get_full_url(), 404, 'Not Found',
            request.header_items(), None)

    def mockedConnectionTimeOut(self, request):
        """Simulate a network-level timeout."""
        raise URLError('Connection timed out')

    def mockedGetAddrInfoFailed(self, request):
        """Simulate a DNS resolution failure."""
        raise URLError('Getaddrinfo failed')

    def mockedEmptyIsReturned(self, request):
        """Successful fetch whose body is empty."""
        return StringIO()
if __name__ == '__main__':
    # Run this module's unit tests when invoked directly.
    unittest.main()
|
python
|
from Session import Session
from Letter import Letter
from Word import Word
from Grid import Grid
from ApiException import ApiErrorCode, ApiException
|
python
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration making several Venue text fields nullable."""
    def forwards(self, orm):
        """Relax the Venue char fields to allow NULL."""
        # Changing field 'Venue.website'
        db.alter_column(u'venues_venue', 'website', self.gf('django.db.models.fields.CharField')(max_length=50, null=True))
        # Changing field 'Venue.style'
        db.alter_column(u'venues_venue', 'style', self.gf('django.db.models.fields.CharField')(max_length=200, null=True))
        # Changing field 'Venue.capacity'
        db.alter_column(u'venues_venue', 'capacity', self.gf('django.db.models.fields.CharField')(max_length=200, null=True))
        # Changing field 'Venue.twitter'
        db.alter_column(u'venues_venue', 'twitter', self.gf('django.db.models.fields.CharField')(max_length=200, null=True))
        # Changing field 'Venue.address'
        db.alter_column(u'venues_venue', 'address', self.gf('django.db.models.fields.CharField')(max_length=200, null=True))
        # Changing field 'Venue.contact'
        db.alter_column(u'venues_venue', 'contact', self.gf('django.db.models.fields.CharField')(max_length=50, null=True))
        # Changing field 'Venue.phone'
        db.alter_column(u'venues_venue', 'phone', self.gf('django.db.models.fields.CharField')(max_length=30, null=True))
        # Changing field 'Venue.cost'
        db.alter_column(u'venues_venue', 'cost', self.gf('django.db.models.fields.CharField')(max_length=200, null=True))
        # Changing field 'Venue.contact_email'
        db.alter_column(u'venues_venue', 'contact_email', self.gf('django.db.models.fields.CharField')(max_length=50, null=True))
        # Changing field 'Venue.postcode'
        db.alter_column(u'venues_venue', 'postcode', self.gf('django.db.models.fields.CharField')(max_length=10, null=True))
        # Changing field 'Venue.contact_twitter'
        db.alter_column(u'venues_venue', 'contact_twitter', self.gf('django.db.models.fields.CharField')(max_length=200, null=True))
    def backwards(self, orm):
        """Revert the fields to NOT NULL with an empty-string default."""
        # Changing field 'Venue.website'
        db.alter_column(u'venues_venue', 'website', self.gf('django.db.models.fields.CharField')(default='', max_length=50))
        # Changing field 'Venue.style'
        db.alter_column(u'venues_venue', 'style', self.gf('django.db.models.fields.CharField')(default='', max_length=200))
        # Changing field 'Venue.capacity'
        db.alter_column(u'venues_venue', 'capacity', self.gf('django.db.models.fields.CharField')(default='', max_length=200))
        # Changing field 'Venue.twitter'
        db.alter_column(u'venues_venue', 'twitter', self.gf('django.db.models.fields.CharField')(default='', max_length=200))
        # Changing field 'Venue.address'
        db.alter_column(u'venues_venue', 'address', self.gf('django.db.models.fields.CharField')(default='', max_length=200))
        # Changing field 'Venue.contact'
        db.alter_column(u'venues_venue', 'contact', self.gf('django.db.models.fields.CharField')(default='', max_length=50))
        # Changing field 'Venue.phone'
        db.alter_column(u'venues_venue', 'phone', self.gf('django.db.models.fields.CharField')(default='', max_length=30))
        # Changing field 'Venue.cost'
        db.alter_column(u'venues_venue', 'cost', self.gf('django.db.models.fields.CharField')(default='', max_length=200))
        # Changing field 'Venue.contact_email'
        db.alter_column(u'venues_venue', 'contact_email', self.gf('django.db.models.fields.CharField')(default='', max_length=50))
        # Changing field 'Venue.postcode'
        db.alter_column(u'venues_venue', 'postcode', self.gf('django.db.models.fields.CharField')(default='', max_length=10))
        # Changing field 'Venue.contact_twitter'
        db.alter_column(u'venues_venue', 'contact_twitter', self.gf('django.db.models.fields.CharField')(default='', max_length=200))
    # South's frozen model definitions at the time of this migration.
    models = {
        u'venues.city': {
            'Meta': {'object_name': 'City'},
            'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['venues.Country']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
        },
        u'venues.country': {
            'Meta': {'object_name': 'Country'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
        },
        u'venues.facility': {
            'Meta': {'object_name': 'Facility'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
        },
        u'venues.image': {
            'Meta': {'object_name': 'Image'},
            'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'venue': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['venues.Venue']"})
        },
        u'venues.venue': {
            'Meta': {'object_name': 'Venue'},
            'address': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'capacity': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'city': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['venues.City']", 'null': 'True', 'blank': 'True'}),
            'contact': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'contact_email': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'contact_twitter': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'cost': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['venues.Country']", 'null': 'True', 'blank': 'True'}),
            'facilities': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['venues.Facility']", 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
            'postcode': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
            'style': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'twitter': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'website': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
        }
    }
    complete_apps = ['venues']
|
python
|
import asyncio
import os
import fastapi
# noinspection PyPackageRequirements
import pytest
from jinja2.exceptions import TemplateNotFound
from starlette.requests import Request
import fastapi_jinja as fj
here = os.path.dirname(__file__)
# Template folder shared by the tests below.
folder = os.path.join(here, "templates")
# Minimal Request instance passed to the decorated views.
fake_request = Request(scope={'type': 'http'})
def test_cannot_decorate_missing_template():
    """Using a nonexistent template must surface TemplateNotFound."""
    with pytest.raises(TemplateNotFound):
        @fj.template("home/missing.j2")
        def broken_view(request: Request):
            return {}

        broken_view(fake_request)
def test_can_decorate_dict_sync_method():
    """A sync view returning a dict is rendered into a 200 Response."""
    @fj.template("home/index.j2")
    def render_view(request: Request, a, b, c):
        return dict(a=a, b=b, c=c)

    response = render_view(fake_request, 1, 2, 3)
    assert isinstance(response, fastapi.Response)
    assert response.status_code == 200
def test_can_decorate_dict_async_method():
    """An async view returning a dict is rendered into a 200 Response."""
    @fj.template("home/index.j2")
    async def render_view(request: Request, a, b, c):
        return dict(a=a, b=b, c=c)

    response = asyncio.run(render_view(fake_request, 1, 2, 3))
    assert isinstance(response, fastapi.Response)
    assert response.status_code == 200
def test_direct_response_pass_through():
    """A view returning a Response directly is passed through untouched."""
    @fj.template("home/index.j2")
    def teapot_view(request: Request, a, b, c):
        return fastapi.Response(content="abc", status_code=418)

    response = teapot_view(fake_request, 1, 2, 3)
    assert isinstance(response, fastapi.Response)
    assert response.status_code == 418
    assert response.body == b"abc"
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 15 17:02:33 2020
@author: lenovo
"""
# AdaBoost
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.datasets import load_iris
from sklearn.ensemble import AdaBoostClassifier
X, y = load_iris(return_X_y=True)
base_clf = LogisticRegression(C=1.)
# NOTE(review): ``base_estimator`` was renamed ``estimator`` in newer
# scikit-learn releases — confirm the pinned version still accepts it.
clf = AdaBoostClassifier(base_estimator=base_clf, n_estimators=100)
scores = cross_val_score(clf, X, y, cv=5)
# Mean CV accuracy; the value is discarded here (notebook-style script).
scores.mean()
# # Ridge classification
# from sklearn.datasets import load_breast_cancer
# from sklearn.linear_model import RidgeClassifier
# X, y = load_breast_cancer(return_X_y=True)
# clf = RidgeClassifier().fit(X, y)
# clf.score(X, y)
# LogisticRegression
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
X, y = make_classification(n_classes=3, n_informative=5, n_redundant=0, random_state=42)
clf = LogisticRegression(random_state=0).fit(X, y)
# Predictions / probabilities for the first two samples and the training
# accuracy; all results are discarded here (notebook-style script).
clf.predict(X[:2, :])
clf.predict_proba(X[:2, :])
clf.score(X, y)
|
python
|
# native packs
import requests
import json
# installed packs
import pandas as pd
from pandas import json_normalize
auth_endpoint = "https://auth.emsicloud.com/connect/token" # auth endpoint
# replace 'your_client_id' with your client id from your api invite email
client_id = "5f379hywuvh7fvan"
# replace 'your_client_secret' with your client secret from your api invite email
client_secret = "hfCkXQEy"
scope = "emsi_open" # ok to leave as is, this is the scope we will used
# set credentials and scope
payload = f"client_id={client_id}&client_secret={client_secret}&grant_type=client_credentials&scope={scope}"
# headers for the response
headers = {'content-type': 'application/x-www-form-urlencoded'}
access_token = json.loads((requests.request("POST", auth_endpoint, data=payload, headers=headers)).text)[
'access_token'] # grabs request's text and loads as JSON, then pulls the access token from that
def fetch_skills_list() -> list:
    """Fetch the full skills list from the EMSI skills API.

    Returns the ``data`` payload of the response (a list of skill
    records).  The previous ``-> pd.DataFrame`` annotation was wrong:
    the DataFrame conversion below is commented out.
    """
    # List of all skills endpoint
    all_skills_endpoint = "https://emsiservices.com/skills/versions/latest/skills"
    # Bug fix: the header VALUE must be just "Bearer <token>".  The old
    # code set the value to "Authorization: Bearer <token>", embedding
    # the header name inside the value and producing invalid credentials.
    headers = {'authorization': f"Bearer {access_token}"}
    response = requests.request(
        "GET", all_skills_endpoint, headers=headers)  # response
    response = response.json()['data']  # the data
    # all_skills_df = pd.DataFrame(json_normalize(response)); # Where response is a JSON object drilled down to the level of 'data' key
    return response
|
python
|
from collections import Counter
def duplicate_sandwich(arr):
    """Return the elements strictly between the two occurrences of the
    duplicated value in ``arr``."""
    filler = Counter(arr).most_common(1)[0][0]
    first = arr.index(filler)
    second = arr.index(filler, first + 1)
    return arr[first + 1:second]
|
python
|
import math
from django.urls import reverse
from elasticsearch import Elasticsearch
from elasticsearch.client import IndicesClient
from .utils import escape_reserved_characters
MAIN_INDEX_NAME = 'main_index'
PAGE_SIZE = 10
class Elastic:
    """Thin wrapper around an Elasticsearch client with paginated search."""
    def __init__(self):
        self.es = Elasticsearch(['http://elasticsearch:9200'])
        self._create_main_index_if_not_exists()
    def _create_main_index_if_not_exists(self):
        """
        method that creates the elastic index if it does not exist yet
        :return:
        """
        ic = IndicesClient(self.es)
        if not ic.exists(MAIN_INDEX_NAME):
            ic.create(MAIN_INDEX_NAME)
    def index_document(self, body, elastic_id):
        """
        method that puts prepared (dict-like) data into the elastic index
        :param body: {'key': value}
        :param elastic_id: document id used for the index operation
        :return:
        """
        self.es.index(MAIN_INDEX_NAME, body, id=elastic_id)
    def get_all_documents(self, page=1):
        """
        match-all search; for debugging purposes only
        :param page: 1-based page number
        :return: paginated data (see _prepare_response)
        """
        body = self._get_pagineted_query(self._build_match_all_query, page)
        return self._prepare_response(self.es.search(index=MAIN_INDEX_NAME, body=body), page)
    def search_documents(self, query, page=1):
        """
        method for paginated search in the elastic index
        :param query: user query string (reserved characters are escaped)
        :param page: 1-based page number
        :return: paginated data (see _prepare_response)
        """
        body = self._get_pagineted_query(self._build_query, page, query)
        return self._prepare_response(self.es.search(index=MAIN_INDEX_NAME, body=body), page)
    @staticmethod
    def _build_query(query, size, start=0):
        """
        method for creating the search body
        :param query: query string, escaped before use
        :param size: size of the returned dataset
        :param start: offset
        :return: template for search query
        """
        return {
            "query": {
                "query_string": {
                    "query": f"{escape_reserved_characters(query)}"
                }
            },
            "size": f'{size}',
            "from": f'{start}',
        }
    @staticmethod
    def _build_match_all_query(query, size, start=0):
        """
        method for creating a match-all search body
        for debugging purposes only
        :param query: ignored; kept to match _build_query's signature
        :param size: size of the returned dataset
        :param start: offset
        :return: template for search query
        """
        return {
            "query": {
                "match_all": {}
            },
            "size": f'{size}',
            "from": f'{start}',
        }
    @staticmethod
    def _get_pagineted_query(template, page, query=None):
        """
        build a paginated body for search
        :param template: query-builder callable (_build_query or
            _build_match_all_query)
        :param page: 1-based page number; converted to an offset here
        :param query: forwarded to the template
        :return: search body dict
        """
        page = (page - 1) * PAGE_SIZE
        if page < 0:
            # Pages below 1 clamp to the first page.
            return template(query, PAGE_SIZE, 0)
        return template(query, PAGE_SIZE, page)
    def _prepare_response(self, elasic_reponse, page):
        """
        wraps the raw elastic response into a paginated payload
        :param elasic_reponse: raw dict returned by es.search
        :param page: current 1-based page number
        :return: paginated data
        Note:
            pagination only works with responses containing fewer than
            10000 documents
        """
        # 'pages' holds the index of the last page, floored at 0.
        pages = math.ceil(elasic_reponse['hits']['total']['value'] / PAGE_SIZE) - 1
        if pages < 0:
            pages = 0
        pagineted_data = {
            'count': elasic_reponse['hits']['total']['value'],
            'pages': pages,
            'current_page': page,
            'previous': reverse('search_api', kwargs={'page': page - 1 if page > 0 else 0}),
            'next': reverse('search_api', kwargs={'page': page + 1 if page < pages else page}),
            'results': elasic_reponse['hits']['hits']
        }
        return pagineted_data
|
python
|
"""Module for storing coinchoose data in the database."""
import coinchoose
from datetime import datetime
from datetime import timedelta
from decimal import Decimal
import os
import psycopg2 as pg2
import psycopg2.extras as pg2ext
import random
import unittest
# Configuration variables
# Configuration variables
batchLimit = 1000
# Logical table name -> physical table name.
tables = {
    "currency": "currency",
    "currency_historical": "currency_historical",
    "network_status": "network_status",
    "network_status_latest": "network_status_latest"
}
# Pull in postgres configuration information from a .pgpass-style file
# next to this module (one line: host:port:database:user:password).
dbcFile = open(
    "{0}/.pgpass".format(os.path.dirname(os.path.abspath(__file__))),
    'r')
dbcRaw = dbcFile.readline().strip().split(':')
dbcParams = {
    'database': dbcRaw[2],
    'user': dbcRaw[3],
    'password': dbcRaw[4],
    'host': dbcRaw[0],
    'port': dbcRaw[1]
}
dbcFile.close()
# Connection variable (module-level singleton; lazily opened by connect()).
conn = None
def connect():
    """Return the shared database connection, opening it on first use."""
    global conn
    if conn is None:
        conn = pg2.connect(**dbcParams)
    return conn
def cursor():
    """Pull a plain cursor from the shared connection."""
    return connect().cursor()
def dictCursor():
    """Pull a dictionary (RealDictCursor) cursor from the shared connection."""
    return connect().cursor(cursor_factory=pg2ext.RealDictCursor)
def _createStaging(tableName, cursor):
    """Create staging table.

    Clones ``tableName``'s column definitions (INCLUDING DEFAULTS) into a
    new table named with a random 10-digit suffix and returns its name.
    """
    # NOTE(review): 10**(random()*10) is heavily skewed toward small
    # numbers and can collide with an existing staging table — confirm
    # whether uniqueness matters here.
    stagingTable = "{0}_{1}".format(
        tableName, str(int(pow(10, random.random()*10))).zfill(10))
    # Identifiers cannot be bound as query parameters, hence .format();
    # tableName comes from the internal ``tables`` map, not user input.
    cursor.execute("""CREATE TABLE {0} (LIKE {1}
        INCLUDING DEFAULTS)""".format(stagingTable, tableName))
    return stagingTable
def _dropStaging(tableName, cursor):
"""Drop staging table."""
cursor.execute("""
DROP TABLE {0}""".format(tableName))
def insertLatestCurrencies(data, withHistory=True):
    """Insert latest currency data.

    data: iterable of dicts with keys 'symbol', 'name', 'algo'.
    withHistory: when True, changed/new rows are also appended to the
    historical table.

    Strategy: load everything into a throwaway staging table, then UPDATE
    changed rows, INSERT new rows, and (optionally) append to history.
    """
    cursor = dictCursor()
    targetTable = tables['currency']
    # Create staging table
    stagingTable = _createStaging(targetTable, cursor)
    # Move data into staging table
    cursor.executemany("""
        INSERT INTO {0} (
        symbol, name, algo)
        VALUES (
        %(symbol)s,
        %(name)s,
        %(algo)s
        )""".format(stagingTable), data)
    # Update any altered currencies (only rows whose name or algo changed)
    cursor.execute("""
        UPDATE {0} tgt
        SET name = stg.name, algo = stg.algo,
        db_update_time = stg.db_update_time
        FROM {1} stg
        WHERE tgt.symbol = stg.symbol
        AND (tgt.name <> stg.name OR
        tgt.algo <> stg.algo)""".format(
        targetTable, stagingTable))
    # Merge any new currencies into target table (anti-join on symbol)
    cursor.execute("""
        INSERT INTO {0} (
        symbol, name, algo, db_update_time)
        (SELECT stg.*
        FROM {1} stg
        LEFT JOIN {0} tgt ON tgt.symbol = stg.symbol
        WHERE tgt.symbol IS NULL)""".format(
        targetTable, stagingTable))
    # If requested, merge data into the historical table.  A row is appended
    # whenever the (symbol, name, algo) triple is not already present there.
    if withHistory:
        historicalTable = tables['currency_historical']
        cursor.execute("""
            INSERT INTO {0} (
            symbol, name, algo, db_update_time)
            (SELECT stg.*
            FROM {1} stg
            LEFT JOIN {0} tgt ON
            tgt.symbol = stg.symbol AND
            tgt.name = stg.name AND
            tgt.algo = stg.algo
            WHERE tgt.symbol IS NULL)""".format(
            historicalTable, stagingTable))
    # Drop staging table
    _dropStaging(stagingTable, cursor)
    # Commit
    cursor.execute("""COMMIT""")
def insertLatestNetworkStatus(data):
    """Insert latest network status data.

    data: iterable of dicts with keys 'scrape_time', 'symbol',
    'current_blocks', 'difficulty', 'reward', 'hash_rate', 'avg_hash_rate'.

    New observations are appended to the main table only when they differ
    from the snapshot in the 'latest' table; the 'latest' table is then
    replaced wholesale with the staged data.
    """
    cursor = dictCursor()
    targetTable = tables['network_status']
    latestTable = tables['network_status_latest']
    # Create staging table
    stagingTable = _createStaging(targetTable, cursor)
    # Move data into staging table
    cursor.executemany("""
        INSERT INTO {0}
        (scrape_time, symbol, current_blocks, difficulty,
        reward, hash_rate, avg_hash_rate)
        VALUES (
        %(scrape_time)s,
        %(symbol)s,
        %(current_blocks)s,
        %(difficulty)s,
        %(reward)s,
        %(hash_rate)s,
        %(avg_hash_rate)s
        )""".format(stagingTable), data)
    # Update target table where we have new data (anti-join against the
    # latest snapshot on every value column; unmatched rows are new)
    cursor.execute("""
        INSERT INTO {0}
        (scrape_time, symbol, current_blocks, difficulty,
        reward, hash_rate, avg_hash_rate, db_update_time)
        (SELECT stg.*
        FROM {1} stg
        LEFT JOIN {2} lt
        ON lt.symbol = stg.symbol
        AND lt.current_blocks = stg.current_blocks
        AND lt.difficulty = stg.difficulty
        AND lt.reward = stg.reward
        AND lt.hash_rate = stg.hash_rate
        AND lt.avg_hash_rate = stg.avg_hash_rate
        WHERE lt.scrape_time IS NULL)""".format(
        targetTable, stagingTable, latestTable))
    # Replace data in latest table with new data in staging
    cursor.execute("""DELETE FROM {0}""".format(latestTable))
    cursor.execute("""INSERT INTO {0}
        SELECT *
        FROM {1}""".format(latestTable, stagingTable))
    # Drop staging table
    _dropStaging(stagingTable, cursor)
    # Commit
    cursor.execute("""COMMIT""")
class PgTest(unittest.TestCase):
    """Testing suite for pg module.

    Runs against a live Postgres database: setUp() swaps the module-level
    ``tables`` mapping to *_test table names and creates those tables by
    cloning the real ones; tearDown() drops them and restores the globals.
    NOTE(review): expected row counts (59/61/60) are tied to the contents of
    example/api.json — confirm when updating that fixture.
    """
    def setUp(self):
        """Setup tables for test."""
        # Swap and sub configuration variables
        global tables
        self.tablesOriginal = tables
        tables = {}
        for key, table in self.tablesOriginal.iteritems():
            tables[key] = "{0}_test".format(table)
        global batchLimit
        self.batchLimitOriginal = batchLimit
        batchLimit = 20
        # Create test tables (cloned from the production tables)
        cur = cursor()
        for key, table in tables.iteritems():
            cur.execute("""CREATE TABLE IF NOT EXISTS
                {0} (LIKE {1} INCLUDING ALL)""".format(
                table, self.tablesOriginal[key]))
        cur.execute("""COMMIT""")
    def tearDown(self):
        """Teardown test tables."""
        # Drop test tables
        global tables
        cur = cursor()
        for table in tables.values():
            cur.execute("""DROP TABLE IF EXISTS
                {0}""".format(table))
        # Undo swap / sub
        tables = self.tablesOriginal
        global batchLimit
        batchLimit = self.batchLimitOriginal
    def testInsertLatestCurrencies(self):
        """Test insertLatestCurrencies function."""
        # Load the fixture JSON shipped alongside this module
        fileString = "{0}/example/api.json"
        f = open(fileString.format(
            os.path.dirname(os.path.abspath(__file__))), 'r')
        jsonDump = f.read()
        f.close()
        data = coinchoose.parseLatestCurrencies(jsonDump)
        insertLatestCurrencies(data)
        # Test out some basic count statistics
        cur = dictCursor()
        cur.execute("""SELECT COUNT(*) cnt FROM {0}""".format(
            tables['currency']))
        row = cur.fetchone()
        self.assertEqual(row['cnt'], 59)
        cur.execute("""SELECT COUNT(*) cnt FROM {0}""".format(
            tables['currency_historical']))
        row = cur.fetchone()
        self.assertEqual(row['cnt'], 59)
        # Test out contents of first and last row
        expectedFirst = {
            'symbol': 'ALF',
            'name': 'Alphacoin',
            'algo': 'scrypt'
        }
        cur.execute("""SELECT symbol, name, algo
            FROM {0}
            WHERE symbol = '{1}'""".format(
            tables['currency'], 'ALF'))
        datumFirst = cur.fetchone()
        self.assertEqual(datumFirst, expectedFirst)
        expectedLast = {
            'symbol': 'GLC',
            'name': 'GlobalCoin',
            'algo': 'scrypt'
        }
        cur.execute("""SELECT symbol, name, algo
            FROM {0}
            WHERE symbol = '{1}'""".format(
            tables['currency'], 'GLC'))
        datumLast = cur.fetchone()
        self.assertEqual(datumLast, expectedLast)
        # Update the data in a way that modifies what's in the DB:
        # ALF gets a new name, GLC a new algo.  The currency table should
        # keep 59 rows (updates in place) while history grows by 2.
        updatedData = [
            {
                'symbol': 'ALF',
                'name': 'XXAlphacoinXX',
                'algo': 'scrypt'
            },
            {
                'symbol': 'GLC',
                'name': 'GlobalCoin',
                'algo': 'SHA-256'
            }
        ]
        insertLatestCurrencies(updatedData)
        cur.execute("""SELECT COUNT(*) cnt FROM {0}""".format(
            tables['currency']))
        row = cur.fetchone()
        self.assertEqual(row['cnt'], 59)
        cur.execute("""SELECT COUNT(*) cnt FROM {0}""".format(
            tables['currency_historical']))
        row = cur.fetchone()
        self.assertEqual(row['cnt'], 61)
        cur.execute("""SELECT symbol, name, algo
            FROM {0}
            WHERE symbol = '{1}'""".format(
            tables['currency'], 'ALF'))
        newDatumFirst = cur.fetchone()
        self.assertEqual(newDatumFirst, updatedData[0])
        cur.execute("""SELECT symbol, name, algo
            FROM {0}
            WHERE symbol = '{1}'""".format(
            tables['currency'], 'GLC'))
        newDatumFirst = cur.fetchone()
        self.assertEqual(newDatumFirst, updatedData[1])
    def testInsertLatestNetworkStatus(self):
        """Test insertLatestNetworkStatus function."""
        # Load the fixture JSON shipped alongside this module
        fileString = "{0}/example/api.json"
        f = open(fileString.format(
            os.path.dirname(os.path.abspath(__file__))), 'r')
        jsonDump = f.read()
        f.close()
        now = datetime.utcnow()
        data = coinchoose.parseLatestNetworkStatus(jsonDump, scrapeTime=now)
        insertLatestNetworkStatus(data)
        # Test out some basic count statistics
        cur = dictCursor()
        cur.execute("""SELECT COUNT(*) cnt FROM {0}""".format(
            tables['network_status']))
        row = cur.fetchone()
        self.assertEqual(row['cnt'], 59)
        cur.execute("""SELECT COUNT(*) cnt FROM {0}""".format(
            tables['network_status_latest']))
        row = cur.fetchone()
        self.assertEqual(row['cnt'], 59)
        # Test out contents of first and last row
        expectedFirst = {
            'symbol': 'ALF',
            'scrape_time': now,
            'current_blocks': long(655258),
            'difficulty': Decimal("1.52109832"),
            'reward': Decimal(50),
            'hash_rate': long(10308452),
            'avg_hash_rate': Decimal("10308452.0000")
        }
        cur.execute("""SELECT
            symbol, scrape_time, current_blocks, difficulty,
            reward, hash_rate, avg_hash_rate
            FROM {0}
            WHERE symbol = '{1}'""".format(
            tables['network_status'], 'ALF'))
        datumFirst = cur.fetchone()
        self.assertEqual(datumFirst, expectedFirst)
        expectedLast = {
            'symbol': 'GLC',
            'scrape_time': now,
            'current_blocks': long(300011),
            'difficulty': Decimal("0.768"),
            'reward': Decimal(100),
            'hash_rate': long(0),
            'avg_hash_rate': Decimal("0")
        }
        cur.execute("""SELECT
            symbol, scrape_time, current_blocks, difficulty,
            reward, hash_rate, avg_hash_rate
            FROM {0}
            WHERE symbol = '{1}'""".format(
            tables['network_status'], 'GLC'))
        datumLast = cur.fetchone()
        self.assertEqual(datumLast, expectedLast)
        # Update the data in a way that modifies some of what's in the DB:
        # ALF is unchanged except scrape_time (should NOT create a new row),
        # GLC has new values (should append a second history row).
        updatedData = [
            {
                'symbol': 'ALF',
                'scrape_time': now + timedelta(days=1),
                'current_blocks': long(655258),
                'difficulty': Decimal("1.52109832"),
                'reward': Decimal(50),
                'hash_rate': long(10308452),
                'avg_hash_rate': Decimal("10308452.0000")
            },
            {
                'symbol': 'GLC',
                'scrape_time': now + timedelta(days=1),
                'current_blocks': long(300155),
                'difficulty': Decimal("1.234"),
                'reward': Decimal(100),
                'hash_rate': long(20),
                'avg_hash_rate': Decimal("20.34")
            }
        ]
        insertLatestNetworkStatus(updatedData)
        cur.execute("""SELECT COUNT(*) cnt FROM {0}""".format(
            tables['network_status']))
        row = cur.fetchone()
        self.assertEqual(row['cnt'], 60)
        cur.execute("""SELECT COUNT(*) cnt FROM {0}""".format(
            tables['network_status_latest']))
        row = cur.fetchone()
        self.assertEqual(row['cnt'], 2)
        cur.execute("""SELECT COUNT(*) cnt
            FROM {0}
            WHERE symbol = '{1}'""".format(
            tables['network_status'], 'ALF'))
        row = cur.fetchone()
        self.assertEqual(row['cnt'], 1)
        cur.execute("""SELECT COUNT(*) cnt
            FROM {0}
            WHERE symbol ='{1}'""".format(
            tables['network_status'], 'GLC'))
        row = cur.fetchone()
        self.assertEqual(row['cnt'], 2)
        cur.execute("""SELECT
            symbol, scrape_time, current_blocks, difficulty,
            reward, hash_rate, avg_hash_rate
            FROM {0}
            WHERE symbol = '{1}'""".format(
            tables['network_status'], 'ALF'))
        newDatumFirst = cur.fetchone()
        self.assertEqual(newDatumFirst, expectedFirst)
        cur.execute("""SELECT
            symbol, scrape_time, current_blocks, difficulty,
            reward, hash_rate, avg_hash_rate
            FROM {0}
            WHERE symbol = '{1}'
            ORDER BY scrape_time
            DESC LIMIT 1""".format(
            tables['network_status'], 'GLC'))
        newDatumLast = cur.fetchone()
        self.assertEqual(newDatumLast, updatedData[-1])
        cur.execute("""SELECT
            symbol, scrape_time, current_blocks, difficulty,
            reward, hash_rate, avg_hash_rate
            FROM {0}
            WHERE symbol = '{1}'""".format(
            tables['network_status_latest'], 'ALF'))
        newDatumFirst = cur.fetchone()
        self.assertEqual(newDatumFirst, updatedData[0])
        cur.execute("""SELECT
            symbol, scrape_time, current_blocks, difficulty,
            reward, hash_rate, avg_hash_rate
            FROM {0}
            WHERE symbol = '{1}'""".format(
            tables['network_status_latest'], 'GLC'))
        newDatumLast = cur.fetchone()
        self.assertEqual(newDatumLast, updatedData[-1])
# Allow running this module directly to execute its test suite.
if __name__ == "__main__":
    unittest.main()
|
python
|
# ROS imports
import roslib; roslib.load_manifest('freemovr_engine')
import scipy.optimize
import imageio
from pymvg.camera_model import CameraModel
from pymvg.util import get_rotation_matrix_and_quaternion
import freemovr_engine.simple_geom as simple_geom
import numpy as np
import os
import cv2
PLOT=int(os.environ.get('PLOT',0))
if PLOT:
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from .plot_utils import get_3d_verts, plot_camera
import roslib; roslib.load_manifest('freemovr_engine')
from tf.transformations import quaternion_from_matrix, \
quaternion_matrix, rotation_from_matrix, rotation_matrix, \
quaternion_about_axis
from freemovr_engine.cvnumpy import rodrigues2matrix, matrix2rodrigues
def matrix2quaternion( R ):
    """Convert a 3x3 rotation matrix to a quaternion.

    tf.transformations.quaternion_from_matrix expects a 4x4 homogeneous
    matrix, so embed R in an identity 4x4 first.
    """
    rnew = np.eye(4)
    rnew[:3,:3] = R
    # Bug fix: previously the raw 3x3 R was passed, making the embedding
    # above dead code and handing quaternion_from_matrix the wrong shape.
    return quaternion_from_matrix(rnew)
def quaternion2matrix( q ):
    """Return the 3x3 rotation block of the homogeneous matrix built from quaternion q."""
    homogeneous = quaternion_matrix(q)
    rot3x3 = homogeneous[:3, :3]
    return rot3x3
class ObjectiveFunctionFancy:
    """Find pose using world-space object point relations as the error term.
    For a similar idea, see 'Pose Estimation using Four Corresponding
    Points' by Liu and Wong, 1998. This method uses arbitrary numbers
    of points and (so far) does not use the Gauss-Newton method nor
    require calculation of a Jacobian, although those things would be
    straitforward from here.
    """
    def __init__(self,base_cam,X3d,x2d):
        self.base_cam = base_cam
        self.X3d = X3d          # world-space 3D points
        self.x2d = x2d          # corresponding image-space 2D points
        # Keep only the intrinsic part of the camera description; pose
        # (Q/translation) is what we are solving for.
        intrinsics = self.base_cam.to_dict()
        del intrinsics['Q']
        del intrinsics['translation']
        del intrinsics['name']
        self.intrinsic_dict = intrinsics
        self._obj_dist = []
        self.npts = len(self.X3d)
        self.d_actual = self.compute_distance_vector( self.X3d )
        # Bug fix: err() compares against self.shape_actual, which was
        # never initialized; compute the reference chirality scalar here.
        self.shape_actual = self.compute_shape_scalar( self.X3d )
        # Weight of the chirality (shape) term relative to the distance term.
        self.alpha = 1.0
    def compute_distance_vector(self, pts ):
        """Return pairwise Euclidean distances for all i<j point pairs."""
        result = []
        for i in range(self.npts):
            for j in range(self.npts):
                if i<j:
                    d = pts[i]-pts[j]
                    result.append(np.sqrt(np.sum(d**2)))
        return np.array(result)
    def compute_shape_scalar(self, pts):
        # Compute some value that changes based on the chirality of
        # the object. Here we use eqn 4 from Liu and Wong.
        v21 = pts[2]-pts[1]
        v23 = pts[2]-pts[3]
        v20 = pts[2]-pts[0]
        return -np.dot(np.cross(v21,v23),v20)
    def get_start_guess(self):
        # pts = camera.project_pixel_to_3d_ray(self.x2d)
        # vecs = pts - camera.get_camcenter()
        # distances = np.sqrt(np.sum(vecs**2,axis=1))
        return np.ones( (len(self.x2d),) )
    def make_cam_from_params(self, params):
        # find location of camcenter by finding point of best fit with
        # N spheres of radius params each centered at a point at
        # self.X3d
        raise NotImplementedError()
    def err(self, params):
        """Objective: squared distance-vector error plus weighted chirality error."""
        #x,y,z, rx, ry, rz = params
        camera = self.make_cam_from_params( params)
        pts_test = camera.project_pixel_to_3d_ray(self.x2d, distance=params)
        d_test = self.compute_distance_vector(pts_test)
        shape_test = self.compute_shape_scalar(pts_test)
        err_d = np.sum((d_test - self.d_actual)**2)
        err_shape = abs(shape_test - self.shape_actual)
        return (err_d + self.alpha*err_shape)
class ObjectiveFunction:
    """Find pose using reprojection error and shape term.

    base_cam supplies the (fixed) intrinsics and the initial extrinsics
    guess; X3d/x2d are corresponding 3D world points and 2D pixel points.
    Passing geom enables debug plotting when the PLOT env flag is set.
    """
    def __init__(self,base_cam,X3d,x2d,geom=None):
        self.base_cam = base_cam
        self.X3d = X3d
        self.x2d = x2d
        # Strip pose fields; only intrinsics are kept fixed during the fit.
        intrinsics = self.base_cam.to_dict()
        del intrinsics['Q']
        del intrinsics['translation']
        del intrinsics['name']
        self.intrinsic_dict = intrinsics
        self._obj_dist = []
        self.npts = len(self.X3d)
        if geom is not None:
            self.debug = True
        else:
            self.debug = False
        if PLOT and self.debug:
            plt.ion()
            self.fig = plt.figure()
            self.ax3d = self.fig.add_subplot(211, projection='3d')
            self.ax2d = self.fig.add_subplot(212)
            self.ax3d.set_xlabel('x')
            self.ax3d.set_ylabel('y')
            self.ax3d.set_zlabel('z')
            self.geom = geom
            self.plot_verts = get_3d_verts(self.geom)
    def get_start_guess(self):
        """Return the 6-vector [tx,ty,tz,rodrigues] from base_cam's pose."""
        if 1:
            rod = matrix2rodrigues(self.base_cam.get_rotation())
            t = self.base_cam.get_translation()
            t.shape= (3,)
            rod.shape=(3,)
            # Bug fix: np.float was a deprecated alias for the builtin float
            # (removed in NumPy 1.20); use float directly.
            return np.array(list(t)+list(rod),dtype=float)
        # --- unreachable legacy quaternion-based parametrization below ---
        R = np.eye(4)
        R[:3,:3] = self.base_cam.get_rotation()
        angle, direction, point = rotation_from_matrix(R)
        q = quaternion_about_axis(angle,direction)
        #q = matrix2quaternion(R)
        if 1:
            R2 = rotation_matrix(angle, direction, point)
            #R2 = quaternion2matrix( q )
            try:
                assert np.allclose(R, R2)
            except:
                print()
                print('R')
                print(R)
                print('R2')
                print(R2)
                raise
        C = self.base_cam.get_camcenter()
        result = list(C) + list(q)
        return result
    def make_cam_from_params(self, params):
        """Build a CameraModel from params = [tx,ty,tz, rodrigues(3)]."""
        if 1:
            t = params[:3]
            rod = params[3:]
            rmat = rodrigues2matrix( rod )
            d = self.intrinsic_dict.copy()
            d['translation'] = t
            d['Q'] = rmat
            cam_model = CameraModel.from_dict(d)
            return cam_model
        # --- unreachable legacy quaternion-based parametrization below ---
        C = params[:3]
        quat = params[3:]
        qmag = np.sqrt(np.sum(quat**2))
        quat = quat/qmag
        R,rquat=get_rotation_matrix_and_quaternion(quat)
        t = -np.dot(R, C)
        d = self.intrinsic_dict.copy()
        d['translation'] = t
        d['Q'] = R
        cam_model = CameraModel.from_dict(d)
        return cam_model
    def err(self, params):
        """Mean 2D reprojection error (pixels) for the camera built from params."""
        camera = self.make_cam_from_params( params)
        found = camera.project_3d_to_pixel(self.X3d)
        each_err = np.sqrt(np.sum((found - self.x2d)**2,axis=1))
        me = np.mean(each_err)
        if 0:
            print()
            print('params', params)
            print('found')
            print(np.hstack( (found, self.x2d, each_err[:,np.newaxis]) ))
            print('mean reproj error: ',me)
            print()
        if PLOT and self.debug:
            # Live debug view: 3D geometry + camera, and 2D target-vs-found.
            assert len(each_err)==len(self.x2d)
            self.ax3d.cla()
            verts = self.plot_verts
            self.ax3d.plot( verts[:,0], verts[:,1], verts[:,2], 'ko' )
            plot_camera( self.ax3d, camera )
            self.ax2d.cla()
            self.ax2d.plot( self.x2d[:,0], self.x2d[:,1], 'go', mfc='none')
            self.ax2d.plot( found[:,0], found[:,1], 'rx', mfc='none')
            for i in range( len(found)):
                self.ax2d.plot( [found[i,0],self.x2d[i,0]],
                                [found[i,1],self.x2d[i,1]], 'k-' )
            plt.draw()
        if 0:
            df = found[1:]-found[:-1]
            #print 'found'
            #print found
            #print 'df'
            #print df
            bunching_penalty = 1.0/np.sum(df**2)
            #print 'mean reproj error: % 20.1f bunching penalty: % 20.1f '%(me,bunching_penalty)
            #return me + bunching_penalty
        return me
def fit_extrinsics_iterative(base_cam,X3d,x2d, geom=None):
    """find a camera with a better extrinsics than the input camera

    Returns a dict with keys 'cam' (the fitted CameraModel), 'mean_err'
    (mean pixel reprojection error) and 'mean_cam_z' (mean depth of X3d
    in the camera frame; negative means points ended up behind the camera).
    """
    prestages = True
    if prestages:
        # pre-stage 1 - point the camera in the right direction
        # (fit against only the centroid correspondence, loose tolerance)
        world = np.array([np.mean( X3d, axis=0 )])
        image = np.array([np.mean( x2d, axis=0 )])
        obj = ObjectiveFunction(base_cam, world, image, geom=geom)
        result = scipy.optimize.fmin( obj.err, obj.get_start_guess(),ftol=5.0)
        base_cam = obj.make_cam_from_params(result)
    if prestages:
        # pre-stage 2 - get scale approximately OK (first two points only)
        world = X3d[:2,:]
        image = x2d[:2,:]
        obj = ObjectiveFunction(base_cam, world, image, geom=geom)
        result = scipy.optimize.fmin( obj.err, obj.get_start_guess())
        base_cam = obj.make_cam_from_params(result)
    if prestages:
        # pre-stage 3 - start rotations (first three points)
        world = X3d[:3,:]
        image = x2d[:3,:]
        obj = ObjectiveFunction(base_cam, world, image, geom=geom)
        result = scipy.optimize.fmin( obj.err, obj.get_start_guess())
        base_cam = obj.make_cam_from_params(result)
    # now, refine our guess, held in base_cam
    # Repeated fmin restarts from the previous optimum, using all points,
    # until the error stops improving (or 10 iterations).
    last_fval = np.inf
    for i in range(10):
        cam = obj.make_cam_from_params(result)
        obj = ObjectiveFunction(cam, X3d, x2d, geom=geom)
        results = scipy.optimize.fmin( obj.err, obj.get_start_guess(),
                                       full_output=True )
        result, fval = results[:2]
        print('fval, last_fval',fval, last_fval)
        if fval > last_fval:
            # we're not getting better
            break
        eps = 1e-2 # this is pixel reprojection error here. don't need better than this.
        if abs(fval-last_fval) < eps:
            break
        last_fval=fval
    print('did %d iterations'%(i+1,))
    if 0:
        # Disabled simulated-annealing alternative (kept for reference).
        obj = ObjectiveFunction(base_cam, X3d, x2d)#, geom=geom)
        results = scipy.optimize.anneal( obj.err, obj.get_start_guess(),
                                         learn_rate=0.5,
                                         full_output=True, maxeval=50000, T0=1000.0,
                                         maxiter=10000,
                                         #disp=True,
                                         )
        #print 'results',results
        result = results[0]
        if 1:
            result, Jmin, T, feval, iters, accept, retval = results
            print('Jmin',Jmin)
            print('T',T)
            print('fevel',feval)
            print('iters',iters)
            print('accept',accept)
            print('retval',retval)
    cam = obj.make_cam_from_params(result)
    if 1:
        # Final quality metrics for the fitted camera
        found = cam.project_3d_to_pixel(X3d)
        orig = x2d
        reproj_error = np.sqrt(np.sum((found-orig)**2, axis=1))
        cum = np.mean(reproj_error)
        mean_cam_z = np.mean(cam.project_3d_to_camera_frame(X3d)[:,2])
    cam.name = base_cam.name
    result = dict(
        mean_err=cum,
        mean_cam_z = mean_cam_z,
        cam = cam)
    return result
def save_point_image(fname, sz, x2d ):
    """Write a debug image of size sz=(width, height) with a white ~6x6
    square centered on each 2D point in x2d.

    Robustness fixes: coordinates are rounded to ints (slice indices must
    be integral) and the lower slice bound is clamped at 0 so points near
    the image border do not wrap around via negative indices.
    """
    im = np.zeros( (sz[1], sz[0]), dtype=np.uint8 )
    for xy in x2d:
        x,y=xy
        x = int(round(x))
        y = int(round(y))
        im[max(y-3, 0):y+3, max(x-3, 0):x+3] = 255
    imageio.imwrite(fname,im)
def fit_extrinsics(base_cam,X3d,x2d,geom=None):
    """Solve camera extrinsics from 3D<->2D correspondences via cv2.solvePnP,
    keeping base_cam's intrinsics.

    Returns a dict with 'cam', 'mean_err' (mean pixel reprojection error)
    and 'mean_cam_z' (mean depth of X3d in the camera frame).
    """
    assert x2d.ndim==2
    assert x2d.shape[1]==2
    assert X3d.ndim==2
    assert X3d.shape[1]==3
    if 0:
        # Disabled: dump the observed 2D points as a debug image
        fname = 'x2d_'+base_cam.name + '.png'
        fname = fname.replace('/','-')
        save_point_image(fname, (base_cam.width, base_cam.height), x2d )
        #print 'saved pt debug image to',fname
    ipts = np.array(x2d,dtype=np.float64)
    opts = np.array(X3d,dtype=np.float64)
    K = np.array(base_cam.get_K(), dtype=np.float64)
    dist_coeffs = np.array( base_cam.get_D(), dtype=np.float64)
    retval, rvec, tvec = cv2.solvePnP( opts, ipts,
                                       K,
                                       dist_coeffs)
    assert retval
    # we get two possible cameras back, figure out which one has objects in front
    rmata = rodrigues2matrix( rvec )
    intrinsics = base_cam.to_dict()
    del intrinsics['Q']
    del intrinsics['translation']
    del intrinsics['name']
    d = intrinsics.copy()
    d['translation'] = tvec
    d['Q'] = rmata
    d['name'] = base_cam.name
    cam_model_a = CameraModel.from_dict(d)
    mza = np.mean(cam_model_a.project_3d_to_camera_frame(X3d)[:,2])
    # don't bother with second - it does not have a valid rotation matrix
    if 1:
        founda = cam_model_a.project_3d_to_pixel(X3d)
        erra = np.mean(np.sqrt(np.sum((founda-x2d)**2, axis=1)))
    cam_model = cam_model_a
    if 1:
        # Quality metrics for the chosen solution
        found = cam_model.project_3d_to_pixel(X3d)
        orig = x2d
        reproj_error = np.sqrt(np.sum((found-orig)**2, axis=1))
        cum = np.mean(reproj_error)
        mean_cam_z = np.mean(cam_model.project_3d_to_camera_frame(X3d)[:,2])
    # The '... and 0' disables this fallback: it would re-fit iteratively
    # when the PnP solution looks flipped (points behind camera / huge error).
    if (mean_cam_z < 0 or cum > 20) and 0:
        # hmm, we have a flipped view of the camera.
        print('-'*80,'HACK ON')
        center, lookat, up = cam_model.get_view()
        #cam2 = cam_model.get_view_camera( -center, lookat, -up )
        cam2 = cam_model.get_view_camera( center, lookat, up )
        cam2.name=base_cam.name
        return fit_extrinsics_iterative(cam2,X3d,x2d, geom=geom)
    result = dict(cam=cam_model,
                  mean_err=cum,
                  mean_cam_z = mean_cam_z,
                  )
    return result
|
python
|
# Generated by Django 2.2.20 on 2021-10-04 09:50
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the Deed.enable_impact flag and make Deed.target optional
    (nullable/blank) with participant help text."""

    dependencies = [
        ('deeds', '0008_deed_target'),
    ]
    operations = [
        migrations.AddField(
            model_name='deed',
            name='enable_impact',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name='deed',
            name='target',
            field=models.IntegerField(blank=True, help_text='The number of users you want to participate.', null=True),
        ),
    ]
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import json
from .property import Property
from .util import text, unicode_obj
from .propertymapping import ConstantPropertyMapping, DirectPropertyMapping, ConcatPropertyMapping
__author__ = "nebula"
# Global registry of VariableMeta instances keyed by (app, name);
# maintained via VariableMeta.register/unregister_variable_meta.
variable_meta_registry = dict()
class VariableMeta(object):
    """Metadata describing one variable: identity (app, name), sources,
    schema properties, lifetime (expire/ttl) and mapping generation.

    Subclasses register themselves in TYPE2Class so from_dict can dispatch
    on the serialized "type" field.
    """
    # Properties every variable has regardless of its declared schema.
    _fixed_properties = {
        "app": "string",
        "name": "string",
        "key": "string",
        "timestamp": "long",
        "value": "double"
    }
    # type tag -> concrete subclass; populated by subclasses at import time.
    TYPE2Class = {
    }
    @staticmethod
    def from_dict(d):
        """Build the concrete VariableMeta subclass instance described by d."""
        type = d["type"]
        cls = VariableMeta.TYPE2Class.get(type)
        if not cls:
            raise RuntimeError("unsupported property variabe meta type: {}".format(type))
        return cls.from_dict(d)
    @staticmethod
    def from_json(jsonStr):
        """Build a VariableMeta from its JSON serialization."""
        return VariableMeta.from_dict(json.loads(jsonStr))
    def __init__(self, *args, **kwargs):
        self._app = text(kwargs["app"])
        self._name = text(kwargs["name"])
        self._type = text(kwargs["type"])
        self._src_variablesid = unicode_obj(kwargs["srcVariablesID"] or list())
        self._src_eventid = unicode_obj(kwargs["srcEventID"] or None)
        self._priority = kwargs["priority"]
        self._properties = [Property.from_dict(_) for _ in kwargs["properties"]] or list()
        self._expire = kwargs["expire"]
        self._ttl = kwargs["ttl"]
        self._internal = bool(kwargs["internal"])
        self._topValue = bool(kwargs.get("topValue", False))
        self._keyTopValue = bool(kwargs.get("keyTopValue", False))
        self._remark = text(kwargs["remark"] or "")
    @property
    def app(self):
        return self._app
    @app.setter
    def app(self, app):
        self._app = text(app)
    @property
    def name(self):
        return self._name
    @name.setter
    def name(self, name):
        self._name = text(name)
    @property
    def type(self):
        return self._type
    @type.setter
    def type(self, type):
        self._type = text(type)
    @property
    def src_variablesid(self):
        return self._src_variablesid
    @src_variablesid.setter
    def src_variablesid(self, src_variablesid):
        src_variablesid = src_variablesid or list()
        self._src_variablesid = unicode_obj(src_variablesid)
    @property
    def src_eventid(self):
        return self._src_eventid
    @src_eventid.setter
    def src_eventid(self, src_eventid):
        src_eventid = src_eventid or list()
        self._src_eventid = src_eventid
    @property
    def priority(self):
        return self._priority
    @priority.setter
    def priority(self, priority):
        self._priority = priority
    @property
    def properties(self):
        return self._properties
    @properties.setter
    def properties(self, properties):
        self._properties = properties or list()
    def has_property(self, property):
        """Return True when (name, type) matches a declared or fixed property."""
        for p in self._properties:
            if property.name == p.name and property.type == p.type:
                return True
        # Bug fix: iterating a dict directly yields only keys; iterate
        # .items() to get (name, type) pairs.
        for n, t in VariableMeta._fixed_properties.items():
            if property.name == n and property.type == t:
                return True
        return False
    def find_property_by_name(self, field_name):
        """Return the declared or fixed Property named field_name, else None."""
        for p in self._properties:
            if p.name == field_name:
                return p
        # Bug fix: .iteritems() is Python-2-only; .items() works on both.
        for key, type in VariableMeta._fixed_properties.items():
            if key == field_name:
                return Property([self.app, self.name], key, type)
        return None
    @property
    def expire(self):
        return self._expire
    @expire.setter
    def expire(self, expire):
        self._expire = expire
    @property
    def ttl(self):
        return self._ttl
    @ttl.setter
    def ttl(self, ttl):
        self._ttl = ttl
    @property
    def internal(self):
        return self._internal
    @internal.setter
    def internal(self, internal):
        self._internal = bool(internal)
    @property
    def topValue(self):
        return self._topValue
    @topValue.setter
    def topValue(self, topValue):
        self._topValue = bool(topValue)
    @property
    def keyTopValue(self):
        return self._keyTopValue
    @keyTopValue.setter
    def keyTopValue(self, keyTopValue):
        self._keyTopValue = bool(keyTopValue)
    @property
    def remark(self):
        return self._remark
    @remark.setter
    def remark(self, remark):
        self._remark = text(remark or "")
    @property
    def config(self):
        # NOTE(review): _config is never set in __init__ — reading before
        # the setter is used raises AttributeError; confirm intended usage.
        return self._config
    @config.setter
    def config(self, config):
        self._config = unicode_obj(config)
    @property
    def data_schema(self):
        """Return {property name: type} including the fixed properties."""
        result = {}
        for p in self.properties:
            result[p.name] = p.type
        # Bug fix: dict has no .extend(); merge the fixed properties with
        # .update() instead.
        result.update(VariableMeta._fixed_properties)
        return result
    @property
    def propertyMappings(self):
        raise RuntimeError("not implemented")
    @property
    def propertyReductions(self):
        raise RuntimeError("not implemented")
    @property
    def propertyCondition(self):
        raise RuntimeError("not implemented")
    def get_dict(self):
        """Serialize this meta to a plain dict (inverse of from_dict)."""
        return {
            "app": self.app,
            "name": self.name,
            "type": self.type,
            "srcVariablesID": self.src_variablesid,
            "srcEventID": self.src_eventid,
            "priority": self.priority,
            "properties": [_.get_dict() for _ in self.properties],
            "expire": self.expire,
            "ttl": self.ttl,
            "internal": self.internal,
            "topValue": self.topValue,
            "keyTopValue": self.keyTopValue,
            "remark": self.remark,
        }
    def get_json(self):
        """Serialize this meta to JSON."""
        return json.dumps(self.get_dict())
    def genMappingsFromVariableData(self, customizedMappings, reductions, groupedKeys, ignoreNames):
        """Build the property-mapping list: customized mappings first, then
        grouped-key mappings, then default key/value mappings, minus any
        destination names in ignoreNames."""
        reductions = reductions or []
        groupedKeys = groupedKeys or []
        customizedMappings = customizedMappings or []
        ignoreNames = ignoreNames or []
        resultMappings = []
        thisid = [self.app, self.name]
        if customizedMappings:
            resultMappings.extend(customizedMappings)
        # add grouped key mapping (skip keys already mapped or reduced)
        if groupedKeys:
            for p in groupedKeys:
                if p.name in [_.destProperty.name for _ in resultMappings] or \
                   p.name in [_.destProperty.name for _ in reductions]:
                    continue
                else:
                    srcProperty = p.get_dict()
                    destProperty = Property(thisid, p.name, p.type).get_dict()
                    resultMappings.append(DirectPropertyMapping(srcProperty=srcProperty,
                                                                destProperty=destProperty))
        # add default key mapping if there is not one
        if "key" in [_.destProperty.name for _ in resultMappings] or \
           "key" in [_.destProperty.name for _ in reductions]:
            pass
        else:
            destProperty = Property(thisid, "key", "string").get_dict()
            if not groupedKeys:
                resultMappings.append(ConstantPropertyMapping(type="string", param="",
                                                              destProperty=destProperty))
            else:
                # Bug fix: comprehension used stale "_" while looping "p",
                # emitting the wrong (or undefined) source properties.
                resultMappings.append(ConcatPropertyMapping(type="concat",
                                                            srcProperties=[p.get_dict() for p in groupedKeys],
                                                            destProperty=destProperty))
        # add default value mapping if there is not one
        if "value" in [_.destProperty.name for _ in resultMappings] or \
           "value" in [_.destProperty.name for _ in reductions]:
            pass
        else:
            resultMappings.append(ConstantPropertyMapping(type="double", param=1.0,
                                                          destProperty=Property(thisid, "value", "double").get_dict()))
        # deal with ignore names
        if ignoreNames:
            # Bug fix: filter() returns a lazy iterator on Python 3; build a
            # real list so callers can index/iterate repeatedly.
            resultMappings = [m for m in resultMappings
                              if m.destProperty.name not in ignoreNames]
        return resultMappings
    def extractProperties(self):
        """Collect destination properties from mappings and reductions,
        re-keyed to this variable's identifier."""
        result = []
        mappings = self.propertyMappings
        reductions = self.propertyReductions
        if mappings:
            result.extend([_.destProperty.copy() for _ in mappings])
        if reductions:
            result.extend([_.destProperty.copy() for _ in reductions])
        thisid = [self.app, self.name]
        for p in result:
            p.identifier = thisid
        return result
    @staticmethod
    def register_variable_meta(meta):
        """Add meta to the global registry keyed by (app, name)."""
        id = (meta.app, meta.name)
        variable_meta_registry[id] = meta
    @staticmethod
    def unregister_variable_meta(meta):
        """Remove meta from the global registry if present."""
        id = (meta.app, meta.name)
        if id in variable_meta_registry:
            del variable_meta_registry[id]
    @staticmethod
    def list_variable_meta():
        # Bug fix: dict_values is not sliceable on Python 3; return a copy
        # via list().
        return list(variable_meta_registry.values())
    @staticmethod
    def find_variable_meta_by_variable(variable):
        id = (variable.app, variable.name)
        return variable_meta_registry.get(id)
    @staticmethod
    def find_variable_meta_by_id(app, name):
        id = (app, name)
        return variable_meta_registry.get(id)
    def copy(self):
        """Deep-ish copy via serialization round-trip."""
        return VariableMeta.from_dict(self.get_dict())
    def __str__(self):
        return "VariableMeta[{}]".format(self.get_dict())
    def __eq__(self, other):
        return self.get_dict() == other.get_dict()
    def __ne__(self, other):
        return not self == other
|
python
|
from flask import Flask
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from config import config
from glob import glob
import imp
# Disable SQLAlchemy's event-based modification tracking (saves memory).
SQLALCHEMY_TRACK_MODIFICATIONS = False
# Plugin modules discovered at app creation time (see create_app).
plugins = []
# Flask extension singletons, bound to the app in create_app().
bootstrap = Bootstrap()
db = SQLAlchemy()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
from .smbinterface import SMBInterface  # has to be here, because it will import db and login_manager from this file
smbinterface = SMBInterface()
from .usagestats import UsageStatisticsThread  # has to be here, because it will import db from this file
def create_app(config_name):
    """Application factory: build and configure the Flask app for the given
    config name, register blueprints, seed activity types, start the usage
    statistics thread and load plugins."""
    app = Flask(__name__)
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)
    # Bind the module-level extension singletons to this app instance.
    bootstrap.init_app(app)
    db.init_app(app)
    login_manager.init_app(app)
    from .main import main as main_blueprint
    from .auth import auth as auth_blueprint
    from .browser import browser as browser_blueprint
    from .settings import settings as settings_blueprint
    from .profile import profile as profile_blueprint
    from .printdata import printdata as printdata_blueprint
    app.register_blueprint(main_blueprint)
    app.register_blueprint(auth_blueprint, url_prefix='/auth')
    app.register_blueprint(browser_blueprint, url_prefix='/browser')
    app.register_blueprint(settings_blueprint, url_prefix='/settings')
    app.register_blueprint(profile_blueprint, url_prefix='/profile')
    app.register_blueprint(printdata_blueprint, url_prefix='/print')
    # update activity types table
    with app.app_context():
        from .main.views import supported_targets
        from .models import ActivityType
        from sqlalchemy.exc import OperationalError
        activity_types = ['selectsmbfile', 'login', 'logout']
        try:
            # Derive add/delete/update activity types from the supported
            # targets and insert any that are not yet in the table.
            registered_activity_types = [at.description for at in ActivityType.query.all()]
            for key, target in supported_targets.items():
                activity_types.append('add:' + key)
                activity_types.append('delete:' + key)
                for field in target['fields']:
                    activity_types.append('update:' + key + ':' + field)
            for at in activity_types:
                if at not in registered_activity_types:
                    newat = ActivityType(description=at)
                    db.session.add(newat)
            db.session.commit()
        except OperationalError as e:
            # in case the table is not created yet, do nothing (this happens when we do 'python manage.py db upgrade')
            pass
    # run usage statistics thread
    UsageStatisticsThread(app)
    # look for plugins
    # NOTE(review): imp is deprecated (removed in Python 3.12) — consider
    # importlib.machinery.SourceFileLoader.  Also, a plugin that fails the
    # display/title check below is still appended to plugins — confirm
    # whether the append should be skipped for incompatible plugins.
    plugin_files = glob('plugins/*/*.py')
    for f in plugin_files:
        p = imp.load_source(f[8:-3], f)
        if not hasattr(p, 'display') or not hasattr(p, 'title'):
            # TODO: report this some other way, e.g. raise Exception or log warning...
            print("Incompatible plugin: ", f[8:-3])
        plugins.append(p)
    return app
|
python
|
import gdb
def format_plan_tree(tree, indent=0):
    'formats a plan (sub)tree, with custom indentation'
    # if the pointer is NULL, just return (null) string
    if (str(tree) == '0x0'):
        return '-> (NULL)'
    # format all the important fields (similarly to EXPLAIN); lefttree and
    # righttree are formatted by recursing into this same function
    retval = '''
-> %(type)s (cost=%(startup).3f...%(total).3f rows=%(rows)s width=%(width)s)
\ttarget list:
%(target)s
\t%(left)s
\t%(right)s''' % {
        'type' : format_type(tree['type']),         # type of the Node
        'startup' : float(tree['startup_cost']),    # startup cost
        'total' : float(tree['total_cost']),        # total cost
        'rows' : str(tree['plan_rows']),            # number of rows
        'width' : str(tree['plan_width']),          # tuple width (no header)
        # format target list
        'target' : format_node_list(tree['targetlist'], 2, True),
        # left subtree
        'left' : format_plan_tree(tree['lefttree'], 0),
        # right subtree
        'right' : format_plan_tree(tree['righttree'], 0)
    }
    return add_indent(retval, indent+1)
def format_type(t, indent=0):
    'strip the leading T_ from the node type tag'
    # Node type tags look like T_SeqScan; drop the prefix for display.
    tag = str(t)
    if tag.startswith('T_'):
        tag = tag[2:]
    return add_indent(tag, indent)
def format_int_list(lst, indent=0):
    'format a List holding plain integer values (not wrapped in Node)'
    # a NULL pointer represents an empty list (NIL)
    if str(lst) == '0x0':
        return '(NIL)'
    # walk the linked cells, collecting int_value from each one
    values = []
    cell = lst['head']
    while str(cell) != '0x0':
        values.append(int(cell['data']['int_value']))
        cell = cell['next']
    return add_indent(str(values), indent)
def format_oid_list(lst, indent=0):
    '''Format a PostgreSQL List holding bare Oid values (not wrapped in Node).'''
    # a NULL List pointer is printed as NIL
    if str(lst) == '0x0':
        return '(NIL)'
    oids = []
    cell = lst['head']
    # walk the linked list of cells until the terminating NULL
    while str(cell) != '0x0':
        oids.append(int(cell['data']['oid_value']))
        cell = cell['next']
    return add_indent(str(oids), indent)
def format_node_list(lst, indent=0, newline=False):
    'format list containing Node values'
    # With newline=True each formatted Node goes on its own line; otherwise
    # the Python-list repr of the formatted strings is returned.
    # handle NULL pointer (for List we return NIL)
    if (str(lst) == '0x0'):
        return '(NIL)'
    # we'll collect the formatted items into a Python list
    tlist = []
    item = lst['head']
    # walk the list until we reach the last item
    while str(item) != '0x0':
        # we assume the list contains Node instances, so grab a reference
        # and cast it to (Node*)
        node = cast(item['data']['ptr_value'], 'Node')
        # append the formatted Node to the result list
        tlist.append(format_node(node))
        # next item
        item = item['next']
    retval = str(tlist)
    if newline:
        retval = "\n".join([str(t) for t in tlist])
    return add_indent(retval, indent)
def format_char(value):
    '''convert the 'value' into a single-character string (ugly, maybe there's a better way'''
    # gdb renders a char value as e.g. "114 'r'"; take the second
    # space-separated token and strip the surrounding quotes.
    str_val = str(value.cast(gdb.lookup_type('char')))
    # remove the quotes (start/end)
    return str_val.split(' ')[1][1:-1]
def format_relids(relids):
    '''Placeholder formatter for Relids bitmapsets (not implemented yet).'''
    return '(not implemented)'
def format_node_array(array, start_idx, length, indent=0):
    '''Format consecutive entries of a Node pointer array, one per line.'''
    # NOTE(review): the range covers length-1 entries, not length. This
    # matches the callers here (PostgreSQL planner arrays leave slot 0
    # unused and pass the allocated size), but looks like an off-by-one
    # for general use — confirm before reusing elsewhere.
    items = []
    for i in range(start_idx,start_idx + length - 1):
        items.append(str(i) + " => " + format_node(array[i]))
    return add_indent(("\n".join(items)), indent)
def format_node(node, indent=0):
    '''Format a single Node instance (only selected Node types supported).

    Dispatches on the node's type tag; unknown node types fall back to
    printing just the type name.
    '''
    # NULL pointer -> placeholder
    if str(node) == '0x0':
        return add_indent('(NULL)', indent)
    retval = ''
    type_str = str(node['type'])
    if is_a(node, 'TargetEntry'):
        # we assume the list contains Node instances (probably safe for Plan fields)
        node = cast(node, 'TargetEntry')
        name_ptr = node['resname'].cast(gdb.lookup_type('char').pointer())
        name = "(NULL)"
        if str(name_ptr) != '0x0':
            name = '"' + (name_ptr.string()) + '"'
        retval = 'TargetEntry (resno=%(resno)s resname=%(name)s origtbl=%(tbl)s origcol=%(col)s junk=%(junk)s expr=[%(expr)s])' % {
            'resno' : node['resno'],
            'name' : name,
            'tbl' : node['resorigtbl'],
            'col' : node['resorigcol'],
            'junk' : (int(node['resjunk']) == 1),
            'expr' : format_node(node['expr'])
        }
    elif is_a(node, 'Var'):
        # we assume the list contains Node instances (probably safe for Plan fields)
        node = cast(node, 'Var')
        retval = 'Var (varno=%(no)s varattno=%(attno)s levelsup=%(levelsup)s)' % {
            'no' : node['varno'],
            'attno' : node['varattno'],
            'levelsup' : node['varlevelsup']
        }
    elif is_a(node, 'RangeTblRef'):
        node = cast(node, 'RangeTblRef')
        retval = 'RangeTblRef (rtindex=%d)' % (int(node['rtindex']),)
    elif is_a(node, 'RelOptInfo'):
        node = cast(node, 'RelOptInfo')
        retval = 'RelOptInfo (kind=%(kind)s relids=%(relids)s rtekind=%(rtekind)s relid=%(relid)s rows=%(rows)s width=%(width)s fk=%(fk)s)' % {
            'kind' : node['reloptkind'],
            'rows' : node['rows'],
            'width' : node['width'],
            'relid' : node['relid'],
            'relids' : format_relids(node['relids']),
            'rtekind' : node['rtekind'],
            'fk' : (int(node['has_fk_join']) == 1)
        }
    elif is_a(node, 'RangeTblEntry'):
        node = cast(node, 'RangeTblEntry')
        retval = 'RangeTblEntry (kind=%(rtekind)s relid=%(relid)s relkind=%(relkind)s)' % {
            'relid' : node['relid'],
            'rtekind' : node['rtekind'],
            'relkind' : format_char(node['relkind'])
        }
    elif is_a(node, 'PlannerInfo'):
        retval = format_planner_info(node)
    elif is_a(node, 'PlannedStmt'):
        retval = format_planned_stmt(node)
    elif is_a(node, 'List'):
        retval = format_node_list(node, 0, True)
    elif is_a(node, 'Plan'):
        retval = format_plan_tree(node)
    elif is_a(node, 'RestrictInfo'):
        node = cast(node, 'RestrictInfo')
        retval = '''RestrictInfo (pushed_down=%(push_down)s can_join=%(can_join)s delayed=%(delayed)s)
%(clause)s
%(orclause)s''' % {
            'clause' : format_node(node['clause'], 1),
            'orclause' : format_node(node['orclause'], 1),
            'push_down' : (int(node['is_pushed_down']) == 1),
            'can_join' : (int(node['can_join']) == 1),
            'delayed' : (int(node['outerjoin_delayed']) == 1)
        }
    elif is_a(node, 'OpExpr'):
        node = cast(node, 'OpExpr')
        retval = format_op_expr(node)
    elif is_a(node, 'BoolExpr'):
        node = cast(node, 'BoolExpr')
        # BUG FIX: removed stray Python-2 debug statement `print node`
        # that was left in this branch.
        retval = format_bool_expr(node)
    else:
        # default - just print the type name
        retval = format_type(type_str)
    return add_indent(str(retval), indent)
def format_planner_info(info, indent=0):
    '''Format the per-query planner state (simple_rel/simple_rte arrays).'''
    # Query *parse; /* the Query being planned */
    # *glob; /* global info for current planner run */
    # Index query_level; /* 1 at the outermost Query */
    # struct PlannerInfo *parent_root; /* NULL at outermost Query */
    # List *plan_params; /* list of PlannerParamItems, see below */
    # Slot 0 of both planner arrays is unused, hence the start index 1.
    retval = '''rel:
%(rel)s
rte:
%(rte)s
''' % {'rel' : format_node_array(info['simple_rel_array'], 1, int(info['simple_rel_array_size'])),
    'rte' : format_node_array(info['simple_rte_array'], 1, int(info['simple_rel_array_size']))}
    return add_indent(retval, indent)
def format_planned_stmt(plan, indent=0):
    '''Format a PlannedStmt (the planner's final output), EXPLAIN-style.'''
    retval = ''' type: %(type)s
query ID: %(qid)s
param exec: %(nparam)s
returning: %(has_returning)s
modifying CTE: %(has_modify_cte)s
can set tag: %(can_set_tag)s
transient: %(transient)s
row security: %(row_security)s
plan tree: %(tree)s
range table:
%(rtable)s
relation OIDs: %(relation_oids)s
result rels: %(result_rels)s
utility stmt: %(util_stmt)s
subplans: %(subplans)s''' % {
        'type' : plan['commandType'],
        'qid' : plan['queryId'],
        'nparam' : plan['nParamExec'],
        # boolean struct fields are printed as Python True/False
        'has_returning' : (int(plan['hasReturning']) == 1),
        'has_modify_cte' : (int(plan['hasModifyingCTE']) == 1),
        'can_set_tag' : (int(plan['canSetTag']) == 1),
        'transient' : (int(plan['transientPlan']) == 1),
        'row_security' : (int(plan['hasRowSecurity']) == 1),
        'tree' : format_plan_tree(plan['planTree']),
        'rtable' : format_node_list(plan['rtable'], 1, True),
        'relation_oids' : format_oid_list(plan['relationOids']),
        'result_rels' : format_int_list(plan['resultRelations']),
        'util_stmt' : format_node(plan['utilityStmt']),
        'subplans' : format_node_list(plan['subplans'], 1, True)
    }
    return add_indent(retval, indent)
def format_op_expr(node, indent=0):
    '''Format an OpExpr node: operator OID plus its argument clauses.'''
    clauses = format_node_list(node['args'], 1, True)
    return """OpExpr [opno=%(opno)s]
%(clauses)s""" % {'opno': node['opno'], 'clauses': clauses}
def format_bool_expr(node, indent=0):
    '''Format a BoolExpr node: boolean operator plus its argument clauses.'''
    clauses = format_node_list(node['args'], 1, True)
    return """BoolExpr [op=%(op)s]
%(clauses)s""" % {'op': node['boolop'], 'clauses': clauses}
def is_a(n, t):
    '''Check whether node *n* carries the type tag *t* (like the IsA() macro).'''
    # non-Node values can never match
    if not is_node(n):
        return False
    return str(n['type']) == ('T_' + t)
def is_node(l):
    '''Return True if the value looks like a Node (has a 'type' field).

    Anything that cannot be subscripted with 'type' — or lacks that
    field — is not a Node.
    '''
    try:
        l['type']
        return True
    except Exception:
        # narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit); subscripting a non-Node raises
        # TypeError/KeyError or gdb.error here
        return False
def cast(node, type_name):
    '''wrap the gdb cast to proper node type'''
    # lookup the type with name 'type_name' and cast the node to it
    # (cast to a *pointer* to that type — Node references are pointers)
    t = gdb.lookup_type(type_name)
    return node.cast(t.pointer())
def add_indent(val, indent):
    '''Prefix every line of *val* with *indent* tab characters.'''
    prefix = "\t" * indent
    return "\n".join(prefix + line for line in val.split("\n"))
class PgPrintCommand(gdb.Command):
    "print PostgreSQL structures"
    # Registers the gdb command "pgprint <var>", which pretty-prints a
    # PostgreSQL Node variable/expression. (Python 2 syntax — gdb builds
    # linked against Python 2.)
    def __init__ (self):
        super (PgPrintCommand, self).__init__ ("pgprint",
                gdb.COMMAND_SUPPORT,
                gdb.COMPLETE_NONE, False)
    def invoke (self, arg, from_tty):
        # exactly one argument: the expression naming the Node to print
        arg_list = gdb.string_to_argv(arg)
        if len(arg_list) != 1:
            print "usage: pgprint var"
            return
        l = gdb.parse_and_eval(arg_list[0])
        if not is_node(l):
            print "not a node type"
            # NOTE(review): control falls through and still calls
            # format_node() below; a `return` here looks intended — confirm.
        print format_node(l)
PgPrintCommand()
|
python
|
# ptext module: place this in your import directory.
# ptext.draw(text, pos=None, **options)
# Please see README.md for explanation of options.
# https://github.com/cosmologicon/pygame-text
from __future__ import division, print_function
from math import ceil, sin, cos, radians, exp
from collections import namedtuple
import pygame
DEFAULT_FONT_SIZE = 24
REFERENCE_FONT_SIZE = 100
DEFAULT_LINE_HEIGHT = 1.0
DEFAULT_PARAGRAPH_SPACE = 0.0
DEFAULT_FONT_NAME = None
FONT_NAME_TEMPLATE = "%s"
DEFAULT_COLOR = "white"
DEFAULT_BACKGROUND = None
DEFAULT_SHADE = 0
DEFAULT_OUTLINE_COLOR = "black"
DEFAULT_SHADOW_COLOR = "black"
OUTLINE_UNIT = 1 / 24
SHADOW_UNIT = 1 / 18
DEFAULT_ALIGN = "left" # left, center, or right
DEFAULT_ANCHOR = 0, 0 # 0, 0 = top left ; 1, 1 = bottom right
DEFAULT_STRIP = True
ALPHA_RESOLUTION = 16
ANGLE_RESOLUTION_DEGREES = 3
AUTO_CLEAN = True
MEMORY_LIMIT_MB = 64
MEMORY_REDUCTION_FACTOR = 0.5
pygame.font.init()
# Options objects encapsulate the keyword arguments to functions that take a lot of keyword
# arguments.
# Options object base class. Subclass for Options objects specific to different functions.
# Specify valid fields in the _fields list. Unspecified fields default to None, unless otherwise
# specified in the _defaults list.
class _Options(object):
_fields = ()
_defaults = {}
def __init__(self, **kwargs):
fields = self._allfields()
badfields = set(kwargs) - fields
if badfields:
raise ValueError("Unrecognized args: " + ", ".join(badfields))
for field in fields:
value = kwargs[field] if field in kwargs else self._defaults.get(field)
setattr(self, field, value)
@classmethod
def _allfields(cls):
return set(cls._fields) | set(cls._defaults)
def update(self, **newkwargs):
kwargs = { field: getattr(self, field) for field in self._allfields() }
kwargs.update(**newkwargs)
return kwargs
def key(self):
return tuple(getattr(self, field) for field in sorted(self._allfields()))
def getsuboptions(self, optclass):
return { field: getattr(self, field) for field in optclass._allfields() if hasattr(self, field) }
_default_surf_sentinel = ()
# Options argument for the draw function. Specifies both text styling and positioning.
class _DrawOptions(_Options):
    """Keyword options accepted by draw(): text styling plus positioning."""
    _fields = ("pos",
        "fontname", "fontsize", "sysfontname", "antialias", "bold", "italic", "underline",
        "color", "background",
        "top", "left", "bottom", "right", "topleft", "bottomleft", "topright", "bottomright",
        "midtop", "midleft", "midbottom", "midright", "center", "centerx", "centery",
        "width", "widthem", "lineheight", "pspace", "strip", "align",
        "owidth", "ocolor", "shadow", "scolor", "gcolor", "shade",
        "alpha", "anchor", "angle", "surf", "cache")
    _defaults = {
        "antialias": True, "alpha": 1.0, "angle": 0,
        "surf": _default_surf_sentinel, "cache": True }
    def __init__(self, **kwargs):
        _Options.__init__(self, **kwargs)
        self.expandposition()
        self.expandanchor()
        self.resolvesurf()
    # Expand each 2-element position specifier and overwrite the corresponding 1-element
    # position specifiers.
    def expandposition(self):
        if self.topleft: self.left, self.top = self.topleft
        if self.bottomleft: self.left, self.bottom = self.bottomleft
        if self.topright: self.right, self.top = self.topright
        if self.bottomright: self.right, self.bottom = self.bottomright
        if self.midtop: self.centerx, self.top = self.midtop
        if self.midleft: self.left, self.centery = self.midleft
        if self.midbottom: self.centerx, self.bottom = self.midbottom
        if self.midright: self.right, self.centery = self.midright
        if self.center: self.centerx, self.centery = self.center
    # Update the pos and anchor fields, if unspecified, to be specified by the positional
    # keyword arguments.
    def expandanchor(self):
        x, y = self.pos or (None, None)
        hanchor, vanchor = self.anchor or (None, None)
        # each 1-element specifier fixes a coordinate AND its anchor fraction
        if self.left is not None: x, hanchor = self.left, 0
        if self.centerx is not None: x, hanchor = self.centerx, 0.5
        if self.right is not None: x, hanchor = self.right, 1
        if self.top is not None: y, vanchor = self.top, 0
        if self.centery is not None: y, vanchor = self.centery, 0.5
        if self.bottom is not None: y, vanchor = self.bottom, 1
        if x is None:
            raise ValueError("Unable to determine horizontal position")
        if y is None:
            raise ValueError("Unable to determine vertical position")
        self.pos = x, y
        # the horizontal anchor doubles as the default text alignment
        if self.align is None: self.align = hanchor
        if hanchor is None: hanchor = DEFAULT_ANCHOR[0]
        if vanchor is None: vanchor = DEFAULT_ANCHOR[1]
        self.anchor = hanchor, vanchor
    # Unspecified surf values default to the display surface.
    def resolvesurf(self):
        if self.surf is _default_surf_sentinel:
            self.surf = pygame.display.get_surface()
    def togetsurfoptions(self):
        # subset of these options consumed by getsurf()
        return self.getsuboptions(_GetsurfOptions)
class _DrawboxOptions(_Options):
    """Keyword options accepted by drawbox(). Like _DrawOptions but without
    explicit positioning: the target rect determines position and fontsize."""
    _fields = (
        "fontname", "sysfontname", "antialias", "bold", "italic", "underline",
        "color", "background",
        "lineheight", "pspace", "strip", "align",
        "owidth", "ocolor", "shadow", "scolor", "gcolor", "shade",
        "alpha", "anchor", "angle", "surf", "cache")
    _defaults = {
        "antialias": True, "alpha": 1.0, "angle": 0, "anchor": (0.5, 0.5),
        "surf": _default_surf_sentinel, "cache": True }
    def __init__(self, **kwargs):
        _Options.__init__(self, **kwargs)
        # resolve layout-affecting defaults up front (used by _fitsize)
        if self.fontname is None: self.fontname = DEFAULT_FONT_NAME
        if self.lineheight is None: self.lineheight = DEFAULT_LINE_HEIGHT
        if self.pspace is None: self.pspace = DEFAULT_PARAGRAPH_SPACE
    def todrawoptions(self):
        return self.getsuboptions(_DrawOptions)
    def tofitsizeoptions(self):
        return self.getsuboptions(_FitsizeOptions)
class _GetsurfOptions(_Options):
    """Keyword options accepted by getsurf(). Normalizes colors, shade,
    alpha, angle and outline/shadow pixel sizes so that key() is a stable
    cache key."""
    _fields = ("fontname", "fontsize", "sysfontname", "bold", "italic", "underline", "width",
        "widthem", "strip", "color", "background", "antialias", "ocolor", "owidth", "scolor",
        "shadow", "gcolor", "shade", "alpha", "align", "lineheight", "pspace", "angle", "cache")
    _defaults = { "antialias": True, "alpha": 1.0, "angle": 0, "cache": True }
    def __init__(self, **kwargs):
        _Options.__init__(self, **kwargs)
        if self.fontname is None: self.fontname = DEFAULT_FONT_NAME
        if self.fontsize is None: self.fontsize = DEFAULT_FONT_SIZE
        self.fontsize = int(round(self.fontsize))
        if self.align is None: self.align = DEFAULT_ALIGN
        # normalize align names to a 0 / 0.5 / 1 fraction
        if self.align in ["left", "center", "right"]:
            self.align = [0, 0.5, 1][["left", "center", "right"].index(self.align)]
        if self.lineheight is None: self.lineheight = DEFAULT_LINE_HEIGHT
        if self.pspace is None: self.pspace = DEFAULT_PARAGRAPH_SPACE
        self.color = _resolvecolor(self.color, DEFAULT_COLOR)
        self.background = _resolvecolor(self.background, DEFAULT_BACKGROUND)
        self.gcolor = _resolvecolor(self.gcolor, None)
        if self.shade is None: self.shade = DEFAULT_SHADE
        if self.shade:
            # shade is folded into gcolor, then zeroed so keys stay canonical
            self.gcolor = _applyshade(self.gcolor or self.color, self.shade)
            self.shade = 0
        # outline/shadow colors only apply when their size option is set
        self.ocolor = None if self.owidth is None else _resolvecolor(self.ocolor, DEFAULT_OUTLINE_COLOR)
        self.scolor = None if self.shadow is None else _resolvecolor(self.scolor, DEFAULT_SHADOW_COLOR)
        # outline/shadow sizes converted to pixels, scaled by fontsize
        self._opx = None if self.owidth is None else ceil(self.owidth * self.fontsize * OUTLINE_UNIT)
        self._spx = None if self.shadow is None else tuple(ceil(s * self.fontsize * SHADOW_UNIT) for s in self.shadow)
        self.alpha = _resolvealpha(self.alpha)
        self.angle = _resolveangle(self.angle)
        self.strip = DEFAULT_STRIP if self.strip is None else self.strip
    def towrapoptions(self):
        return self.getsuboptions(_WrapOptions)
    def togetfontoptions(self):
        return self.getsuboptions(_GetfontOptions)
class _WrapOptions(_Options):
    """Keyword options accepted by wrap(). An em-based width (widthem) is
    converted to pixels at the reference font size."""
    _fields = ("fontname", "fontsize", "sysfontname",
        "bold", "italic", "underline", "width", "widthem", "strip")
    def __init__(self, **kwargs):
        _Options.__init__(self, **kwargs)
        if self.width is not None and self.widthem is not None:
            raise ValueError("Can't set both width and widthem")
        if self.widthem is not None:
            # measure em-based widths against the reference font size
            self.width = self.widthem * REFERENCE_FONT_SIZE
            self.fontsize = REFERENCE_FONT_SIZE
        if self.strip is None:
            self.strip = DEFAULT_STRIP
    def togetfontoptions(self):
        return self.getsuboptions(_GetfontOptions)
class _GetfontOptions(_Options):
    """Keyword options identifying a font: either a font file (fontname)
    or a system font (sysfontname), plus size and style flags."""
    _fields = ("fontname", "fontsize", "sysfontname", "bold", "italic", "underline")
    def __init__(self, **kwargs):
        _Options.__init__(self, **kwargs)
        if self.fontname is not None and self.sysfontname is not None:
            raise ValueError("Can't set both fontname and sysfontname")
        if self.fontname is None and self.sysfontname is None:
            # BUG FIX: the default was assigned to a local variable
            # (`fontname = ...`), never to self.fontname, so a configured
            # DEFAULT_FONT_NAME was silently ignored.
            self.fontname = DEFAULT_FONT_NAME
        if self.fontsize is None:
            self.fontsize = DEFAULT_FONT_SIZE
    def getfontpath(self):
        # None means "use pygame's default font"; otherwise apply the template
        return self.fontname if self.fontname is None else FONT_NAME_TEMPLATE % self.fontname
class _FitsizeOptions(_Options):
    """Options consumed by _fitsize(): font identity plus layout spacing."""
    _fields = ("fontname", "sysfontname", "bold", "italic", "underline",
        "lineheight", "pspace", "strip")
    def towrapoptions(self):
        return self.getsuboptions(_WrapOptions)
    def togetfontoptions(self):
        return self.getsuboptions(_GetfontOptions)
# Cache of pygame Font objects keyed by the full font option set.
_font_cache = {}
def getfont(**kwargs):
    """Return a (cached) pygame Font matching the given font options."""
    options = _GetfontOptions(**kwargs)
    key = options.key()
    if key in _font_cache: return _font_cache[key]
    if options.sysfontname is not None:
        font = pygame.font.SysFont(options.sysfontname, options.fontsize, options.bold or False, options.italic or False)
    else:
        try:
            font = pygame.font.Font(options.getfontpath(), options.fontsize)
        except IOError:
            raise IOError("unable to read font filename: %s" % options.getfontpath())
    # SysFont already applied bold/italic; these setters cover the
    # file-font path (None means "leave the font's default")
    if options.bold is not None:
        font.set_bold(options.bold)
    if options.italic is not None:
        font.set_italic(options.italic)
    if options.underline is not None:
        font.set_underline(options.underline)
    _font_cache[key] = font
    return font
def wrap(text, **kwargs):
    """Split text into (line, jpara) pairs wrapped to the requested width.

    jpara is the index of the paragraph the line came from; paragraphs
    are separated by newlines in the input. With no width option, each
    paragraph becomes exactly one line.
    """
    options = _WrapOptions(**kwargs)
    font = getfont(**options.togetfontoptions())
    getwidth = lambda line: font.size(line)[0]
    # Apparently Font.render accepts None for the text argument, in which case it's treated as the
    # empty string. We match that behavior here.
    if text is None: text = "    "
    paras = text.replace("\t", "    ").split("\n")
    lines = []
    for jpara, para in enumerate(paras):
        if options.strip:
            para = para.rstrip(" ")
        if options.width is None:
            # no wrapping requested: one line per paragraph
            lines.append((para, jpara))
            continue
        if not para:
            lines.append(("", jpara))
            continue
        # A break point is defined as any space character that immediately follows a non-space
        # character, or the end of the paragraph. These are the points that will be considered for
        # breaking a line off the front of the paragraph, although exactly how much whitespace goes
        # into the line depends on options.strip.
        # A valid break point is any break point such that breaking here will keep the width of the
        # line within options.width, with the exception that the first break point in the
        # paragraph is always valid. The goal of this algorithm is to find the last valid break
        # point.
        # Preserve paragraph leading spaces in all cases.
        lspaces = len(para) - len(para.lstrip(" "))
        # At any given time, a is the index of a known valid break point, and line = para[:a].
        a = para.index(" ", lspaces) if " " in para[lspaces:] else len(para)
        line = para[:a]
        while a + 1 < len(para):
            # b is the next break point, with bline the corresponding line to add.
            if " " not in para[a+1:]:
                b = len(para)
            else:
                # Find a space character that immediately follows a non-space character.
                b = para.index(" ", a + 1)
                while para[b-1] == " ":
                    if " " in para[b+1:]:
                        b = para.index(" ", b + 1)
                    else:
                        b = len(para)
                        break
            # BUG FIX: bline was assigned redundantly in both branches above
            # and then again here; the single common assignment suffices.
            bline = para[:b]
            if getwidth(bline) <= options.width:
                a, line = b, bline
            else:
                # Last valid break point located.
                if not options.strip:
                    # If options.strip is False, maintain as many spaces from after the break point
                    # as will keep us under options.width.
                    nspaces = len(para[a:]) - len(para[a:].lstrip(" "))
                    for jspace in range(nspaces):
                        nline = line + " "
                        if getwidth(nline) > options.width:
                            break
                        line = nline
                lines.append((line, jpara))
                # Start the search over with the rest of the paragraph.
                para = para[a:].lstrip(" ")
                a = para.index(" ", 1) if " " in para[1:] else len(para)
                line = para[:a]
        # Handle the case of the first valid break point of the last line being the end of the line.
        # In this case there are no trailing spaces.
        if para:
            lines.append((line, jpara))
    return lines
# Return the largest integer in the range [xmin, xmax] such that f(x) is True.
def _binarysearch(f, xmin = 1, xmax = 256):
if not f(xmin): return xmin
if f(xmax): return xmax
# xmin is the largest known value for which f(x) is True
# xmax is the smallest known value for which f(x) is False
while xmax - xmin > 1:
x = (xmax + xmin) // 2
if f(x):
xmin = x
else:
xmax = x
return xmin
# Cache of best-fitting font sizes keyed by (text, box size, options).
_fit_cache = {}
def _fitsize(text, size, **kwargs):
    """Return the largest fontsize at which the wrapped text fits in size=(w, h)."""
    options = _FitsizeOptions(**kwargs)
    key = text, size, options.key()
    if key in _fit_cache: return _fit_cache[key]
    width, height = size
    def fits(fontsize):
        # would the text, wrapped at this fontsize, fit inside the box?
        texts = wrap(text, fontsize=fontsize, width=width, **options.towrapoptions())
        font = getfont(fontsize=fontsize, **options.togetfontoptions())
        w = max(font.size(line)[0] for line, jpara in texts)
        linesize = font.get_linesize() * options.lineheight
        paraspace = font.get_linesize() * options.pspace
        # total height: line advances + paragraph gaps + last line's glyph height
        h = int(round((len(texts) - 1) * linesize + texts[-1][1] * paraspace)) + font.get_height()
        return w <= width and h <= height
    fontsize = _binarysearch(fits)
    _fit_cache[key] = fontsize
    return fontsize
# Returns the color as a color RGB or RGBA tuple (i.e. 3 or 4 integers in the range 0-255)
# If color is None, fall back to the default. If default is also None, return None.
# Both color and default can be a list, tuple, a color name, an HTML color format string, a hex
# number string, or an integer pixel value. See pygame.Color constructor for specification.
def _resolvecolor(color, default):
    """Normalize color (falling back to default when None) to an RGB(A) tuple."""
    if color is None: color = default
    if color is None: return None
    try:
        # pygame.Color accepts names, HTML/hex strings, and packed ints
        return tuple(pygame.Color(color))
    except ValueError:
        # already a sequence of channel values
        return tuple(color)
def _applyshade(color, shade):
f = exp(-0.4 * shade)
r, g, b = [
min(max(int(round((c + 50) * f - 50)), 0), 255)
for c in color[:3]
]
return (r, g, b) + tuple(color[3:])
def _resolvealpha(alpha):
    """Clamp alpha to [0, 1] and snap it onto the fixed alpha grid."""
    if alpha >= 1:
        return 1
    quantized = int(round(alpha * ALPHA_RESOLUTION)) / ALPHA_RESOLUTION
    return max(quantized, 0)
def _resolveangle(angle):
    """Normalize angle to [0, 360) and snap it onto the angle grid."""
    if not angle:
        return 0
    angle %= 360
    steps = int(round(angle / ANGLE_RESOLUTION_DEGREES))
    return steps * ANGLE_RESOLUTION_DEGREES
# Return the set of points in the circle radius r, using Bresenham's circle algorithm
_circle_cache = {}
def _circlepoints(r):
r = int(round(r))
if r in _circle_cache:
return _circle_cache[r]
x, y, e = r, 0, 1 - r
_circle_cache[r] = points = []
while x >= y:
points.append((x, y))
y += 1
if e < 0:
e += 2 * y - 1
else:
x -= 1
e += 2 * (y - x) - 1
points += [(y, x) for x, y in points if x > y]
points += [(-x, y) for x, y in points if x]
points += [(x, -y) for x, y in points if y]
points.sort()
return points
# Rotate the given surface by the given angle, in degrees.
def _rotatesurf(surf, angle):
    """Rotate surf, using the lossless right-angle rotation when possible
    and rotozoom (with interpolation) otherwise."""
    if angle in (90, 180, 270):
        return pygame.transform.rotate(surf, angle)
    return pygame.transform.rotozoom(surf, angle, 1.0)
# Apply the given alpha value to a copy of the Surface.
def _fadesurf(surf, alpha):
    """Return a copy of surf with the given alpha factor multiplied in."""
    faded = surf.copy()
    mask = surf.copy()
    mask.fill((255, 255, 255, int(round(255 * alpha))))
    faded.blit(mask, (0, 0), None, pygame.BLEND_RGBA_MULT)
    return faded
def _istransparent(color):
return len(color) > 3 and color[3] == 0
# Produce a 1xh Surface with the given color gradient.
# Cache of gradient strips keyed by their full parameter set.
_grad_cache = {}
def _gradsurf(h, y0, y1, color0, color1):
    """Return a cached 1xh surface fading from color0 (at y0) to color1 (at y1)."""
    key = h, y0, y1, color0, color1
    if key in _grad_cache:
        return _grad_cache[key]
    surf = pygame.Surface((1, h)).convert_alpha()
    r0, g0, b0 = color0[:3]
    r1, g1, b1 = color1[:3]
    for y in range(h):
        # f: interpolation fraction clamped to [0, 1]; g is its complement
        f = min(max((y - y0) / (y1 - y0), 0), 1)
        g = 1 - f
        # alpha 0: the strip is combined additively by the caller
        surf.set_at((0, y), (
            int(round(g * r0 + f * r1)),
            int(round(g * g0 + f * g1)),
            int(round(g * b0 + f * b1)),
            0
        ))
    _grad_cache[key] = surf
    return surf
# Rendered-surface cache and bookkeeping: the surfaces themselves, LRU ticks,
# the running byte total, and the pre-rotation sizes needed by _blitpos.
_surf_cache = {}
_surf_tick_usage = {}
_surf_size_total = 0
_unrotated_size = {}
_tick = 0
def getsurf(text, **kwargs):
    """Render text to a (cached) Surface.

    Rotation, alpha fade, shadow, outline and gradient are each applied by
    recursively rendering a simpler variant of the options and compositing.
    """
    global _tick, _surf_size_total
    options = _GetsurfOptions(**kwargs)
    key = text, options.key()
    if key in _surf_cache:
        # cache hit: refresh the LRU tick
        _surf_tick_usage[key] = _tick
        _tick += 1
        return _surf_cache[key]
    texts = wrap(text, **options.towrapoptions())
    if options.angle:
        # render unrotated, rotate, and remember the unrotated size
        surf0 = getsurf(text, **options.update(angle = 0))
        surf = _rotatesurf(surf0, options.angle)
        _unrotated_size[(surf.get_size(), options.angle, text)] = surf0.get_size()
    elif options.alpha < 1.0:
        # render opaque, then multiply the alpha in
        surf = _fadesurf(getsurf(text, **options.update(alpha = 1.0)), options.alpha)
    elif options._spx is not None:
        # shadow: blit a shadow-colored copy offset by _spx under the text
        color = (0, 0, 0) if _istransparent(options.color) else options.color
        surf0 = getsurf(text, **options.update(background = (0, 0, 0, 0), color = color, shadow = None, scolor = None))
        ssurf = getsurf(text, **options.update(background = (0, 0, 0, 0), color = options.scolor, shadow = None, scolor = None, gcolor = None))
        w0, h0 = surf0.get_size()
        sx, sy = options._spx
        surf = pygame.Surface((w0 + abs(sx), h0 + abs(sy))).convert_alpha()
        surf.fill(options.background or (0, 0, 0, 0))
        dx, dy = max(sx, 0), max(sy, 0)
        surf.blit(ssurf, (dx, dy))
        x0, y0 = abs(sx) - dx, abs(sy) - dy
        if _istransparent(options.color):
            # transparent text: punch the glyphs out of the shadow
            surf.blit(surf0, (x0, y0), None, pygame.BLEND_RGBA_SUB)
        else:
            surf.blit(surf0, (x0, y0))
    elif options._opx is not None:
        # outline: stamp an outline-colored copy around a circle of offsets
        color = (0, 0, 0) if _istransparent(options.color) else options.color
        surf0 = getsurf(text, **options.update(color = color, ocolor = None, owidth = None))
        osurf = getsurf(text, **options.update(color = options.ocolor, ocolor = None, owidth = None, background = (0,0,0,0), gcolor = None))
        w0, h0 = surf0.get_size()
        opx = options._opx
        surf = pygame.Surface((w0 + 2 * opx, h0 + 2 * opx)).convert_alpha()
        surf.fill(options.background or (0, 0, 0, 0))
        for dx, dy in _circlepoints(opx):
            surf.blit(osurf, (dx + opx, dy + opx))
        if _istransparent(options.color):
            surf.blit(surf0, (opx, opx), None, pygame.BLEND_RGBA_SUB)
        else:
            surf.blit(surf0, (opx, opx))
    else:
        # base case: render each wrapped line and stack them
        font = getfont(**options.togetfontoptions())
        color = options.color
        if options.gcolor is not None:
            # gradient is added later; render black so the ADD blend works
            color = 0, 0, 0
        # pygame.Font.render does not allow passing None as an argument value for background.
        if options.background is None or (len(options.background) > 3 and options.background[3] == 0) or options.gcolor is not None:
            lsurfs = [font.render(text, options.antialias, color).convert_alpha() for text, jpara in texts]
        else:
            lsurfs = [font.render(text, options.antialias, color, options.background).convert_alpha() for text, jpara in texts]
        if options.gcolor is not None:
            # additive vertical gradient from color to gcolor over the ascent
            gsurf0 = _gradsurf(lsurfs[0].get_height(), 0.5 * font.get_ascent(), font.get_ascent(), options.color, options.gcolor)
            for lsurf in lsurfs:
                gsurf = pygame.transform.scale(gsurf0, lsurf.get_size())
                lsurf.blit(gsurf, (0, 0), None, pygame.BLEND_RGBA_ADD)
        if len(lsurfs) == 1 and options.gcolor is None:
            surf = lsurfs[0]
        else:
            w = max(lsurf.get_width() for lsurf in lsurfs)
            linesize = font.get_linesize() * options.lineheight
            parasize = font.get_linesize() * options.pspace
            # vertical offsets: line advances plus paragraph spacing
            ys = [int(round(k * linesize + jpara * parasize)) for k, (text, jpara) in enumerate(texts)]
            h = ys[-1] + font.get_height()
            surf = pygame.Surface((w, h)).convert_alpha()
            surf.fill(options.background or (0, 0, 0, 0))
            for y, lsurf in zip(ys, lsurfs):
                x = int(round(options.align * (w - lsurf.get_width())))
                surf.blit(lsurf, (x, y))
    if options.cache:
        # account 4 bytes per pixel toward the cache memory budget
        w, h = surf.get_size()
        _surf_size_total += 4 * w * h
        _surf_cache[key] = surf
    _surf_tick_usage[key] = _tick
    _tick += 1
    return surf
# The actual position on the screen where the surf is to be blitted, rather than the specified
# anchor position.
def _blitpos(angle, pos, anchor, tsurf, text):
    """Convert an anchor position into the integer top-left blit position."""
    angle = _resolveangle(angle)
    x, y = pos
    hanchor, vanchor = anchor
    if angle:
        # rotated: anchor offsets are computed in the unrotated surface's
        # frame, rotated, then shifted by half the rotated surface's size
        w0, h0 = _unrotated_size[(tsurf.get_size(), angle, text)]
        S, C = sin(radians(angle)), cos(radians(angle))
        dx, dy = (0.5 - hanchor) * w0, (0.5 - vanchor) * h0
        x += dx * C + dy * S - 0.5 * tsurf.get_width()
        y += -dx * S + dy * C - 0.5 * tsurf.get_height()
    else:
        x -= hanchor * tsurf.get_width()
        y -= vanchor * tsurf.get_height()
    x = int(round(x))
    y = int(round(y))
    return x, y
def draw(text, pos=None, **kwargs):
    """Render text and blit it onto the target surface.

    Returns (surface, topleft) so callers can re-blit without re-rendering.
    """
    options = _DrawOptions(pos = pos, **kwargs)
    tsurf = getsurf(text, **options.togetsurfoptions())
    pos = _blitpos(options.angle, options.pos, options.anchor, tsurf, text)
    # surf=None means "render only, don't blit"
    if options.surf is not None:
        options.surf.blit(tsurf, pos)
    if AUTO_CLEAN:
        clean()
    return tsurf, pos
def drawbox(text, rect, **kwargs):
    """Draw text at the largest fontsize that fits inside rect."""
    options = _DrawboxOptions(**kwargs)
    rect = pygame.Rect(rect)
    # position the anchor point proportionally inside the rect
    hanchor, vanchor = options.anchor
    x = rect.x + hanchor * rect.width
    y = rect.y + vanchor * rect.height
    fontsize = _fitsize(text, rect.size, **options.tofitsizeoptions())
    return draw(text, pos=(x,y), width=rect.width, fontsize=fontsize, **options.todrawoptions())
def clean():
    """Evict least-recently-used cached surfaces once the memory budget
    is exceeded, shrinking well below the limit so this doesn't run on
    every draw."""
    global _surf_size_total
    limit = MEMORY_LIMIT_MB * (1 << 20)
    if _surf_size_total < limit:
        return
    # free down to the reduced target, oldest entries first
    limit *= MEMORY_REDUCTION_FACTOR
    for key in sorted(_surf_cache, key=_surf_tick_usage.get):
        w, h = _surf_cache[key].get_size()
        del _surf_cache[key]
        del _surf_tick_usage[key]
        _surf_size_total -= 4 * w * h
        if _surf_size_total < limit:
            break
|
python
|
#it2.py
import json
import argparse
import funcy
import os, shutil
from sklearn.model_selection import train_test_split
'''
#train_dir = '/home/data/130/train_split.json'
#os.makedirs('train2017', exist_ok=True)
#test_dir = '/home/data/130/test_split.json'
#os.makedirs('test2017', exist_ok=True)
parser = argparse.ArgumentParser(description='Splits COCO annotations file into training and test sets.')
parser.add_argument('annotations', metavar='coco_annotations', type=str,
help='Path to COCO annotations file.')
parser.add_argument('train', type=str, help='Where to store COCO training annotations')
parser.add_argument('test', type=str, help='Where to store COCO test annotations')
parser.add_argument('-s', dest='split', type=float, required=True,
help="A percentage of a split; a number in (0, 1)")
parser.add_argument('--having-annotations', dest='having_annotations', action='store_true',
help='Ignore all images without annotations. Keep only these with at least one annotation')
parser.add_argument('images', type=str, help='Where images(dataset) is stored')
args = parser.parse_args()
'''
def save_coco(filep, info, licenses, images, annotations, categories):
    """Write a COCO-format annotation file to *filep*."""
    payload = {
        'info': info,
        'licenses': licenses,
        'images': images,
        'annotations': annotations,
        'categories': categories,
    }
    with open(filep, 'wt', encoding='UTF-8') as coco:
        json.dump(payload, coco, indent=2, sort_keys=True)
def filter_annotations(annotations, images):
    """Keep only the annotations whose image_id belongs to *images*.

    Uses a set for O(1) membership tests (stdlib only) instead of funcy's
    list-based lookup, which was O(len(images)) per annotation.
    """
    image_ids = {int(image['id']) for image in images}
    return [a for a in annotations if int(a['image_id']) in image_ids]
def cocosplit(annotations = '/home/data/130/train.json', split = 0.9, train = '/home/data/130/train_split.json' , test = '/home/data/130/test_split.json'):
    """Split a COCO annotation file into train/test annotation files.

    annotations -- path of the source COCO json
    split       -- fraction of images assigned to the train set
    train/test  -- output paths for the two split files
    """
    with open(annotations, 'rt', encoding='UTF-8') as src:
        coco = json.load(src)
    # BUG FIX: trailing commas previously made these 1-tuples ('None',)
    # instead of strings; this dataset has no info/licenses sections.
    info = 'None' #coco['info']
    licenses = 'None' #coco['licenses']
    images = coco['images']
    anns = coco['annotations']
    categories = coco['categories']
    #number_of_images = len(images)
    #images_with_annotations = funcy.lmap(lambda a: int(a['image_id']), annotations)
    #if args.having_annotations:
    #    images = funcy.lremove(lambda i: i['id'] not in images_with_annotations, images)
    # fixed random_state so reruns produce identical splits
    x, y = train_test_split(images, train_size=split, random_state=42)
    #save_train = [shutil.copyfile(args.images + '/' + i['file_name'], train_dir + '/' + i['file_name']) for i in x]
    #save_test = [shutil.copyfile(args.images + '/' + j['file_name'], test_dir + '/' + j['file_name']) for j in y]
    save_coco(train, info, licenses, x, filter_annotations(anns, x), categories)
    save_coco(test, info, licenses, y, filter_annotations(anns, y), categories)
    print("Saved {} entries in {} and {} in {}".format(len(x), train, len(y), test))
#if __name__ == "__main__":
# cocosplit(args.annotations, args.split, args.train, args.test)
|
python
|
import os.path, sys, re, cv2, glob, numpy as np
import os.path as osp
from tqdm import tqdm
from IPython import embed
import scipy
import matplotlib.pyplot as plt
from skimage.transform import resize
from mpl_toolkits.mplot3d import Axes3D
from sklearn.metrics import auc
from matplotlib.patches import Circle
import torch
# from .ipv_vis import *
from vision.triangulation import triangulate
from vision.multiview import pix2coord, coord2pix
from core import cfg
from vision.multiview import de_normalize
from vision.visualizer_human import draw_2d_pose
from vision.visualizer_hand import plot_hand_3d
class Cursor(object):
    """Interactive crosshair on a matplotlib axes: clicking the sample axes
    draws per-weight heatmaps for the clicked pixel on the companion axes."""
    def __init__(self, sample_ax, draw_ax):
        # sample_ax: the axes that receives clicks; draw_ax: iterable of
        # axes that display the resulting heatmaps
        self.sample_ax = sample_ax
        self.draw_ax = draw_ax
        self.lx = sample_ax.axhline(color='k')  # the horiz line
        self.ly = sample_ax.axvline(color='k')  # the vert line
        # text location in axes coords
        self.txt = sample_ax.text(0, 0, '', va="bottom", ha="left")
    def mouse_down(self, event):
        # matplotlib button-press callback
        if not event.inaxes:
            return
        x, y = event.xdata, event.ydata
        # update the line positions
        self.lx.set_ydata(y)
        self.ly.set_xdata(x)
        self.txt.set_text('x=%1.1f, y=%1.1f' % (x, y))
        self.sample_ax.figure.canvas.draw()
        for i in self.draw_ax:
            i.clear()
            i.figure.canvas.draw()
        # NOTE(review): ref_img, heatmapat, weights, axs and cmap are not
        # defined in this class or the visible module scope — presumably
        # globals set up by the surrounding script ('cmap' looks like it
        # should be matplotlib.cm); confirm before reuse.
        self.sample_ax.imshow(ref_img)
        a, b, heatmap = heatmapat(x, y, weights[0])
        im1= self.draw_ax[1].imshow(heatmap, cmap=cmap.hot)
        self.draw_ax[1].set_title("%f~%f" % (a, b))
        a, b, heatmap = heatmapat(x, y, weights[1])
        im2= self.draw_ax[2].imshow(heatmap, cmap=cmap.hot)
        self.draw_ax[2].set_title("%f~%f" % (a, b))
        a, b, heatmap = heatmapat(x, y, weights[2])
        im3= self.draw_ax[3].imshow(heatmap, cmap=cmap.hot)
        self.draw_ax[3].set_title("%f~%f" % (a, b))
        # fig.colorbar(im2, ax=axs[0, 1])
        circ = Circle((x, y),2,color='r')
        axs[0, 0].add_patch(circ)
        plt.show()
class Cursor_for_epipolar_line(object):
    """Cursor that, on click, prints the epipolar-line debug tensors for the
    clicked pixel and draws the 64 sample locations on the second image.

    NOTE(review): the debug tensors unpacked from ``outs`` are assumed to be
    reshapeable to a 64x64 feature grid (the hard-coded ``view(-1, 64, 64,
    ...)`` calls below) -- confirm against the Epipolar(debug=True) model.
    """

    def __init__(self, sample_ax, draw_ax, sample_locs, H, W, axs, img2, outs):
        self.sample_ax = sample_ax
        self.draw_ax = draw_ax
        self.lx = sample_ax.axhline(color='k')  # the horiz line
        self.ly = sample_ax.axvline(color='k')  # the vert line
        # text location in axes coords
        self.txt = sample_ax.text(0, 0, '', va="bottom", ha="left")
        self.sample_locs = sample_locs  # 64 x H x W x 2 normalized locations
        self.H = H
        self.W = W
        self.axs = axs    # 2x2 grid of axes used for redrawing
        self.img2 = img2  # second view, shown behind the sampled points
        self.outs = outs  # debug outputs of Epipolar(debug=True)

    def mouse_down(self, event):
        """Handle a button press: dump debug tensors and redraw sample dots."""
        if not event.inaxes:
            return
        x, y = event.xdata, event.ydata
        self.lx.set_ydata(y)
        self.ly.set_xdata(x)
        # pr_cost_volume = self.depth[:, int(y), int(x)]
        # cost_volume_xs = np.arange(0, pr_cost_volume.shape[0])
        # xx, yy = self.corr_pos_pred[int(y)][int(x)]
        self.txt.set_text('x=%1.1f, y=%1.1f' % (x, y))
        self.sample_ax.figure.canvas.draw()
        for i in self.draw_ax:
            i.clear()
            i.figure.canvas.draw()
        self.axs[1, 0].clear()
        self.axs[1, 0].imshow(self.img2)
        # Round to the nearest feature-grid cell.
        inty, intx = int(y+0.5), int(x+0.5)
        print(self.sample_locs[:, inty, intx])
        _, _, _, debugsample_locs, intersections, mask, valid_intersections, start, vec = self.outs
        print(intx, inty)
        print('debugsample_locs', debugsample_locs[:, 0, inty, intx])
        print('intersections', intersections.view(-1, 64, 64, 4, 2)[0, inty, intx])
        print('mask', mask.view(-1, 64, 64, 4)[0, inty, intx])
        print('valid_intersections', valid_intersections.view(-1, 64, 64, 2, 2)[0, inty, intx])
        print('start', start.view(-1, 64, 64, 2)[0, inty, intx])
        print('vec', vec.view(-1, 64, 64, 2)[0, inty, intx])
        # Draw every epipolar sample location (blue) for the clicked pixel.
        for i in range(64):
            # pos = self.sample_locs[i][int(y+0.5)][int(x+0.5)]
            pos = debugsample_locs[i, 0, inty, intx].cpu().numpy().copy()
            depos = de_normalize(pos, self.H, self.W)
            # circ = Circle((int(depos[0]), int(depos[1])),1,color='b', alpha=0.5)
            circ = Circle((depos[0], depos[1]), 1 , color='b', alpha=0.5)
            self.axs[1, 0].add_patch(circ)
        # circ = Circle((xx, yy),2,color='r')
        # NOTE(review): this re-adds the last blue circle from the loop above;
        # the commented-out red correspondence circle was likely intended.
        self.axs[1, 0].add_patch(circ)
        plt.show()
class Cursor_for_corrspondence(object):
    """Cursor that, on click, reports the cost-volume peak and predicted
    correspondence for the clicked pixel and draws the epipolar sample
    locations (blue) plus the predicted match (red) on the second image.

    BUG FIX: ``mouse_down`` previously read the module-level globals
    ``sample_locs``, ``H`` and ``W`` even though the constructor stores them
    on ``self`` (it already used ``self.depth``/``self.corr_pos_pred``);
    it now consistently uses the stored state.

    NOTE(review): the redraw still uses the module-level globals ``axs`` and
    ``img2``, which the constructor does not receive -- they must exist in
    the driving script.
    """

    def __init__(self, sample_ax, draw_ax, depth, corr_pos_pred, sample_locs, H, W):
        self.sample_ax = sample_ax
        self.draw_ax = draw_ax
        self.lx = sample_ax.axhline(color='k')  # the horiz line
        self.ly = sample_ax.axvline(color='k')  # the vert line
        # text location in axes coords
        self.txt = sample_ax.text(0, 0, '', va="bottom", ha="left")
        self.depth = depth                  # cost volume: sample x H x W
        self.corr_pos_pred = corr_pos_pred  # predicted (x, y) per pixel
        self.sample_locs = sample_locs      # normalized sample locations
        self.H = H
        self.W = W

    def mouse_down(self, event):
        """Handle a button press: update the readout and redraw the match."""
        if not event.inaxes:
            return
        x, y = event.xdata, event.ydata
        self.lx.set_ydata(y)
        self.ly.set_xdata(x)
        # Cost-volume slice and predicted correspondence at the clicked pixel.
        pr_cost_volume = self.depth[:, int(y), int(x)]
        cost_volume_xs = np.arange(0, pr_cost_volume.shape[0])
        xx, yy = self.corr_pos_pred[int(y)][int(x)]
        self.txt.set_text('x=%1.1f, y=%1.1f depth=%.5f\nCorr xx=%d, yy=%d'
                          % (x, y, np.max(pr_cost_volume), xx, yy))
        self.sample_ax.figure.canvas.draw()
        for i in self.draw_ax:
            i.clear()
            i.figure.canvas.draw()
        axs[1, 0].clear()
        axs[1, 0].imshow(img2)
        # All 64 epipolar sample locations for the clicked pixel (blue) ...
        for i in range(64):
            pos = self.sample_locs[i][int(y)][int(x)]
            depos = de_normalize(pos, self.H, self.W)
            circ = Circle((int(depos[0]), int(depos[1])), 1, color='b', alpha=0.5)
            axs[1, 0].add_patch(circ)
        # ... plus the predicted correspondence (red).
        circ = Circle((xx, yy), 2, color='r')
        axs[1, 0].add_patch(circ)
        plt.show()
def toimg(x):
    """Convert a (possibly batched) CHW tensor into an HWC numpy image."""
    arr = x.squeeze().numpy()
    # Move the channel axis last: (C, H, W) -> (H, W, C).
    return arr.transpose([1, 2, 0])
def de_transform(img):
    """Undo per-channel ImageNet-style normalization in place.

    Channels are indexed on the third-from-last axis, so both CHW and NCHW
    layouts work. Returns the (mutated) input array for convenience.
    """
    channel_stats = ((0.229, 0.485), (0.224, 0.456), (0.225, 0.406))
    for ch, (std, mean) in enumerate(channel_stats):
        img[..., ch, :, :] = img[..., ch, :, :] * std + mean
    return img
def draw_auc(predictions, pck, auc_path):
max_threshold = 20
thresholds = np.linspace(0, max_threshold, num=20)
pck = np.sum(pck, axis=0)
auc_value = auc(thresholds, pck) / max_threshold
print('AUC: ', auc_value)
plt.plot(thresholds, pck, 'r')
plt.axis([0, 20, 0, 1])
plt.savefig(auc_path)
plt.show()
def get_point_cloud(img1, img2, KRT1, KRT2, RT1, RT2, corr_pos, score):
    """Triangulate a colored point cloud from dense cross-view correspondences.

    Args:
        img1, img2: the two camera views; img1 defines the pixel grid.
        KRT1, KRT2: 3x4 camera projection matrices -- TODO confirm shapes.
        RT1, RT2: extrinsics; unused here, kept for interface compatibility.
        corr_pos: feat_h x feat_w x 2 predicted correspondences in view 2.
        score: sample_size x feat_h x feat_w matching scores.

    Returns:
        (p3D, select_img_point): 3xN triangulated points and the Nx3 colors
        sampled from the resized second view.
    """
    y = np.arange(0, img1.shape[0])  # 128
    x = np.arange(0, img1.shape[1])  # 84
    grid_x, grid_y = np.meshgrid(x, y)
    # Map feature-grid coordinates back to original-image pixel coordinates.
    grid_y = pix2coord(grid_y, cfg.BACKBONE.DOWNSAMPLE)
    grid_y = grid_y * cfg.DATASETS.IMAGE_RESIZE * cfg.DATASETS.PREDICT_RESIZE
    grid_x = pix2coord(grid_x, cfg.BACKBONE.DOWNSAMPLE)
    grid_x = grid_x * cfg.DATASETS.IMAGE_RESIZE * cfg.DATASETS.PREDICT_RESIZE
    # 2668 * 4076
    grid_corr = pix2coord(corr_pos, cfg.BACKBONE.DOWNSAMPLE)
    grid_corr = grid_corr * cfg.DATASETS.IMAGE_RESIZE * cfg.DATASETS.PREDICT_RESIZE
    grid = np.stack((grid_x, grid_y))
    grid = grid.reshape(2, -1)
    grid_corr = grid_corr.reshape(-1, 2).transpose()
    # NOTE(review): scipy.misc.imresize was removed in SciPy 1.3; this only
    # runs on old SciPy. When upgrading, skimage.transform.resize is the
    # usual replacement, but mind uint8 [0,255] vs float [0,1] output --
    # the `> 20` threshold below depends on the uint8 range.
    from scipy.misc import imresize
    sample_size, fh, fw = score.shape
    resized_img2 = imresize(img2, (fh, fw))
    max_score = np.max(score.reshape(sample_size, -1), axis=0).reshape(fh, fw)
    # Candidate masks: confident match, non-black pixel, in-bounds match.
    select_pos1 = max_score > 0.02
    print('->', np.sum(select_pos1))
    select_pos2 = np.sum(resized_img2, axis=2) > 20
    print('->', np.sum(select_pos2))
    select_pos3 = np.sum(corr_pos, axis=2) > -50
    # BUG FIX: this debug print reported select_pos2 a second time; it now
    # reports select_pos3 as clearly intended by the pattern above.
    print('->', np.sum(select_pos3))
    select_pos = np.logical_and(select_pos3, select_pos2).reshape(-1)
    print('-->', np.sum(select_pos))
    select_pos = select_pos.reshape(-1)
    select_img_point = resized_img2.reshape(fh * fw, 3)[select_pos, :]
    print(select_pos.shape)
    print('total pos', sum(select_pos))
    # Triangulate only the selected pixels.
    p3D = cv2.triangulatePoints(KRT2, KRT1, grid_corr[:, select_pos], grid[:, select_pos])
    # Initialize the depth map with the farthest depth so holes stay behind.
    depth = np.ones((fh, fw)) * np.max((KRT1 @ p3D)[2, :])
    cnt = 0
    for i in range(fh):
        for j in range(fw):
            if not select_pos[i * fw + j]:
                continue
            # Re-project the triangulated point into view 1; record its depth.
            p_homo = (KRT1 @ p3D[:, cnt])
            p = p_homo / p_homo[2]
            depth[int(coord2pix(p[1], 32)), int(coord2pix(p[0], 32))] = p_homo[2]
            cnt += 1
    # De-homogenize the triangulated points.
    p3D /= p3D[3]
    p3D = p3D[:3].squeeze()
    # Log-normalize the depth map to [0, 1] for display.
    depth = (depth - depth.min()) / (depth.max() - depth.min()) + 1
    depth = np.log(depth)
    depth = (depth - depth.min()) / (depth.max() - depth.min())
    # ------- visualization -------
    fig = plt.figure(1)
    ax1_1 = fig.add_subplot(331)
    ax1_1.imshow(img1)
    ax1_2 = fig.add_subplot(332)
    ax1_2.imshow(img2)
    # Normalized x/y components of the correspondence field.
    w = corr_pos[:, :, 0]
    w = (w - w.min()) / (w.max() - w.min())
    ax1_1 = fig.add_subplot(334)
    ax1_1.imshow(w)
    w = corr_pos[:, :, 1]
    w = (w - w.min()) / (w.max() - w.min())
    ax1_1 = fig.add_subplot(335)
    ax1_1.imshow(w)
    ax1_1 = fig.add_subplot(336)
    ax1_1.imshow(depth)
    # The three selection masks, for debugging the filtering above.
    w = select_pos1.reshape(fh, fw)
    ax2_1 = fig.add_subplot(337)
    ax2_1.imshow(w)
    w = select_pos2.reshape(fh, fw)
    ax2_1 = fig.add_subplot(338)
    ax2_1.imshow(w)
    w = select_pos.reshape(fh, fw)
    ax2_1 = fig.add_subplot(339)
    ax2_1.imshow(w)
    # ------- end visualization -------
    plt.show()
    return p3D, select_img_point
def visualization(cfg):
    """Dispatch the debug/visualization mode selected by ``cfg``.

    Modes (checked in order, not mutually exclusive):
      * cfg.VIS.POINTCLOUD: dump per-sample prediction pickles, with either
        the RHD-style layout or the H36M layout depending on cfg.OUTPUT_DIR.
      * cfg.EPIPOLAR.VIS: interactive epipolar-line inspection (returns).
      * otherwise: per-sample hand keypoint / 3D pose visualization.

    BUG FIX: removed a leftover ``import pdb; pdb.set_trace()`` debugger
    breakpoint that halted every iteration of the first dump loop.

    NOTE(review): both POINTCLOUD loops are ``while True`` over an indexed
    list and only terminate via IndexError once ``cnt`` runs past the end --
    confirm this is intended.
    """
    if cfg.VIS.POINTCLOUD and 'h36m' not in cfg.OUTPUT_DIR:
        output_dir = cfg.OUTPUT_DIR
        dataset_names = cfg.DATASETS.TEST
        predictions = torch.load(os.path.join(cfg.OUTPUT_DIR, "inference", dataset_names[0], "predictions.pth"))
        print(os.path.join(cfg.OUTPUT_DIR, "inference", dataset_names[0], "predictions.pth"))
        cnt = 0
        while True:
            inputs, pred = predictions[cnt]
            heatmap = inputs.get('heatmap')
            points2d = inputs.get('points-2d')
            KRT = inputs.get('KRT')[0]
            RT = inputs.get('RT')[0]
            image_path = inputs.get('img-path')
            print('image path:', image_path)
            img = resize(plt.imread(image_path), (128, 84, 3))
            other_KRT = inputs.get('other_KRT')[0]
            other_RT = inputs.get('other_RT')[0]
            other_image_path = inputs.get('other_img_path')[0]
            print('other image path', other_image_path)
            other_img = resize(plt.imread(other_image_path), (128, 84, 3))
            heatmap_pred = pred.get('heatmap_pred')
            score_pred = pred.get('score_pred')
            corr_pos_pred = pred.get('corr_pos')
            sim = pred.get('depth')
            # p3D, img_pt = get_point_cloud(img, other_img, KRT, other_KRT, RT, other_RT, corr_pos_pred, sim)
            output = {
                'img1': img,
                'img2': other_img,
                'img1_path': image_path,
                'img2_path': other_image_path,
                'RT': RT,
                'other_RT': other_RT,
                'corr_pos_pred': corr_pos_pred,
                'depth': sim,
            }
            if 'sample_locs' in pred:
                sample_locs = pred.get('sample_locs')
                output['sample_locs'] = sample_locs
            else:
                print('No sample_locs!!!!!')
            import pickle
            with open('baseline_' + "output_{:d}.pkl".format(cnt), "wb") as f:
                pickle.dump(output, f)
            print('saved! to ', 'baseline_' + "output_{:d}.pkl".format(cnt))
            cnt += 1
    if cfg.VIS.POINTCLOUD and 'h36m' in cfg.OUTPUT_DIR:
        output_dir = cfg.OUTPUT_DIR
        dataset_names = cfg.DATASETS.TEST
        baseline = "baseline" in cfg.VIS.SAVE_PRED_NAME
        name = "_baseline" if baseline else ""
        predictions = torch.load(os.path.join(cfg.OUTPUT_DIR, "inference", dataset_names[0], "predictions" + name + ".pth"))
        print(os.path.join(cfg.OUTPUT_DIR, "inference", dataset_names[0], "predictions" + name + ".pth"))
        cnt = 0
        while True:
            inputs, pred = predictions[cnt]
            print('input keys:')
            print(inputs.keys())
            print('pred keys:')
            print(pred.keys())
            heatmap = inputs.get('heatmap')
            other_heatmap = inputs.get('other_heatmap')
            points2d = inputs.get('points-2d')
            KRT = inputs.get('KRT')[0]
            camera = inputs.get('camera')
            other_camera = inputs.get('other_camera')
            image_path = inputs.get('img-path')[0]
            print(image_path)
            # Paths follow the zipreader convention: "<zip>@/<member>".
            image_file = osp.join("datasets", 'h36m', 'images.zip@', 'images',
                                  image_path)
            img = inputs.get('img')
            other_KRT = inputs.get('other_KRT')[0]
            other_image_path = inputs.get('other_img-path')[0]
            print('other image path', other_image_path)
            other_image_file = osp.join("datasets", 'h36m', 'images.zip@', 'images',
                                        other_image_path)
            other_img = inputs.get('other_img')
            heatmap_pred = pred.get('heatmap_pred')
            score_pred = pred.get('score_pred')
            corr_pos_pred = pred.get('corr_pos')
            sim = pred.get('depth')
            batch_locs = pred.get('batch_locs')
            output = {
                'img1': img,
                'img2': other_img,
                'img1_path': image_file,
                'img2_path': other_image_file,
                'heatmap': heatmap,
                'other_heatmap': other_heatmap,
                'points-2d': points2d,
                'corr_pos_pred': corr_pos_pred,
                'depth': sim,
                'heatmap_pred': heatmap_pred,
                'batch_locs': batch_locs,
                'camera': camera,
                'other_camera': other_camera,
            }
            if 'sample_locs' in pred:
                sample_locs = pred.get('sample_locs')
                output['sample_locs'] = sample_locs
            else:
                print('No sample_locs!!!!!')
            import pickle
            with open(cfg.OUTPUT_DIR + "/visualizations/h36m/output{}_{:d}.pkl".format(name, cnt), "wb") as f:
                pickle.dump(output, f)
            print('saved!')
            cnt += 1
    if cfg.EPIPOLAR.VIS:
        if 'h36m' in cfg.OUTPUT_DIR:
            from data.build import make_data_loader
            if cfg.VIS.MULTIVIEWH36M:
                data_loader = make_data_loader(cfg, is_train=True, force_shuffle=True)
            elif cfg.VIS.H36M:
                from data.datasets.joints_dataset import JointsDataset
                from data.datasets.multiview_h36m import MultiViewH36M
                data_loader = MultiViewH36M('datasets', 'validation', True)
                print(len(data_loader))
                # Touch every sample once (e.g. to warm caches / check IO).
                for i in tqdm(range(len(data_loader))):
                    data_loader.__getitem__(i)
            data_loader = make_data_loader(cfg, is_train=False)[0]
        if not cfg.VIS.MULTIVIEWH36M and not cfg.VIS.H36M:
            # NOTE(review): `batchdata` is referenced below but never
            # assigned -- the original iteration loop over data_loader is
            # commented out upstream, so this branch raises NameError as
            # written. Confirm intended usage before relying on it.
            cpu = lambda x: x.cpu().numpy() if isinstance(x, torch.Tensor) else x
            from modeling.layers.epipolar import Epipolar
            imgmodel = Epipolar()
            debugmodel = Epipolar(debug=True)
            KRT0 = batchdata['KRT'].squeeze()[None, 0]
            KRT1 = batchdata['other_KRT'].squeeze()[None, 0]
            # batchdata['img']: 1 x 4 x 3 x 256 x 256; take view 0, 4x subsample.
            input_img = batchdata['img'].squeeze()[None, 0, :, ::4, ::4]
            input_other_img = batchdata['other_img'].squeeze()[None, 0, :, ::4, ::4]
            outs = debugmodel(input_img, input_other_img, KRT0, KRT1)
            H, W = input_img.shape[-2:]
            print(H, W)
            orig_img = de_transform(cpu(batchdata['img'].squeeze()[None, ...])[0][0])
            orig_other_img = de_transform(cpu(batchdata['other_img'].squeeze()[None, ...])[0][0])
            out, sample_locs = imgmodel.imgforward_withdepth(input_img, input_other_img, KRT0, KRT1, outs[2][0])
            if not cfg.VIS.CURSOR:
                fig = plt.figure(1)
                ax1 = fig.add_subplot(231)
                ax2 = fig.add_subplot(232)
                ax3 = fig.add_subplot(233)
                ax4 = fig.add_subplot(234)
                ax5 = fig.add_subplot(235)
                # [::-1] flips the channel order (BGR<->RGB) before HWC display.
                ax1.imshow(orig_img[::-1].transpose((1,2,0)))
                ax2.imshow(orig_other_img[::-1].transpose((1,2,0)))
                ax3.imshow(cpu(batchdata['heatmap'])[0][0].sum(0))
                ax4.imshow(cpu(batchdata['other_heatmap'])[0][0].sum(0))
                print(out.shape)
                out_img = de_transform(cpu(out)[0, ::-1].transpose((1,2,0)))
                ax5.imshow(out_img)
                plt.show()
            else:
                print(sample_locs.shape)  # 64 x 1 x H x W x 2
                sample_locs = sample_locs[:, 0, :, :, :]
                fig, axs = plt.subplots(2, 2)
                cus = Cursor_for_epipolar_line(axs[0, 0], [axs[0, 1], axs[1, 0], axs[1, 1]], sample_locs, H, W, axs, \
                    cpu(input_other_img)[0, :, :, :][::-1].transpose((1,2,0)), outs)
                axs[0, 0].imshow(cpu(input_img)[0, :, :, :][::-1].transpose((1,2,0)))
                fig.canvas.mpl_connect('button_press_event', cus.mouse_down)
                plt.show()
        return
    # Fallback: per-sample hand keypoint / 3D pose visualization.
    output_dir = cfg.OUTPUT_DIR
    dataset_names = cfg.DATASETS.TEST
    predictions = torch.load(os.path.join(cfg.OUTPUT_DIR, "inference", dataset_names[0], "predictions.pth"))
    pck = torch.load(os.path.join(cfg.OUTPUT_DIR, "inference", dataset_names[0], "pck.pth"))
    if cfg.VIS.AUC:
        auc_path = os.path.join(cfg.OUTPUT_DIR, "inference", dataset_names[0], "auc.png")
        draw_auc(predictions, pck, auc_path)
    total = 0
    for inputs, pred in predictions:
        heatmap = inputs.get('heatmap')
        points2d = inputs.get('points-2d')
        hand_side = inputs.get('hand-side')
        img = inputs.get('img')
        can_3dpoints = inputs.get('can-points-3d')
        normed_3d = inputs.get('normed-points-3d')
        target_global = inputs.get('points-3d')
        rot_mat = inputs.get('rotation')
        R_global = inputs.get('R')
        keypoint_scale = inputs.get('scale')
        visibility = inputs.get('visibility')
        unit = inputs.get('unit')
        image_path = inputs.get('img-path')
        can_pred = pred.get('can_pred')
        normed_pred = pred.get('normed_pred')
        heatmap_pred = pred.get('heatmap_pred')
        im = plt.imread(image_path)
        # BUG FIX: np.int was removed in NumPy 1.24; builtin int is equivalent.
        image = np.array(im, dtype=int)
        if cfg.DATASETS.TASK == 'keypoint':
            fig = plt.figure(1)
            ax1 = fig.add_subplot(331)
            ax2 = fig.add_subplot(332)
            ax3 = fig.add_subplot(333)
            #ax1.imshow(image)
            print(heatmap.min(), heatmap.max())
            print(heatmap_pred.min(), heatmap_pred.max())
            ax2.imshow(heatmap.sum(0).T)
            ax3.imshow(heatmap_pred.sum(0).T)
        else:
            total += 1
            # Mask out invisible joints before computing errors.
            visibility = visibility.squeeze()[..., None]
            can_3dpoints = can_3dpoints * visibility
            can_pred = can_pred * visibility
            normed_3d = normed_3d * visibility
            normed_pred = normed_pred * visibility
            delta = normed_pred - normed_3d
            print(delta)
            print('L1 err = ', np.abs(delta).sum())
            print('L2 err = ', ((delta**2).sum(-1)**0.5).mean())
            fig = plt.figure(1)
            ax1_1 = fig.add_subplot(331)
            ax1_2 = fig.add_subplot(332)
            ax2_1 = fig.add_subplot(334, projection='3d')
            ax2_2 = fig.add_subplot(335, projection='3d')
            ax2_3 = fig.add_subplot(336, projection='3d')
            ax3_1 = fig.add_subplot(337, projection='3d')
            ax3_2 = fig.add_subplot(338, projection='3d')
            ax3_3 = fig.add_subplot(333, projection='3d')
            ax1_1.imshow(image)
            ax1_2.imshow(image)
            # Canonical 3D: ground truth, prediction, and overlay.
            plot_hand_3d(can_3dpoints, visibility, ax2_1)
            ax2_1.view_init(azim=-90.0, elev=-90.0)  # aligns the 3d coord with the camera view
            plot_hand_3d(can_pred, visibility, ax2_2)
            ax2_2.view_init(azim=-90.0, elev=-90.0)
            plot_hand_3d(can_3dpoints, visibility, ax2_3)
            plot_hand_3d(can_pred, visibility, ax2_3)
            ax2_3.view_init(azim=-90.0, elev=-90.0)
            # Normalized 3D: ground truth, prediction, and overlay.
            plot_hand_3d(normed_3d, visibility, ax3_1)
            ax3_1.view_init(azim=-90.0, elev=-90.0)
            plot_hand_3d(normed_pred, visibility, ax3_2)
            ax3_2.view_init(azim=-90.0, elev=-90.0)
            plot_hand_3d(normed_3d, visibility, ax3_3)
            plot_hand_3d(normed_pred, visibility, ax3_3)
            ax3_3.view_init(azim=-90.0, elev=-90.0)
        plt.show()
        print("show")
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright (c) 2015,掌阅科技
All rights reserved.
摘 要: __init__.py
创 建 者: zhuangshixiong
创建日期: 2015-10-10
'''
# Package initializer has nothing to run; executing it directly is a no-op.
if __name__ == '__main__':
    pass
|
python
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from . import core
from . import framework
from . import executor
from . import compiler
import sys
# Public API of this module.
__all__ = ['ParallelExecutor']

# Re-export the strategy types from the C++ core so callers can configure
# execution/build behavior without importing core directly.
ExecutionStrategy = core.ParallelExecutor.ExecutionStrategy
BuildStrategy = core.ParallelExecutor.BuildStrategy
class ParallelExecutor(object):
"""
The ParallelExecutor is an upgraded version of :code:`fluid.Executor` that supports multi-node model
training and testing based on the data-parallel mode. In data-parallel mode,
ParallelExecutor will broadcast the parameters from Node0 to other nodes during
construction and copy the input Program to other nodes from Node0 to make sure
that the initial state on each node is the same. Each node runs the model independently
and the parameters' gradient is aggregated between those nodes during backward
computation, and then each node independently updates its parameters. If you use
the GPU to run the model, i.e. use_cuda=True, the node refers to the GPU,
ParallelExecutor will automatically get the GPU resources available on the
current machine, users can also set the available GPU resources in the environment
variable, for example: want to use GPU0, GPU1, export CUDA_VISIBLEDEVICES=0,1;
If the operation is performed on the CPU, i.e. use_cuda=False, the node refers to the CPU.
**Note: At this time, the user needs to manually add CPU_NUM to the environment variable
and set the number of CPU devices. For example, export CPU_NUM=4, if the environment
variable is not set, the executor will add the variable to the environment variable
and set it to 1.**
Args:
use_cuda (bool): Whether to use CUDA or not.
loss_name (str): This parameter is the name of the loss variable of the
model. **Note: If it is data-parallel model training, you must set loss_name,
otherwise, the results may be wrong**. The default is None.
main_program (Program): This parameter represents the Program to be executed.
If this parameter is not provided, that parameter is None, the program will
be set to :code:`fluid.default_main_program()`. The default is None.
share_vars_from(ParallelExecutor): If share_vars_from is set, the current
ParallelExecutor will share the parameters with the ParallelExecutor
specified by share_vars_from. This parameter needs to be set when model testing
is required during model training, and the data parallel mode is used for
training and testing. Since ParallelExecutor will only distribute parameter
variables to other devices when it is first executed, the ParallelExecutor
specified by share_vars_from must be run before the current ParallelExecutor.
The default is None.
exec_strategy(ExecutionStrategy): exec_strategy specifies the options that can
be changed when running the current model, such as the thread pool size.
For more information about exec_strategy, please refer to :code:`fluid.ExecutionStrategy`.
The default is None.
build_strategy(BuildStrategy): By configuring build_strategy, we can
optimize the computational graph, such as operators' fusion in the
computational graph and memory optimization during the execution
of the computational graph. For more information about build_strategy,
please refer to :code:`fluid.BuildStrategy`. The default is None.
num_trainers(int): This parameter needs to be set in GPU distributed training.
If the parameter value is greater than 1, NCCL will be initialized by multi-level
nodes. Each node should have the same number of GPUs. The default is 1.
trainer_id(int): This parameter needs to be set when performing GPU distributed
training. This parameter must be used with the num_trainers parameter.
Trainer_id indicates the "rank" of the current node. The trainer_id starts
counting from 0. The default is 0.
scope(Scope): Specifies the scope in which the program is executed.
The default is fluid.global_scope().
Returns:
ParallelExecutor: The initialized ParallelExecutor object.
Raises:
TypeError: If share_vars_from is provided, but not ParallelExecutor object.
NOTES:
1. If you only use ParallelExecutor to do multi-card test, you don't need to set loss_name
and share_vars_from.
2. If you need to train and test the model with ParallelExecutor, the share_vars_from
must be set when building the ParallelExecutor corresponding to the model test.
Otherwise, the parameters used in the model test and the model training are inconsistent.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
import os
use_cuda = True
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
# NOTE: If you use CPU to run the program, you need
# to specify the CPU_NUM, otherwise, fluid will use
# all the number of the logic core as the CPU_NUM,
# in that case, the batch size of the input should be
# greater than CPU_NUM, if not, the process will be
# failed by an exception.
if not use_cuda:
os.environ['CPU_NUM'] = str(2)
exe = fluid.Executor(place)
train_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
data = fluid.data(name='X', shape=[None, 1], dtype='float32')
hidden = fluid.layers.fc(input=data, size=10)
loss = fluid.layers.mean(hidden)
test_program = fluid.default_main_program().clone(for_test=True)
fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)
startup_program.random_seed=1
exe.run(startup_program)
train_exe = fluid.ParallelExecutor(use_cuda=use_cuda,
main_program=train_program,
loss_name=loss.name)
test_exe = fluid.ParallelExecutor(use_cuda=use_cuda,
main_program=test_program,
share_vars_from=train_exe)
x = numpy.random.random(size=(10, 1)).astype('float32')
loss_data, = train_exe.run(feed={"X": x},
fetch_list=[loss.name])
loss_data, = test_exe.run(feed={"X": x},
fetch_list=[loss.name])
"""
def __init__(self,
use_cuda,
loss_name=None,
main_program=None,
share_vars_from=None,
exec_strategy=None,
build_strategy=None,
num_trainers=1,
trainer_id=0,
scope=None):
if build_strategy is None:
build_strategy = BuildStrategy()
# TODO(paddle-dev): trainer_id and num_trainers should be removed from parameter list.
if num_trainers != 1 and build_strategy.num_trainers != num_trainers:
sys.stderr.write(
'The value of build_strategy.num_trainers[%d] is overwritten '
'by the passed num_trainers[%d].\n' %
(build_strategy.num_trainers, num_trainers))
build_strategy.num_trainers = num_trainers
if trainer_id != 0 and build_strategy.trainer_id != trainer_id:
sys.stderr.write(
'The value of build_strategy.trainer_id[%d] is overwritten '
'by the passed trainer_id[%d].\n' %
(build_strategy.trainer_id, trainer_id))
build_strategy.trainer_id = trainer_id
self._places = framework.cuda_places(
) if use_cuda else framework.cpu_places()
self._scope = scope if scope is not None else executor.global_scope()
if main_program is not None and main_program._enable_dgc:
assert build_strategy.num_trainers > 1, "dgc is not useful when num_trainers <= 1"
assert build_strategy.reduce_strategy == BuildStrategy.ReduceStrategy.AllReduce, "dgc \
only used for allreduce"
assert build_strategy.num_trainers * len(
self._places) > 1, "dgc is not useful for single card training"
assert use_cuda, "dgc only used under cuda"
main_program = main_program if main_program is not None \
else framework.default_main_program()
self._compiled_program = compiler.CompiledProgram(main_program)
if share_vars_from:
assert isinstance(
share_vars_from, ParallelExecutor
), "The share_vars_from should be ParallelExecutor."
self._compiled_program.with_data_parallel(
loss_name=loss_name,
build_strategy=build_strategy,
exec_strategy=exec_strategy,
share_vars_from=share_vars_from._compiled_program
if share_vars_from else None)
self._place = core.CUDAPlace(0) if use_cuda else core.CPUPlace()
self._exe = executor.Executor(self._place)
def run(self, fetch_list, feed=None, feed_dict=None, return_numpy=True):
"""
This interface is used to run the current model. It should be noted
that the executor will execute all the operators in the Program,
and will not prune some operators in the Program according to the
fetch_list.
Args:
fetch_list(list): This parameter represents the variables that need to be returned
after the model runs. The default is None.
feed(list|dict): This parameter represents the input variables of the model.
If it is single card training, the feed is dict type, and if it is multi-card
training, the parameter feed can be dict or list type variable. If the
parameter type is dict, the data in the feed will be split and sent to
multiple devices (CPU/GPU), that is to say, the input data will be evenly
sent to different devices, so you should make sure the number of samples of
the current mini-batch must be greater than the number of places;
if the parameter type is list, those data are copied directly to each device,
so the length of this list should be equal to the number of places.
The default is None.
feed_dict: Alias for feed parameter, for backward compatibility.
This parameter has been deprecated. Default None.
return_numpy(bool): This parameter indicates whether convert the fetched variables
(the variable specified in the fetch list) to numpy.ndarray. if it is False,
the type of the return value is a list of :code:`LoDTensor`. The default is True.
Returns:
List: The fetched result list.
Raises:
ValueError: If the feed is a list, but its length is not equal the
length of active places, or its element's is not dict.
NOTES:
1. If the feed parameter is dict type, the input data will be evenly distributed
to different cards. For example, using two GPUs to run the model, the input
sample number is 3, that is, [0, 1, 2], the sample number on GPU0 is 1,
that is, [0], and the sample number on GPU1 is 2, that is, [1, 2].
If the number of samples is less than the number of devices, the program will
throw an exception, so when running the model, you should make sure that the
number of samples of the last batch of the data set should be greater than the
number of CPU cores or GPU cards, if it is less than, it is recommended that
the batch be discarded.
2. If the number of CPU cores or GPU cards available is greater than 1, the fetch
results are spliced together in dimension 0 for the same variable values
(variables in fetch_list) on different devices.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
import os
use_cuda = True
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
# NOTE: If you use CPU to run the program, you need
# to specify the CPU_NUM, otherwise, fluid will use
# all the number of the logic core as the CPU_NUM,
# in that case, the batch size of the input should be
# greater than CPU_NUM, if not, the process will be
# failed by an exception.
if not use_cuda:
os.environ['CPU_NUM'] = str(2)
exe = fluid.Executor(place)
train_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
data = fluid.data(name='X', shape=[None, 1], dtype='float32')
hidden = fluid.layers.fc(input=data, size=10)
loss = fluid.layers.mean(hidden)
fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)
exe.run(startup_program)
train_exe = fluid.ParallelExecutor(use_cuda=use_cuda,
main_program=train_program,
loss_name=loss.name)
# If the feed is a dict:
# the image will be splitted into devices. If there is two devices
# each device will process an image with shape (5, 1)
x = numpy.random.random(size=(10, 1)).astype('float32')
loss_data, = train_exe.run(feed={"X": x},
fetch_list=[loss.name])
# If the feed is a list:
# each device will process each element in the list.
# the 1st device will process an image with shape (10, 1)
# the 2nd device will process an image with shape (9, 1)
#
# you can use exe.device_count to get the device number.
x2 = numpy.random.random(size=(9, 1)).astype('float32')
loss_data, = train_exe.run(feed=[{"X": x}, {"X": x2}],
fetch_list=[loss.name])
"""
return self._exe.run(program=self._compiled_program,
scope=self._scope,
feed=feed,
fetch_list=fetch_list,
return_numpy=return_numpy)
@property
def device_count(self):
return len(self._places)
def drop_local_exe_scopes(self):
    """
    Drop the local execution scopes immediately.

    In order to avoid frequent allocation and release of temporary
    variables, the strategy adopted by ParallelExecutor is to drop the
    local execution scopes only after several iterations. ParallelExecutor
    provides the num_iteration_per_drop_scope option in
    :code:`fluid.ExecutionStrategy`, which indicates how many iterations
    pass between drops of the local execution scopes. If the
    num_iteration_per_drop_scope value is 100, but you want to drop the
    local execution scopes after 50 iterations, you can call this
    interface manually.

    Returns:
        None

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy
            import os

            use_cuda = True
            # NOTE: If you use CPU to run the program, you need
            # to specify the CPU_NUM, otherwise, fluid will use
            # all the number of the logic core as the CPU_NUM,
            # in that case, the batch size of the input should be
            # greater than CPU_NUM, if not, the process will be
            # failed by an exception.
            if not use_cuda:
                os.environ['CPU_NUM'] = str(2)

            train_program = fluid.Program()
            startup_program = fluid.Program()
            with fluid.program_guard(train_program, startup_program):
                data = fluid.data(name='X', shape=[None, 1], dtype='float32')
                hidden = fluid.layers.fc(input=data, size=10)
                loss = fluid.layers.mean(hidden)

            place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(startup_program)

            parallel_exe = fluid.ParallelExecutor(use_cuda=use_cuda,
                                                  main_program=train_program,
                                                  loss_name=loss.name)

            x = numpy.random.random(size=(10, 1)).astype('float32')
            loss_data, = parallel_exe.run(feed={"X": x},
                                          fetch_list=[loss.name])

            parallel_exe.drop_local_exe_scopes()
    """
    # Dropping scopes is only implemented on the underlying C++
    # core.ParallelExecutor, hence the hard type check first.
    assert isinstance(
        self._compiled_program._executor,
        core.ParallelExecutor), "The Executor should be ParallelExecutor."
    self._compiled_program._executor.drop_local_exe_scopes()
# This API is used to check whether DropLocalExeScopes can work.
def _need_create_local_exe_scopes(self):
    """Return whether the underlying core.ParallelExecutor still needs to
    (re)create its local execution scopes.

    Internal helper, used to verify that drop_local_exe_scopes took effect.
    """
    assert isinstance(
        self._compiled_program._executor,
        core.ParallelExecutor), "The Executor should be ParallelExecutor."
    return self._compiled_program._executor._need_create_local_exe_scopes()
|
python
|
import os
from foliant.preprocessors.apireferences.classes import BadConfigError
from foliant.preprocessors.apireferences.classes import WrongModeError
from foliant.preprocessors.apireferences.classes import get_api
from unittest import TestCase
def rel_name(path: str):
    """Resolve *path* relative to the directory containing this module."""
    here = os.path.dirname(__file__)
    return os.path.join(here, path)
class TestGetApi(TestCase):
    """Unit tests for the get_api factory of apireferences.classes."""

    def test_get_api(self):
        """Each supported mode yields an instance of the matching API class."""
        cases = [
            (
                {
                    'mode': 'find_by_tag_content',
                    'name': 'MyAPI',
                    'url': 'https://example.com',
                    'content_template': '{command}'
                },
                'APIByTagContent',
            ),
            (
                {
                    'mode': 'generate_anchor',
                    'name': 'MyAPI',
                    'url': 'https://example.com',
                    'anchor_template': '{command}'
                },
                'APIGenAnchor',
            ),
            (
                {
                    'mode': 'find_by_anchor',
                    'name': 'MyAPI',
                    'url': 'https://example.com',
                    'anchor_template': '{command}',
                },
                'APIByAnchor',
            ),
            (
                {
                    'mode': 'find_for_swagger',
                    'name': 'MyAPI',
                    'url': 'https://example.com',
                    'spec': rel_name('data/swagger.json')
                },
                'APIBySwagger',
            ),
            (
                {
                    'mode': 'find_for_redoc',
                    'name': 'MyAPI',
                    'url': 'https://example.com',
                    'spec': rel_name('data/swagger.json')
                },
                'APIForRedoc',
            ),
        ]
        for options, expected_cls in cases:
            api = get_api(options)
            self.assertEqual(api.__class__.__name__, expected_cls)

    def test_missing_params(self):
        """Modes that require a template raise BadConfigError without one."""
        for mode in ('find_by_tag_content', 'find_by_anchor'):
            options = {
                'mode': mode,
                'name': 'MyAPI',
                'url': 'https://example.com',
            }
            with self.assertRaises(BadConfigError):
                get_api(options)

    def test_wrong_api(self):
        """An unknown mode raises WrongModeError."""
        options = {
            'mode': 'wrong_mode',
            'name': 'MyAPI',
            'url': 'https://example.com',
        }
        with self.assertRaises(WrongModeError):
            get_api(options)
|
python
|
"""
translatory.py
Takes either english or french phrase and converts to french and english
"""
import json
from ibm_watson import LanguageTranslatorV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
import os
from dotenv import load_dotenv
load_dotenv()

# Translator instance: credentials/config come from the environment
# (populated from .env by load_dotenv above). A missing variable raises
# KeyError at import time, which fails fast rather than at first request.
apikey_lt = os.environ['apikey_lt']
url_lt = os.environ['url_lt']
version_lt = os.environ['version_lt']

authenticator = IAMAuthenticator(apikey_lt)
language_translator = LanguageTranslatorV3(
    version=version_lt, authenticator=authenticator)
language_translator.set_service_url(url_lt)
def englishToFrench(english_text):
    """Translate English text to French.

    Receives english_text to be translated and returns the French
    translation string extracted from the Watson service response.
    """
    translation_response = language_translator.translate(
        text=english_text, model_id='en-fr')
    xlate_french_res = translation_response.get_result()
    # Watson returns {'translations': [{'translation': ...}, ...], ...};
    # only the first candidate is used.
    return xlate_french_res['translations'][0]['translation']
def frenchToEnglish(french_text):
    """Translate French text to English.

    Receives french_text to be translated and returns the English
    translation string extracted from the Watson service response.
    """
    translation_eng_res = language_translator.translate(
        text=french_text, model_id='fr-en').get_result()
    # Only the first translation candidate is used.
    return translation_eng_res['translations'][0]['translation']
# Testing by calling the functions and texts for the testing.
# Guarded so that importing this module does not trigger live API calls.
if __name__ == '__main__':
    print('french_translation_is : ' ,englishToFrench('how are you today?'))
    print('english_translation_is : ' ,frenchToEnglish('Comment vas-tu aujourd\'hui'))
|
python
|
from Crypto.Util.number import bytes_to_long, getPrime, inverse

# Generate a fresh 1024-bit RSA key pair and encrypt the flag (CTF setup).
m = b'flag{4cce551ng_th3_subc0nsc10us}'

p = getPrime(512)
q = getPrime(512)
N = p * q

e = 0x10001
phi = (p - 1) * (q - 1)
d = inverse(e, phi)

c = pow(bytes_to_long(m), e, N)

# One prime factor is leaked on purpose, making the challenge solvable.
print(f'Modulus: {N}\nOne factor of N: {p}\nPublic key: {e}\nCiphertext: {c}')
|
python
|
# BSD 2-Clause License
# Copyright (c) 2020, Allen Cheng
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os, sys
from bs4 import BeautifulSoup
import requests
import re
import datetime
import pickle
import time
# POST form template for the Taiwan Lottery history page (ASP.NET WebForms).
# The '__*' hidden fields are placeholders filled at request time from a
# fresh GET of the page (see ParsingLotteryByDate).
Payload = {
    '__EVENTTARGET' : '',
    '__EVENTARGUMENT' : '',
    '__LASTFOCUS' : '',
    '__VIEWSTATE' : '',
    '__VIEWSTATEGENERATOR' : '',
    '__EVENTVALIDATION': '',
    'Lotto649Control_history$DropDownList1': '2',
    'Lotto649Control_history$chk': 'radYM',
    # Year is in the ROC (Minguo) calendar — e.g. 103 == 2014 AD.
    'Lotto649Control_history$dropYear': '103',
    'Lotto649Control_history$dropMonth': '4',
    'Lotto649Control_history$btnSubmit': '查詢',  # the "Query" submit button
}
RequestUrl = "https://www.taiwanlottery.com.tw/Lotto/Lotto649/history.aspx"
def ParsingLotteryByDate(Year, Month):
    """Fetch and parse one month of Lotto 6/49 draw results.

    Args:
        Year: ROC-calendar year as a zero-padded string (e.g. '103').
        Month: month as a string (e.g. '4').

    Returns:
        list of dicts, one per draw, with keys 'Day' (date text),
        'Index' (draw term text) and 'Numbers' (list of 7 ints).

    Raises:
        Exception: if the scraped spans do not line up (7 numbers per draw).
    """
    print("Parsing {0}/{1} data".format(Year, Month))
    Sess = requests.session()
    Sess.headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36'

    # First GET only harvests the ASP.NET hidden-state fields needed by POST.
    Result0 = Sess.get(RequestUrl)
    soup0 = BeautifulSoup(Result0.text, features='lxml')

    # Work on a copy so the module-level Payload template is never mutated
    # (the original code wrote the scraped state back into the global dict).
    payload = dict(Payload)
    for key in payload:
        if key.startswith('_'):
            tag = soup0.find('input', {'id': key})
            if tag is not None:
                payload[key] = tag['value']
    payload['Lotto649Control_history$dropYear'] = Year
    payload['Lotto649Control_history$dropMonth'] = Month

    Result1 = Sess.post(RequestUrl, data=payload)
    soup1 = BeautifulSoup(Result1.text, features='lxml')
    LotteryNumbers = soup1.findAll(id=re.compile(r'Lotto649Control_history_dlQuery_SNo[\w]+'))
    LotteryDays = soup1.findAll(id=re.compile(r'Lotto649Control_history_dlQuery_L649_DDate_[\w]+'))
    LotteryIndex = soup1.findAll(id=re.compile(r'Lotto649Control_history_dlQuery_L649_DrawTerm_[\w]+'))

    # 7 number cells per draw row (presumably 6 numbers + 1 special —
    # TODO confirm against the live page).
    if (len(LotteryDays) * 7) != len(LotteryNumbers):
        raise Exception('Could not match the number of LotterDays/LotteryNumbers')
    if len(LotteryDays) != len(LotteryIndex):
        raise Exception('Could not match the number of LotterDays/LotteryIndex')

    ResultList = list()
    for Day_Idx, Days in enumerate(LotteryDays):
        SingleDayDict = dict()
        SingleDayDict['Day'] = Days.text
        SingleDayDict['Index'] = LotteryIndex[Day_Idx].text
        SingleDayDict['Numbers'] = [
            int(LotteryNumbers[(Day_Idx * 7) + Number_Idx].text)
            for Number_Idx in range(7)
        ]
        ResultList.append(SingleDayDict)
    return ResultList
def GetLotteryHistory():
    """Scrape every month from ROC year 103 up to the current month.

    Returns a flat list of per-draw dicts (see ParsingLotteryByDate).
    """
    StartYear = 103
    # ROC year = Gregorian year - 1911.
    EndYear = datetime.datetime.today().year - 1911
    EndMonth = datetime.datetime.today().month
    history = []
    for year in range(StartYear, EndYear + 1):
        for month in range(1, 13):
            # Stop at the current month in the final year.
            if (year >= EndYear) and (month > EndMonth):
                break
            history += ParsingLotteryByDate('{0:03d}'.format(year), '{0:01d}'.format(month))
    return history
def main():
    """Scrape the full history, sort by draw index, persist, and echo it."""
    history = GetLotteryHistory()
    history.sort(key=lambda entry: entry['Index'])
    with open('FinalResultList.bin', 'wb') as fp:
        pickle.dump(history, fp)
    for entry in history:
        print(entry)


if __name__ == "__main__":
    sys.exit(main())
|
python
|
import random
import pytest
import geomstats.backend as gs
from geomstats.geometry.hyperbolic import Hyperbolic
from geomstats.geometry.hyperboloid import Hyperboloid
from tests.data_generation import _LevelSetTestData, _RiemannianMetricTestData
# Tolerance for errors on predicted vectors, relative to the *norm*
# of the vector, as opposed to the standard behavior of gs.allclose
# where it is relative to each element of the array.
# NOTE(review): RTOL is not referenced anywhere in this module's visible
# code — confirm it is still needed before removing.
RTOL = 1e-6
class HyperbolicTestData(_LevelSetTestData):
    """Test data for the Hyperbolic space.

    Mixes hand-written smoke cases with randomized cases driven by the
    size/dimension lists below, which the inherited ``_*_test_data``
    generators consume.
    """

    # Randomized dimensions and sample counts shared by the generated cases.
    dim_list = random.sample(range(2, 4), 2)
    space_args_list = [(dim,) for dim in dim_list]
    # Extrinsic (hyperboloid) coordinates live in an ambient (dim + 1)-space.
    shape_list = [(dim + 1,) for dim in dim_list]
    n_points_list = random.sample(range(2, 5), 2)
    n_vecs_list = random.sample(range(2, 5), 2)

    def belongs_test_data(self):
        """Membership smoke cases in extrinsic and intrinsic coordinates."""
        smoke_data = [
            dict(
                dim=3,
                coords_type="extrinsic",
                vec=gs.array([1.0, 0.0, 0.0, 0.0]),
                expected=True,
            ),
            dict(
                dim=2,
                coords_type="extrinsic",
                vec=gs.array([0.5, 7, 3.0]),
                expected=False,
            ),
            dict(
                dim=2,
                coords_type="intrinsic",
                vec=gs.array([0.5, 7]),
                expected=True,
            ),
        ]
        return self.generate_tests(smoke_data)

    def regularize_raises_test_data(self):
        """Regularizing a point with negative first coordinate must raise."""
        smoke_data = [
            dict(
                dim=3,
                point=gs.array([-1.0, 1.0, 0.0, 0.0]),
                expected=pytest.raises(ValueError),
            )
        ]
        return self.generate_tests(smoke_data)

    # NOTE(review): "rasises" is a typo for "raises"; the name is kept
    # because the corresponding test method is matched by this exact name.
    def extrinsic_to_intrinsic_coords_rasises_test_data(self):
        smoke_data = [
            dict(
                dim=3,
                point=gs.array([-1.0, 1.0, 0.0, 0.0]),
                expected=pytest.raises(ValueError),
            )
        ]
        return self.generate_tests(smoke_data)

    def random_point_belongs_test_data(self):
        smoke_space_args_list = [(2,), (3,)]
        smoke_n_points_list = [1, 2]
        belongs_atol = gs.atol * 100000
        return self._random_point_belongs_test_data(
            smoke_space_args_list,
            smoke_n_points_list,
            self.space_args_list,
            self.n_points_list,
            belongs_atol,
        )

    def to_tangent_is_tangent_test_data(self):
        is_tangent_atol = gs.atol * 100000
        return self._to_tangent_is_tangent_test_data(
            Hyperboloid,
            self.space_args_list,
            self.shape_list,
            self.n_vecs_list,
            is_tangent_atol,
        )

    def projection_belongs_test_data(self):
        return self._projection_belongs_test_data(
            self.space_args_list,
            self.shape_list,
            self.n_points_list,
            belongs_atol=gs.atol * 100000,
        )

    def intrinsic_after_extrinsic_test_data(self):
        return self._intrinsic_after_extrinsic_test_data(
            Hyperbolic, self.space_args_list, self.n_points_list
        )

    def extrinsic_after_intrinsic_test_data(self):
        return self._extrinsic_after_intrinsic_test_data(
            Hyperbolic, self.space_args_list, self.n_points_list
        )

    def extrinsic_ball_extrinsic_composition_test_data(self):
        smoke_data = [dict(dim=2, point_intrinsic=gs.array([0.5, 7]))]
        return self.generate_tests(smoke_data)

    def extrinsic_half_plane_extrinsic_composition_test_data(self):
        smoke_data = [dict(dim=2, point_intrinsic=gs.array([0.5, 7], dtype=gs.float64))]
        return self.generate_tests(smoke_data)

    def ball_extrinsic_ball_test_data(self):
        smoke_data = [dict(dim=2, x_ball=gs.array([0.5, 0.2]))]
        return self.generate_tests(smoke_data)

    def random_tangent_vec_is_tangent_test_data(self):
        return self._random_tangent_vec_is_tangent_test_data(
            Hyperbolic,
            self.space_args_list,
            self.n_vecs_list,
            is_tangent_atol=gs.atol * 1000,
        )
class HyperboloidMetricTestData(_RiemannianMetricTestData):
    """Test data for the Hyperboloid Riemannian metric.

    Hand-written smoke cases (inner products, scaled metric, distances)
    plus randomized cases driven by the parameter lists below, which the
    inherited ``_*_test_data`` generators consume.
    """

    # Randomized dimensions and sample counts shared by the generated cases.
    dim_list = random.sample(range(2, 4), 2)
    metric_args_list = [(dim,) for dim in dim_list]
    # Extrinsic coordinates live in an ambient (dim + 1)-space.
    shape_list = [(dim + 1,) for dim in dim_list]
    space_list = [Hyperboloid(dim) for dim in dim_list]
    n_points_list = random.sample(range(1, 5), 2)
    n_tangent_vecs_list = random.sample(range(1, 5), 2)
    n_points_a_list = random.sample(range(1, 5), 2)
    n_points_b_list = [1]
    alpha_list = [1] * 2
    n_rungs_list = [1] * 2
    scheme_list = ["pole"] * 2

    def inner_product_is_minkowski_inner_product_test_data(self):
        """Smoke case: metric inner product equals the Minkowski one."""
        space = Hyperboloid(dim=3)
        base_point = gs.array([1.16563816, 0.36381045, -0.47000603, 0.07381469])
        tangent_vec_a = space.to_tangent(
            vector=gs.array([10.0, 200.0, 1.0, 1.0]), base_point=base_point
        )
        tangent_vec_b = space.to_tangent(
            vector=gs.array([11.0, 20.0, -21.0, 0.0]), base_point=base_point
        )
        smoke_data = [
            dict(
                dim=3,
                tangent_vec_a=tangent_vec_a,
                tangent_vec_b=tangent_vec_b,
                base_point=base_point,
            )
        ]
        return self.generate_tests(smoke_data)

    def scaled_inner_product_test_data(self):
        space = Hyperboloid(3)
        base_point = space.from_coordinates(gs.array([1.0, 1.0, 1.0]), "intrinsic")
        tangent_vec_a = space.to_tangent(gs.array([1.0, 2.0, 3.0, 4.0]), base_point)
        tangent_vec_b = space.to_tangent(gs.array([5.0, 6.0, 7.0, 8.0]), base_point)
        smoke_data = [
            dict(
                dim=3,
                scale=2,
                tangent_vec_a=tangent_vec_a,
                tangent_vec_b=tangent_vec_b,
                base_point=base_point,
            )
        ]
        return self.generate_tests(smoke_data)

    def scaled_squared_norm_test_data(self):
        space = Hyperboloid(3)
        base_point = space.from_coordinates(gs.array([1.0, 1.0, 1.0]), "intrinsic")
        tangent_vec = space.to_tangent(gs.array([1.0, 2.0, 3.0, 4.0]), base_point)
        smoke_data = [
            dict(dim=3, scale=2, tangent_vec=tangent_vec, base_point=base_point)
        ]
        return self.generate_tests(smoke_data)

    def scaled_dist_test_data(self):
        space = Hyperboloid(3)
        point_a = space.from_coordinates(gs.array([1.0, 2.0, 3.0]), "intrinsic")
        point_b = space.from_coordinates(gs.array([4.0, 5.0, 6.0]), "intrinsic")
        smoke_data = [dict(dim=3, scale=2, point_a=point_a, point_b=point_b)]
        return self.generate_tests(smoke_data)

    def exp_shape_test_data(self):
        return self._exp_shape_test_data(
            self.metric_args_list, self.space_list, self.shape_list
        )

    def log_shape_test_data(self):
        return self._log_shape_test_data(self.metric_args_list, self.space_list)

    def squared_dist_is_symmetric_test_data(self):
        return self._squared_dist_is_symmetric_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_a_list,
            self.n_points_b_list,
            atol=gs.atol * 1000,
        )

    def exp_belongs_test_data(self):
        return self._exp_belongs_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            belongs_atol=1e-2,
        )

    def log_is_tangent_test_data(self):
        return self._log_is_tangent_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_list,
            is_tangent_atol=1e-2,
        )

    def geodesic_ivp_belongs_test_data(self):
        return self._geodesic_ivp_belongs_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_points_list,
            belongs_atol=gs.atol * 1000,
        )

    def geodesic_bvp_belongs_test_data(self):
        return self._geodesic_bvp_belongs_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_list,
            belongs_atol=gs.atol * 1000,
        )

    def exp_after_log_test_data(self):
        return self._exp_after_log_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_list,
            rtol=gs.rtol * 100,
            atol=gs.atol * 100000,
        )

    def log_after_exp_test_data(self):
        return self._log_after_exp_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            amplitude=10.0,
            rtol=gs.rtol * 100,
            atol=gs.atol * 100000,
        )

    def exp_ladder_parallel_transport_test_data(self):
        return self._exp_ladder_parallel_transport_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            self.n_rungs_list,
            self.alpha_list,
            self.scheme_list,
        )

    def exp_geodesic_ivp_test_data(self):
        return self._exp_geodesic_ivp_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            self.n_points_list,
            rtol=gs.rtol * 10000,
            atol=gs.atol * 10000,
        )

    def parallel_transport_ivp_is_isometry_test_data(self):
        return self._parallel_transport_ivp_is_isometry_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            is_tangent_atol=gs.atol * 10000,
            rtol=gs.rtol * 100,
            atol=gs.atol * 10000,
        )

    def parallel_transport_bvp_is_isometry_test_data(self):
        return self._parallel_transport_bvp_is_isometry_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            is_tangent_atol=gs.atol * 10000,
            rtol=gs.rtol * 100,
            atol=gs.atol * 10000,
        )

    def dist_is_symmetric_test_data(self):
        return self._dist_is_symmetric_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_a_list,
            self.n_points_b_list,
        )

    def dist_is_positive_test_data(self):
        return self._dist_is_positive_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_a_list,
            self.n_points_b_list,
        )

    def squared_dist_is_positive_test_data(self):
        return self._squared_dist_is_positive_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_a_list,
            self.n_points_b_list,
        )

    def dist_is_norm_of_log_test_data(self):
        return self._dist_is_norm_of_log_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_a_list,
            self.n_points_b_list,
        )

    def dist_point_to_itself_is_zero_test_data(self):
        return self._dist_point_to_itself_is_zero_test_data(
            self.metric_args_list, self.space_list, self.n_points_list
        )

    def inner_product_is_symmetric_test_data(self):
        return self._inner_product_is_symmetric_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
        )

    def triangle_inequality_of_dist_test_data(self):
        return self._triangle_inequality_of_dist_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_list,
            atol=gs.atol * 1000,
        )

    def exp_after_log_intrinsic_ball_extrinsic_test_data(self):
        smoke_data = [
            dict(
                dim=2,
                x_intrinsic=gs.array([4.0, 0.2]),
                y_intrinsic=gs.array([3.0, 3]),
            )
        ]
        return self.generate_tests(smoke_data)

    def distance_ball_extrinsic_from_ball_test_data(self):
        smoke_data = [
            dict(dim=2, x_ball=gs.array([0.7, 0.2]), y_ball=gs.array([0.2, 0.2]))
        ]
        return self.generate_tests(smoke_data)

    def distance_ball_extrinsic_intrinsic_test_data(self):
        smoke_data = [
            dict(
                dim=2,
                x_intrinsic=gs.array([10, 0.2]),
                y_intrinsic=gs.array([1, 6.0]),
            ),
            dict(
                dim=4,
                x_intrinsic=gs.array([10, 0.2, 3, 4]),
                y_intrinsic=gs.array([1, 6, 2.0, 1]),
            ),
        ]
        return self.generate_tests(smoke_data)
|
python
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from mocker.endpoints import grpc_endpoint_pb2 as mocker_dot_endpoints_dot_grpc__endpoint__pb2
class MockServiceStub(object):
    # missing associated documentation comment in .proto file
    pass

    def __init__(self, channel):
        """Constructor.

        Args:
          channel: A grpc.Channel.
        """
        # One callable attribute per RPC; (de)serializers come from the
        # generated grpc_endpoint_pb2 message classes.
        self.UnaryDoSomething = channel.unary_unary(
            '/etcdserverpb.MockService/UnaryDoSomething',
            request_serializer=mocker_dot_endpoints_dot_grpc__endpoint__pb2.StringMessage.SerializeToString,
            response_deserializer=mocker_dot_endpoints_dot_grpc__endpoint__pb2.StringMessage.FromString,
        )
        self.ClientStreamDoSomething = channel.stream_unary(
            '/etcdserverpb.MockService/ClientStreamDoSomething',
            request_serializer=mocker_dot_endpoints_dot_grpc__endpoint__pb2.IntMessage.SerializeToString,
            response_deserializer=mocker_dot_endpoints_dot_grpc__endpoint__pb2.IntCollectionMessage.FromString,
        )
        # NOTE(review): "ServerSteram" looks like a typo carried over from the
        # .proto method name; it must stay as-is to match the wire-level path.
        self.ServerSteramDoSomething = channel.unary_stream(
            '/etcdserverpb.MockService/ServerSteramDoSomething',
            request_serializer=mocker_dot_endpoints_dot_grpc__endpoint__pb2.IntCollectionMessage.SerializeToString,
            response_deserializer=mocker_dot_endpoints_dot_grpc__endpoint__pb2.IntMessage.FromString,
        )
class MockServiceServicer(object):
    # missing associated documentation comment in .proto file
    # (Generated base class: subclass and override the methods below; each
    # default implementation reports UNIMPLEMENTED back to the client.)
    pass

    def UnaryDoSomething(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ClientStreamDoSomething(self, request_iterator, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ServerSteramDoSomething(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_MockServiceServicer_to_server(servicer, server):
    # Registers the servicer's three RPC handlers under the
    # 'etcdserverpb.MockService' service name on the given grpc.Server.
    rpc_method_handlers = {
        'UnaryDoSomething': grpc.unary_unary_rpc_method_handler(
            servicer.UnaryDoSomething,
            request_deserializer=mocker_dot_endpoints_dot_grpc__endpoint__pb2.StringMessage.FromString,
            response_serializer=mocker_dot_endpoints_dot_grpc__endpoint__pb2.StringMessage.SerializeToString,
        ),
        'ClientStreamDoSomething': grpc.stream_unary_rpc_method_handler(
            servicer.ClientStreamDoSomething,
            request_deserializer=mocker_dot_endpoints_dot_grpc__endpoint__pb2.IntMessage.FromString,
            response_serializer=mocker_dot_endpoints_dot_grpc__endpoint__pb2.IntCollectionMessage.SerializeToString,
        ),
        'ServerSteramDoSomething': grpc.unary_stream_rpc_method_handler(
            servicer.ServerSteramDoSomething,
            request_deserializer=mocker_dot_endpoints_dot_grpc__endpoint__pb2.IntCollectionMessage.FromString,
            response_serializer=mocker_dot_endpoints_dot_grpc__endpoint__pb2.IntMessage.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'etcdserverpb.MockService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
|
python
|
# Description:
# Functions that deal with data generation/manipulation
# Imports:
import game_mechanics as gm
import GUI
import numpy as np
from keras.utils.np_utils import to_categorical
from _collections import deque
import os
# Constants:
# Side length of the square game board. Presumably mirrors gm.board_len
# (the functions below use gm.board_len directly) — TODO confirm the two
# stay in sync.
board_len = 30
# Functions:
def moves_to_board(moves):
    """Replay a move list onto a fresh board, X moving first and players
    alternating thereafter. Returns the resulting board."""
    board = gm.create_board()
    symbol = 'X'
    for move in moves:
        gm.select_space(board, symbol, move)
        symbol = 'O' if symbol == 'X' else 'X'
    return board
# Functions for first training strategy:
# -----------------------------------------------
def move_to_board(move):
    """Return a fresh board with a single 'O' placed at *move*."""
    board = gm.create_board()
    gm.select_space(board, 'O', move)
    return board
def generate_y_data(moves):
    """One-hot encode each (row, col) move as class index row*30 + col.

    Returns a (len(moves), 900) categorical array, suitable for
    categorical cross-entropy training.
    """
    class_indices = [30 * move[0] + move[1] for move in moves]
    return to_categorical(class_indices, num_classes=900)
def prepare_data(observations, moves):
    """Convert raw observations and moves into network-ready arrays.

    Args:
        observations: iterable of board states (board_len x board_len).
        moves: iterable of (row, col) moves, same length as observations.

    Returns:
        X: (n, board_len, board_len, 1) array of board states.
        y: one-hot labels from generate_y_data.
    """
    y = generate_y_data(moves)
    # The original zipped observations with labels but never used the label
    # in the loop; iterate the observations directly.
    X = np.array([obs.reshape((gm.board_len, gm.board_len, 1))
                  for obs in observations])
    return X, y
# ------------------------------------------------
# Functions for second training strategy:
# ------------------------------------------------
def make_batches(ar_len, batch_size):
    """Return (start, end) index pairs for batches taken from the END of an
    array of length ar_len.

    Sampling from the end is situational: in this game the last moves carry
    the most important gradient. Any remainder at the front of the array
    (ar_len % batch_size elements) is dropped.

    Returns:
        list of (start, end) tuples, last batch first.
    """
    n_batches = ar_len // batch_size
    return [(ar_len - (i + 1) * batch_size, ar_len - i * batch_size)
            for i in range(n_batches)]
# ------------------------------------------------
def split_state(state):
    """Split a single-plane board into two one-hot channels.

    Cells equal to 1 go to channel 0, cells equal to 2 to channel 1
    (assumes occupied cells hold 1 or 2, matching gm.select_space's
    encoding — TODO confirm).

    Output shape is (2, board_len, board_len). Vectorized replacement for
    the original per-cell double loop; identical output for valid boards.
    """
    s = np.asarray(state)
    transformed_state = np.zeros((2, gm.board_len, gm.board_len))
    transformed_state[0][s == 1] = 1
    transformed_state[1][s == 2] = 1
    return transformed_state
def reshape_board(board):
    """Reshape a 2-channel board into (board_len, board_len, 2) for the net."""
    target_shape = (gm.board_len, gm.board_len, 2)
    return board.reshape(target_shape)
def reshape_labels(label):
    """Flatten a 2-D label board into a 1-D vector (a copy, as flatten makes)."""
    flat = label.flatten()
    return flat
def gen_data(start_nr, n=20):
    """Play n GUI games, save each move history to CSV, and report wins.

    Histories are written to policy_net_data/game_<nr>_data.csv starting
    at start_nr. An even-length history is counted as an O win, odd as an
    X win (presumably because X always moves first — verify in GUI).
    """
    x_wins = 0
    o_wins = 0
    for game_nr in range(n):
        game_history = GUI.play_game('pvp', return_history=True)
        if len(game_history) % 2 == 0:
            o_wins += 1
        else:
            x_wins += 1
        out_path = 'policy_net_data/game_' + str(start_nr + game_nr) + '_data.csv'
        np.savetxt(out_path, game_history, delimiter=',')
    print(f'X has won {x_wins} games and O has won {o_wins} games.')
    return
def pull_game_data(game_nr, move_cluster):
    """Load a saved game history; optionally expand into all translations.

    Returns a list of histories: every in-bounds board translation when
    move_cluster is True, otherwise just the loaded history.
    """
    csv_path = 'policy_net_data/game_' + str(game_nr) + '_data.csv'
    game_hist = np.loadtxt(csv_path, delimiter=',', dtype=int)
    if move_cluster is True:
        return translate_cluster(game_hist)
    return [game_hist]
def augment_game(game_hists):
    """Expand game histories into a 16x-augmented (state, action) data set
    for policy training.

    Each board position is augmented 16 ways: 4 rotations x 2 reflections
    x 2 symbol swaps. Returns (data, labels) arrays of length 16, where
    element i holds every sample under augmentation variant i.
    """
    augmented_data = [None for _ in range(16)]
    augmented_labels = [None for _ in range(16)]
    for game_hist in game_hists:
        states = []
        labels = []
        board = gm.create_board()
        empty_board = gm.create_board()
        for i, move in enumerate(game_hist):
            action = (int(move[0]), int(move[1]))
            # X moves on even plies, O on odd plies.
            if i % 2 == 0:
                player = 'X'
            else:
                player = 'O'
            board_copy = board.copy()
            # Mark the move with a sentinel value (5) that survives
            # rotation/reflection so the move cell can be located again.
            board_copy[action[0]][action[1]] = 5
            for k in range(2):          # symbol swap
                for m in range(2):      # reflection (transpose)
                    for j in range(4):  # 4 rotations (rotationally symmetric)
                        # argmax finds the sentinel, i.e. the transformed move.
                        move_made = np.unravel_index(np.argmax(board_copy), (gm.board_len, gm.board_len))
                        empty_board_copy = empty_board.copy()
                        gm.select_space(empty_board_copy, 'O', move_made)
                        labels.append(reshape_labels(empty_board_copy))
                        state_copy = board_copy.copy()
                        # Remove the sentinel before storing the state.
                        state_copy[move_made[0]][move_made[1]] = 0
                        states.append(state_copy)
                        board_copy = np.rot90(board_copy)
                    # Reflection because reflectional symmetry.
                    board_copy = board_copy.T
                # Swap symbols because the game is (almost) symbol-symmetric.
                gm.swap_symbols(board_copy)
            gm.select_space(board, player, (action[0], action[1]))
        states = np.array(states)
        labels = np.array(labels)
        data_set, label_set = attach_past(states, labels)
        # attach_past returns samples grouped variant-major; slice variant i.
        move_count = int(len(data_set)/16)
        for i in range(16):
            augmented_data[i] = data_set[i*move_count:(i+1)*move_count]
            augmented_labels[i] = label_set[i*move_count:(i+1)*move_count]
    return np.array(augmented_data), np.array(augmented_labels)
def attach_past(augmented_history, labels):
    """Regroup augmented samples variant-major and attach a turn plane.

    augmented_history holds 16 augmentation variants per move in
    move-major order (index 16*j + i is variant i of move j, as built in
    augment_game); the output is variant-major. Each state is split into
    X/O channels, a whose-turn indicator plane (ones = X to move) is
    appended, and the result is reshaped to the model's input layout.
    """
    data_set = []
    labels_set = []
    x_turn = np.ones((1, gm.board_len, gm.board_len))
    o_turn = np.zeros((1, gm.board_len, gm.board_len))
    move_count = int(len(augmented_history)/16)
    for i in range(16):
        # Variants 8-15 had their symbols swapped in augment_game, so the
        # turn indicator must be inverted for them.
        if i > 7:
            swapped = True
        else:
            swapped = False
        for j in range(move_count):
            state = augmented_history[16*j + i]
            label = labels[16*j + i]
            split = split_state(state)
            # Even plies are X's move unless symbols were swapped.
            if j % 2 == 0:
                if swapped is False:
                    turn = x_turn
                else:
                    turn = o_turn
            else:
                if swapped is False:
                    turn = o_turn
                else:
                    turn = x_turn
            data_instance = np.r_[split, turn]
            # Model-specific reshaping.
            # NOTE(review): reshaping (3, H, W) to (H, W, 3) reinterprets the
            # memory layout rather than transposing axes, and the following
            # moveaxis goes back to channels-first — the net effect permutes
            # cell values across channels. Verify this is intended.
            data_instance = data_instance.reshape((gm.board_len, gm.board_len, 3))
            data_instance = np.moveaxis(data_instance, -1, 0)
            data_set.append(data_instance)
            labels_set.append(label)
    return np.array(data_set), np.array(labels_set)
def translate_cluster(game_hist):
    """Generate board translations of a game history that stay in bounds.

    The first half shifts the whole move set horizontally (one column at a
    time, left then right) for each upward vertical shift; the second half
    repeats the process for each downward vertical shift.

    NOTE(review): the horizontal translations of the UNshifted history are
    generated by both halves (each half's first pass), so they appear twice
    in the returned list; also, purely vertical translations (no horizontal
    shift) are never appended. Confirm whether both behaviors are intended.
    """
    possible_translations = [game_hist]
    game_copy_y = game_hist.copy()
    in_bounds_y = True
    while in_bounds_y:
        # Exhaust leftward shifts at the current vertical offset.
        game_copy_x = game_copy_y.copy()
        in_bounds_x = True
        while in_bounds_x:
            for i, move in enumerate(game_copy_x):
                if move[1] > 0:
                    game_copy_x[i][1] -= 1
                else:
                    in_bounds_x = False
                    break
            if in_bounds_x:
                possible_translations.append(game_copy_x.copy())
        # Exhaust rightward shifts at the current vertical offset.
        game_copy_x = game_copy_y.copy()
        in_bounds_x = True
        while in_bounds_x:
            for i, move in enumerate(game_copy_x):
                if move[1] < 29:
                    game_copy_x[i][1] += 1
                else:
                    in_bounds_x = False
                    break
            if in_bounds_x:
                possible_translations.append(game_copy_x.copy())
        # Shift the whole history one row up for the next pass.
        for i, move in enumerate(game_copy_y):
            if move[0] > 0:
                game_copy_y[i][0] -= 1
            else:
                in_bounds_y = False
                break
    game_copy_y = game_hist.copy()
    in_bounds_y = True
    while in_bounds_y:
        game_copy_x = game_copy_y.copy()
        in_bounds_x = True
        while in_bounds_x:
            for i, move in enumerate(game_copy_x):
                if move[1] > 0:
                    game_copy_x[i][1] -= 1
                else:
                    in_bounds_x = False
                    break
            if in_bounds_x:
                possible_translations.append(game_copy_x.copy())
        game_copy_x = game_copy_y.copy()
        in_bounds_x = True
        while in_bounds_x:
            for i, move in enumerate(game_copy_x):
                if move[1] < 29:
                    game_copy_x[i][1] += 1
                else:
                    in_bounds_x = False
                    break
            if in_bounds_x:
                possible_translations.append(game_copy_x.copy())
        # Shift the whole history one row down for the next pass.
        for i, move in enumerate(game_copy_y):
            if move[0] < 29:
                game_copy_y[i][0] += 1
            else:
                in_bounds_y = False
                break
    return possible_translations
def discount_win(winner, game_len, discount_rate):
    """Return per-move discounted rewards for X and O.

    The last move gets weight 1 (sign +1 for the winner, -1 for the
    loser); earlier moves decay by discount_rate per ply.
    """
    x_sign, o_sign = (1, -1) if winner == 'X' else (-1, 1)
    x_rewards = [x_sign * discount_rate ** (game_len - i - 1)
                 for i in range(game_len)]
    o_rewards = [o_sign * discount_rate ** (game_len - i - 1)
                 for i in range(game_len)]
    return x_rewards, o_rewards
def download_data(game_start_nr=0, game_count=180):
    """Expand saved games into augmented tensors saved as .npz files.

    For each game, every translation permutation gets its own directory,
    and each of the 16 symmetry variants is written as var_<k>.npz with
    'data' and 'label' arrays.
    """
    data_path = "D:/machine_learning_data/TTT/data_flat"
    for offset in range(game_count):
        game_nr = game_start_nr + offset
        os.mkdir(f"{data_path}/game_{game_nr}")
        game_hists = pull_game_data(game_nr, move_cluster=True)
        for j, hist in enumerate(game_hists):
            os.mkdir(f"{data_path}/game_{game_nr}/perm_{j}")
            data, labels = augment_game([hist])
            for k in range(16):
                np.savez_compressed(f"{data_path}/game_{game_nr}/perm_{j}/var_{k}",
                                    data=data[k], label=labels[k])
def add_noise_to_labels(labels, mean, std):
    """Blur one-hot labels in place with clipped Gaussian noise.

    For each row: add noise ~ N(mean, std) clipped to [0, 1] everywhere,
    then overwrite the row's original argmax cell with a fresh draw near
    1 - mean (also clipped). Returns the mutated labels array.
    """
    for label in labels:
        hot = np.argmax(label)  # located before the noise is added
        label += np.clip(np.random.normal(mean, std, label.shape), 0, 1)
        label[hot] = np.clip(np.random.normal(1 - mean, std, 1), 0, 1)[0]
    return labels
def get_symbol_token(symbol):
    """Turn-indicator plane: all ones for 'X', all zeros otherwise."""
    plane_shape = (1, gm.board_len, gm.board_len)
    return np.ones(plane_shape) if symbol == 'X' else np.zeros(plane_shape)
def get_prediction_format(obs, symbol):
    """Build a single-sample, channels-last batch for model prediction.

    Stacks the split board planes with the turn token plane, moves the
    channel axis to the end, and adds a leading batch dimension of 1.
    """
    stacked = np.r_[split_state(obs), get_symbol_token(symbol)]
    channels_last = np.moveaxis(stacked, 0, -1)
    return np.array([channels_last])
|
python
|
# -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Georg Brandl <[email protected]>
#
# *****************************************************************************
"""NICOS GUI script status panel component."""
from time import time
from nicos.clients.gui.panels import Panel
from nicos.clients.gui.utils import loadUi
from nicos.guisupport.qt import QActionGroup, QBrush, QColor, QFontMetrics, \
QIcon, QListWidgetItem, QMenu, QPen, QPixmap, QSize, QStyledItemDelegate, \
Qt, QTimer, QToolBar, pyqtSlot
from nicos.guisupport.utils import setBackgroundColor
from nicos.protocols.daemon import BREAK_AFTER_LINE, BREAK_AFTER_STEP, \
BREAK_NOW, SIM_STATES, STATUS_IDLEEXC
from nicos.utils import formatEndtime
class ScriptQueue:
    """Tracks queued script requests and mirrors them in a list view.

    The frame containing the view is shown only while the queue is
    non-empty; showing is slightly delayed to avoid flicker when a
    request is dequeued immediately after being queued.
    """

    def __init__(self, frame, view):
        # mapping from request ID to its list widget item
        self._id2item = {}
        self._frame = frame
        self._view = view
        # single-shot timer that defers showing the queue frame
        self._timer = QTimer(singleShot=True, timeout=self._timeout)

    def _format_item(self, request):
        """Return the request's script text, truncated to 100 characters."""
        text = request['script']
        return text if len(text) <= 100 else text[:100] + '...'

    def _timeout(self):
        self._frame.show()

    def append(self, request):
        """Add a new request to the queue display."""
        reqid = request['reqid']
        entry = QListWidgetItem(self._format_item(request))
        entry.setData(Qt.UserRole, reqid)
        self._id2item[reqid] = entry
        self._view.addItem(entry)
        # delay showing the frame for 20 msecs, so that it doesn't flicker
        # in and out if the script is immediately taken out of the queue again
        self._timer.start(20)

    def update(self, request):
        """Refresh the displayed text of an already queued request."""
        entry = self._id2item.get(request['reqid'])
        if entry is not None:
            entry.setText(self._format_item(request))

    def remove(self, reqid):
        """Remove a request from the display; hide the frame when empty."""
        entry = self._id2item.pop(reqid, None)
        if entry is None:
            return
        entry = self._view.takeItem(self._view.row(entry))
        if not self._id2item:
            self._timer.stop()
            self._frame.hide()
        return entry

    def rearrange(self, reqids):
        """Reorder the view to match the given request-ID order."""
        selected = self._view.currentItem()
        for row in range(self._view.count() - 1, -1, -1):
            self._view.takeItem(row)
        for reqid in reqids:
            self._view.addItem(self._id2item[reqid])
        if selected:
            self._view.setCurrentItem(selected)

    def clear(self):
        """Drop all queued requests and hide the frame."""
        self._frame.hide()
        self._view.clear()
        self._id2item.clear()

    def __bool__(self):
        return bool(self._id2item)
class LineDelegate(QStyledItemDelegate):
    """Item delegate that paints a grey line-number gutter next to each
    item, using the line number stored in the item's Qt.UserRole data."""

    def __init__(self, offset, view):
        QStyledItemDelegate.__init__(self, view)
        # horizontal pixel offset reserved for the item's icon
        self._icon_offset = offset
        # additional width of the number column; set externally once the
        # font metrics of the view are known
        self._margin_offset = 0
        self._pen = QPen(QBrush(QColor('grey')), 1)

    def paint(self, painter, option, index):
        # let the base class draw icon and text first, then overlay
        # the line number and the vertical separator line
        QStyledItemDelegate.paint(self, painter, option, index)
        number = index.data(Qt.UserRole)
        area = option.rect.adjusted(self._icon_offset, 0, 0, 0)
        gutter_x = self._icon_offset + self._margin_offset
        painter.save()
        painter.setPen(self._pen)
        painter.drawText(area, Qt.AlignVCenter, number)
        painter.drawLine(gutter_x, area.top(), gutter_x, area.bottom() + 1)
        painter.restore()
class ScriptStatusPanel(Panel):
    """Provides a view of the currently executed script.
    The current position within the script is shown with an arrow. The panel
    also displays queued scripts.
    Options:
    * ``stopcounting`` (default False) -- Configure the stop button behaviour,
      if is set to ``True``, the execution of a script will be aborted,
      otherwise a counting will be finished first before the script will be
      stopped.
    * ``eta`` (default False) - if set to ``True`` the "ETA" (estimated time of
      end of script) will be displayed if the
      :class:`daemon <nicos.services.daemon.NicosDaemon>` is configured to run
      with automatic simulation of the current command.
    """
    panelName = 'Script status'
    # daemon states in which showing an ETA makes sense
    SHOW_ETA_STATES =[
        'running',
        'paused'
    ]
    def __init__(self, parent, client, options):
        Panel.__init__(self, parent, client, options)
        # widgets and actions (traceView, queueView, actionStop, ...) are
        # created by this .ui file and become attributes of the panel
        loadUi(self, 'panels/status.ui')
        self.stopcounting = False
        self.menus = None
        self.bar = None
        self.queueFrame.hide()
        self.statusLabel.hide()
        # trace view background: reddish while paused, user color otherwise
        self.pause_color = QColor('#ffdddd')
        self.idle_color = parent.user_color
        self.script_queue = ScriptQueue(self.queueFrame, self.queueView)
        # 1-based number of the currently executing line (-1 = none)
        self.current_line = -1
        self.current_request = {}
        self.curlineicon = QIcon(':/currentline')
        self.errlineicon = QIcon(':/errorline')
        # transparent icon for non-current lines keeps text aligned with
        # the current-line arrow
        empty = QPixmap(16, 16)
        empty.fill(Qt.transparent)
        self.otherlineicon = QIcon(empty)
        self.traceView.setItemDelegate(LineDelegate(24, self.traceView))
        self.stopcounting = bool(options.get('stopcounting', False))
        if self.stopcounting:
            tooltip = 'Aborts the current executed script'
            self.actionStop.setToolTip(tooltip)
            self.actionStop.setText('Abort current script')
            self.actionStop2.setToolTip(tooltip)
        self.showETA = bool(options.get('eta', False))
        self.etaWidget.hide()
        # wire up all daemon client signals this panel reacts to
        client.request.connect(self.on_client_request)
        client.processing.connect(self.on_client_processing)
        client.blocked.connect(self.on_client_blocked)
        client.status.connect(self.on_client_status)
        client.initstatus.connect(self.on_client_initstatus)
        client.disconnected.connect(self.on_client_disconnected)
        client.rearranged.connect(self.on_client_rearranged)
        client.updated.connect(self.on_client_updated)
        client.eta.connect(self.on_client_eta)
        bar = QToolBar('Script control')
        bar.setObjectName(bar.windowTitle())
        # unfortunately it is not wise to put a menu in its own dropdown menu,
        # so we have to duplicate the actionBreak and actionStop...
        dropdown1 = QMenu('', self)
        dropdown1.addAction(self.actionBreak)
        dropdown1.addAction(self.actionBreakCount)
        dropdown1.addAction(self.actionFinishEarly)
        self.actionBreak2.setMenu(dropdown1)
        dropdown2 = QMenu('', self)
        dropdown2.addAction(self.actionStop)
        dropdown2.addAction(self.actionFinish)
        dropdown2.addAction(self.actionFinishEarlyAndStop)
        self.actionStop2.setMenu(dropdown2)
        bar.addAction(self.actionBreak2)
        bar.addAction(self.actionContinue)
        bar.addAction(self.actionStop2)
        bar.addAction(self.actionEmergencyStop)
        self.bar = bar
        # self.mainwindow.addToolBar(bar)
        # mirror the toolbar actions in a main-window menu
        menu = QMenu('&Script control', self)
        menu.addAction(self.actionBreak)
        menu.addAction(self.actionBreakCount)
        menu.addAction(self.actionContinue)
        menu.addAction(self.actionFinishEarly)
        menu.addSeparator()
        menu.addAction(self.actionStop)
        menu.addAction(self.actionFinish)
        menu.addAction(self.actionFinishEarlyAndStop)
        menu.addSeparator()
        menu.addAction(self.actionEmergencyStop)
        self.mainwindow.menuBar().insertMenu(
            self.mainwindow.menuWindows.menuAction(), menu)
        # group of all actions disabled in view-only mode (see setViewOnly)
        self.activeGroup = QActionGroup(self)
        self.activeGroup.addAction(self.actionBreak)
        self.activeGroup.addAction(self.actionBreak2)
        self.activeGroup.addAction(self.actionBreakCount)
        self.activeGroup.addAction(self.actionContinue)
        self.activeGroup.addAction(self.actionStop)
        self.activeGroup.addAction(self.actionStop2)
        self.activeGroup.addAction(self.actionFinish)
        self.activeGroup.addAction(self.actionFinishEarly)
        self.activeGroup.addAction(self.actionFinishEarlyAndStop)
        self._status = 'idle'
    def setViewOnly(self, viewonly):
        # disable all script-control actions for view-only clients
        self.activeGroup.setEnabled(not viewonly)
    def setCustomStyle(self, font, back):
        """Apply the user-configured font and background color."""
        self.idle_color = back
        for widget in (self.traceView, self.queueView):
            widget.setFont(font)
            setBackgroundColor(widget, back)
    def getToolbars(self):
        return [self.bar]
    def getMenus(self):
        # the menu was inserted directly into the menubar in __init__
        return []
    def updateStatus(self, status, exception=False):
        """Enable/disable actions and recolor the trace view to match the
        daemon status ('idle', 'running', 'paused', 'disconnected', ...).
        NOTE(review): the ``exception`` parameter is unused here — confirm
        whether the base class contract requires it.
        """
        self._status = status
        isconnected = status != 'disconnected'
        self.actionBreak.setEnabled(isconnected and status != 'idle')
        self.actionBreak2.setEnabled(isconnected and status != 'idle')
        self.actionBreak2.setVisible(status != 'paused')
        self.actionBreakCount.setEnabled(isconnected and status != 'idle')
        self.actionContinue.setVisible(status == 'paused')
        self.actionStop.setEnabled(isconnected and status != 'idle')
        self.actionStop2.setEnabled(isconnected and status != 'idle')
        self.actionFinish.setEnabled(isconnected and status != 'idle')
        self.actionFinishEarly.setEnabled(isconnected and status != 'idle')
        self.actionFinishEarlyAndStop.setEnabled(isconnected and status != 'idle')
        self.actionEmergencyStop.setEnabled(isconnected)
        if status == 'paused':
            self.statusLabel.setText('Script is paused.')
            self.statusLabel.show()
            setBackgroundColor(self.traceView, self.pause_color)
        else:
            self.statusLabel.hide()
            setBackgroundColor(self.traceView, self.idle_color)
        self.traceView.update()
        if status == 'idle':
            self.etaWidget.hide()
    def setScript(self, script):
        """Fill the trace view with the script, one item per line, with the
        right-aligned line number stored as UserRole data for the delegate."""
        self.traceView.clear()
        lines = script.splitlines()
        longest = len(str(len(lines)))
        # text is padded so it starts after the line-number gutter
        padding = ' ' * (longest + 3)
        metrics = QFontMetrics(self.traceView.font())
        height = metrics.height()
        lineno_width = metrics.size(0, ' ' * (longest + 2)).width()
        self.traceView.itemDelegate()._margin_offset = lineno_width
        for (i, line) in enumerate(lines):
            item = QListWidgetItem(self.otherlineicon, padding + line,
                                   self.traceView)
            item.setSizeHint(QSize(-1, height))
            item.setData(Qt.UserRole, '%*d' % (longest, i+1))
            self.traceView.addItem(item)
        self.current_line = -1
    def setCurrentLine(self, line, error_exit=False):
        """Move the current-line arrow to ``line`` (1-based; -1 clears it)."""
        if self.current_line != -1:
            item = self.traceView.item(self.current_line - 1)
            if item:
                # when a script has exited with an error, keep indicating the
                # current line, with a red arrow
                if error_exit and line == -1:
                    item.setIcon(self.errlineicon)
                else:
                    item.setIcon(self.otherlineicon)
            self.current_line = -1
        if 0 < line <= self.traceView.count():
            item = self.traceView.item(line - 1)
            item.setIcon(self.curlineicon)
            self.traceView.scrollToItem(item)
            self.current_line = line
    def on_client_request(self, request):
        """A new request was queued on the daemon."""
        if 'script' not in request:
            return
        self.script_queue.append(request)
    def on_client_processing(self, request):
        """A queued request started executing: show it in the trace view.
        NOTE(review): assumes on_client_initstatus has populated
        self.current_request before the first 'processing' signal —
        otherwise the 'reqid' lookup below raises KeyError. Confirm the
        daemon always sends initstatus first.
        """
        if 'script' not in request:
            return
        new_current_line = -1
        if self.current_request['reqid'] == request['reqid']:
            # on update, set the current line to the same as before
            # (this may be WRONG, but should not in most cases, and it's
            # better than no line indicator at all)
            new_current_line = self.current_line
        self.script_queue.remove(request['reqid'])
        self.setScript(request['script'])
        self.current_request = request
        self.setCurrentLine(new_current_line)
    def on_client_blocked(self, requests):
        # requests that will never run (e.g. skipped) leave the queue
        for reqid in requests:
            self.script_queue.remove(reqid)
    def on_client_eta(self, data):
        """Update the ETA widget from a (simulation_state, eta_time) pair."""
        if not self.showETA or self._status not in self.SHOW_ETA_STATES:
            return
        state, eta = data
        if state == SIM_STATES['pending']:
            self.etaWidget.hide()
        elif state == SIM_STATES['running']:
            self.etaLabel.setText('Calculating...')
            self.etaWidget.show()
        elif state == SIM_STATES['success'] and eta > time():
            self.etaLabel.setText(formatEndtime(eta - time()))
            self.etaWidget.show()
        elif state == SIM_STATES['failed']:
            self.etaLabel.setText('Could not calculate ETA')
            self.etaWidget.show()
    def on_client_initstatus(self, state):
        """Populate the panel from the daemon's full state on (re)connect."""
        self.setScript(state['script'])
        self.current_request['script'] = state['script']
        self.current_request['reqid'] = None
        self.on_client_status(state['status'])
        for req in state['requests']:
            self.on_client_request(req)
        if self.showETA:
            self.on_client_eta(state['eta'])
    def on_client_status(self, data):
        """React to a (status_constant, line_number) update."""
        status, line = data
        if line != self.current_line:
            self.setCurrentLine(line, status == STATUS_IDLEEXC)
    def on_client_disconnected(self):
        self.script_queue.clear()
    def on_client_rearranged(self, items):
        self.script_queue.rearrange(items)
    def on_client_updated(self, request):
        if 'script' not in request:
            return
        self.script_queue.update(request)
    @pyqtSlot()
    def on_actionBreak_triggered(self):
        # pause, taking effect after the current step
        self.client.tell_action('break', BREAK_AFTER_STEP)
    @pyqtSlot()
    def on_actionBreak2_triggered(self):
        # toolbar duplicate of actionBreak (see dropdown note in __init__)
        self.on_actionBreak_triggered()
    @pyqtSlot()
    def on_actionBreakCount_triggered(self):
        # pause with BREAK_NOW priority
        self.client.tell_action('break', BREAK_NOW)
    @pyqtSlot()
    def on_actionContinue_triggered(self):
        self.client.tell_action('continue')
    @pyqtSlot()
    def on_actionStop_triggered(self):
        # honor the 'stopcounting' panel option: abort immediately or
        # let the current step (e.g. a count) finish first
        if self.stopcounting:
            self.client.tell_action('stop', BREAK_NOW)
        else:
            self.client.tell_action('stop', BREAK_AFTER_STEP)
    @pyqtSlot()
    def on_actionStop2_triggered(self):
        # toolbar duplicate of actionStop
        self.on_actionStop_triggered()
    @pyqtSlot()
    def on_actionFinish_triggered(self):
        # stop after the current script line completes
        self.client.tell_action('stop', BREAK_AFTER_LINE)
    @pyqtSlot()
    def on_actionFinishEarly_triggered(self):
        self.client.tell_action('finish')
    @pyqtSlot()
    def on_actionFinishEarlyAndStop_triggered(self):
        self.client.tell_action('stop', BREAK_AFTER_STEP)
        self.client.tell_action('finish')
    @pyqtSlot()
    def on_actionEmergencyStop_triggered(self):
        self.client.tell_action('emergency')
    @pyqtSlot()
    def on_clearQueue_clicked(self):
        # '*' unqueues all pending requests on the daemon
        if self.client.tell('unqueue', '*'):
            self.script_queue.clear()
    @pyqtSlot()
    def on_deleteQueueItem_clicked(self):
        item = self.queueView.currentItem()
        if not item:
            return
        reqid = item.data(Qt.UserRole)
        if self.client.tell('unqueue', str(reqid)):
            self.script_queue.remove(reqid)
    def moveItem(self, delta):
        """Move the selected queue item up/down by ``delta`` positions and
        push the new ordering to the daemon."""
        rowCount = self.queueView.count()
        IDs = []
        for i in range(rowCount):
            IDs.append(self.queueView.item(i).data(Qt.UserRole))
        curID = self.queueView.currentItem().data(Qt.UserRole)
        i = IDs.index(curID)
        IDs.insert(i + delta, IDs.pop(i))
        self.client.ask('rearrange', IDs)
    @pyqtSlot()
    def on_upButton_clicked(self):
        if self.queueView.currentItem():
            self.moveItem(-1)
    @pyqtSlot()
    def on_downButton_clicked(self):
        if self.queueView.currentItem():
            self.moveItem(+1)
|
python
|
import numpy as np
import pandas as pd
import tensorflow as tf
import sys
import csv
import random
import os
from io import StringIO
import keras
from keras.layers import Input,Dense,concatenate,Dropout,LSTM
from keras.models import Model,load_model
from keras.optimizers import Adam, Adamax, RMSprop, Adagrad, Adadelta, Nadam
from keras import backend as K
from keras.callbacks import EarlyStopping,ModelCheckpoint
random.seed(54321)
##Customer Input
# Usage:
#   python pMTnet.py -input input.csv -library library_dir -output output_dir
#          -output_log log.txt -prediction prediction.csv
# NOTE(review): the original usage comment omitted the mandatory
# -output_log and -prediction flags; args.index() below raises ValueError
# when any flag is missing.
args = sys.argv
file_dir=args[args.index('-input')+1] #input protein seq file
library_dir=args[args.index('-library')+1] #directory to downloaded library
prediction_output = args[args.index('-prediction')+1] #file name of the final rank output
model_dir=library_dir+'/h5_file' #pretrained encoder/classifier weights
aa_dict_dir=library_dir+'/Atchley_factors.csv' #embedding vector for tcr encoding
hla_db_dir=library_dir+'/hla_library/' #hla sequence
output_dir=args[args.index('-output')+1] #directory to hold encoding and prediction output
output_log_dir=args[args.index('-output_log')+1] #standard output
################################
#   Reading Encoding Matrix    #
################################
########################### Atchley's factors#######################
# Map each one-letter amino-acid code to its 5 Atchley factors
# (numeric physico-chemical descriptors), read from the library CSV.
aa_dict_atchley=dict()
with open(aa_dict_dir,'r') as aa:
    aa_reader=csv.reader(aa)
    next(aa_reader, None)  # skip the header row
    for rows in aa_reader:
        aa_name=rows[0]
        aa_factor=rows[1:len(rows)]
        aa_dict_atchley[aa_name]=np.asarray(aa_factor,dtype='float')
########################### One Hot ##########################
# Index of every amino-acid letter in the encoding matrices; index 20 ('X')
# doubles as the padding symbol for short sequences.
aa_dict_one_hot = {'A': 0,'C': 1,'D': 2,'E': 3,'F': 4,'G': 5,'H': 6,'I': 7,'K': 8,'L': 9,
                   'M': 10,'N': 11,'P': 12,'Q': 13,'R': 14,'S': 15,'T': 16,'V': 17,
                   'W': 18,'Y': 19,'X': 20} # 'X' is a padding variable
########################### Blosum ##########################
# BLOSUM50 substitution matrix, restricted and reordered to the 21 symbols
# of aa_dict_one_hot. Fix: sep uses a raw string — '\s+' in a plain string
# is an invalid escape sequence (SyntaxWarning on Python >= 3.12).
BLOSUM50_MATRIX = pd.read_table(StringIO(u"""
   A  R  N  D  C  Q  E  G  H  I  L  K  M  F  P  S  T  W  Y  V  B  J  Z  X  *
A  5 -2 -1 -2 -1 -1 -1  0 -2 -1 -2 -1 -1 -3 -1  1  0 -3 -2  0 -2 -2 -1 -1 -5
R -2  7 -1 -2 -4  1  0 -3  0 -4 -3  3 -2 -3 -3 -1 -1 -3 -1 -3 -1 -3  0 -1 -5
N -1 -1  7  2 -2  0  0  0  1 -3 -4  0 -2 -4 -2  1  0 -4 -2 -3  5 -4  0 -1 -5
D -2 -2  2  8 -4  0  2 -1 -1 -4 -4 -1 -4 -5 -1  0 -1 -5 -3 -4  6 -4  1 -1 -5
C -1 -4 -2 -4 13 -3 -3 -3 -3 -2 -2 -3 -2 -2 -4 -1 -1 -5 -3 -1 -3 -2 -3 -1 -5
Q -1  1  0  0 -3  7  2 -2  1 -3 -2  2  0 -4 -1  0 -1 -1 -1 -3  0 -3  4 -1 -5
E -1  0  0  2 -3  2  6 -3  0 -4 -3  1 -2 -3 -1 -1 -1 -3 -2 -3  1 -3  5 -1 -5
G  0 -3  0 -1 -3 -2 -3  8 -2 -4 -4 -2 -3 -4 -2  0 -2 -3 -3 -4 -1 -4 -2 -1 -5
H -2  0  1 -1 -3  1  0 -2 10 -4 -3  0 -1 -1 -2 -1 -2 -3  2 -4  0 -3  0 -1 -5
I -1 -4 -3 -4 -2 -3 -4 -4 -4  5  2 -3  2  0 -3 -3 -1 -3 -1  4 -4  4 -3 -1 -5
L -2 -3 -4 -4 -2 -2 -3 -4 -3  2  5 -3  3  1 -4 -3 -1 -2 -1  1 -4  4 -3 -1 -5
K -1  3  0 -1 -3  2  1 -2  0 -3 -3  6 -2 -4 -1  0 -1 -3 -2 -3  0 -3  1 -1 -5
M -1 -2 -2 -4 -2  0 -2 -3 -1  2  3 -2  7  0 -3 -2 -1 -1  0  1 -3  2 -1 -1 -5
F -3 -3 -4 -5 -2 -4 -3 -4 -1  0  1 -4  0  8 -4 -3 -2  1  4 -1 -4  1 -4 -1 -5
P -1 -3 -2 -1 -4 -1 -1 -2 -2 -3 -4 -1 -3 -4 10 -1 -1 -4 -3 -3 -2 -3 -1 -1 -5
S  1 -1  1  0 -1  0 -1  0 -1 -3 -3  0 -2 -3 -1  5  2 -4 -2 -2  0 -3  0 -1 -5
T  0 -1  0 -1 -1 -1 -1 -2 -2 -1 -1 -1 -1 -2 -1  2  5 -3 -2  0  0 -1 -1 -1 -5
W -3 -3 -4 -5 -5 -1 -3 -3 -3 -3 -2 -3 -1  1 -4 -4 -3 15  2 -3 -5 -2 -2 -1 -5
Y -2 -1 -2 -3 -3 -1 -2 -3  2 -1 -1 -2  0  4 -3 -2 -2  2  8 -1 -3 -1 -2 -1 -5
V  0 -3 -3 -4 -1 -3 -3 -4 -4  4  1 -3  1 -1 -3 -2  0 -3 -1  5 -3  2 -3 -1 -5
B -2 -1  5  6 -3  0  1 -1  0 -4 -4  0 -3 -4 -2  0  0 -5 -3 -3  6 -4  1 -1 -5
J -2 -3 -4 -4 -2 -3 -3 -4 -3  4  4 -3  2  1 -3 -3 -1 -2 -1  2 -4  4 -3 -1 -5
Z -1  0  0  1 -3  4  5 -2  0 -3 -3  1 -1 -4 -1  0 -1 -2 -2 -3  1 -3  5 -1 -5
X -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -5
* -5 -5 -5 -5 -5 -5 -5 -5 -5 -5 -5 -5 -5 -5 -5 -5 -5 -5 -5 -5 -5 -5 -5 -5  1
"""), sep=r'\s+').loc[list(aa_dict_one_hot.keys()), list(aa_dict_one_hot.keys())]
# sanity check: a substitution matrix must be symmetric
assert (BLOSUM50_MATRIX == BLOSUM50_MATRIX.T).all().all()
# Per-residue encoding tables selected by name ('BLOSUM50' or 'one-hot'),
# each a 21x21 DataFrame indexed by amino-acid letter.
ENCODING_DATA_FRAMES = {
    "BLOSUM50": BLOSUM50_MATRIX,
    "one-hot": pd.DataFrame([
        [1 if i == j else 0 for i in range(len(aa_dict_one_hot.keys()))]
        for j in range(len(aa_dict_one_hot.keys()))
    ], index=aa_dict_one_hot.keys(), columns=aa_dict_one_hot.keys())
}
########################### HLA pseudo-sequence ##########################
#pMHCpan
# Parse the class-I HLA protein FASTA files into HLA_seq_lib:
# allele name -> pseudo-sequence built from MHC contact positions.
HLA_ABC=[hla_db_dir+'/A_prot.fasta',hla_db_dir+'/B_prot.fasta',hla_db_dir+'/C_prot.fasta',hla_db_dir+'/E_prot.fasta']
HLA_seq_lib={}
for one_class in HLA_ABC:
    prot=open(one_class)  # NOTE(review): file handle is never closed
    #pseudo_seq from netMHCpan:https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0000796
    # NOTE(review): the published netMHCpan position list reads 69,70 here,
    # not 79,70 — suspected typo, but the shipped models were trained with
    # this exact list, so changing it would invalidate the weights. Confirm
    # upstream before touching.
    pseudo_seq_pos=[7,9,24,45,59,62,63,66,67,79,70,73,74,76,77,80,81,84,95,97,99,114,116,118,143,147,150,152,156,158,159,163,167,171]
    #write HLA sequences into a library
    #class I alles
    name=''
    sequence=''
    for line in prot:
        if len(name)!=0:
            if line.startswith('>HLA'):
                # header of the next record: emit the pseudo-sequence of the
                # record accumulated so far
                pseudo=''
                # NOTE(review): range(0,33) uses only 33 of the 34 listed
                # positions (171 is skipped); hla_encode() later pads the
                # encoding back to length 34 with 'X'. Suspected off-by-one,
                # kept as-is for compatibility with the trained models.
                for i in range(0,33):
                    if len(sequence)>pseudo_seq_pos[i]:
                        pseudo=pseudo+sequence[pseudo_seq_pos[i]]
                HLA_seq_lib[name]=pseudo
                name=line.split(' ')[1]
                sequence=''
            else:
                sequence=sequence+line.strip()
        else:
            # very first header line of the file
            name=line.split(' ')[1]
    # NOTE(review): the final record of each file is never flushed into
    # HLA_seq_lib (a record is only emitted when the NEXT header is seen).
########################################
# Input data encoding helper functions #
########################################
#################functions for TCR encoding####################
def preprocess(filedir):
    """Read the input CSV and filter rows for prediction.

    Drops rows with NA values, rows whose HLA allele has no prefix match
    in the module-level HLA_seq_lib, and rows whose antigen is longer
    than 15 aa. Returns (TCR_list, antigen_list, HLA_list).

    NOTE(review): returns the scalar 0 (not a 3-tuple) when the file is
    missing, so the caller's tuple unpacking fails with TypeError —
    confirm whether raising would be preferable.
    """
    #Preprocess TCR files
    print('Processing: '+filedir)
    if not os.path.exists(filedir):
        print('Invalid file path: ' + filedir)
        return 0
    dataset = pd.read_csv(filedir, header=0)
    #Preprocess HLA_antigen files
    #remove HLA which is not in HLA_seq_lib; if the input hla allele is not in HLA_seq_lib; then the first HLA startswith the input HLA allele will be given
    #Remove antigen that is longer than 15aa
    dataset=dataset.dropna()
    HLA_list=list(dataset['HLA'])
    ind=0
    index_list=[]
    for i in HLA_list:
        # keep only alleles that prefix-match at least one library key
        if len([hla_allele for hla_allele in HLA_seq_lib.keys() if hla_allele.startswith(str(i))])==0:
            index_list.append(ind)
            print('drop '+i)
        ind=ind+1
    dataset=dataset.drop(dataset.iloc[index_list].index)
    dataset=dataset[dataset.Antigen.str.len()<16]
    # NOTE(review): this dropped-count arithmetic is only correct for a
    # 0-based RangeIndex on the input — verify against real input files.
    print(str(max(dataset.index)-dataset.shape[0]+1)+' antigens longer than 15aa are dropped!')
    TCR_list=dataset['CDR3'].tolist()
    antigen_list=dataset['Antigen'].tolist()
    HLA_list=dataset['HLA'].tolist()
    return TCR_list,antigen_list,HLA_list
def aamapping_TCR(peptideSeq,aa_dict):
    """Map an amino-acid sequence to its Atchley-factor rows.

    Sequences longer than 80 residues are truncated; unknown residues and
    trailing padding become all-zero rows. Returns an (80, 5) array.
    """
    if len(peptideSeq)>80:
        print('Length: '+str(len(peptideSeq))+' over bound!')
        peptideSeq=peptideSeq[0:80]
    rows = []
    for residue in peptideSeq:
        try:
            rows.append(aa_dict[residue])
        except KeyError:
            # unknown residue: warn and fall back to a zero vector
            print('Not proper aaSeqs: '+peptideSeq)
            rows.append(np.zeros(5,dtype='float64'))
    # zero-pad up to the fixed length of 80 residues
    rows.extend(np.zeros(5,dtype='float64') for _ in range(80-len(peptideSeq)))
    return np.asarray(rows)
def hla_encode(HLA_name,encoding_method):
    """Convert an HLA allele name to its numeric pseudo-sequence encoding.

    Looks the allele up in the module-level HLA_seq_lib (falling back to
    the first key with a matching prefix), pads the pseudo-sequence to 34
    residues with 'X', and encodes it with the selected matrix.
    Returns a numpy array of 34 rows and 21 columns.
    Raises ValueError when the allele cannot be resolved at all (the
    original printed 'cannot find'+name and then crashed with IndexError).
    """
    if HLA_name not in HLA_seq_lib.keys():
        # build the prefix-match list once (the original rebuilt it twice)
        matches=[hla_allele for hla_allele in HLA_seq_lib.keys() if hla_allele.startswith(str(HLA_name))]
        if len(matches)==0:
            raise ValueError('cannot find '+str(HLA_name))
        HLA_name=matches[0]
    HLA_sequence=HLA_seq_lib[HLA_name]
    HLA_int=[aa_dict_one_hot[char] for char in HLA_sequence]
    while len(HLA_int)!=34:
        #if the pseudo sequence length is not 34, use X for padding
        HLA_int.append(20)
    result=ENCODING_DATA_FRAMES[encoding_method].iloc[HLA_int]
    # Get a numpy array of 34 rows and 21 columns
    return np.asarray(result)
def peptide_encode_HLA(peptide, maxlen,encoding_method):
    """Encode a peptide sequence as a (maxlen, 21) numeric array.

    Invalid residues map to the padding index 20 ('X'); shorter peptides
    are padded with 'X' in the middle so both termini are preserved.
    Raises ValueError when the peptide exceeds maxlen.
    """
    if len(peptide) > maxlen:
        msg = 'Peptide %s has length %d > maxlen = %d.'
        raise ValueError(msg % (peptide, len(peptide), maxlen))
    peptide= peptide.replace(u'\xa0', u'') #remove non-breaking space
    indices = []
    for ch in peptide:
        upper = ch.upper()
        #if the amino acid is not valid, replace it with padding aa 'X':20
        indices.append(aa_dict_one_hot[upper] if upper in aa_dict_one_hot.keys() else 20)
    #use 'X'(20) for padding, inserted at the middle of the sequence
    k = len(indices)
    indices = indices[:k // 2] + [20] * (int(maxlen) - k) + indices[k // 2:]
    if len(indices) != maxlen:
        msg = 'Peptide %s has length %d < maxlen = %d, but pad is "none".'
        raise ValueError(msg % (peptide, len(peptide), maxlen))
    return np.asarray(ENCODING_DATA_FRAMES[encoding_method].iloc[indices])
def TCRMap(dataset,aa_dict):
    """Encode a list of CDR3 sequences into an (n, 80, 5, 1) array.

    Wrapper of aamapping_TCR. Collects all per-sequence encodings first
    and concatenates once — the original np.append() inside the loop
    re-copied the growing array on every iteration (quadratic time).
    """
    encoded=[aamapping_TCR(seq,aa_dict).reshape(1,80,5,1) for seq in dataset]
    TCR_array=np.concatenate(encoded,axis=0)
    print('TCRMap done!')
    return TCR_array
def HLAMap(dataset,encoding_method):
    """Encode a list of HLA alleles into an (n, 34, 21) array.

    Collects all per-allele encodings first and concatenates once — the
    original np.append() inside the loop re-copied the growing array on
    every iteration (quadratic time).
    """
    encoded=[hla_encode(each_HLA,encoding_method).reshape(1,34,21) for each_HLA in dataset]
    HLA_array=np.concatenate(encoded,axis=0)
    print('HLAMap done!')
    return HLA_array
def antigenMap(dataset,maxlen,encoding_method):
    """Encode a list of antigen peptides into an (n, maxlen, 21) array.

    Collects all per-peptide encodings first and concatenates once — the
    original np.append() inside the loop re-copied the growing array on
    every iteration (quadratic time).
    """
    encoded=[peptide_encode_HLA(each_antigen,maxlen,encoding_method).reshape(1,maxlen,21)
             for each_antigen in dataset]
    antigen_array=np.concatenate(encoded,axis=0)
    print('antigenMap done!')
    return antigen_array
def pearson_correlation_f(y_true, y_pred):
    """Pearson correlation between targets and predictions (Keras metric)."""
    # K.mean() yields a scalar, so subtraction broadcasts over all elements
    pred_centered = y_pred - K.mean(y_pred)
    true_centered = y_true - K.mean(y_true)
    covariance = K.mean(pred_centered * true_centered)
    return covariance / (K.std(y_pred) * K.std(y_true))
def pos_neg_acc(y_true,y_pred):
    """Custom accuracy: fraction of pairs where the positive score
    (column 1) beats the negative score (column 0)."""
    pos_scores = y_pred[:,1]
    neg_scores = y_pred[:,0]
    return K.mean(K.cast(neg_scores < pos_scores, "float16"))
def pos_neg_loss(y_true,y_pred):
    """Custom ranking loss: hinge on (negative - positive + 1) plus an
    L2 penalty (weight 0.2) on both score columns."""
    pos_scores = y_pred[:,1]
    neg_scores = y_pred[:,0]
    hinge = K.mean(K.relu(1 + neg_scores - pos_scores))
    l2_penalty = K.mean(K.square(neg_scores) + K.square(pos_scores))
    return hinge + 0.2 * l2_penalty
#########################################
# preprocess input data and do encoding #
#########################################
#Read data
#TCR Data preprocess
# All subsequent print() output is redirected into the user-supplied log.
log_file=open(output_log_dir,'w')
sys.stdout=log_file
print('Mission loading.')
TCR_list,antigen_list,HLA_list=preprocess(file_dir)
TCR_array=TCRMap(TCR_list,aa_dict_atchley)
antigen_array=antigenMap(antigen_list,15,'BLOSUM50')
HLA_array=HLAMap(HLA_list,'BLOSUM50')
#Model prediction
# TCR autoencoder: keep only the encoder half (output of layer -12).
TCR_encoder=load_model(model_dir+'/TCR_encoder_30.h5')
TCR_encoder=Model(TCR_encoder.input,TCR_encoder.layers[-12].output)
TCR_encoded_result=TCR_encoder.predict(TCR_array)
# pMHC encoder: trained with the custom Pearson-correlation metric.
HLA_antigen_encoder=load_model(model_dir+'/HLA_antigen_encoder_60.h5',custom_objects={'pearson_correlation_f': pearson_correlation_f})
HLA_antigen_encoder=Model(HLA_antigen_encoder.input,HLA_antigen_encoder.layers[-2].output)
HLA_antigen_encoded_result=HLA_antigen_encoder.predict([antigen_array,HLA_array])
# Persist intermediate encodings (deleted again at the end of the run).
TCR_encoded_matrix=pd.DataFrame(data=TCR_encoded_result,index=range(1,len(TCR_list)+1))
HLA_antigen_encoded_matrix=pd.DataFrame(data=HLA_antigen_encoded_result,index=range(1,len(HLA_list)+1))
allele_matrix=pd.DataFrame({'CDR3':TCR_list,'Antigen':antigen_list,'HLA':HLA_list},index=range(1,len(TCR_list)+1))
TCR_encoded_matrix.to_csv(output_dir+'/TCR_output.csv',sep=',')
HLA_antigen_encoded_matrix.to_csv(output_dir+'/MHC_antigen_output.csv',sep=',')
print('Encoding Accomplished.\n')
#########################################
#   make prediction based on encoding   #
#########################################
############## Load Prediction Model ################
#set up model
# Ternary classifier head: 30-d TCR encoding + 60-d pMHC encoding -> score.
hla_antigen_in=Input(shape=(60,),name='hla_antigen_in')
pos_in=Input(shape=(30,),name='pos_in')
ternary_layer1_pos=concatenate([pos_in,hla_antigen_in])
ternary_dense1=Dense(300,activation='relu')(ternary_layer1_pos)
ternary_do1=Dropout(0.2)(ternary_dense1)
ternary_dense2=Dense(200,activation='relu')(ternary_do1)
ternary_dense3=Dense(100,activation='relu')(ternary_dense2)
ternary_output=Dense(1,activation='linear')(ternary_dense3)
ternary_prediction=Model(inputs=[pos_in,hla_antigen_in],outputs=ternary_output)
#load weights
ternary_prediction.load_weights(model_dir+'/weights.h5')
################ read dataset #################
#read background negative TCRs
TCR_neg_df_1k=pd.read_csv(library_dir+'/bg_tcr_library/TCR_output_1k.csv',index_col=0)
TCR_neg_df_10k=pd.read_csv(library_dir+'/bg_tcr_library/TCR_output_10k.csv',index_col=0)
TCR_pos_df=pd.read_csv(output_dir+'/TCR_output.csv',index_col=0)
MHC_antigen_df=pd.read_csv(output_dir+'/MHC_antigen_output.csv',index_col=0)
################ make prediction #################
rank_output=[]
for each_data_index in range(TCR_pos_df.shape[0]):
    tcr_pos=TCR_pos_df.iloc[[each_data_index,]]
    pmhc=MHC_antigen_df.iloc[[each_data_index,]]
    #used the positive pair with 1k negative tcr to form a 1001 data frame for prediction
    TCR_input_df=pd.concat([tcr_pos,TCR_neg_df_1k],axis=0)
    MHC_antigen_input_df= pd.DataFrame(np.repeat(pmhc.values,1001,axis=0))
    prediction=ternary_prediction.predict({'pos_in':TCR_input_df,'hla_antigen_in':MHC_antigen_input_df})
    # The candidate pair is row 0; its percentile among the background
    # scores becomes the reported rank (smaller = stronger evidence).
    rank=1-(sorted(prediction.tolist()).index(prediction.tolist()[0])+1)/1000
    #if rank is higher than top 2% use 10k background TCR
    if rank<0.02:
        TCR_input_df=pd.concat([tcr_pos,TCR_neg_df_10k],axis=0)
        MHC_antigen_input_df= pd.DataFrame(np.repeat(pmhc.values,10001,axis=0))
        prediction=ternary_prediction.predict({'pos_in':TCR_input_df,'hla_antigen_in':MHC_antigen_input_df})
        rank=1-(sorted(prediction.tolist()).index(prediction.tolist()[0])+1)/10000
    rank_output.append(rank)
rank_output_matrix=pd.DataFrame({'CDR3':TCR_list,'Antigen':antigen_list,'HLA':HLA_list,'Rank':rank_output},index=range(1,len(TCR_list)+1))
rank_output_matrix.to_csv(output_dir + f'/{prediction_output}',sep=',')
print('Prediction Accomplished.\n')
log_file.close()
#delete encoding files
os.remove(output_dir+'/MHC_antigen_output.csv')
os.remove(output_dir+'/TCR_output.csv')
|
python
|
from django.test import TestCase
from hknweb.academics.tests.utils import ModelFactory
class QuestionModelTests(TestCase):
    """Smoke tests for the Question model."""

    def setUp(self):
        # fresh Question fixture for every test
        self.question = ModelFactory.create_question()

    def test_basic(self):
        """Placeholder: succeeds whenever the setUp fixture can be built."""
        pass
|
python
|
from modules.module_base import ModuleBase
import urllib.request
import json
from tools.limitator import Limitator, LimitatorLimitted, LimitatorMultiple
class ModuleAss(ModuleBase):
    """Bot module that replies to the "ass" command with a random image
    from the obutts.ru API, rate-limited per user."""

    def __init__(self, bot):
        ModuleBase.__init__(self, bot)
        self.name = "ModuleAss"
        self.Url = "http://api.obutts.ru/noise/1"
        self.mediaUrl = "http://media.obutts.ru"
        # layered per-user throttle: 5 calls/60s (with notification),
        # 50 calls/600s (silent)
        self.limitator = LimitatorMultiple(
            Limitator(5, 60, True),
            Limitator(50, 600, False),
        )

    def notify_command(self, message_id, from_attr, date, chat, commandName, commandStr):
        """Handle the "ass" command: fetch one random entry and send its
        preview image to the chat; other commands are ignored."""
        if commandName != "ass":
            return
        try:
            self.limitator.next(from_attr)
        except LimitatorLimitted:
            self.bot.sendMessage("Pas de petit cul pour toi !", chat["id"])
            return
        # Fix: close the HTTP response even on error (the original never
        # closed it, leaking the socket).
        with urllib.request.urlopen(self.Url) as response:
            str_response = response.read().decode('utf-8')
        objJSON = json.loads(str_response)
        print(objJSON)
        imgPath = objJSON[0]['preview']
        if len(imgPath) > 0:
            url = self.mediaUrl + "/" + imgPath
            self.bot.sendPhotoUrl(chat["id"], url, "Et un petit cul pour %s !" % from_attr["first_name"])

    def get_commands(self):
        """Return the (command, description) pairs this module provides."""
        return [
            ("ass", "Random pretty ass on demand"),
        ]
|
python
|
# @AUTHOR: Piplopp <https://github.com/Piplopp>
# @DATE: 05/2017
#
# @DESC This script generates a blank template for translation of the fire emblem
# heroes unit names, weapons, assit, special and passive skills for the
# feh-inheritance-tool <https://github.com/arghblargh/feh-inheritance-tool>.
#
# It will parse the english files in data/ to extract all fields to translate.
#
# This is written in Python3 (better forget about dinopython)
import os
import json
import argparse
from collections import OrderedDict
# ANSI escape codes used for colored terminal output
C_FILE = '\033[95m'     # magenta: file names
C_WARNING = '\033[93m'  # yellow: warnings
C_FAIL = '\033[91m'     # red: fatal messages
C_ENDC = '\033[0m'      # reset all attributes
############
# Misc #
############
def cli():
    """Build the command-line parser and return the parsed arguments."""
    arg_parser = argparse.ArgumentParser(
        description= 'Create a blank template for translation purpose or populate an existing one with new entries for update',
    )
    # -u validates its argument through existant_file, so a missing path
    # is rejected by argparse itself
    arg_parser.add_argument('-u', '--update',
                            metavar= 'file',
                            type= existant_file,
                            help= 'Update the given file with the new blank entries')
    arg_parser.add_argument('-v', '--verbose',
                            action= 'store_true',
                            help= 'Increase verbosity output')
    return arg_parser.parse_args()
def existant_file(filepath:str) -> str:
    """Argparse type: return *filepath* unchanged, or raise an
    ArgumentTypeError when it does not exist on disk."""
    if os.path.exists(filepath):
        return filepath
    raise argparse.ArgumentTypeError(
        "file {} doesn't exists".format(C_FILE + filepath + C_ENDC)
    )
def sort_OD(od):
    """Return a copy of *od* as an OrderedDict sorted by key, sorting any
    nested dict values recursively as well."""
    sorted_copy = OrderedDict()
    for key in sorted(od):
        value = od[key]
        sorted_copy[key] = sort_OD(value) if isinstance(value, dict) else value
    return sorted_copy
#############################
# Template generation #
#############################
def main(update, verbose, data_dir='data/'):
    """Generate a blank translation template, or update an existing one.

    When *update* is falsy a fresh template is written to
    lang/template.json (asking before overwriting an existing file);
    otherwise the file named by *update* is merged with new blank entries
    and rewritten in place.
    """
    # Verifications
    if not update:
        fname = 'lang/template.json'
        # Check if a blank template already exists
        if os.path.exists(fname):
            s = (C_WARNING + 'WARNING: ' + C_ENDC + 'A template file seems to already exists: '
                + C_FILE + fname + C_ENDC)
            print(s)
            a = input('(A)bort or (O)verride (default abort): ').lower()
            # Fix: the prompt promises "default abort", but the original
            # tested `a == 'a'`, so just pressing Enter silently overrode
            # the file. Abort on anything except an explicit 'o'.
            if a != 'o':
                print(C_FAIL + 'Aborted' + C_ENDC)
                exit(-1)
    # NOTE: Don't need to check if update file exists, parser already did this
    # Processing
    dict_out = _get_data(verbose, data_dir)
    if update:
        dict_out = _update_lang_data(update, dict_out)
        with open(update, 'w') as outfile:
            if verbose: print("Writing json to " + C_FILE + update + C_ENDC)
            json.dump(dict_out, outfile, indent=4, sort_keys=False, ensure_ascii=False)
    else: # generate template
        with open(fname, 'w') as outfile:
            if verbose: print("Writing json to " + C_FILE + fname + C_ENDC)
            json.dump(dict_out, outfile, indent=4, sort_keys=False, ensure_ascii=False)
def _update_lang_data(update, new):
    """Merge the existing translation file *update* into the freshly
    generated blank entries *new* (existing translations win), then sort
    every entry recursively. Returns the mutated *new*."""
    with open(update, 'r') as f:
        existing = json.load(f)
    new.update(existing)
    for key in new:
        new[key] = sort_OD(new[key])
    return new
def _get_data(verbose, data_dir):
    """Generate or update the blank template.

    Collects blank entries from units, weapons/assists/specials and passives.
    """
    merged = OrderedDict()
    for chunk in (_process_units(data_dir, verbose),
                  _process_default(data_dir, verbose),
                  _process_passives(data_dir, verbose)):
        merged.update(chunk)
    return merged
def _process_default(data_dir, verbose):
    """
    Process assists, specials and weapons files.
    Return order is:
        Weapons, Assists, Specials
    """
    base_files = ( 'weapons.json', 'assists.json', 'specials.json' )
    dict_out = OrderedDict()
    for f in base_files:
        if verbose: print('Processing ' + C_FILE + data_dir+f + C_ENDC + '...')
        with open(data_dir+f) as infile:
            dict_in = json.load(infile)
        # Blank 'effect'/'name' fields for every entry of the current file.
        # NOTE: the comprehension variable `f` shadows the file-name loop
        # variable, but only inside the comprehension's own scope (Python 3).
        tmp_dict = {
            entry: {
                f: "" for f in ('effect', 'name')
            } for entry,fields in dict_in.items()
        }
        # Sort effect and name fields
        # tmp_dict = sort_OD(tmp_dict)
        # Add sorted new entry (weapons, assists, specials) to global dict
        dict_out.update(OrderedDict(sorted(tmp_dict.items(), key=lambda t: t[0])))
    return dict_out
def _process_units(data_dir, verbose):
    """
    Process units file, sorted by their names
    """
    path = data_dir + 'units.json'
    if verbose:
        print('Processing ' + C_FILE + path + C_ENDC + '...')
    with open(path) as infile:
        units = json.load(infile)
    blank = {unit: {"name": ""} for unit in units}
    return sort_OD(blank)
def _process_passives(data_dir, verbose):
    """
    Process passives file, sorted
    Return order is:
        Passive A, B, C
    """
    if verbose: print('Processing ' + C_FILE + data_dir+'passives.json' + C_ENDC + '...')
    with open(data_dir+'passives.json') as infile:
        dict_in = json.load(infile)
    # One sub-dict per passive slot keyed "PASSIVE_<SLOT>", each entry getting
    # blank 'effect' and 'name' fields to be translated.
    dict_out = {
        "PASSIVE_"+passives_type.upper(): {
            entry: {
                f: "" for f in ('effect', 'name')
            } for entry, fields in passives.items()
        } for passives_type, passives in dict_in.items()
    }
    dict_out = sort_OD(dict_out) # Sort recursively all levels
    # concat PASSIVE_A,B,C if no structure needed
    tmp_dict = OrderedDict()
    for entry in dict_out:
        tmp_dict.update(dict_out[entry])
    dict_out = tmp_dict
    return dict_out
if __name__ == '__main__':
    args = cli()
    if args.verbose:
        print('ARGS: ' + str(args))
    # (removed a dead `if args.update: pass` block — it had no effect)
    main(update=args.update, verbose=args.verbose)
|
python
|
from . import *
class AWS_OpsWorksCM_Server_EngineAttribute(CloudFormationProperty):
    """Maps the CloudFormation OpsWorksCM EngineAttribute property to Terraform."""

    def write(self, w):
        # Emit a terraform `engine_attribute` block holding the Name/Value pair.
        with w.block("engine_attribute"):
            self.property(w, "Value", "value", StringValueConverter())
            self.property(w, "Name", "name", StringValueConverter())
class AWS_OpsWorksCM_Server(CloudFormationResource):
    """Maps an AWS::OpsWorksCM::Server CloudFormation resource to Terraform."""

    cfn_type = "AWS::OpsWorksCM::Server"
    tf_type = "aws_ops_works_cm_server"  # TODO: Most likely not working
    ref = "id"
    # CloudFormation attribute name -> Terraform attribute name.
    attrs = {
        "Endpoint": "endpoint",
        "Arn": "arn",
    }

    def write(self, w):
        # Emit one terraform property per CloudFormation property.
        with self.resource_block(w):
            self.property(w, "KeyPair", "key_pair", StringValueConverter())
            self.property(w, "EngineVersion", "engine_version", StringValueConverter())
            self.property(w, "ServiceRoleArn", "service_role_arn", StringValueConverter())
            self.property(w, "DisableAutomatedBackup", "disable_automated_backup", BasicValueConverter())
            self.property(w, "BackupId", "backup_id", StringValueConverter())
            self.property(w, "EngineModel", "engine_model", StringValueConverter())
            self.property(w, "PreferredMaintenanceWindow", "preferred_maintenance_window", StringValueConverter())
            self.property(w, "AssociatePublicIpAddress", "associate_public_ip_address", BasicValueConverter())
            self.property(w, "InstanceProfileArn", "instance_profile_arn", StringValueConverter())
            self.property(w, "CustomCertificate", "custom_certificate", StringValueConverter())
            self.property(w, "PreferredBackupWindow", "preferred_backup_window", StringValueConverter())
            self.property(w, "SecurityGroupIds", "security_group_ids", ListValueConverter(StringValueConverter()))
            self.property(w, "SubnetIds", "subnet_ids", ListValueConverter(StringValueConverter()))
            self.property(w, "CustomDomain", "custom_domain", StringValueConverter())
            self.property(w, "CustomPrivateKey", "custom_private_key", StringValueConverter())
            self.property(w, "ServerName", "server_name", StringValueConverter())
            self.repeated_block(w, "EngineAttributes", AWS_OpsWorksCM_Server_EngineAttribute)
            self.property(w, "BackupRetentionCount", "backup_retention_count", BasicValueConverter())
            self.property(w, "InstanceType", "instance_type", StringValueConverter())
            self.property(w, "Tags", "tags", ListValueConverter(ResourceTag()))
            self.property(w, "Engine", "engine", StringValueConverter())
|
python
|
#!/usr/bin/env python
from PIL import Image, ImageDraw
import random as pyrandom
import sys
import os.path
import shutil
import re
import subprocess
import numpy as np
from glob import glob
import argparse
# from matplotlib.pyplot import imread
from scipy.ndimage import filters, interpolation, morphology, measurements
# from scipy.ndimage.filters import gaussian_filter, uniform_filter, maximum_filter
from scipy import stats
from scipy.misc import imsave
import ocrolib
from ocrolib import hocr, common, psegutils, morph, sl
from ocrolib.toplevel import *
"""
Construct an HTML output file in hOCR format by putting together
the recognition results for each page in sequence.
You should usually invoke this program as
ocropus-hocr 'book/????.bin.png'
For each page like 'book/0001.bin.png', it uses the following files:
book/0001.bin.png # page image
book/0001.pseg.png # page segmentation
book/0001/010001.txt # recognizer output for lines
# perform binarization
./ocropus-nlbin tests/ersch.png -o book
# perform page layout analysis
./ocropus-gpageseg 'book/????.bin.png'
"""
# Page-segmentation tuning parameters (module-level defaults).
maxlines = 300  # maximum number of text lines per page
noise = 8  # noise threshold for removing small components from lines
pad = 3  # padding for extracted line
expand = 3  # expand mask for grayscale extraction
gray = False  # output grayscale lines as well which are extracted from the grayscale version of the pages
def main():
    """Parse CLI arguments and process each input PDF, smallest file first."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-s", "--start", default=-1, type=int,
                        help="first page in PDF")
    parser.add_argument("-e", "--end", default=-1, type=int,
                        help="last page in PDF")
    parser.add_argument("-n", "--needed", default=1, type=int,
                        help="min number of pages required")
    parser.add_argument("files", nargs="+",
                        help="input files; glob and @ expansion performed")
    parser.add_argument("-f", "--force", action="store_true",
                        help="force processing of PDF file")
    args = parser.parse_args()
    # inFile = args.input
    os.makedirs(outPdfRoot, exist_ok=True)
    pdfFiles = args.files
    # Process smallest PDFs first so quick results appear early.
    pdfFiles.sort(key=lambda k: (os.path.getsize(k), k))
    processedFiles = []
    for i, inFile in enumerate(pdfFiles):
        print("-" * 80)
        if not processPdfFile(inFile, args.start, args.end, args.needed, args.force):
            continue
        processedFiles.append(inFile)
        print("Processed %d (%d of %d): %s" % (len(processedFiles), i + 1, len(pdfFiles), inFile))
    print("=" * 80)
    print("Processed %d files %s" % (len(processedFiles), processedFiles))
outPdfRoot = "pdf.output"  # root directory for all generated output
def processPdfFile(pdfFile, start, end, needed, force):
    """Rasterize *pdfFile* with Ghostscript and process each rendered page.

    start/end: inclusive page range to process (-1 means unbounded).
    needed: minimum number of pages required before `end` is enforced.
    force: reprocess even when the output PDF already exists.
    Returns True when at least one page was processed and the PDF copied
    to the output directory, False when the file was skipped.
    """
    assert needed >= 0, needed
    baseName = os.path.basename(pdfFile)
    baseBase, _ = os.path.splitext(baseName)
    outPdfFile = os.path.join(outPdfRoot, baseName)
    outRoot = os.path.join(outPdfRoot, baseBase)
    if not force and os.path.exists(outPdfFile):
        print("%s exists. skipping" % outPdfFile)
        return False
    os.makedirs(outRoot, exist_ok=True)
    retval = runGhostscript(pdfFile, outRoot)
    assert retval == 0
    # BUG FIX: a leftover debugging `return True` used to sit here, which made
    # everything below (the actual page processing and the final copyfile)
    # unreachable.
    fileList = glob(os.path.join(outRoot, "doc-*.png"))
    fileList.sort()
    print("fileList=%d %s" % (len(fileList), fileList))
    numPages = 0
    for fileNum, origFile in enumerate(fileList):
        page, ok = pageNum(origFile)
        print("#### page=%s ok=%s" % (page, ok))
        if not ok:
            continue
        if start >= 0 and page < start:
            print("@1", [start, end])
            continue
        if end >= 0 and page > end:
            # Past `end`, but keep going until the minimum page count is met.
            if not (needed >= 0 and numPages < needed):
                print("@2", [start, end], [numPages, needed])
                continue
        print("@31", start, end)
        if processPngFile(outRoot, origFile, fileNum):
            numPages += 1
    # BUG FIX: removed `assert numPages > 0`, which contradicted the graceful
    # "no pages processed" fallback immediately below.
    if numPages == 0:
        print("~~ No pages processed")
        return False
    shutil.copyfile(pdfFile, outPdfFile)
    return True
# Ghostscript per-page output naming: write format and matching parse regex.
gsImageFormat = "doc-%03d.png"
gsImagePattern = r"^doc\-(\d+).png$"
gsImageRegex = re.compile(gsImagePattern)
def pageNum(pngPath):
    """Extract the page number from a Ghostscript output filename.

    Returns (number, True) on a match, (0, False) otherwise.
    """
    base = os.path.basename(pngPath)
    match = gsImageRegex.search(base)
    print("pageNum:", pngPath, base, match)
    if match is None:
        return 0, False
    return int(match.group(1)), True
def runGhostscript(pdf, outputDir):
    """Rasterize *pdf* into per-page PNG and JPEG images in *outputDir*."""
    retval = runGhostscriptDevice(pdf, outputDir, "png16m", gsImageFormat)
    assert retval == 0
    retval = runGhostscriptDevice(pdf, outputDir, "jpeg", "doc-%03d.jpg")
    assert retval == 0
    return retval
def runGhostscriptDevice(pdf, outputDir, device, gsImageFormat):
    """runGhostscript runs Ghostscript on file `pdf` to create file one png file per page in
    directory `outputDir`.

    device: Ghostscript output device name (e.g. "png16m", "jpeg").
    gsImageFormat: printf-style filename pattern for the per-page output.
    Returns the Ghostscript process exit code.
    """
    print("runGhostscript: pdf=%s outputDir=%s" % (pdf, outputDir))
    outputPath = os.path.join(outputDir, gsImageFormat)
    deviceArg = "-sDEVICE=%s" % device
    output = "-sOutputFile=%s" % outputPath
    # 300 dpi render; alpha bits at 1 to keep text edges crisp for OCR.
    cmd = ["gs",
           "-dSAFER",
           "-dBATCH",
           "-dNOPAUSE",
           "-r300",
           deviceArg,
           "-dTextAlphaBits=1",
           "-dGraphicsAlphaBits=1",
           output,
           pdf]
    print("runGhostscript: cmd=%s" % cmd)
    print("%s" % ' '.join(cmd))
    os.makedirs(outputDir, exist_ok=True)
    # p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    p = subprocess.Popen(cmd, shell=False)
    retval = p.wait()
    print("retval=%d" % retval)
    print("%s" % ' '.join(cmd))
    print(" outputDir=%s" % outputDir)
    print("outputPath=%s" % outputPath)
    assert os.path.exists(outputDir)
    return retval
def processPngFile(outRoot, origFile, fileNum):
    """Binarize, segment and annotate one rendered page image.

    Copies *origFile* into its own work directory, binarizes it, computes a
    line segmentation, writes the individual line images, then draws the
    detected region boxes onto the page and saves the annotated image.
    Returns True on success, False when the page is skipped.
    """
    baseName = os.path.basename(origFile)
    baseBase, _ = os.path.splitext(baseName)
    # Per-page work directory: <outRoot>/<page>.<fileNum>/
    outDir = os.path.join(outRoot, "%s.%03d" % (baseBase, fileNum))
    inFile = os.path.join(outDir, baseName)
    os.makedirs(outDir, exist_ok=True)
    shutil.copy(origFile, inFile)
    inBase, _ = ocrolib.allsplitext(inFile)
    print("** inBase=%s" % inBase)
    # print("** binBase=%s" % binBase)
    fname = inFile
    outputdir = inBase
    binFile = inBase + ".bin.png"
    outFile = inBase + ".out.png"
    outRoot2, outDir2 = os.path.split(outRoot)
    outFile2 = os.path.join(outRoot2, "%s.out" % outDir2, baseName)
    print("outFile2=%s" % outFile2)
    # assert False
    grayFile = inBase + ".nrm.png"
    psegFile = inBase + ".pseg.png"
    print(" inFile=%s" % inFile)
    print(" binFile=%s" % binFile)
    print("grayFile=%s" % grayFile)
    print(" outFile=%s" % outFile)
    assert inFile and binFile
    assert outFile != inFile
    assert outFile != binFile
    if not binarize(inFile, binFile, grayFile):
        binExists = os.path.exists(binFile)
        print("Couldn't binarize inFile=%s binFile=%s exists=%s" % (inFile, binFile, binExists))
        return False
    binary = ocrolib.read_image_binary(binFile)
    print("$$ %s=%s" % (binFile, desc(binary)))
    height, width = binary.shape
    checktype(binary, ABINARY2)
    # Sanity-check the (inverted) page before segmenting.
    check = check_page(np.amax(binary) - binary)
    if check is not None:
        print("%s SKIPPED %s (use -n to disable this check)" % (inFile, check))
        return False
    # if args.gray:
    # if os.path.exists(base+".nrm.png"):
    # gray = ocrolib.read_image_gray(base+".nrm.png")
    # checktype(gray, GRAYSCALE)
    # else:
    # print_error("Grayscale version %s.nrm.png not found. Use ocropus-nlbin for creating " +
    # "normalized grayscale version of the pages as well." % base)
    # return
    binary = 1 - binary  # invert so that ink is 1, background 0
    scale = psegutils.estimate_scale(binary)
    print("scale %f" % scale)
    if np.isnan(scale) or scale > 1000.0:
        print("%s: bad scale (%g); skipping\n" % (fname, scale))
        return False
    # find columns and text lines
    print("computing segmentation")
    segmentation = compute_segmentation(binary, scale)
    if np.amax(segmentation) > maxlines:
        print("%s: too many lines %g" % (fname, np.amax(segmentation)))
        return False
    print("segmentation=%s" % desc(segmentation))
    print("number of lines %g" % np.amax(segmentation))
    # compute the reading order
    print("finding reading order")
    lines = psegutils.compute_lines(segmentation, scale)
    order = psegutils.reading_order([l.bounds for l in lines])
    lsort = psegutils.topsort(order)
    print("$$ lsort = %d = %s...%s" % (len(lsort), lsort[:10], lsort[-10:]))
    # renumber the labels so that they conform to the specs
    nlabels = np.amax(segmentation) + 1
    renumber = np.zeros(nlabels, 'i')
    for i, v in enumerate(lsort):
        renumber[lines[v].label] = 0x010000+(i+1)
    segmentation = renumber[segmentation]
    # finally, output everything
    print("writing lines")
    if not os.path.exists(outputdir):
        os.mkdir(outputdir)
    lines = [lines[i] for i in lsort]
    ocrolib.write_page_segmentation("%s.pseg.png" % outputdir, segmentation)
    cleaned = ocrolib.remove_noise(binary, noise)
    for i, l in enumerate(lines):
        binline = psegutils.extract_masked(1-cleaned, l, pad=pad, expand=expand)
        ocrolib.write_image_binary("%s/01%04x.bin.png" % (outputdir, i+1), binline)
        # if args.gray:
        # grayline = psegutils.extract_masked(
        # gray, l, pad=args.pad, expand=args.expand)
        # ocrolib.write_image_gray("%s/01%04x.nrm.png" % (outputdir, i+1), grayline)
    print("%6d %s %4.1f %d" % (i, fname, scale, len(lines)))
    # to proceed, we need a pseg file and a subdirectory containing text lines
    assert os.path.exists(psegFile), "%s: no such file" % psegFile
    assert os.path.isdir(inBase), "%s: no such directory" % inBase
    # iterate through the text lines in reading order, based on the page segmentation file
    pseg = ocrolib.read_page_segmentation(psegFile)
    print("$$ %s=%s" % (psegFile, desc(pseg)))
    regions = ocrolib.RegionExtractor()
    print("$$ regions=%s" % regions)
    regions.setPageLines(pseg)
    im = Image.open(inFile)
    print("~~%s %s" % (inFile, im.size))
    print("$$ regions=%s=%s" % (regions, sorted(regions.__dict__)))
    print("$$ regions.length=%s" % regions.length())
    n = regions.length()
    # Draw each detected line's bounding box on the page image.
    for i in range(1, n):
        id = regions.id(i)
        y0, x0, y1, x1 = regions.bbox(i)
        # print("%5d: 0x%05X %s %d x %d" %
        # (i, id, [y0, x0, y1, x1], y1 - y0, x1 - x0))
        draw = ImageDraw.Draw(im)
        draw.rectangle((x0, y0, x1, y1), outline=(255, 0, 0), width=3)
        draw.rectangle((x0, y0, x1, y1), outline=(0, 0, 255), width=0)
        # draw.rectangle((x0, y0, x1, y1), outline=255, width=5)
        # draw.rectangle((x0, y0, x1, y1), outline=10, width=1)
        del draw
    # write output files
    print("outFile=%s" % outFile)
    im.save(outFile, "PNG")
    print("outFile2=%s" % outFile2)
    outDir2 = os.path.dirname(outFile2)
    os.makedirs(outDir2, exist_ok=True)
    im.save(outFile2, "PNG")
    assert os.path.exists(outFile2)
    # outFile3, _ = os.path.splitext(outFile)
    # outFile3 = "%s.jpg" % outFile3
    # print("outFile3=%s" % outFile3)
    # im.save(outFile3, "JPEG")
    # assert os.path.exists(outFile3)
    return True
def compute_segmentation(binary, scale):
    """Given a binary image, compute a complete segmentation into lines, computing both columns and
    text lines.

    Returns an int label image where each text line has a distinct label.
    """
    print("$$ compute_segmentation: %s %g" % (desc(binary), scale))
    binary = np.array(binary, 'B')
    # start by removing horizontal black lines, which only interfere with the rest of the page
    # segmentation
    binary = remove_hlines(binary, scale)
    # do the column finding
    print("computing column separators")
    colseps, binary = compute_colseps(binary, scale)
    # now compute the text line seeds
    print("computing lines")
    bottom, top, boxmap = compute_gradmaps(binary, scale)
    seeds = compute_line_seeds(binary, bottom, top, colseps, scale)
    print("seeds=%s" % desc(seeds))
    DSAVE("seeds", [bottom, top, boxmap])
    # spread the text line seeds to all the remaining components
    print("propagating labels")
    llabels = morph.propagate_labels(boxmap, seeds, conflict=0)
    print("spreading labels: llabels=%s" % desc(llabels))
    spread = morph.spread_labels(seeds, maxdist=scale)
    # components untouched by propagation take the label of the nearest seed
    llabels = np.where(llabels > 0, llabels, spread*binary)
    segmentation = llabels * binary
    print("$$ llabels: %s" % desc(llabels))
    print("$$ segmentation: %s" % desc(segmentation))
    return segmentation
################################################################
### Text Line Finding.
###
### This identifies the tops and bottoms of text lines by
### computing gradients and performing some adaptive thresholding.
### Those components are then used as seeds for the text lines.
################################################################
def compute_gradmaps(binary, scale, hscale=1.0, vscale=1.0, usegauss=False):
    """Compute bottom/top gradient maps used to seed text lines.

    usegauss: use gaussian instead of uniform filtering.
    Returns (bottom, top, boxmap): normalized maps of baseline (bottom)
    and x-height (top) evidence, plus the character-box map.
    """
    # use gradient filtering to find baselines
    boxmap = psegutils.compute_boxmap(binary, scale)
    DSAVE("boxmap", boxmap)
    cleaned = boxmap * binary
    DSAVE("cleaned", cleaned)
    if usegauss:
        # this uses Gaussians
        grad = filters.gaussian_filter(1.0*cleaned, (vscale*0.3*scale, hscale*6*scale), order=(1, 0))
    else:
        # this uses non-Gaussian oriented filters
        grad = filters.gaussian_filter(1.0*cleaned,
                                       (max(4, vscale*0.3*scale), hscale*scale),
                                       order=(1, 0))
        grad = filters.uniform_filter(grad, (vscale, hscale*6*scale))
    # negative vertical gradient = bottoms of strokes, positive = tops
    bottom = ocrolib.norm_max((grad < 0)*(-grad))
    top = ocrolib.norm_max((grad > 0)*grad)
    return bottom, top, boxmap
def compute_line_seeds(binary, bottom, top, colseps, scale, threshold=0.2, vscale=1.0):
    """Based on gradient maps, computes candidates for baselines and xheights. Then, it marks the
    regions between the two as a line seed.

    Returns a labeled seed image (one label per candidate line).
    """
    t = threshold
    vrange = int(vscale*scale)
    # local maxima of the bottom (baseline) map, masked by column separators
    bmarked = filters.maximum_filter(bottom == filters.maximum_filter(bottom, (vrange, 0)), (2, 2))
    # NOTE(review): the threshold multiplies by t twice (t*amax*t), i.e. t^2;
    # this matches upstream ocropus but looks deliberate-odd — confirm.
    bmarked = bmarked * (bottom > t*np.amax(bottom)*t) * (1-colseps)
    tmarked = filters.maximum_filter(top == filters.maximum_filter(top, (vrange, 0)), (2, 2))
    tmarked = tmarked*(top > t*np.amax(top)*t/2)*(1-colseps)
    tmarked = filters.maximum_filter(tmarked, (1, 20))
    seeds = np.zeros(binary.shape, 'i')
    delta = max(3, int(scale/2))
    # For each column of pixels, pair baseline marks (1) with the x-height
    # marks (0) above them and fill the band in between as seed pixels.
    for x in range(bmarked.shape[1]):
        transitions = sorted([(y, 1) for y in find(bmarked[:, x])] +
                             [(y, 0) for y in find(tmarked[:, x])])[::-1]
        transitions += [(0, 0)]
        for l in range(len(transitions)-1):
            y0, s0 = transitions[l]
            if s0 == 0:
                continue
            seeds[y0-delta:y0, x] = 1
            y1, s1 = transitions[l+1]
            if s1 == 0 and (y0-y1) < 5*scale:
                seeds[y1:y0, x] = 1
    seeds = filters.maximum_filter(seeds, (1, int(1+scale)))
    seeds = seeds*(1-colseps)
    DSAVE("lineseeds", [seeds, 0.3*tmarked+0.7*bmarked, binary])
    seeds, _ = morph.label(seeds)
    return seeds
####
def remove_hlines(binary, scale, maxsize=10):
    """Zero out connected components wider than maxsize*scale (horizontal rules)."""
    labels, _ = morph.label(binary)
    objects = morph.find_objects(labels)
    for i, b in enumerate(objects):
        if sl.width(b) > maxsize * scale:
            # labels are 1-based, so component i has label i+1
            labels[b][labels[b] == i+1] = 0
    return np.array(labels != 0, 'B')
def find(condition):
    """Return the indices where ravel(condition) is true"""
    flat = np.ravel(condition)
    (indices,) = np.nonzero(flat)
    return indices
def check_page(image):
    """Sanity-check that *image* plausibly is a scanned page.

    Returns None when the image looks acceptable, otherwise a short
    human-readable reason string for skipping it.
    """
    if len(image.shape) == 3:
        return "input image is color image %s" % (image.shape,)
    if np.mean(image) < np.median(image):
        return "image may be inverted %s" % desc(image)
    h, w = image.shape
    if h < 600:
        return "image not tall enough for a page image %s" % list(image.shape)
    if h > 10000:
        return "image too tall for a page image %s" % list(image.shape)
    if w < 600:
        return "image too narrow for a page image %s" % list(image.shape)
    if w > 10000:
        # BUG FIX: message said "line too wide" (copy-paste from a line
        # checker); this function checks whole pages.
        return "image too wide for a page image %s" % list(image.shape)
    return None
def compute_colseps(binary, scale, maxcolseps=3, maxseps=0):
    """Computes column separators either from vertical black lines or whitespace.

    Returns (colseps, binary); binary may have black separators removed.
    """
    print("considering at most %g whitespace column separators" % maxcolseps)
    colseps = compute_colseps_conv(binary, scale)
    # DSAVE("colwsseps", 0.7*colseps+0.3*binary)
    if maxseps > 0:  # black-line separators are disabled by default (maxseps=0)
        # NOTE(review): compute_separators_morph is not defined in this file;
        # presumably provided by the ocrolib star imports — verify before
        # enabling maxseps > 0.
        print("considering at most %g black column separators" % maxseps)
        seps = compute_separators_morph(binary, scale)
        # DSAVE("colseps", 0.7*seps+0.3*binary)
        #colseps = compute_colseps_morph(binary,scale)
        colseps = np.maximum(colseps, seps)
        binary = np.minimum(binary, 1 - seps)
    # binary, colseps = apply_mask(binary, colseps) !@#$
    return colseps, binary
def compute_colseps_conv(binary, scale=1.0, csminheight=10, maxcolseps=3):
    """Find column separators by convolution and thresholding.
    csminheight: minimum column height (units=scale)
    maxcolseps: maximum # whitespace column separators
    """
    h, w = binary.shape
    # find vertical whitespace by thresholding
    assert np.array_equal(binary, 1.0*binary)
    smoothed = filters.gaussian_filter(binary, sigma=(scale, scale*0.5))
    smoothed = filters.uniform_filter(smoothed, size=(5.0*scale, 1))
    thresh = smoothed < np.amax(smoothed)*0.1
    DSAVE("1thresh", thresh)
    # find column edges by filtering
    grad = filters.gaussian_filter(binary, (scale, scale*0.5), order=(0, 1))
    grad = filters.uniform_filter(grad, (10.0*scale, 1))
    # grad = abs(grad) # use this for finding both edges
    grad = (grad > 0.5*np.amax(grad))
    DSAVE("2grad", grad)
    # combine edges and whitespace
    seps = np.minimum(thresh, filters.maximum_filter(grad, (int(scale), int(5*scale))))
    seps = filters.maximum_filter(seps, (int(2*scale), 1))
    DSAVE("3seps", seps)
    # select only the biggest column separators
    seps = morph.select_regions(seps, sl.dim0, min=csminheight*scale, nbest=maxcolseps)
    DSAVE("4seps", seps)
    return seps
def apply_mask(binary, colseps):
    """Merge a user-supplied column mask (<base>.mask.png) into colseps.

    NOTE(review): this function appears disabled — `base` is not defined
    anywhere in this file (the open would raise NameError), the `return`
    after `raise` is unreachable, and the only call site (in
    compute_colseps) is commented out with a "!@#$" marker.
    """
    try:
        mask = ocrolib.read_image_binary(base+".mask.png")
    except IOError:
        raise # !@#$
        return binary, colseps
    masked_seps = np.maximum(colseps, mask)
    binary = np.minimum(binary, 1-masked_seps)
    # DSAVE("masked_seps", masked_seps)
    return binary, masked_seps
def normalize_raw_image(raw):
    """Perform image normalization: scale *raw* to floats in [0, 1].

    Returns None when the image is constant (empty page).
    """
    image = raw - np.amin(raw)
    if np.amax(image) == np.amin(image):
        # print("# image is empty: %s" % (fname))
        return None
    # BUG FIX: use true division instead of `image /= ...` — the in-place
    # form raises a casting TypeError when *raw* has an integer dtype.
    image = image / np.amax(image)
    return image
def estimate_local_whitelevel(image, zoom=0.5, perc=80, size=20):
    """flatten image by estimating the local whitelevel
    zoom for page background estimation, smaller=faster
    percentage for filters
    size for filters
    """
    # estimate the background on a downscaled copy, then zoom back up
    m = interpolation.zoom(image, zoom)
    m = filters.percentile_filter(m, perc, size=(size, 2))
    m = filters.percentile_filter(m, perc, size=(2, size))
    m = interpolation.zoom(m, 1.0/zoom)
    # the zoomed background can differ in size by a pixel; crop both to the
    # common extent (NOTE: names `w, h` actually hold dims 0 and 1)
    w, h = np.minimum(np.array(image.shape), np.array(m.shape))
    flat = np.clip(image[:w, :h] - m[:w, :h] + 1, 0, 1)
    return flat
def estimate_skew(flat, bignore=0.1, maxskew=2, skewsteps=8):
    """estimate skew angle and rotate

    Returns (deskewed_image, angle_in_degrees).
    NOTE(review): estimate_skew_angle is not defined in this file;
    presumably provided by the ocrolib star imports — verify before use
    (the only call site is commented out in binarize).
    """
    d0, d1 = flat.shape
    o0, o1 = int(bignore*d0), int(bignore*d1) # border ignore
    flat = np.amax(flat)-flat
    flat -= np.amin(flat)
    est = flat[o0:d0-o0, o1:d1-o1]
    ma = maxskew
    ms = int(2*maxskew*skewsteps)
    # print(linspace(-ma,ma,ms+1))
    angle = estimate_skew_angle(est, np.linspace(-ma, ma, ms+1))
    flat = interpolation.rotate(flat, angle, mode='constant', reshape=0)
    flat = np.amax(flat)-flat
    return flat, angle
def estimate_thresholds(flat, bignore=0.1, escale=1.0, lo=5, hi=90):
    """# estimate low and high thresholds
    bignore: ignore this much of the border for threshold estimation
    escale: for estimating a mask over the text region
    lo: percentile for black estimation
    hi: percentile for white estimation

    Returns (lo_value, hi_value, ok_flag).
    """
    d0, d1 = flat.shape
    o0, o1 = int(bignore*d0), int(bignore*d1)
    est = flat[o0:d0-o0, o1:d1-o1]
    if escale > 0:
        # by default, we use only regions that contain significant variance; this makes the
        # percentile-based low and high estimates more reliable
        e = escale
        v = est - filters.gaussian_filter(est, e*20.0)
        v = filters.gaussian_filter(v**2, e*20.0)**0.5
        v = v > 0.3*np.amax(v)
        v = morphology.binary_dilation(v, structure=np.ones((int(e*50), 1)))
        v = morphology.binary_dilation(v, structure=np.ones((1, int(e*50))))
        est = est[v]
    print(" lo=%g hi=%g" % (lo, hi))
    try:
        lo2 = np.percentile(est.ravel(), lo)
        hi2 = np.percentile(est.ravel(), hi)
    except IndexError as e:
        # percentile can fail when the variance mask selected no pixels
        print("error=%s" % e)
        return 0, 0, False
    print("lo2=%g hi2=%g" % (lo2, hi2))
    return lo2, hi2, True
# Binarization defaults (mirror ocropus-nlbin's command-line defaults).
zoom = 0.5  # zoom for page background estimation, smaller=faster
perc = 80  # percentage for the percentile filters
size = 20  # size for the percentile filters
bignore = 0.1  # fraction of the border ignored for threshold estimation
escale = 1.0  # scale for estimating a mask over the text region
defLo = 5  # percentile for black estimation
defHi = 90  # percentile for white estimation
threshold = 0.5  # final binarization threshold
def binarize(inFile, binFile, grayFile):
    """Binarize *inFile*, writing the thresholded image to *binFile* and the
    flattened grayscale to *grayFile*. Returns True on success, False when
    the page is skipped.
    """
    print("binarize: inFile=%s binFile=%s grayFile=%s" %(inFile, binFile, grayFile))
    fname = inFile
    raw = ocrolib.read_image_gray(inFile)
    # perform image normalization
    image = normalize_raw_image(raw)
    if image is None:
        print("!! # image is empty: %s" % (inFile))
        # NOTE(review): this assert fires before the graceful return below,
        # so empty images crash instead of being skipped — debug leftover?
        assert False
        return False
    check = check_page(np.amax(image) - image)
    if check is not None:
        print(inFile+" SKIPPED "+check+"(use -n to disable this check)")
        # assert False
        return False
    # check whether the image is already effectively binarized
    extreme = (np.sum(image < 0.05) + np.sum(image > 0.95)) / np.prod(image.shape)
    if extreme > 0.95:
        comment = "no-normalization"
        flat = image
    else:
        comment = ""
        # if not, we need to flatten it by estimating the local whitelevel
        print("flattening")
        flat = estimate_local_whitelevel(image, zoom, perc, size)
    print("comment=%r extreme=%s" % (comment, extreme))
    print("image=%s" % desc(image))
    print(" flat=%s" % desc(flat))
    # assert False
    # estimate skew angle and rotate
    # print("estimating skew angle")
    # flat, angle = estimate_skew(flat, args.bignore, args.maxskew, args.skewsteps)
    angle = 0.0
    # estimate low and high thresholds
    print("estimating thresholds")
    lo, hi, ok = estimate_thresholds(flat, bignore, escale, defLo, defHi)
    if not ok:
        return False
    print("lo=%5.3f (%g)" % (lo, defLo))
    print("hi=%5.3f (%g)" % (hi, defHi))
    # rescale the image to get the gray scale image
    print("rescaling")
    flat -= lo
    flat /= (hi-lo)
    flat = np.clip(flat, 0, 1)
    bin = flat > threshold
    # output the normalized grayscale and the thresholded images
    print("%s lo-hi (%.2f %.2f) angle %4.1f %s" % (fname, lo, hi, angle, comment))
    print("##1 flat=%s" % desc(flat))
    print("##2 bin=%s" % desc(bin))
    print("writing %s" % binFile)
    ocrolib.write_image_binary(binFile, bin)
    ocrolib.write_image_gray(grayFile, flat)
    return True
debug = False  # set True to dump intermediate images for debugging


def DSAVE(title, image):
    """Save a debug image as _<title>.png when the `debug` flag is set.

    *image* is either a single array or a list of exactly three arrays,
    which are stacked into one RGB image.
    """
    if not debug:
        return
    # idiomatic type check (was: type(image) == list)
    if isinstance(image, list):
        assert len(image) == 3
        image = np.transpose(np.array(image), [1, 2, 0])
    fname = "_%s.png" % title
    print("debug " + fname)
    imsave(fname, image.astype('float'))


main()
|
python
|
# Copyright 2017 Insurance Australia Group Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import common
import get_accounts
import os
# Source template and rendered output paths for the Watchmen roles stack.
TEMPLATE_BASE = os.environ['LOCATION_CORE']+"/"+"watchmen_cloudformation/templates/roles.tmpl"
TEMPLATE_DESTINATION = os.environ['LOCATION_CORE']+"/"+"watchmen_cloudformation/files/roles.yml"
def main():
    """Render the roles CloudFormation template and write the deployable file."""
    roles_cf = common.get_template(TEMPLATE_BASE)
    common.generate_file(TEMPLATE_DESTINATION, roles_cf) # Creates the deployable CF file

if __name__ == "__main__":
    main()
|
python
|
# SQL that (re)defines cognition.groupsof(username): a PL/pgSQL set-returning
# function yielding every role the given user is a member of, excluding the
# user's own role.
GROUPS_OF = """
CREATE OR REPLACE FUNCTION cognition.groupsof(username text)
RETURNS setof text AS $$
DECLARE rolename pg_roles.rolname%TYPE;
BEGIN
FOR rolename IN
SELECT a.rolname FROM pg_authid a
WHERE pg_has_role(username, a.oid, 'member') AND a.rolname != username
LOOP
RETURN NEXT rolename;
END LOOP;
RETURN;
END;
$$ LANGUAGE PLPGSQL VOLATILE;
"""
|
python
|
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import os
import random
import numpy as np
import torch
import torchvision
import torchvision.datasets as datasets
import torchvision.transforms as transforms
# import splitfolders #only needed for train/test split folder creation
# In[3]:
#here's the line of code I used to split the data into multiple train/test directories with sub folders for each class. 80/20 split inot images/data_split
#the original images stay in "good_images" and are copied to "data_split"
#this cell should only need to be re-run if we need to re-split the images after removing more or something that would change our train/test split images
# Split with a ratio.
# To only split into training and validation set, set a tuple to `ratio`, i.e, `(.8, .2)`.
#splitfolders.ratio("../images/good_images", output="../images/data_split", seed=42, ratio=(.8, .2), group_prefix=None) # default values
# In[4]:
#load images
# In[5]:
# Root of the 80/20 train/val split created by splitfolders (see note above).
DATA_PATH = "../images/data_split"

def get_count_metrics(folder, data_path=DATA_PATH):
    """Count images per class directory under *folder*.

    NOTE(review): number_covid is computed but not returned — only the
    (normal, pneumonia) counts are; confirm whether the covid count should
    be part of the return value.
    """
    #folder here is either "train" or "val"
    number_normal = len(os.listdir(data_path + "/" + folder + "/normal/"))
    number_pneumonia = len(os.listdir(data_path + "/" + folder + "/pneumonia/"))
    number_covid = len(os.listdir(data_path + "/" + folder + "/covid-19/"))
    return number_normal, number_pneumonia
def load_data(data_path=DATA_PATH, batch_size=32):
    '''
    Build the four DataLoaders used in the notebook.

    Returns (train_loader_base, val_loader_base, train_loader_red,
    val_loader_red): "base" loaders share one light augmentation pipeline,
    "red" loaders use heavier augmentation on train and plain resize on val.
    Transform reference: https://pytorch.org/docs/stable/torchvision/transforms.html)
    '''
    train_base_trans = transforms.Compose([
        transforms.Resize(299),
        transforms.CenterCrop(299),
        transforms.RandomHorizontalFlip(),
        transforms.GaussianBlur(3),
        transforms.ToTensor()
    ])
    # NOTE(review): identical to train_base_trans, so validation images are
    # also randomly flipped/blurred — confirm this is intended.
    test_base_trans = transforms.Compose([
        transforms.Resize(299),
        transforms.CenterCrop(299),
        transforms.RandomHorizontalFlip(),
        transforms.GaussianBlur(3),
        transforms.ToTensor()
    ])
    train_red_trans = transforms.Compose([
        transforms.Resize(299),
        transforms.RandomRotation(30),
        transforms.CenterCrop(299),
        transforms.RandomHorizontalFlip(),
        transforms.GaussianBlur(3),
        #transforms.Lambda(lambda img: torchvision.transforms.functional.equalize(img)),
        transforms.ToTensor(),
        transforms.ColorJitter(brightness=.2, contrast=.2),
    ])
    test_red_trans = transforms.Compose([
        transforms.Resize(299),
        #transforms.RandomRotation(20),
        transforms.CenterCrop(299),
        #transforms.RandomHorizontalFlip(),
        #transforms.Grayscale(),
        #transforms.GaussianBlur(),
        #transforms.functional.rgb_to_grayscale(),
        transforms.ToTensor()
        #transforms.Normalize(.5,.25)
        #transforms.ColorJitter(brightness=.2, contrast=.2),
    ])
    trn_data = torchvision.datasets.ImageFolder(root=data_path + "/train/", transform=train_base_trans)
    val_data = torchvision.datasets.ImageFolder(root=data_path + "/val/", transform=test_base_trans)
    #To test
    # #trn_data = torch.utils.data.Subset(trn_data, range(100))
    # trn_data = torchvision.datasets.ImageFolder(root=data_path + "/train/", transform=train_base_trans)
    # print(trn_data.class_to_idx)
    train_loader_base = torch.utils.data.DataLoader(trn_data, batch_size=batch_size, shuffle=True, drop_last=False)
    val_loader_base = torch.utils.data.DataLoader(val_data, batch_size=batch_size, shuffle=False, drop_last=False)
    trn_red_data = torchvision.datasets.ImageFolder(root=data_path + "/train/", transform=train_red_trans)
    val_red_data = torchvision.datasets.ImageFolder(root=data_path + "/val/", transform=test_red_trans)
    train_loader_red = torch.utils.data.DataLoader(trn_red_data, batch_size=batch_size, shuffle=True, drop_last=False)
    val_loader_red = torch.utils.data.DataLoader(val_red_data, batch_size=batch_size, shuffle=False, drop_last=False)
    return train_loader_base, val_loader_base, train_loader_red, val_loader_red
# In[6]:
import matplotlib.pyplot as plt
def imshow(img, title):
    """Render a tensor image with matplotlib (channels-first -> channels-last)."""
    arr = img.numpy()
    plt.figure(figsize=(15, 7))
    plt.axis('off')
    plt.imshow(np.transpose(arr, (1, 2, 0)))
    plt.title(title)
    plt.show()
def show_batch_images(dataloader):
    """Display one batch from *dataloader* as a labelled image grid."""
    images, labels = next(iter(dataloader))
    img = torchvision.utils.make_grid(images, padding=25)
    # NOTE(review): label->name mapping assumes class indices
    # {0: covid-19, 1: normal, 2: pneumonia}; ImageFolder assigns indices
    # alphabetically, which matches these folder names — verify if the
    # class folders ever change.
    imshow(img, title=["COVID-19" if x==0 else "NORMAL" if x == 1 else "PNEUMONIA" for x in labels])
# load_data() returns (train_base, val_base, train_red, val_red). The previous
# unpack `train_loader, _, val_loader, _` accidentally bound the *reduced
# training* loader to val_loader; take the matching base train/val pair.
train_loader, val_loader, _, _ = load_data()
#train_loader, val_loader = load_data()
for i in range(2):
    show_batch_images(train_loader)
for i in range(2):
    show_batch_images(val_loader)
# In[ ]:
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.