import mysql.connector
from mysql.connector import errorcode
from flask import flash
try:
# Establish a connection with the MySQL database
    # The password is hardcoded here for simplicity;
    # in production, load credentials from environment variables or a config file.
cnx = mysql.connector.connect(user='test_user', password='password',
host='127.0.0.1',
database='phone_directory')
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
def add_contact(contact):
status = None
cursor = cnx.cursor()
insert_query = "INSERT INTO contacts (name, mobile_no, email) VALUES (%s, %s, %s)"
    contact = (contact.get("name"), contact.get("mobile_no"), contact.get("email"))
    try:
        cursor.execute(insert_query, contact)
        cnx.commit()  # persist only if the insert succeeded
        status = True
    except Exception as e:
        print(e)
        status = False
    finally:
        cursor.close()
    return status
def get_contacts():
try:
cursor = cnx.cursor()
cursor.execute("SELECT * FROM contacts")
contacts = cursor.fetchall()
cursor.close()
return contacts
except Exception as e:
print(e)
flash("Error occured while reading from the database")
return None
def delete_contact(contact_id):
cursor = cnx.cursor()
try:
cursor.execute("DELETE FROM contacts WHERE id = %s", (contact_id, ))
flash("Contact deleted successfully")
except Exception as e:
print(e)
flash("Error occured while deleting from the database")
cursor.close()
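# A minimal usage sketch (not part of the original module); assumes the
# phone_directory database and its `contacts` table (id, name, mobile_no,
# email) already exist.
if __name__ == "__main__":
    ok = add_contact({"name": "Ada", "mobile_no": "5550100", "email": "ada@example.com"})
    print("insert succeeded:", ok)
    for row in get_contacts() or []:
        print(row)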
|
python
|
"""
Django template tags for configurations.
"""
|
python
|
from services import user_service
from viewmodels.shared.viewmodelbase import ViewModelBase
class IndexViewModel(ViewModelBase):
def __init__(self):
super().__init__()
self.user = user_service.get_user_by_id(self.user_id)
def validate(self):
if not self.user_id:
self.errors.append('No user id. ')
if self.user_id and not self.user:
self.errors.append('No user. ')
|
python
|
# coding: utf-8
# In[1]:
from flask import Flask,render_template,session,url_for,request,redirect
from flask_pymongo import PyMongo
from flask_bcrypt import Bcrypt
from flask import jsonify,json
import os
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import pprint
import datetime
import argparse
import pickle
from model import create_model
app1 = Flask(__name__)
nn4_small2_pretrained = create_model()
# In[2]:
nn4_small2_pretrained.load_weights('weights/nn4.small2.v1.h5')
# arguments
parser = argparse.ArgumentParser()
parser.add_argument("-c","--course",help="Course ID for attendance")
args = vars(parser.parse_args())
print(args['course'])
# In[3]:
import numpy as np
import os.path
class IdentityMetadata():
def __init__(self, base, name, file):
# dataset base directory
self.base = base
# identity name
self.name = name
# image file name
self.file = file
def __repr__(self):
return self.image_path()
def image_path(self):
return os.path.join(self.base, self.name, self.file)
def load_metadata(path):
metadata = []
for i in os.listdir(path):
for f in os.listdir(os.path.join(path, i)):
            # Check the file extension; allow only jpg/jpeg/png files.
            ext = os.path.splitext(f)[1]
            if ext in ('.jpg', '.jpeg', '.png'):
metadata.append(IdentityMetadata(path, i, f))
return np.array(metadata)
metadata = load_metadata('images')
print('metadata created')
print(metadata)
# In[4]:
import cv2
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from align import AlignDlib
# get_ipython().run_line_magic('matplotlib', 'inline')
def load_image(path):
    img = cv2.imread(path, 1)  # OpenCV loads images in BGR channel order
    return img[..., ::-1]  # reverse the channel axis to get RGB
alignment = AlignDlib('shape_predictor_68_face_landmarks.dat')
#combined transformation
def align_image(img):
return alignment.align(96, img, alignment.getLargestFaceBoundingBox(img),
landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)
# In[5]:
import pickle
embedded = np.zeros((metadata.shape[0], 128))
real_name = {}
embeddings = open('embeddings.pkl','rb')
embedded = pickle.load(embeddings)
embeddings.close()
for i, m in enumerate(metadata):
# img = load_image(m.image_path())
# img = align_image(img)
# scale RGB values to interval [0,1]
# if img is not None:
# img = (img / 255.).astype(np.float32)
# obtain embedding vector for image
# embedded[i] = nn4_small2_pretrained.predict(np.expand_dims(img, axis=0))[0]
    real_name[os.path.dirname(m.image_path()[7:])] = embedded[i]  # strip the 'images/' prefix and key by identity name
print(i)
print(m.name)
# embeddings = open('embeddings.pkl','wb')
# pickle.dump(embedded,embeddings)
# embeddings.close()
def real_names():
return real_name
# In[6]:
def distance(emb1, emb2):
return np.sum(np.square(emb1 - emb2))
# In[7]:
def show_pair(idx1, idx2):
plt.figure(figsize=(6,3))
plt.suptitle(f'Distance = {distance(embedded[idx1], embedded[idx2]):.2f}')
plt.subplot(121)
plt.imshow(load_image(metadata[idx1].image_path()))
plt.subplot(122)
plt.imshow(load_image(metadata[idx2].image_path()));
show_pair(78, 76)
show_pair(78, 17)
# In[8]:
def recognize(embedded):
min_dist = 100
_id = None
for name,emb in real_name.items():
dist = np.sum(np.square(emb - embedded))
if dist < min_dist:
min_dist = dist
_id = name
if min_dist > 0.58:
print(min_dist)
return None
else:
print(min_dist)
return _id
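# Usage sketch: recognize() is a linear nearest-neighbour search over the
# known embeddings; a match is rejected when its squared L2 distance exceeds
# the 0.58 threshold, e.g.
#   recognize(nn4_small2_pretrained.predict(batch)[0])  # -> name or None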
# In[9]:
from sklearn.metrics import f1_score, accuracy_score
distances = [] # squared L2 distance between pairs
identical = [] # 1 if same identity, 0 otherwise
num = len(metadata)
for i in range(num - 1):
    for j in range(i + 1, num):  # each distinct pair exactly once
distances.append(distance(embedded[i], embedded[j]))
identical.append(1 if metadata[i].name == metadata[j].name else 0)
distances = np.array(distances)
identical = np.array(identical)
thresholds = np.arange(0.3, 1.0, 0.01)
f1_scores = [f1_score(identical, distances < t) for t in thresholds]
acc_scores = [accuracy_score(identical, distances < t) for t in thresholds]
opt_idx = np.argmax(f1_scores)
# Threshold at maximal F1 score
opt_tau = thresholds[opt_idx]
# Accuracy at maximal F1 score
opt_acc = accuracy_score(identical, distances < opt_tau)
# Plot F1 score and accuracy as function of distance threshold
plt.plot(thresholds, f1_scores, label='F1 score');
plt.plot(thresholds, acc_scores, label='Accuracy');
plt.axvline(x=opt_tau, linestyle='--', lw=1, c='lightgrey', label='Threshold')
plt.title(f'Accuracy at threshold {opt_tau:.2f} = {opt_acc:.3f}');
plt.xlabel('Distance threshold')
plt.legend();
# In[10]:
embedded = np.zeros((1, 128))
def recognize_image(image_path):
img = load_image(image_path)
img = align_image(img)
if img is not None:
img = (img / 255.).astype(np.float32)
# obtain embedding vector for image
embedded = nn4_small2_pretrained.predict(np.expand_dims(np.array(img), axis=0))[0]
name = recognize(embedded)
print(name)
return name
return None
cap = cv2.VideoCapture(1)  # camera index 1; use 0 for the default webcam
# def webcam_recognize():
# while(True):
# ret, frame = cap.read()
# cv2.imwrite('temp.jpg',frame)
# cv2.waitKey(20)
# if 'temp.jpg' is not None:
# name = recognize_image('temp.jpg')
# cv2.imshow('temp',frame)
# print(name)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
# cap.release()
# cv2.destroyAllWindows()
# webcam_recognize()
# recognize_image('.jpg')
#students list for gspread updations
students = []
from mtcnn.mtcnn import MTCNN
import pickle
detector = MTCNN()  # create the detector once rather than on every frame
def multiple_recognize():
    while True:
        ret, frame = cap.read()
        cv2.imwrite('temp.jpg', frame)
        image = load_image('temp.jpg')
        faces = detector.detect_faces(image)
        print(faces)
        if faces:
            for face in faces:
                (x, y, w, h) = face['box']
                name = recognize_image('temp.jpg')
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 5)
                cv2.imshow('Faces', frame)
                if name is not None and name not in students:
                    students.append(name)
                    stud_names = open('present.pickle', 'wb')
                    pickle.dump(students, stud_names, protocol=2)
                    stud_names.close()
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
# cv2.imshow('parts',image[y:y+h,x:x+w])
multiple_recognize()
# In[11]:
# import datetime
# import os
# import subprocess as s
# s.call("python mark_attendance.py", shell=True)
# today = datetime.date.today()
# formatted_date = today.strftime("%m-%d-%Y")
# print(formatted_date)
# from mark_attendance import mark_attendance
# mark_attendance(students)
today = datetime.date.today()
formatted_date = today.strftime("%m-%d-%Y")
print(formatted_date)
from mark_attendance import mark_attendance
mark_attendance(students,args['course'])
|
python
|
# Copyright (c) The Diem Core Contributors
# SPDX-License-Identifier: Apache-2.0
from typing import List, Callable, Optional, Tuple
import requests
from .models import (
User,
Address,
Transaction,
Transactions,
RequestForQuote,
Quote,
CreateTransaction,
AccountInfo,
OffChainSequenceInfo,
TransactionId,
FundsTransfer,
PaymentDetails,
PreparePaymentInfoResponse,
)
from .models_fppa import (
FundPullPreApprovalScope,
FundsPullPreApprovalRequest,
FundsPullPreApproval,
FundPullPreApprovalStatus,
)
RequestSender = Callable[[str, str, Optional[dict]], requests.Response]
class ReferenceWalletProxy:
def __init__(self, base_url):
self.base_url = base_url
self.authorization_header = {}
self.funds_pull_preapproval = ReferenceWalletProxyFPPA(self._request_authorized)
def create_new_user(self, username, password):
add_user_json = {"username": username, "password": password}
add_user_response = self._request("POST", "user", json=add_user_json)
self._set_authorization_token(add_user_response.text)
def get_user(self):
user_response = self._request_authorized("GET", "user")
return User.from_json(user_response.text)
def update_user(self, user: User):
self._request_authorized("PUT", "user", json=user.to_dict())
return self.get_user()
def get_account_info(self) -> AccountInfo:
response = self._request_authorized("GET", "account")
return AccountInfo.from_json(response.text)
def get_balance(self, currency):
return sum(
x.balance
for x in self.get_account_info().balances
if x.currency == currency
)
def get_receiving_address(self) -> str:
address_response = self._request_authorized(
"POST", "account/receiving-addresses"
)
address = Address.from_json(address_response.text)
return address.address
def get_transaction_list(self) -> List[Transaction]:
account_transactions_response = self._request_authorized(
"GET", "account/transactions"
)
transactions = Transactions.from_json(account_transactions_response.text)
return transactions.transaction_list
def create_deposit_quote(self, amount: int, currency_pair) -> Quote:
quote_request = RequestForQuote(
action="buy",
amount=amount,
currency_pair=currency_pair,
)
quote_response = self._request_authorized(
"POST", "account/quotes", json=quote_request.to_dict()
)
return Quote.from_json(quote_response.text)
def execute_quote(self, quote_id: str):
self._request_authorized(
"POST", f"account/quotes/{quote_id}/actions/execute", json={}
)
def get_offchain_state(self, reference_id) -> OffChainSequenceInfo:
# TBD: There is no way, at the moment, to get off-chain sequence
# state. Should be implemented.
return OffChainSequenceInfo()
def send_transaction(self, address, amount, currency) -> TransactionId:
tx_request = CreateTransaction(
currency=currency,
amount=amount,
receiver_address=address,
)
send_transaction_response = self._request_authorized(
"POST", "account/transactions", json=tx_request.to_dict()
)
return TransactionId.from_json(send_transaction_response.text)
def create_payment_command_as_sender(
self,
reference_id,
vasp_address,
merchant_name,
action,
currency,
amount,
expiration,
):
request = {
"reference_id": reference_id,
"vasp_address": vasp_address,
"merchant_name": merchant_name,
"action": action,
"currency": currency,
"amount": amount,
"expiration": expiration,
}
self._request_authorized("POST", "offchain/payment_command", json=request)
    def get_payment_details(self, reference_id, vasp_address) -> Optional[PaymentDetails]:
response = self._request_authorized(
"GET",
f"offchain/query/payment_details?"
f"vasp_address={vasp_address}&"
f"reference_id={reference_id}",
)
return PaymentDetails.from_json(response.text) if response.text else None
    def prepare_payment_as_receiver(self, action: str = "charge") -> Tuple[str, str]:
response = self._request_authorized(
"POST", f"/validation/payment_info/{action}"
)
response_object = PreparePaymentInfoResponse.from_json(response.text)
return response_object.reference_id, response_object.address
def approve_payment(self, reference_id: str, init_offchain: bool):
self._request_authorized(
"POST",
f"/offchain/payment/{reference_id}/actions/approve",
json={"init_offchain_required": init_offchain},
)
def approve_payment_command(self, reference_id):
self._request_authorized(
"POST", f"/offchain/payment_command/{reference_id}/actions/approve"
)
def reject_payment_command(self, reference_id):
self._request_authorized(
"POST", f"/offchain/payment_command/{reference_id}/actions/reject"
)
def get_transaction(self, tx_id) -> FundsTransfer:
response = self._request_authorized("GET", f"account/transactions/{tx_id}")
return FundsTransfer.from_json(response.text)
def _set_authorization_token(self, token):
self.authorization_header = {"Authorization": "Bearer " + token}
def _request(self, method, endpoint, json=None):
response = requests.request(
method, url=f"{self.base_url}/{endpoint}", json=json
)
response.raise_for_status()
return response
def _request_authorized(self, method, endpoint, json=None) -> requests.Response:
response = requests.request(
method,
url=f"{self.base_url}/{endpoint}",
json=json,
headers=self.authorization_header,
)
response.raise_for_status()
return response
class ReferenceWalletProxyFPPA:
"""
    Sends funds pull pre-approval related requests to the reference wallet.
"""
def __init__(self, request_wallet_authorized: RequestSender):
self._request_authorized = request_wallet_authorized
def get_all_preapprovals(self) -> List[FundsPullPreApproval]:
r = self._request_authorized("GET", "offchain/funds_pull_pre_approvals")
preapprovals = r.json()
return [
FundsPullPreApproval.from_dict(x)
for x in preapprovals["funds_pull_pre_approvals"]
]
def request_preapproval_from_another(
self,
payer_addr_bech32: str,
scope: FundPullPreApprovalScope,
description: str = None,
) -> str:
fppa_request = FundsPullPreApprovalRequest(
payer_address=payer_addr_bech32,
description=description,
scope=scope,
)
r = self._request_authorized(
"POST", "validation/funds_pull_pre_approvals", fppa_request.to_dict()
)
return r.json()["funds_pull_pre_approval_id"]
def create_fppa_request_for_unknown_payer(
self,
scope: FundPullPreApprovalScope,
description: str = None,
    ) -> Tuple[str, str]:
r = self._request_authorized(
"POST",
"validation/funds_pull_pre_approvals",
{"description": description, "scope": scope.to_dict()},
)
return r.json()["funds_pull_pre_approval_id"], r.json()["address"]
def update_preapproval_status(
self, fppa_id: str, status: FundPullPreApprovalStatus
):
self._request_authorized(
"PUT",
f"offchain/funds_pull_pre_approvals/{fppa_id}",
{"status": status.value},
)
def create_and_approve(
self,
biller_address: str,
funds_pull_pre_approval_id: str,
scope: FundPullPreApprovalScope,
description: str,
):
self._request_authorized(
"POST",
"offchain/funds_pull_pre_approvals",
{
"biller_address": biller_address,
"funds_pull_pre_approval_id": funds_pull_pre_approval_id,
"scope": scope.to_dict(),
"description": description,
},
)
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 30 14:05:38 2019
@author: ico
"""
import numpy as np
class MyNeuron:
    def training(self, X, Y):
        # random initial weights, one extra row for the bias term
        self.W = np.random.random((X.shape[1] + 1, 1))
        # prepend a column of ones so the bias is learned like a weight
        X = np.append(np.ones((X.shape[0], 1)), X, axis=1)
        for j in range(1, 21):  # 20 training epochs
            i = 0
            for x in X:
                y = 1 if np.dot(self.W.T, x) > 0 else 0
                # perceptron update rule: W += (target - prediction) * x
                self.W = self.W + (Y[i] - y) * x.reshape(-1, 1)
                i = i + 1
#W=np.array([-100,100/6])
    # constructor
    def __init__(self, funcActivation):
        self.funcAct = funcActivation
    # Predicts pass (1) or fail (0)
def predict(self,x):
x=np.append(1,x)
y=np.dot(self.W.T,x.reshape(self.W.shape[0]))
if self.funcAct=="heaviside":
return self.heaviside(y)
if self.funcAct=="tanh":
return self.tanh(y)
if self.funcAct=="sigmoid":
return self.sigmoid(y)
    # activation functions
    def heaviside(self, x):
        if x >= 0:
            return 1  # pass
        else:
            return -1  # fail
def tanh(self,x):
return np.sinh(x)/np.cosh(x)
def sigmoid(self,x):
return 1/(1+np.exp(-x))
    # maps predictions to 0 or 1
def transformPredictions(self,Y):
Y=np.array(Y)
if self.funcAct=='heaviside':
idxNeg=Y==-1
Y[idxNeg]=0
elif self.funcAct=='tanh':
idxPos = Y >=0
idxNeg = Y<0
Y[idxPos]=1
Y[idxNeg]=0
else:
idxPos = Y>=0.5
idxNeg=Y<0.5
Y[idxPos]=1
Y[idxNeg]=0
return Y
clf=MyNeuron("sigmoid")
import pandas as pd
#datos=pd.read_csv('training.csv')
#X=datos.iloc[:,[0,1]]
#Y=datos.iloc[:,2]
ceros = np.random.uniform(0, 0.3, 10)  # vector of 10 "low" values (class 0)
unos = np.random.uniform(0.7, 1.0, 10)  # vector of 10 "high" values (class 1)
# build the data set
X=np.append(ceros,ceros)
X=np.append(X,unos)
X=np.append(X,unos)
X=np.append(X,ceros)
X=np.append(X,unos)
X=np.append(X,ceros)
X=np.append(X,unos)
X=X.reshape(40,2)
Y=np.zeros((30,1))
Y=np.append(Y,np.ones((10,1)))
Y=Y.reshape(40,1)
clf.training(X,Y)
prueba=pd.read_csv('test.csv')
Xt=prueba.iloc[:,[0,1]]
Yt=prueba.iloc[:,2]
Yp=[]
for i in range(0,Xt.shape[0]):
p=np.array(Xt.iloc[i,:])
Yp.append(clf.predict(p))
"""
import matplotlib.pyplot as plt
idxPos = Y ==1
idxNeg = Y ==0
Xgraf=X[idxPos]
plt.plot(Xgraf.iloc[:,0],Xgraf.iloc[:,1],'bo')
Xgraf=X[idxNeg]
plt.plot(Xgraf.iloc[:,0],Xgraf.iloc[:,1],'ro')
plt.title("Entrenamiento")
plt.show()
for i in range(0,Xt.shape[0]):
p=np.array(Xt.iloc[i,:])
Yp.append(clf.predict(p))
idxPos = Yt ==1
idxNeg = Yt ==0
Xgraf=Xt[idxPos]
plt.plot(Xgraf.iloc[:,0],Xgraf.iloc[:,1],'bo')
Xgraf=Xt[idxNeg]
plt.plot(Xgraf.iloc[:,0],Xgraf.iloc[:,1],'ro')
plt.title("Test")
plt.show()
#compute the confusion matrix
#convert the predictions into classes
Yp=clf.transformPredictions(Yp)
Yt=np.array(Yt)
Yt=Yt.reshape(Yt.shape[0],1)
a=np.sum(np.logical_and(Yp==0,Yt==0))
b=np.sum(np.logical_and(Yp==1,Yt==0))
c=np.sum(np.logical_and(Yp==0,Yt==1))
d=np.sum(np.logical_and(Yp==1,Yt==1))
"""
#cm = np.array([a, b, c, d]).reshape((2, 2))
#print(cm)  # print the confusion-matrix results
|
python
|
#!/usr/bin/env python3
import argparse
import random
import sys
from mpmath import mp
from common import print_integral_single_input
from common.randomgen import generate_basis_function
parser = argparse.ArgumentParser()
parser.add_argument("--filename", type=str, required=True, help="Output file name")
parser.add_argument("--max-am", type=int, required=True, help="Maximum AM of the basis functions")
parser.add_argument("--alpha-power", type=int, required=True, help="Maximum power of the exponent (range will be 1e-x to 1e+x)")
parser.add_argument("--xyz-power", type=int, required=True, help="Maximum power of the coordinates (range will be -1e+x to 1e+x)")
parser.add_argument("--seed", type=int, required=True, help="Seed to use for the pseudo-random number generator")
parser.add_argument("--ndigits", type=int, required=True, help="Number of digits for the value of the integral")
parser.add_argument("--ncenter", type=int, required=True, help="Number of centers in the integral (typically 2 or 4)")
parser.add_argument("--ntests", type=int, required=True, help="Number of tests to generate")
args = parser.parse_args()
random.seed(args.seed, version=2)
with open(args.filename, 'w') as f:
f.write("# THIS FILE IS GENERATED VIA A SCRIPT. DO NOT EDIT\n")
f.write("#\n")
f.write("# Input parameters for integral generated with:\n")
f.write("# " + " ".join(sys.argv[:]) + "\n")
f.write("#\n")
f.write(str(args.ntests))
f.write("\n")
for i in range(args.ntests):
entry = []
for n in range(args.ncenter):
bf = generate_basis_function(args.max_am, args.alpha_power, args.xyz_power, args.ndigits)
entry.append(bf)
print_integral_single_input(f, entry)
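# Illustrative invocation (the script file name is hypothetical):
#   python generate_tests.py --filename eri.dat --max-am 2 --alpha-power 5 \
#       --xyz-power 1 --seed 42 --ndigits 32 --ncenter 4 --ntests 100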
|
python
|
# coding: utf-8
r"""Distance conversions"""
from corelib.units.base import create_code
distances = {"mm": 1e-3, "millimeter": 1e-3, "millimeters": 1e-3, "millimetre": 1e-3, "millimetres": 1e-3,
"cm": 1e-2, "centimeter": 1e-2, "centimeters": 1e-2, "centimetre": 1e-2, "centimetres": 1e-2,
"m": 1., "meter": 1., "meters": 1., "metre": 1., "metres": 1.,
"km": 1000., "kilometer": 1000., "kilometers": 1000., "kilometre": 1000., "kilometres": 1000.,
# "in": 0.0254, # in is a reserved keyword in Python
"inch": 0.0254, "inches": 0.0254,
"ft": 0.3048, "foot": 0.3048, "feet": 0.3048,
"yd": 0.9144, "yard": 0.9144, "yards": 0.9144,
"mi": 1609.344, "mile": 1609.344, "miles": 1609.344,
"ftm": 1.8288, "fathom": 1.8288, "fathoms": 1.8288,
"nm": 1852., "nautical_mile": 1852., "nautical_miles": 1852.}
for k in distances:
    # code = fs_units.base.create_code("distances", k)
    # exec code in module.__dict__
    # g = globals()
    exec(create_code("distances", k), globals())
def convert(value, to_unit, from_unit):
r"""Convenience function for cases where the to_unit and the from_unit are
in string form
Parameters
----------
value : float or int
to_unit : str
The desired unit
from_unit : str
The input unit
"""
return globals()[to_unit](**{from_unit: value})
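# A hedged usage sketch, assuming create_code() defines one converter function
# per unit name (e.g. m(), feet()) in this module's globals:
#   convert(100., to_unit="m", from_unit="cm")    # -> 1.0
#   convert(1., to_unit="feet", from_unit="m")    # -> ~3.2808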
|
python
|
kOk = 0
kNoSuchSession = 6
kNoSuchElement = 7
kNoSuchFrame = 8
kUnknownCommand = 9
kStaleElementReference = 10
kElementNotVisible = 11
kInvalidElementState = 12
kUnknownError = 13
kJavaScriptError = 17
kXPathLookupError = 19
kTimeout = 21
kNoSuchWindow = 23
kInvalidCookieDomain = 24
kUnexpectedAlertOpen = 26
kNoAlertOpen = 27
kScriptTimeout = 28
kInvalidSelector = 32
kSessionNotCreatedException = 33
# Xwalk-specific status codes.
kXwalkNotReachable = 100
kNoSuchExecutionContext = 101
kDisconnected = 102
kForbidden = 103
kTabCrashed = 104
class _DefaultMessageForStatusCode(object):
  """Maps each |ErrorCode| to its default message string."""
Message = {
kOk: "ok",
kNoSuchSession: "no such session",
kNoSuchElement: "no such element",
kNoSuchFrame: "no such frame",
kUnknownCommand: "unknown command",
kStaleElementReference: "stale element reference",
kElementNotVisible: "element not visible",
kInvalidElementState: "invalid element state",
kUnknownError: "unknown error",
kJavaScriptError: "javascript error",
kXPathLookupError: "xpath lookup error",
kTimeout: "timeout",
kNoSuchWindow: "no such window",
kInvalidCookieDomain: "invalid cookie domain",
kUnexpectedAlertOpen: "unexpected alert open",
kNoAlertOpen: "no alert open",
kScriptTimeout: "asynchronous script timeout",
kInvalidSelector: "invalid selector",
kSessionNotCreatedException: "session not created exception",
kNoSuchExecutionContext: "no such execution context",
kXwalkNotReachable: "xwalk not reachable",
kDisconnected: "disconnected",
kForbidden: "forbidden",
kTabCrashed: "tab crashed",
}
class Status(object):
def __init__(self, code=kOk, details=""):
self.code = code
    if isinstance(details, str) and details:
self.msg = _DefaultMessageForStatusCode.Message[code] + ":" + details
else:
self.msg = _DefaultMessageForStatusCode.Message[code]
def Update(self, other):
self.code = other.code
self.msg = other.msg
def IsOk(self):
return self.code == kOk
def IsError(self):
return self.code != kOk
def Code(self):
return self.code
def Message(self):
return self.msg
def AddDetails(self, details):
self.msg += details
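# Illustrative usage (not part of the original module):
if __name__ == "__main__":
  status = Status(kNoSuchElement, "selector '#login' matched nothing")
  assert status.IsError()
  print(status.Code(), status.Message())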
|
python
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library containing helpers for adding post export metrics for evaluation.
These post export metrics can be included in the add_post_export_metrics
parameter of Evaluate to compute them.
"""
from typing import Any, Dict, List, Optional, Tuple
import tensorflow as tf
from tensorflow_model_analysis import types
from tensorflow_model_analysis.post_export_metrics import metric_keys
from tensorflow_model_analysis.post_export_metrics import post_export_metrics
from tensorflow_model_analysis.proto import metrics_for_slice_pb2 as metrics_pb2
from tensorflow_model_analysis.slicer import slicer_lib as slicer
# pylint: disable=protected-access
@post_export_metrics._export('fairness_indicators')
class _FairnessIndicators(post_export_metrics._ConfusionMatrixBasedMetric):
"""Metrics that can be used to evaluate the following fairness metrics.
* Demographic Parity or Equality of Outcomes.
For each slice measure the Positive* Rate, or the percentage of all
examples receiving positive scores.
* Equality of Opportunity
Equality of Opportunity attempts to match the True Positive* rate
(aka recall) of different data slices.
* Equality of Odds
In addition to looking at Equality of Opportunity, looks at equalizing the
False Positive* rates of slices as well.
The choice to focus on these metrics as a starting point is based primarily on
the paper Equality of Opportunity in Supervised Learning and the excellent
visualization created as a companion to the paper.
https://arxiv.org/abs/1610.02413
http://research.google.com/bigpicture/attacking-discrimination-in-ml/
* Note that these fairness formulations assume that a positive prediction is
associated with a positive outcome for the user--in certain contexts such as
abuse, positive predictions translate to non-opportunity. You may want to use
the provided negative rates for comparison instead.
"""
_thresholds = ... # type: List[float]
_example_weight_key = ... # type: str
_labels_key = ... # type: str
_metric_tag = None # type: str
# We could use the same keys as the ConfusionMatrix metrics, but with the way
# that post_export_metrics are currently implemented, if both
# post_export_metrics were specified we would pop the matrices/thresholds in
# the first call, and have issues with the second.
thresholds_key = metric_keys.FAIRNESS_CONFUSION_MATRIX_THESHOLDS
matrices_key = metric_keys.FAIRNESS_CONFUSION_MATRIX_MATRICES
def __init__(self,
thresholds: Optional[List[float]] = None,
example_weight_key: Optional[str] = None,
target_prediction_keys: Optional[List[str]] = None,
labels_key: Optional[str] = None,
metric_tag: Optional[str] = None,
tensor_index: Optional[int] = None) -> None:
if not thresholds:
thresholds = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
# Determine the number of threshold digits to display as part of the metric
# key. We want lower numbers for readability, but allow differentiation
# between close thresholds.
self._key_digits = 2
for t in thresholds:
if len(str(t)) - 2 > self._key_digits:
self._key_digits = len(str(t)) - 2
super().__init__(
thresholds,
example_weight_key,
target_prediction_keys,
labels_key,
metric_tag,
tensor_index=tensor_index)
def get_metric_ops(
self, features_dict: types.TensorTypeMaybeDict,
predictions_dict: types.TensorTypeMaybeDict,
labels_dict: types.TensorTypeMaybeDict
) -> Dict[str, Tuple[types.TensorType, types.TensorType]]:
values, update_ops = self.confusion_matrix_metric_ops(
features_dict, predictions_dict, labels_dict)
# True positive rate is computed by confusion_matrix_metric_ops as 'recall'.
# pytype: disable=unsupported-operands
values['tnr'] = tf.math.divide_no_nan(values['tn'],
values['tn'] + values['fp'])
values['fpr'] = tf.math.divide_no_nan(values['fp'],
values['fp'] + values['tn'])
values['positive_rate'] = tf.math.divide_no_nan(
values['tp'] + values['fp'],
values['tp'] + values['fp'] + values['tn'] + values['fn'])
values['fnr'] = tf.math.divide_no_nan(values['fn'],
values['fn'] + values['tp'])
values['negative_rate'] = tf.math.divide_no_nan(
values['tn'] + values['fn'],
values['tp'] + values['fp'] + values['tn'] + values['fn'])
values['false_discovery_rate'] = tf.math.divide_no_nan(
values['fp'], values['fp'] + values['tp'])
values['false_omission_rate'] = tf.math.divide_no_nan(
values['fn'], values['fn'] + values['tn'])
# pytype: enable=unsupported-operands
update_op = tf.group(update_ops['fn'], update_ops['tn'], update_ops['fp'],
update_ops['tp'])
value_op = tf.transpose(
a=tf.stack([
values['fn'], values['tn'], values['fp'], values['tp'],
values['precision'], values['recall']
]))
output_dict = {
self._metric_key(self.matrices_key): (value_op, update_op),
self._metric_key(self.thresholds_key): (tf.identity(self._thresholds),
tf.no_op()),
}
for i, threshold in enumerate(self._thresholds):
output_dict[self._metric_key(
metric_keys.base_key(
'positive_rate@%.*f' %
(self._key_digits, threshold)))] = (values['positive_rate'][i],
update_op)
output_dict[self._metric_key(
metric_keys.base_key(
'true_positive_rate@%.*f' %
(self._key_digits, threshold)))] = (values['recall'][i],
update_op)
output_dict[self._metric_key(
metric_keys.base_key(
'false_positive_rate@%.*f' %
(self._key_digits, threshold)))] = (values['fpr'][i], update_op)
output_dict[self._metric_key(
metric_keys.base_key(
'negative_rate@%.*f' %
(self._key_digits, threshold)))] = (values['negative_rate'][i],
update_op)
output_dict[self._metric_key(
metric_keys.base_key(
'true_negative_rate@%.*f' %
(self._key_digits, threshold)))] = (values['tnr'][i], update_op)
output_dict[self._metric_key(
metric_keys.base_key(
'false_negative_rate@%.*f' %
(self._key_digits, threshold)))] = (values['fnr'][i], update_op)
output_dict[self._metric_key(
metric_keys.base_key('false_discovery_rate@%.*f' %
(self._key_digits, threshold)))] = (
values['false_discovery_rate'][i], update_op)
output_dict[self._metric_key(
metric_keys.base_key('false_omission_rate@%.*f' %
(self._key_digits, threshold)))] = (
values['false_omission_rate'][i], update_op)
return output_dict # pytype: disable=bad-return-type
def populate_stats_and_pop(
self, unused_slice_key: slicer.SliceKeyType, combine_metrics: Dict[str,
Any],
output_metrics: Dict[str, metrics_pb2.MetricValue]) -> None:
matrices = combine_metrics.pop(self._metric_key(self.matrices_key))
thresholds = combine_metrics.pop(self._metric_key(self.thresholds_key))
# We assume that thresholds are already sorted.
if len(matrices) != len(thresholds):
raise ValueError(
'matrices should have the same length as thresholds, but lengths '
'were: matrices: %d, thresholds: %d' %
(len(matrices), len(thresholds)))
for threshold, raw_matrix in zip(thresholds, matrices):
# Adds confusion matrix table as well as ratios used for fairness metrics.
if isinstance(threshold, types.ValueWithTDistribution):
threshold = threshold.unsampled_value
output_matrix = post_export_metrics._create_confusion_matrix_proto(
raw_matrix, threshold)
(output_metrics[self._metric_key(metric_keys.FAIRNESS_CONFUSION_MATRIX)]
.confusion_matrix_at_thresholds.matrices.add().CopyFrom(output_matrix))
# If the fairness_indicator is enabled, the slicing inside the tfx evaluator
# config will also be added into these metrics as a subgroup key.
# However, handling the subgroup metrics with slices is still TBD.
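# The rates above are plain confusion-matrix ratios; for example, with
# tp=40, fp=10, tn=45, fn=5 at a given threshold:
#   positive_rate       = (tp + fp) / (tp + fp + tn + fn) = 50 / 100 = 0.50
#   true_positive_rate  = tp / (tp + fn) = 40 / 45 ≈ 0.889
#   false_positive_rate = fp / (fp + tn) = 10 / 55 ≈ 0.182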
@post_export_metrics._export('fairness_auc')
class _FairnessAuc(post_export_metrics._PostExportMetric):
"""Metric that computes bounded AUC for predictions in [0, 1].
  This metric calculates the subgroup AUC, the background positive subgroup
  negative (BPSN) AUC, and the background negative subgroup positive (BNSP)
  AUC. For more explanation of these AUC metrics, please refer to the paper
  [Measuring and Mitigating Unintended Bias in Text
  Classification](https://ai.google/research/pubs/pub46743)
"""
_target_prediction_keys = ... # type: List[str]
_labels_key = ... # type: str
_metric_tag = None # type: str
_tensor_index = ... # type: int
def __init__(self,
subgroup_key: str,
example_weight_key: Optional[str] = None,
num_buckets: int = post_export_metrics._DEFAULT_NUM_BUCKETS,
target_prediction_keys: Optional[List[str]] = None,
labels_key: Optional[str] = None,
metric_tag: Optional[str] = None,
tensor_index: Optional[int] = None) -> None:
"""Create a metric that computes fairness auc.
Predictions should be one of:
(a) a single float in [0, 1]
(b) a dict containing the LOGISTIC key
(c) a dict containing the PREDICTIONS key, where the prediction is
in [0, 1]
Label should be a single float that is either exactly 0 or exactly 1
(soft labels, i.e. labels between 0 and 1 are *not* supported).
Args:
subgroup_key: The key inside the feature column to indicate where this
example belongs to the subgroup or not. The expected mapping tensor of
this key should contain an integer/float value that's either 1 or 0.
example_weight_key: The key of the example weight column in the features
dict. If None, all predictions are given a weight of 1.0.
num_buckets: The number of buckets used for the curve. (num_buckets + 1)
is used as the num_thresholds in tf.metrics.auc().
target_prediction_keys: If provided, the prediction keys to look for in
order.
labels_key: If provided, a custom label key.
metric_tag: If provided, a custom metric tag. Only necessary to
disambiguate instances of the same metric on different predictions.
tensor_index: Optional index to specify class predictions to calculate
metrics on in the case of multi-class models.
"""
self._subgroup_key = subgroup_key
self._example_weight_key = example_weight_key
self._curve = 'ROC'
self._num_buckets = num_buckets
self._metric_name = metric_keys.FAIRNESS_AUC
self._subgroup_auc_metric = self._metric_key(self._metric_name +
'/subgroup_auc/' +
self._subgroup_key)
self._bpsn_auc_metric = self._metric_key(
f'{self._metric_name}/bpsn_auc/{self._subgroup_key}')
self._bnsp_auc_metric = self._metric_key(self._metric_name + '/bnsp_auc/' +
self._subgroup_key)
super().__init__(
target_prediction_keys=target_prediction_keys,
labels_key=labels_key,
metric_tag=metric_tag,
tensor_index=tensor_index)
def check_compatibility(self, features_dict: types.TensorTypeMaybeDict,
predictions_dict: types.TensorTypeMaybeDict,
labels_dict: types.TensorTypeMaybeDict) -> None:
post_export_metrics._check_feature_present(features_dict,
self._example_weight_key)
post_export_metrics._check_feature_present(features_dict,
self._subgroup_key)
self._get_labels_and_predictions(predictions_dict, labels_dict)
def get_metric_ops(
self, features_dict: types.TensorTypeMaybeDict,
predictions_dict: types.TensorTypeMaybeDict,
labels_dict: types.TensorTypeMaybeDict
) -> Dict[str, Tuple[types.TensorType, types.TensorType]]:
# Note that we have to squeeze predictions, labels, weights so they are all
# N element vectors (otherwise some of them might be N x 1 tensors, and
# multiplying a N element vector with a N x 1 tensor uses matrix
# multiplication rather than element-wise multiplication).
predictions, labels = self._get_labels_and_predictions(
predictions_dict, labels_dict)
predictions = post_export_metrics._flatten_to_one_dim(
tf.cast(predictions, tf.float64))
labels = post_export_metrics._flatten_to_one_dim(
tf.cast(labels, tf.float64))
weights = tf.ones_like(predictions)
subgroup = post_export_metrics._flatten_to_one_dim(
tf.cast(features_dict[self._subgroup_key], tf.bool))
if self._example_weight_key:
weights = post_export_metrics._flatten_to_one_dim(
tf.cast(features_dict[self._example_weight_key], tf.float64))
predictions, labels, weights = (
post_export_metrics
._create_predictions_labels_weights_for_fractional_labels(
predictions, labels, weights))
# To let subgroup tensor match the size with prediction, labels and weights
# above.
subgroup = tf.concat([subgroup, subgroup], axis=0)
labels_bool = tf.cast(labels, tf.bool)
pos_subgroup = tf.math.logical_and(labels_bool, subgroup)
neg_subgroup = tf.math.logical_and(
tf.math.logical_not(labels_bool), subgroup)
pos_background = tf.math.logical_and(labels_bool,
tf.math.logical_not(subgroup))
neg_background = tf.math.logical_and(
tf.math.logical_not(labels_bool), tf.math.logical_not(subgroup))
bnsp = tf.math.logical_or(pos_subgroup, neg_background)
bpsn = tf.math.logical_or(neg_subgroup, pos_background)
ops_dict = {}
# Add subgroup auc.
ops_dict.update(
post_export_metrics._build_auc_metrics_ops(
self._subgroup_auc_metric, labels, predictions,
tf.multiply(weights, tf.cast(subgroup, tf.float64)),
self._num_buckets + 1, self._curve))
    # Add background positive subgroup negative (BPSN) auc.
ops_dict.update(
post_export_metrics._build_auc_metrics_ops(
self._bpsn_auc_metric, labels, predictions,
tf.multiply(weights, tf.cast(bpsn, tf.float64)),
self._num_buckets + 1, self._curve))
    # Add background negative subgroup positive (BNSP) auc.
ops_dict.update(
post_export_metrics._build_auc_metrics_ops(
self._bnsp_auc_metric, labels, predictions,
tf.multiply(weights, tf.cast(bnsp, tf.float64)),
self._num_buckets + 1, self._curve))
return ops_dict
def populate_stats_and_pop(
self, slice_key: slicer.SliceKeyType, combine_metrics: Dict[str, Any],
output_metrics: Dict[str, metrics_pb2.MetricValue]) -> None:
for metrics_key in (self._subgroup_auc_metric, self._bpsn_auc_metric,
self._bnsp_auc_metric):
if slice_key:
combine_metrics.pop(metric_keys.lower_bound_key(metrics_key))
combine_metrics.pop(metric_keys.upper_bound_key(metrics_key))
combine_metrics.pop(metrics_key)
else:
post_export_metrics._populate_to_auc_bounded_value_and_pop(
combine_metrics, output_metrics, metrics_key)
# pylint: enable=protected-access
|
python
|
# terrascript/arukas/d.py
|
python
|
# Simple NTP daemon for MicroPython using asyncio.
# Copyright (c) 2020 by Thorsten von Eicken
# Based on https://github.com/wieck/micropython-ntpclient by Jan Wieck
# See LICENSE file
try:
import uasyncio as asyncio
from sys import print_exception
except ImportError:
import asyncio
import sys, socket, struct, time, logging
try:
from time import time_us, settime, adjtime
except ImportError:
# (date(2000, 1, 1) - date(1970, 1, 1)).days * 24*60*60
UNIX_DELTA = 946684800
def time_us():
return int((time.time() - UNIX_DELTA) * 1000000)
def settime(usecs):
print("settime(%d) - a step of %d" % (usecs, time_us() - (usecs + UNIX_DELTA)))
def adjtime(usecs):
print("adjtime(%d) - an adjustment of %d" % (usecs, time_us() - (usecs + UNIX_DELTA)))
from asyncio_dgram import connect as dgram_connect
log = logging.getLogger(__name__)
# (date(2000, 1, 1) - date(1900, 1, 1)).days * 24*60*60
NTP_DELTA = 3155673600
# Delta from MP Epoch of 2000/1/1 to NTP Epoch 1 of Feb 7, 2036 06:28:16 UTC
# NTP_DELTA = 1139293696
# Offsets into the NTP packet
OFF_ORIG = 24
OFF_RX = 32
OFF_TX = 40
# Poll and adjust intervals
MIN_POLL = 64  # never poll faster than every 64 seconds
MAX_POLL = 1024 # default maximum poll interval
# ntp2mp converts from NTP seconds+fraction with an Epoch 1 of Feb 7, 2036 06:28:16 UTC
# to MP microseconds with an Epoch of 2000/1/1
def ntp2mp(secs, frac):
usec = (frac * 1000000) >> 32
# print(secs, frac, "->", secs - NTP_DELTA, (secs - NTP_DELTA) * 1000000, usec)
return ((secs - NTP_DELTA) * 1000000) + usec
# mp2ntp converts from MP microseconds to NTP seconds and frac
def mp2ntp(usecs):
(secs, usecs) = divmod(usecs, 1000000)
return (secs + NTP_DELTA, (usecs << 32) // 1000000)
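# Round-trip sanity check (illustrative): for any MP timestamp t_us,
# ntp2mp(*mp2ntp(t_us)) == t_us up to truncation in the 32-bit NTP fraction,
# e.g. mp2ntp(1_000_000) -> (NTP_DELTA + 1, 0) and ntp2mp(NTP_DELTA + 1, 0) -> 1_000_000.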
# ntpclient -
# Class implementing the uasyncio based NTP client
class SNTP:
def __init__(self, host="pool.ntp.org", poll=MAX_POLL, max_step=1):
self._host = host
self._sock = None
self._addr = None
self._send = None
self._recv = None
self._close = None
self._req_poll = poll
self._min_poll = MIN_POLL
self._max_step = int(max_step * 1000000)
self._poll_task = None
def start(self):
self._poll_task = asyncio.get_event_loop().create_task(self._poller())
async def stop(self):
if self._poll_task is not None:
self._poll_task.cancel()
try:
await self._poll_task
            except asyncio.CancelledError:
                pass
self._close()
self._poll_task = None
async def _poll(self):
# We try to stay with the same server as long as possible. Only
# lookup the address on startup or after errors.
if self._sock is None:
self._addr = socket.getaddrinfo(self._host, 123)[0][-1]
log.debug("server %s->%s", self._host, self._addr)
if sys.implementation.name == "micropython":
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._sock.connect(self._addr)
stream = asyncio.StreamReader(self._sock)
async def write_drain(pkt):
stream.write(pkt)
await stream.drain()
self._send = write_drain
self._recv = lambda length: stream.read(length)
self._close = lambda: self._sock.close()
else:
stream = await dgram_connect(self._addr)
async def stream_send(pkt):
return await stream.send(pkt)
self._send = stream_send
async def stream_recv(length):
return (await stream.recv())[0]
self._recv = stream_recv
self._close = lambda: stream.close()
# Send the NTP v3 request to the server
wbuf = bytearray(48)
wbuf[0] = 0b00011011
send_us = time_us()
send_ntp = mp2ntp(send_us)
struct.pack_into("!II", wbuf, OFF_TX, send_ntp[0], send_ntp[1]) # set tx timestamp
await self._send(wbuf)
# Get server reply
while True:
# Raises asyncio.TimeoutError on time-out
rbuf = await asyncio.wait_for(self._recv(48), timeout=1)
recv_us = time_us()
# Verify it's truly a response to our request
orig_ntp = struct.unpack_from("!II", rbuf, OFF_ORIG) # get originate timestamp
if orig_ntp == send_ntp:
break
# Calculate clock step to apply per RFC4330
rx_us = ntp2mp(*struct.unpack_from("!II", rbuf, OFF_RX)) # get server recv timestamp
tx_us = ntp2mp(*struct.unpack_from("!II", rbuf, OFF_TX)) # get server transmit timestamp
delay = (recv_us - send_us) - (tx_us - rx_us)
step = ((rx_us - send_us) + (tx_us - recv_us)) // 2
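        # In RFC 4330 terms: send_us = T1 (client tx), rx_us = T2 (server rx),
        # tx_us = T3 (server tx), recv_us = T4 (client rx), so
        #   delay  = (T4 - T1) - (T3 - T2)
        #   offset = ((T2 - T1) + (T3 - T4)) / 2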
tup = struct.unpack_from("!IIIIII", rbuf, OFF_ORIG)
r = mp2ntp(recv_us)
# log.debug( "orig=[%d,%x] rx=[%d,%x] tx=[%d,%x] recv=[%d,%x] -> delay=%fms step=%dus",
# tup[0], tup[1], tup[2], tup[3], tup[4], tup[5], r[0], r[1], delay / 1000, step)
return (delay, step)
async def _poller(self):
self._status = 0
while True:
# print("\nperforming NTP query")
try:
                self._status = (self._status << 1) & 0xFFFF
(delay_us, step_us) = await self._poll()
if step_us > self._max_step or -step_us > self._max_step:
# print(time.localtime())
                    (tgt_s, tgt_us) = divmod(time_us() + step_us, 1000000)
log.warning("stepping to %s", time.localtime(tgt_s))
settime(tgt_s, tgt_us)
# print(time.localtime())
else:
lvl = logging.DEBUG if abs(step_us) < 10000 else logging.INFO
log.log(lvl, "adjusting by %dus (delay=%dus)", step_us, delay_us)
adjtime(step_us)
                self._status |= 1
await asyncio.sleep(61)
except asyncio.TimeoutError:
log.warning("%s timed out", self._host)
if (self._status & 0x7) == 0:
# Three failures in a row, force fresh DNS look-up
                    self._sock = None
await asyncio.sleep(11)
except OSError as e:
# Most likely DNS lookup failure
log.warning("%s: %s", self._host, e)
                self._sock = None
await asyncio.sleep(11)
except Exception as e:
log.error("%s", e)
print_exception(e)
await asyncio.sleep(121)
def start(mqtt, config):
from utime import tzset
tzset(config.pop("zone", "UTC+0"))
async def on_init(config):
ss = SNTP(**config)
ss.start()
mqtt.on_init(on_init(config))
# if __name__ == "__main__":
#
# logging.basicConfig(level=logging.DEBUG)
#
# async def runner():
# ss = SNTP(host="192.168.0.1")
# ss.start()
# while True:
# await asyncio.sleep(300)
#
# asyncio.run(runner())
|
python
|
import re
import random
TOOBIG = -1
TOOSMALL = -2
NOTNEW = -3
EMPTY = -1
class NameJoiner:
def __init__(self, str1, str2):
words = [str1, str2]
random.shuffle(words)
self.fullStartName = words[0]
self.fullEndName = words[1]
self.initVariables()
def initVariables(self):
self.lower_limit = min(len(self.fullStartName), len(self.fullEndName))
self.upper_limit = max(len(self.fullStartName), len(self.fullEndName)) + self.lower_limit - 1
self.firstPositions = self.getKeyVocalsPositions(self.fullStartName)
self.secondPositions = self.getKeyVocalsPositions(self.fullEndName)
    def join(self):
        res = self.tryToJoin()
        if res == EMPTY:
            self.fullStartName, self.fullEndName = self.fullEndName, self.fullStartName
            self.initVariables()
            res = self.tryToJoin()
            if res == EMPTY:
                self.initVariables()
                return self.fullStartName + self.fullEndName[self.secondPositions[-1] + 1:]
        return res
def tryToJoin(self):
firstSplitPlace = self.chooseRandomFirstSplit()
secondSplitPlace = NOTNEW
while secondSplitPlace < 0:
secondSplitPlace = self.chooseRandomSecondSplit(firstSplitPlace)
if secondSplitPlace < 0:
self.handleErrorWithFirstPlace(
secondSplitPlace, firstSplitPlace)
firstSplitPlace = self.chooseRandomFirstSplit()
if firstSplitPlace == EMPTY:
return EMPTY
else:
namex = self.fullStartName[:firstSplitPlace] + \
self.fullEndName[secondSplitPlace:]
if namex in [self.fullStartName, self.fullEndName]:
self.secondPositions = [
i for i in self.secondPositions if i != secondSplitPlace-1]
if len(self.secondPositions) == 0:
self.secondPositions = self.getKeyVocalsPositions(
self.fullEndName)
self.firstPositions = self.erasePlaceEq(
firstSplitPlace)
firstSplitPlace = self.chooseRandomFirstSplit()
if firstSplitPlace == EMPTY:
return EMPTY
secondSplitPlace = NOTNEW
return self.fullStartName[:firstSplitPlace] + self.fullEndName[secondSplitPlace:]
def handleErrorWithFirstPlace(self, error, firstSplitPlace):
if error == TOOBIG: # Need smaller first part
self.firstPositions = self.erasePlacesGreaterEq(firstSplitPlace)
elif error == TOOSMALL: # Need greater first part
self.firstPositions = self.erasePlacesLowerEq(firstSplitPlace)
def erasePlaceEq(self, position):
p = position - 1
res = [i for i in self.firstPositions if i != p]
return res
def erasePlacesLowerEq(self, position):
p = position - 1
res = [i for i in self.firstPositions if i > p]
return res
def erasePlacesGreaterEq(self, position):
p = position - 1
res = [i for i in self.firstPositions if i < p]
return res
def chooseRandomFirstSplit(self):
if len(self.firstPositions) == 0:
print(
f"{self.fullStartName} has been omitted while trying to join with {self.fullEndName}")
            return EMPTY
pos = random.choice(self.firstPositions) + 1
return pos
def getKeyVocalsPositions(self, s):
regex_iter = re.finditer(r'[aeiouy][^aeiou]', s.lower())
positions = [i.start() for i in regex_iter]
return positions
def chooseRandomSecondSplit(self, firstSplitPlace):
minimumCharactersLeft = self.lower_limit - firstSplitPlace
maximumCharactersLeft = self.upper_limit - firstSplitPlace
minimumIndex = len(self.fullEndName) - maximumCharactersLeft
maximumIndex = len(self.fullEndName) - minimumCharactersLeft
filtered_big_positions = [
i for i in self.secondPositions if i <= maximumIndex]
if len(filtered_big_positions) == 0:
            return TOOSMALL
filtered_positions = [
i for i in self.secondPositions if minimumIndex <= i + 1 <= maximumIndex]
if len(filtered_positions) == 0:
            return TOOBIG
return random.choice(filtered_positions) + 1
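# Illustrative usage (input names arbitrary): blends the two names at
# vowel/consonant boundaries, so "Roberto" + "Alicia" might yield e.g. "Robicia".
if __name__ == "__main__":
    print(NameJoiner("Roberto", "Alicia").join())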
|
python
|
import sys
from PySide2.QtWidgets import QApplication, QWidget, QPushButton, QLineEdit, QTextBrowser, QMainWindow, QTextEdit
from PySide2.QtCore import QFile, Slot
from ui_mainwindow import Ui_MainWindow
class MainWindow(QMainWindow):
def __init__(self, filename : str):
super(MainWindow, self).__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.filename = filename
self.getChildren()
self.rechercher_button.clicked.connect(self.check_if_in_table)
self.inserer_button.clicked.connect(self.add_in_table)
self.supprimer_button.clicked.connect(self.delete_from_table)
self.output_field.setReadOnly(True)
self.output_field.append("Messages :\n")
self.table_visualizer.setReadOnly(True)
self.visualize_table()
self.show()
def getChildren(self):
self.central_widget = self.findChild(QWidget, "centralwidget")
self.inserer_button = self.central_widget.findChild(QPushButton,"inserer_button")
self.rechercher_button = self.central_widget.findChild(QPushButton,"rechercher_button")
self.supprimer_button = self.central_widget.findChild(QPushButton,"supprimer_button")
self.output_field = self.central_widget.findChild(QTextEdit,"output_field")
self.table_visualizer = self.central_widget.findChild(QTextEdit,"table_visualizer")
self.corps_text_input = self.central_widget.findChild(QLineEdit,"corps_text_input")
self.tete_text_input = self.central_widget.findChild(QLineEdit,"tete_text_input")
self.antenne_text_input = self.central_widget.findChild(QLineEdit,"antenne_text_input")
self.bras_text_input = self.central_widget.findChild(QLineEdit,"bras_text_input")
self.jambes_text_input = self.central_widget.findChild(QLineEdit,"jambes_text_input")
self.doigts_text_input = self.central_widget.findChild(QLineEdit,"doigts_text_input")
self.yeux_text_input = self.central_widget.findChild(QLineEdit,"yeux_text_input")
def visualize_table(self):
        self.table_visualizer.setText("Fichier texte \"{}\" :\n".format(self.filename))
with open(self.filename, "r") as table:
for line in table.readlines():
entry_as_list = line.replace("\n", '').split(",")[:-1]
self.table_visualizer.append("{}".format(entry_as_list))
def _get_input_list(self) -> list:
input_list = []
input_list.append(self.corps_text_input.text())
input_list.append(self.tete_text_input.text())
input_list.append(self.antenne_text_input.text())
input_list.append(self.bras_text_input.text())
input_list.append(self.jambes_text_input.text())
input_list.append(self.doigts_text_input.text())
input_list.append(self.yeux_text_input.text())
new_list = []
for val in input_list:
if(val == ''):
new_list.append('0')
else:
new_list.append(val)
return new_list
    def check_if_in_table(self, input_list: list = None, standalone: bool = True) -> bool:
        """
        Checks whether a given input_list is already in the table.
        Can be called on its own, in which case the boolean return value
        doesn't matter but a message is shown on the GUI.
        It can also be called by add_in_table() or delete_from_table(),
        which use the boolean value but produce no GUI output.
        """
print("In function check_if_in_table()")
if(not input_list):
input_list = self._get_input_list()
print("Input list is {}".format(input_list))
is_numeric = self._is_input_numeric(input_list)
if(not is_numeric and standalone):
print("ERROR, INPUT IS NOT NUMERIC")
self.output_field.append("{} n'est pas valide. Reessayez (chiffres uniquement).".format(input_list))
else:
with open(self.filename, "r") as table:
print("Opening file")
for line in table.readlines():
entry_as_list = line.replace("\n", '').split(",")[:-1]
if(input_list == entry_as_list):
print("{} is present in table".format(input_list))
if(standalone): # Write only when check_if_in_table is called by itself
self.output_field.append("{} est déjà présent dans la table".format(input_list))
return True
if(standalone):
self.output_field.append("{} n'est pas présent dans la table".format(input_list))
return False
def add_in_table(self):
print("In function add_in_table()")
input_list = self._get_input_list()
print("Input list is {}".format(input_list))
if(self.check_if_in_table(input_list, False)):
print("{} already in table. Aborting...".format(input_list))
self.output_field.append("{} est déjà présent dans la table. Abandon...".format(input_list))
else:
with open(self.filename, "a") as table:
status = self._add_one_line(table, input_list)
if(status):
self.output_field.append("{} inséré.".format(input_list))
self.visualize_table()
def _is_input_numeric(self, input_list) -> bool:
write_line = True
# Check if everything is correct with the input list
for input in input_list:
write_line = write_line and input.isnumeric()
return write_line
def _add_one_line(self, file, input_list) -> bool:
# Check if everything is correct with the input list
is_numeric = self._is_input_numeric(input_list)
# If correct, write to file
if(is_numeric):
for input in input_list:
file.write("{},".format(int(input)))
file.write("\n")
print("Inserted {}".format(input_list))
# Else, print error
else:
print("ERROR, INPUT IS NOT NUMERIC")
self.output_field.append("{} n'est pas valide. Reessayez (chiffres uniquement).".format(input_list))
return is_numeric # Report status
def delete_from_table(self):
print("In function delete_from_table()")
input_list = self._get_input_list()
if(not self._is_input_numeric(input_list)):
print("ERROR, INPUT IS NOT NUMERIC")
self.output_field.append("{} n'est pas valide. Reessayez (chiffres uniquement).".format(input_list))
return
all_entries = []
if(self.check_if_in_table(input_list, False)):
print("Deleting {}".format(input_list))
self.output_field.append("{} supprimé.".format(input_list))
# Reading all line from file and converting them to a list
with open(self.filename, "r") as table:
for line in table.readlines():
all_entries.append(line.replace("\n", '').split(",")[:-1])
print(line)
# Re-opening the same file and writing back every item except
# the input_list that we want to delete
with open(self.filename, "w") as table:
for entry in all_entries:
if(entry != input_list):
print("Writing {} to file".format(entry))
self._add_one_line(table, entry)
self.visualize_table()
else:
print("{} not in file".format(input_list))
self.output_field.append("{} n'est PAS présent dans la table. Abandon...".format(input_list))
if __name__ == "__main__":
app = QApplication(sys.argv)
# filename = sys.argv[1]
filename = 'tables.txt'
window = MainWindow(filename)
sys.exit(app.exec_())
|
python
|
#!/usr/bin/env python3
"""AstroThings library: imagination and Universe."""
def main():
pass
if __name__ == '__main__':
main()
__version__ = '0.1.0.dev1'
|
python
|
from unittest import TestCase
from tilapia.lib.provider.chains.btc.sdk import transaction
class TestTransaction(TestCase):
def test_calculate_vsize(self):
self.assertEqual(79, transaction.calculate_vsize(["P2WPKH"], []))
self.assertEqual(176, transaction.calculate_vsize(["P2WPKH"], ["P2WPKH", "P2PKH", "P2WPKH-P2SH"]))
self.assertEqual(255, transaction.calculate_vsize(["P2PKH"], ["P2WPKH", "P2PKH", "P2WPKH-P2SH"]))
self.assertEqual(199, transaction.calculate_vsize(["P2WPKH-P2SH"], ["P2WPKH", "P2PKH", "P2WPKH-P2SH"]))
self.assertEqual(246, transaction.calculate_vsize([], [], op_return="a" * 200))
|
python
|
from applauncher.event import EventManager, KernelReadyEvent, ConfigurationReadyEvent
class TestClass:
def test_events(self):
em = EventManager()
class KernelCounter:
c = 0
@staticmethod
def inc(event):
KernelCounter.c += 1
@staticmethod
def dec(event):
KernelCounter.c -= 1
class OtherCounter:
c = 0
@staticmethod
def inc(event):
OtherCounter.c += 1
@staticmethod
def dec(event):
OtherCounter.c -= 1
assert KernelCounter.c == 0
assert OtherCounter.c == 0
em.add_listener(KernelReadyEvent, KernelCounter.inc)
em.add_listener(ConfigurationReadyEvent, OtherCounter.inc)
assert KernelCounter.c == 0
em.dispatch(KernelReadyEvent())
assert KernelCounter.c == 1
assert OtherCounter.c == 0
em.dispatch(KernelReadyEvent())
assert KernelCounter.c == 2
assert OtherCounter.c == 0
em.dispatch(ConfigurationReadyEvent({"config": "config"}))
assert KernelCounter.c == 2
assert OtherCounter.c == 1
def test_event_content(self):
em = EventManager()
class OtherCounter:
config = None
@staticmethod
def event(event):
OtherCounter.config = event.configuration
assert OtherCounter.config is None
em.dispatch(ConfigurationReadyEvent({"config": "config"}))
assert OtherCounter.config is None
em.add_listener(ConfigurationReadyEvent, OtherCounter.event)
em.dispatch(ConfigurationReadyEvent({"config": "config"}))
assert OtherCounter.config == {"config": "config"}
def test_string_event(self):
"""The same as above but using a string value instead of the event"""
em = EventManager()
class OtherCounter:
config = None
@staticmethod
def event(event):
OtherCounter.config = event.configuration
assert OtherCounter.config is None
em.dispatch(ConfigurationReadyEvent({"config": "config"}))
assert OtherCounter.config is None
em.add_listener(ConfigurationReadyEvent.event_name, OtherCounter.event)
em.dispatch(ConfigurationReadyEvent({"config": "config"}))
assert OtherCounter.config == {"config": "config"}
|
python
|
from flask_wtf import FlaskForm
from wtforms import BooleanField, SelectField, IntegerField
from wtforms.validators import Optional
from .vcconnect import get_main_area_dropdown, get_service_dropdown, get_client_group_dropdown
class OrgSearchForm(FlaskForm):
main_area_id = SelectField('Area',
coerce=int,
choices=get_main_area_dropdown(),
validators=[Optional()])
service_id = SelectField('Service',
coerce=int,
choices=get_service_dropdown(),
validators=[Optional()])
client_group_id = SelectField('Client Group',
coerce=int,
choices=get_client_group_dropdown(),
validators=[Optional()])
class VenueSearchForm(FlaskForm):
area_id = SelectField('Area',
coerce=int,
choices=get_main_area_dropdown(),
validators=[Optional()])
venue_car_parking = BooleanField('Car Parking')
disabled = BooleanField('Disabled Parking')
catering = BooleanField('Catering')
event_management = BooleanField('Event Management')
hearing = BooleanField('Hearing Loop')
photocopy = BooleanField('Photocopying')
refreshments = BooleanField('Refreshments')
wheelchair = BooleanField('Wheelchair Access')
max_capacity = IntegerField('Room Capacity', validators=[Optional()])
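# A minimal usage sketch (route and template names hypothetical):
#
#   @app.route('/orgs', methods=['GET', 'POST'])
#   def org_search():
#       form = OrgSearchForm()
#       if form.validate_on_submit():
#           return run_search(form.main_area_id.data, form.service_id.data,
#                             form.client_group_id.data)
#       return render_template('org_search.html', form=form)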
|
python
|
from airflow.hooks.base_hook import BaseHook
def get_conn(conn_id):
# get connection by name from BaseHook
conn = BaseHook.get_connection(conn_id)
return conn
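# Usage sketch (connection id hypothetical): given an Airflow connection named
# "my_db", the returned Connection object exposes host, login, password, port:
#   conn = get_conn("my_db")
#   uri = f"postgresql://{conn.login}:{conn.password}@{conn.host}:{conn.port}"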
|
python
|
import sys
import torch.nn as nn
from net6c import ClusterNet6c, ClusterNet6cTrunk
from vgg import VGGNet
__all__ = ["ClusterNet6cTwoHead"]
class ClusterNet6cTwoHeadHead(nn.Module):
def __init__(self, config, output_k, semisup=False):
super(ClusterNet6cTwoHeadHead, self).__init__()
self.batchnorm_track = config.batchnorm_track
self.cfg = ClusterNet6c.cfg
num_features = self.cfg[-1][0]
self.semisup = semisup
# Features are downsampled three times by MaxPool layers, size is halved every time, so each dimension is
# effectively divided by 8
if config.input_sz == [24, 24]:
features_sp_size = [3, 3]
elif config.input_sz == [64, 64]:
features_sp_size = [8, 8]
elif config.input_sz == [64, 216]:
features_sp_size = [8, 27]
else:
raise NotImplementedError("input images have to be of size 24x24, 64x64 or 64x216")
if not semisup:
self.num_subheads = config.num_subheads
# is default (used for iid loss)
# use multi heads
# include softmax
self.heads = nn.ModuleList([
nn.Sequential(
nn.Linear(num_features * features_sp_size[0] * features_sp_size[1], output_k),
nn.Softmax(dim=1)
)
                for _ in range(self.num_subheads)
])
else:
self.head = nn.Linear(num_features * features_sp_size[0] * features_sp_size[1], output_k)
def forward(self, x, kmeans_use_features=False):
if not self.semisup:
results = []
            for i in range(self.num_subheads):
if kmeans_use_features:
results.append(x) # duplicates
else:
results.append(self.heads[i](x))
return results
else:
return self.head(x)
class ClusterNet6cTwoHead(VGGNet):
cfg = [(64, 1), ('M', None), (128, 1), ('M', None), (256, 1), ('M', None), (512, 1)]
def __init__(self, config):
super(ClusterNet6cTwoHead, self).__init__()
assert len(config.output_ks) == 2
self.batchnorm_track = config.batchnorm_track
self.trunk = ClusterNet6cTrunk(config)
self.head_A = ClusterNet6cTwoHeadHead(config, output_k=config.output_ks[0])
semisup = (hasattr(config, "semisup") and config.semisup)
print("semisup: %s" % semisup)
self.head_B = ClusterNet6cTwoHeadHead(config, output_k=config.output_ks[1], semisup=semisup)
self._initialize_weights()
def forward(self, x, head_idx=1, kmeans_use_features=False, trunk_features=False, penultimate_features=False):
if penultimate_features:
print("Not needed/implemented for this arch")
exit(1)
# default is index 1 (for head B) for use by eval code
# training script switches between A (index 0) and B
x = self.trunk(x)
if trunk_features: # for semisup
return x
# returns list or single
if head_idx == 0:
x = self.head_A(x, kmeans_use_features=kmeans_use_features)
elif head_idx == 1:
x = self.head_B(x, kmeans_use_features=kmeans_use_features)
else:
assert False, "Index too high for TwoHead architecture"
return x
|
python
|
import webbrowser
import click
from ghutil.types import Repository
@click.command()
@Repository.argument('repo')
def cli(repo):
""" Open a repository in a web browser """
webbrowser.open_new(repo.data["html_url"])
|
python
|
from __future__ import print_function
import os
import json
from typtop.dbaccess import (
    UserTypoDB, get_time, on_wrong_password,
    on_correct_password, logT, auxT,
    FREQ_COUNTS, INDEX_J, WAITLIST_SIZE,
    WAIT_LIST, pkdecrypt,
    NUMBER_OF_ENTRIES_TO_ALLOW_TYPO_LOGIN,
    call_check
)
import typtop.config as config
import typtop.dbaccess as dbaccess
import yaml
import pytest
import time
import pwd
dbaccess.WARM_UP_CACHE = False
NN = 5
secretAuxSysT = "SecretAuxData"
ORIG_PW_ID = 'OrgPwID'
dbaccess.NUMBER_OF_ENTRIES_TO_ALLOW_TYPO_LOGIN = 30
dbaccess.WARM_UP_CACHE = 0
@pytest.fixture(autouse=True)
def no_requests(monkeypatch):
monkeypatch.setattr("typtop.config.TEST", True)
def get_username():
user = pwd.getpwuid(os.getuid()).pw_name
return user
def DB_path():
    # TODO: for some reason it doesn't work
user = get_username()
db = UserTypoDB(user, debug_mode=True)
return db.get_db_path()
#return "/home/{}/{}.db".format(get_username(), DB_NAME)
def remove_DB():
print(DB_path())
os.remove(DB_path())
def start_DB():
remove_DB()
db = UserTypoDB(get_username(), debug_mode=True)
db.init_typtop(get_pw(), allow_typo_login=True)
return db
def test_warmed_cache():
t1, dbaccess.WARM_UP_CACHE = dbaccess.WARM_UP_CACHE, 1
t2, dbaccess.NUMBER_OF_ENTRIES_TO_ALLOW_TYPO_LOGIN = dbaccess.NUMBER_OF_ENTRIES_TO_ALLOW_TYPO_LOGIN, 0
typoDB = start_DB()
assert typoDB.check(pws[1]), pws[1]
assert typoDB.check(pws[0]), pws[0]
dbaccess.WARM_UP_CACHE, dbaccess.NUMBER_OF_ENTRIES_TO_ALLOW_TYPO_LOGIN = t1, t2
def count_real_typos_in_cache(t_db, PW_CHANGE=False):
flist_ctx = t_db.get_from_auxtdb(FREQ_COUNTS) # , yaml.load)
f_list_all = json.loads(pkdecrypt(t_db._sk, flist_ctx))
f_list = [f for f in f_list_all if f>0]
return len(f_list), sum(f_list)
def test_login_settings():
typoDB = start_DB()
#db = typoDB.getdb()
assert typoDB.is_allowed_login()
typoDB.allow_login(allow=False)
assert not typoDB.is_allowed_login()
typoDB.allow_login()
assert typoDB.is_allowed_login()
@pytest.mark.skip(reason='Root is allowed now')
def test_root_login():
with pytest.raises(AssertionError):
db = UserTypoDB('root', debug_mode=True)
def test_db_not_readable():
import stat
db = start_DB()
on_correct_password(db, get_pw())
on_wrong_password(db, get_pw()+'1')
s = os.stat(db.get_db_path()).st_mode
assert not ((stat.S_IROTH | stat.S_IWOTH) & s)
remove_DB()
def test_waitlist(isStandAlone=True):
typoDB = start_DB()
pwset = set(pws[:4])
for i in range(4):
typoDB.check(pws[i])
typos_in_waitlist = set()
install_id = typoDB.get_installation_id()
for typo_ctx in typoDB.get_from_auxtdb(WAIT_LIST):
typo_txt = pkdecrypt(typoDB._sk, typo_ctx)
typo, ts = yaml.safe_load(typo_txt)
if not typo.startswith(install_id):
typos_in_waitlist.add(typo)
assert not (typos_in_waitlist - pwset) and not (pwset - typos_in_waitlist)
def test_uninitialized_exceptions():
db = UserTypoDB(get_username(), debug_mode=True)
assert not call_check(0, get_username(), get_pw())
assert call_check(1, get_username(), get_pw())
db.init_typtop(get_pw())
assert call_check(0, get_username(), get_pw()) == 0
assert call_check(0, get_username(), pws[1]) == 0
assert call_check(0, get_username(), get_pw()) == 0
def test_typtop_id():
db = start_DB()
oid = db.get_installation_id()
db.reinit_typtop(pws[0])
nid = db.get_installation_id()
assert oid == nid
def test_add_to_cache(isStandAlone=True):
typoDB = start_DB()
indexj = typoDB.get_from_auxtdb(INDEX_J) # , int)
typoDB.check(pws[0])
typoDB.check(pws[0])
typoDB.check(pws[1])
typoDB.check(pws[5])
typoDB.check(pws[2])
assert (typoDB.get_from_auxtdb(INDEX_J) - indexj) % WAITLIST_SIZE == 5
typoDB.check(get_pw())
# ntypo, fcount = count_real_typos_in_cache(typoDB)
# assert ntypo == 3
# assert fcount > 5
    # No idea what the following is doing.
# sk_dict1, isIn_t1 = typoDB.fetch_from_cache(pws[0], False, False)
# t1_h,_ = sk_dict1.popitem()
# assert isIn_t1
# assert hash_t.count(H_typo=t1_h) == 1
# assert
# assert hash_t.count(H_typo=t5_h) == 1
if isStandAlone:
remove_DB()
else:
return typoDB
def test_alt_typo(isStandAlone = True):
typoDB = test_add_to_cache(False)
# assert count_real_typos_in_cache(typoDB) > 0
    for _ in range(30):
typoDB.check_login_count(update=True)
for _ in range(5):
typoDB.check(pws[4])
## print("added 5 typos to waitlist")
assert typoDB.check(get_pw())
assert typoDB.check(pws[4])
if isStandAlone:
remove_DB()
else:
return typoDB
def test_many_entries(isStandAlone = True):
print("TEST MANY ENTRIES")
BIG = 60
config.WARM_UP_CACHE = True
typoDB = start_DB()
log_t = typoDB.getdb('Log')
assert all(a['ts'] == -1 for a in log_t)
assert len(log_t)> 0 and len(log_t) <= len(config.warm_up_with(get_pw()))
print("start log:{}".format(len(log_t)))
for typ in listOfOneDist(BIG):
typoDB.check(typ)
typoDB.check(get_pw())
print("log len:{}".format(len(log_t)))
# print("hash len:{}".format(count_real_typos_in_cache(typoDB)))
assert(len(log_t) >= WAITLIST_SIZE + 1) # plus the original password
# realIn = min(BIG, NN)
# tcnt, fcnt = count_real_typos_in_cache(typoDB)
config.WARM_UP_CACHE = False
if isStandAlone:
remove_DB()
else:
return typoDB
def test_deleting_logs(isStandAlone = True):
typoDB = start_DB()
insert = 10
for i in range(10):
typoDB.check(pws[i%len(pws)])
typoDB.check(get_pw())
log_t = typoDB.getdb('Log')
assert len(log_t) >= 11 # because that's the length of the log so far
to_send, log_iter = typoDB.get_last_unsent_logs_iter()
assert not to_send
typoDB.update_last_log_sent_time('0')
to_send,log_iter = typoDB.get_last_unsent_logs_iter()
count = len(list(log_iter))
now = get_time()
typoDB.update_last_log_sent_time(now)
typoDB.update_last_log_sent_time(now,delete_old_logs=True)
assert len(log_t) == 0
if isStandAlone:
remove_DB()
else:
return typoDB
def test_pw_change(isStandAlone = True):
typoDB = test_alt_typo(isStandAlone = False)
db = typoDB._db
typoDB.reinit_typtop(new_pw())
# assert count_real_typos_in_cache(typoDB,True)[0] == 1
# assert len(db[logT]) == 0
assert len(db[auxT][WAIT_LIST]) == WAITLIST_SIZE
for newTypo in listOfOneDist(5):
typoDB.check(newTypo)
typoDB.check(new_pw())
# ntypo, ftypo = count_real_typos_in_cache(typoDB, True)
# assert ntypo == 1
for newTypo in listOfOneDist(5):
typoDB.check(newTypo)
assert not typoDB.check(get_pw())
if isStandAlone:
remove_DB()
else:
return typoDB
def test_edit_dist_entropy_cap(is_stand_alone=True):
typodb = start_DB()
typodb.allow_login()
on_correct_password(typodb, get_pw())
on_wrong_password(typodb, '')
on_wrong_password(typodb, ' ')
log = typodb._db[logT]
assert all((l['edit_dist'] <= 5 for l in log))
assert all((-10 <= l['rel_entropy'] <= 10 for l in log))
if is_stand_alone:
remove_DB()
else:
return typodb
# TODO: assert some property of logT
def test_logT(is_stand_alone=True):
typoDB = start_DB()
typoDB.allow_login()
assert typoDB.is_allowed_login()
assert not on_wrong_password(typoDB, pws[0])
assert on_correct_password(typoDB, get_pw()) # 1
assert not on_wrong_password(typoDB, pws[0]) # not enough login count
for _ in range(dbaccess.NUMBER_OF_ENTRIES_TO_ALLOW_TYPO_LOGIN-1):
# on_wrong_password(typoDB, pws[0]) # not enough login count
assert typoDB.check(get_pw())
# on_correct_password(typoDB, get_pw())
assert on_wrong_password(typoDB, pws[0]) # now it should work
typoDB.allow_login(allow=False)
assert not on_wrong_password(typoDB, pws[0]) # now it should work
assert on_correct_password(typoDB, get_pw())
typoDB.allow_login(allow=True)
assert on_wrong_password(typoDB, pws[0])
assert set(typoDB._db[logT][0].keys()) == set(config.logT_cols)
if is_stand_alone:
remove_DB()
else:
return typoDB
# TODO: assert some property of logT
# this test takes a bit longer
def test_disabling_first_30_times(isStandAlone = True):
# checks that entry with a typo is allowed
# only after the real pw was entered more than 30 times
typoDB = start_DB()
assert not on_wrong_password(typoDB, pws[0])
assert not on_wrong_password(typoDB, pws[1])
assert on_correct_password(typoDB, get_pw())
# count = 1
# 29 left
    for i in range(29):
print("{}th try".format(i))
assert not on_wrong_password(typoDB, pws[0])
assert not on_wrong_password(typoDB, pws[1])
assert on_correct_password(typoDB, get_pw())
# 30 entries have been done
assert on_wrong_password(typoDB,pws[0])
assert on_wrong_password(typoDB,pws[1])
if isStandAlone:
remove_DB()
else:
return typoDB
def add_pw(pw, correct=False):
db = UserTypoDB(get_username(), debug_mode=False)
if correct:
on_correct_password(db, pw)
else:
on_wrong_password(db, pw)
def test_profile():
typoDB = start_DB()
time_to_add, time_to_delete = 0, 0
    for t in range(10):
t0 = time.time()
        for i in range(10):
add_pw(pws[i%len(pws)], correct=False)
time_to_add += (time.time() - t0)/(i+1)
t0 = time.time()
add_pw(get_pw(), correct=True)
time_to_delete += time.time() - t0
time_to_delete /= (t+1)
time_to_add /= (t+1)
assert time_to_add < 0.06 and time_to_delete < 0.07
remove_DB()
def get_pw():
return 'GoldApp&3'
def new_pw():
return "Beetle*Juice94"
pws = [
'goldApp&3', # 0, lower initial
'gOLDaPP&3', # 1, caps
'GoldApp3', # 2, dropped 1 char, too low entropy
'GoldApp&2', # 3, 1 edit distance
'GoldApp&35', # 4, 1 edit distance
'G0ldAppp&3' # 5, 2 edit dist
]
def listOfOneDist(length):
# using only lower letters
# to avoid shift --> 2 edit dist
m = ord('a')
M = ord('z') + 1 - m
for ii in range(length):
        col = ii // M + 1
newC = chr(ii%M + m)
typo = get_pw()[:col]+newC+get_pw()[col:]
yield typo
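# Worked example of the generator above: with get_pw() == 'GoldApp&3' and ii == 0,
# col = 0 // 26 + 1 = 1 and newC = 'a', so the first yielded typo is 'GaoldApp&3'
# (one inserted character, i.e. edit distance 1 from the real password).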
# profile()
# pytest.main([__file__])
|
python
|
#!/usr/bin/env python
import argparse
import os
import sys
from functools import partial
from glob import iglob as glob
from itertools import chain
from dautil.IO import makedirs
from dautil.util import map_parallel
PY2 = sys.version_info[0] == 2
if PY2:
import cPickle as pickle
else:
import pickle
__version__ = '0.1'
def convert(basedir, output, in_path, protocol=2):
out_path = in_path.replace(basedir, output, 1)
if os.path.isfile(out_path):
print('{} existed, skip.'.format(out_path))
return
makedirs(os.path.dirname(out_path))
with open(in_path, 'rb') as f:
data = pickle.load(f) if PY2 else pickle.load(f, encoding='latin1')
with open(out_path, 'wb') as f:
        pickle.dump(data, f, protocol=protocol)
return
def main(args):
_glob = partial(glob, recursive=True) if args.recursive else glob
in_paths = chain(*(_glob(os.path.join(args.basedir, glob_i))
for glob_i in args.glob))
_convert = partial(convert, args.basedir, args.output, protocol=args.protocol)
Nones = map_parallel(
_convert,
in_paths,
mode=('mpi' if args.mpi else 'multiprocessing'),
processes=args.processes
)
if args.verbose:
print('Finish converting {} pickle files.'.format(len(Nones)))
def cli():
parser = argparse.ArgumentParser(description='Convert pickle to pickle in a certain protocol.')
parser.add_argument('basedir',
help='Base directory of input pickle files.')
parser.add_argument('-o', '--output', required=True,
help='Base directory of output pickle files.')
parser.add_argument('--glob', required=True, nargs='+',
help='Glob pattern from BASEDIR.')
parser.add_argument('-R', '--recursive', action='store_true',
help='If specified, recursive globbing, Python 3 only.')
parser.add_argument('--protocol', type=int, default=2,
                        help='Output pickle protocol. Default: 2.')
parser.add_argument('-v', '--version', action='version',
version='%(prog)s {}'.format(__version__))
    parser.add_argument('-V', '--verbose', action='store_true',
                        help='Print a summary of how many files were converted.')
parser.add_argument('--mpi', action='store_true',
help='If specified, use MPI.')
parser.add_argument('-p', '--processes', type=int, default=1,
help='use p processes with multiprocessing. Hint: use total no. of threads available.')
args = parser.parse_args()
main(args)
if __name__ == "__main__":
cli()
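# Example invocation (hypothetical script name and paths; flags as defined in cli() above):
#   python convert.py /data/in -o /data/out --glob '**/*.pkl' -R --protocol 2 -p 4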
|
python
|
# -*- coding: utf-8 -*-
from scrapy import signals
from scrapy.exporters import CsvItemExporter
class CompanyListStorePipeline:
exporter = None
@classmethod
def from_crawler(cls, crawler):
pipeline = cls()
crawler.signals.connect(pipeline.spider_opened, signals.spider_opened)
crawler.signals.connect(pipeline.spider_closed, signals.spider_closed)
return pipeline
def spider_opened(self, spider):
self.file = open(spider.name + '.csv', 'wb')
self.exporter = CsvItemExporter(self.file)
self.exporter.start_exporting()
def spider_closed(self, spider):
self.exporter.finish_exporting()
self.file.close()
def process_item(self, item, spider):
self.exporter.export_item(item)
return item
|
python
|
"""
:mod:`cookie`
-------------
This is a cookie authentication implementation for Pando.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
from .. import auth
from ..utils import to_rfc822, utcnow
from ..website import THE_PAST
MINUTE = datetime.timedelta(seconds=60)
HOUR = 60 * MINUTE
DAY = 24 * HOUR
WEEK = 7 * DAY
TIMEOUT = 2 * HOUR
# Public config knobs
# ===================
# Feel free to modify for your application.
NAME = "auth"
DOMAIN = None
PATH = "/"
HTTPONLY = "Yes, please."
# Hooks
# =====
def inbound_early(request):
"""Authenticate from a cookie.
"""
if 'user' not in request.context:
token = None
if NAME in request.headers.cookie:
token = request.headers.cookie[NAME].value
token = token.decode('US-ASCII')
request.context['user'] = auth.User(token)
def outbound(response):
"""Set outbound auth cookie.
"""
if 'user' not in response.request.context:
# XXX When does this happen? When auth.inbound_early hasn't run, eh?
raise # XXX raise what?
user = response.request.context['user']
if not isinstance(user, auth.User):
raise Exception("If you define 'user' in a simplate it has to be an "
"instance of pando.auth.User.")
if NAME not in response.request.headers.cookie:
# no cookie in the request, don't set one on response
return
elif user.ANON:
# user is anonymous, instruct browser to delete any auth cookie
cookie_value = ''
cookie_expires = THE_PAST
else:
# user is authenticated, keep it rolling for them
cookie_value = user.token
cookie_expires = to_rfc822(utcnow() + TIMEOUT)
# Configure outgoing cookie.
# ==========================
response.headers.cookie[NAME] = cookie_value # creates a cookie object?
cookie = response.headers.cookie[NAME] # loads a cookie object?
cookie['expires'] = cookie_expires
if DOMAIN is not None:
# Browser default is the domain of the resource requested.
# Pando default is the browser default.
cookie['domain'] = DOMAIN
if PATH is not None:
# XXX What's the browser default? Probably /? Or current dir?
# Pando default is "/".
cookie['path'] = PATH
if HTTPONLY is not None:
# Browser default is to allow access from JavaScript.
# Pando default is to prevent access from JavaScript.
cookie['httponly'] = HTTPONLY
|
python
|
from query_generator.query import Query
from utils.contracts import Operator, Schema
class UnionOperator(Operator):
def __init__(self, schema: Schema, leftSubQuery: Query, rightSubQuery: Query):
super().__init__(schema)
self._leftSubQuery = leftSubQuery
self._rightSubQuery = rightSubQuery
def generate_code(self) -> str:
return f"{self._leftSubQuery.generate_code()}.unionWith({self._rightSubQuery.generate_code()})"
|
python
|
#!/usr/bin/python3
from flask import request
from modules import simple_jwt
from modules.database import get_db_conn
# ----------------------------------------
# Check Auth Token before execute request
# ----------------------------------------
from server_error import server_error
def logged_before_request():
try:
auth = request.headers.get('Authorization')
if auth is not None:
token = auth.split(' ')
if token[0].lower() == 'bearer':
token_check = simple_jwt.check(token[1])
if token_check:
return
except UnicodeDecodeError:
pass
return server_error('AUTH_FAILED')
# ------------------------
# SQLite3 results to list
# ------------------------
def db_data_to_list(db_data, db_desc):
db_result = []
if db_data:
for row in db_data:
result_row = dict(map(lambda x, y: (x[0], y), db_desc, row))
db_result.append(result_row)
return db_result
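# Example: with db_desc = (('id', ...), ('name', ...)) (sqlite3 cursor
# description tuples, column name first) and db_data = [(1, 'admin')],
# this returns [{'id': 1, 'name': 'admin'}].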
# --------------------------------------
# Return permissions of given "role_id"
# --------------------------------------
def get_role_perms(role_id: int):
if role_id:
perms = get_role_perms.__cache.get(role_id)
if perms:
return perms
else:
with get_db_conn(True) as database:
cursor = database.cursor()
cursor.execute('SELECT * FROM roles WHERE id = ?', [role_id])
db_data = cursor.fetchone()
db_desc = cursor.description
cursor.close()
get_role_perms.__cache[role_id] = dict(map(lambda x, y: (x[0], y), db_desc, db_data))
return get_role_perms.__cache.get(role_id)
return {}
get_role_perms.__cache = {} # Static variable
|
python
|
import os
import tempfile
import re
import shutil
import requests
import io
import urllib
from mitmproxy.net import tcp
from mitmproxy.test import tutils
from pathod import language
from pathod import pathoc
from pathod import pathod
from pathod import test
from pathod.pathod import CA_CERT_NAME
def treader(bytes):
"""
Construct a tcp.Read object from bytes.
"""
fp = io.BytesIO(bytes)
return tcp.Reader(fp)
class DaemonTests:
nohang = False
ssl = False
timeout = None
hexdump = False
ssloptions = None
nocraft = False
explain = True
@classmethod
def setup_class(cls):
opts = cls.ssloptions or {}
cls.confdir = tempfile.mkdtemp()
opts["confdir"] = cls.confdir
so = pathod.SSLOptions(**opts)
cls.d = test.Daemon(
staticdir=tutils.test_data.path("pathod/data"),
anchors=[
(re.compile("/anchor/.*"), "202:da")
],
ssl=cls.ssl,
ssloptions=so,
sizelimit=1 * 1024 * 1024,
nohang=cls.nohang,
timeout=cls.timeout,
hexdump=cls.hexdump,
nocraft=cls.nocraft,
logreq=True,
logresp=True,
explain=cls.explain
)
@classmethod
def teardown_class(cls):
cls.d.shutdown()
shutil.rmtree(cls.confdir)
def teardown(self):
self.d.wait_for_silence()
self.d.clear_log()
def _getpath(self, path, params=None):
scheme = "https" if self.ssl else "http"
resp = requests.get(
"%s://localhost:%s/%s" % (
scheme,
self.d.port,
path
),
verify=os.path.join(self.d.thread.server.ssloptions.confdir, CA_CERT_NAME),
params=params
)
return resp
def getpath(self, path, params=None):
logfp = io.StringIO()
c = pathoc.Pathoc(
("localhost", self.d.port),
ssl=self.ssl,
fp=logfp,
)
with c.connect():
if params:
path = path + "?" + urllib.parse.urlencode(params)
resp = c.request("get:%s" % path)
return resp
def get(self, spec):
logfp = io.StringIO()
c = pathoc.Pathoc(
("localhost", self.d.port),
ssl=self.ssl,
fp=logfp,
)
with c.connect():
resp = c.request(
"get:/p/%s" % urllib.parse.quote(spec)
)
return resp
def pathoc(
self,
specs,
timeout=None,
connect_to=None,
ssl=None,
ws_read_limit=None,
use_http2=False,
):
"""
Returns a (messages, text log) tuple.
"""
if ssl is None:
ssl = self.ssl
logfp = io.StringIO()
c = pathoc.Pathoc(
("localhost", self.d.port),
ssl=ssl,
ws_read_limit=ws_read_limit,
timeout=timeout,
fp=logfp,
use_http2=use_http2,
)
with c.connect(connect_to):
ret = []
for i in specs:
resp = c.request(i)
if resp:
ret.append(resp)
for frm in c.wait():
ret.append(frm)
c.stop()
return ret, logfp.getvalue()
def render(r, settings=language.Settings()):
r = r.resolve(settings)
s = io.BytesIO()
assert language.serve(r, s, settings)
return s.getvalue()
|
python
|
import zipfile
import pandas as pd
from datanator_query_python.util import mongo_util
from datanator_query_python.config import config as q_conf
import os
class proteinHalfLives(mongo_util.MongoUtil):
def __init__(self, MongoDB, db, collection, username, password,
path):
super().__init__(MongoDB=MongoDB,
db=db,
username=username,
password=password)
self.collection = self.db_obj[collection]
self.MongoDB = MongoDB
self.username = username
self.password = password
self.path = path
def unzip_file(self):
with zipfile.ZipFile(self.path) as zfile:
zfile.extractall(self.path.replace('.zip', ''))
def parse_protein(self):
"""parse data for protein half life and protein abundances
"""
filepath = self.path.replace('.zip', '')
data = pd.read_excel(filepath+"/Table_S4.xls")
#print(data.columns)
for i in range(len(data)):
d = {}
query = Query(MongoDB=self.MongoDB, db='datanator-test', entity_query_collection='uniprot', taxon_query_collection='taxon_tree',
username=self.username, password=self.password)
d['entity'] = {'type': 'protein',
'name': query.query_entity(data.iloc[i,0][:6]),
'identifiers': [{'namespace': 'uniprot_id',
'value': data.iloc[i,0][:6]},
{'namespace': 'entry_name',
'value': data.iloc[i,0][7:]}]}
d['identifier'] = {'namespace': 'uniprot_id', 'value': data.iloc[i,0][:6]}
d['values'] = [{'type': 'half life', 'value': data['Half_Life/days'][i]*86400, 'units': 'seconds'},
{'type': 'protein abundance', 'value': data['protein abundance'][i], 'units': 'copies/cell'}]
d['genotype'] = query.query_genotype('Mycoplasma pneumoniae')
d['source'] = [{'namespace': 'doi', 'value': '10.1038/msb.2011.38'}]
d['schema_version'] = '2.0'
#print(d)
self.collection.update_one({'type': 'protein',
'name': query.query_entity(data.iloc[i,0][:6]),
'identifiers': [{'namespace': 'uniprot_id',
'value': data.iloc[i,0][:6]},
{'namespace': 'entry_name',
'value': data.iloc[i,0][7:]}]},
{'$set': d},
upsert=True)
print('row {} has been added'.format(str(i)))
def parse_rna(self):
        '''parse RNA abundance data
'''
filepath = self.path.replace('.zip', '')
data = pd.read_excel(filepath+'/Table_S4.xls')
        rows_no_gene_name = []  # rows that are missing a gene name in the database
for i in range(len(data)): # ROW 3 DOES NOT HAVE A GENE NAME
d = {}
query = Query(MongoDB=self.MongoDB, db='datanator-test', entity_query_collection='uniprot', taxon_query_collection='taxon_tree',
username=self.username, password=self.password)
d['entity'] = {'type': 'RNA',
'name': query.query_gene_name(data.iloc[i,0][:6]),
'identifiers': [query.query_gene_identifier(data.iloc[i,0][:6]),
{'namespace': 'uniprot_id',
'value': data.iloc[i,0][:6]},
{'namespace': 'entry_name',
'value': data.iloc[i,0][7:]}]}
d['identifier'] = query.query_gene_identifier(data.iloc[i,0][:6])
d['values'] = [{'type': 'RNA abundance', 'value': data['mRNA abundance'][i], 'units': 'copies/cell'}]
d['genotype'] = query.query_genotype('Mycoplasma pneumoniae')
d['source'] = [{'namespace': 'doi', 'value': '10.1038/msb.2011.38'}]
d['schema_version'] = '2.0'
#print(d)
            if d['entity']['name'] is None:
rows_no_gene_name.append(i)
else:
''' self.collection.update_one({'type': 'RNA',
'name': query.query_entity(data.iloc[i,0][:6]),
'identifiers': [{'namespace': 'uniprot_id',
'value': data.iloc[i,0][:6]},
{'namespace': 'entry_name',
'value': data.iloc[i,0][7:]}]},
{'$set': d},
upsert=True)'''
print('row {} has been added'.format(str(i)))
print('Rows missing gene names: ' + str(rows_no_gene_name))
class Query(mongo_util.MongoUtil):
def __init__(self, MongoDB, db, entity_query_collection, taxon_query_collection, username, password):
super().__init__(MongoDB=MongoDB, db=db, username=username, password=password)
self.entity_collection = self.db_obj[entity_query_collection]
self.taxon_collection = self.db_obj[taxon_query_collection]
def query_entity(self, uniprot_id):
"""takes the uniprot id and returns sring with protein name
Args:
uniprot_id {:obj:`str`}: uniprotID of the wanted protein name
Returns:
protein_name (:obj:`str`}: string of the protein name
"""
query = {'uniprot_id': uniprot_id}
projection = {'protein_name': 1, '_id': 0}
result = self.entity_collection.find_one(query, projection)
return str(result['protein_name'])
def query_genotype(self, tax_name):
        '''takes name of species and returns genotype object
        Args:
            tax_name (:obj:`str`): name of species
        Returns:
            genotype (:obj:`dict`): genotype object for the document
        '''
query = {'tax_name': tax_name}
projection = {'_id': 0, 'tax_id': 1, 'canon_anc_ids': 1, 'canon_anc_names': 1}
result = self.taxon_collection.find_one(query, projection)
        genotype = {'taxon': {'ncbi_taxonomy_id': result['tax_id'], 'name': tax_name, 'canon_ancestors': []}}
for i in range(len(result['canon_anc_ids'])):
genotype['taxon']['canon_ancestors'].append({'ncbi_taxonomy_id': result['canon_anc_ids'][i], 'name': result['canon_anc_names'][i]})
return genotype
def query_gene_identifier(self, uniprot_id):
""" takes uniprot ID and returns the identifier object of the protein
Args:
uniprot_id {:obj:'str'}: uniprot ID of protein
Returns:
identifier {:obj:'obj'}: object for identifier
"""
query = {'uniprot_id': uniprot_id}
projection = {'_id': 0, 'ko_number': 1, 'gene_name': 1}
result = self.entity_collection.find_one(query, projection)
return {'namespace': 'ko_number', 'value': result['ko_number']}
def query_gene_name(self, uniprot_id):
""" takes uniprot ID and returns the gene name of the protein
Args:
uniprot_id (:obj:'str'): uniprot ID of protein
Return:
gene_name (:obj:'str'): gene name of the protein
"""
query = {'uniprot_id': uniprot_id}
projection = {'_id': 0, 'gene_name': 1}
result = self.entity_collection.find_one(query, projection)
return result['gene_name']
def main():
conf = q_conf.Justin()
username = conf.USERNAME
password = conf.PASSWORD
MongoDB = conf.SERVER
db = 'datanator-demo'
collection = 'observation'
filepath = 'datanator/docs/msb201138-sup-0003.zip'
src = proteinHalfLives(MongoDB=MongoDB, db=db, collection=collection, username=username, password=password, path=filepath)
#src.unzip_file()
#src.parse_protein()
src.parse_rna()
if __name__ == "__main__":
main()
|
python
|
import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
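# Counts the nodes of the full binary tree implied by h: it accumulates
# 2**0 + 2**1 + ... while 2**i <= h and prints the sum, i.e.
# 2**(floor(log2(h)) + 1) - 1. h == 1 and h == 2 are special-cased because
# for them range(h) is exhausted before the printing branch is reached.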
h = int(readline())
cnt = 0
if h == 1:
print(1)
elif h == 2:
print(3)
else:
for i in range(h):
if h < 2 ** i:
print(cnt)
exit()
else:
cnt += 2 ** i
|
python
|
_SCRIPT_VERSION = "Script version: Nuthouse01 - 6/10/2021 - v6.00"
# This code is free to use and re-distribute, but I cannot be held responsible for damages that it may or may not cause.
#####################
try:
# these imports work if running from GUI
from . import nuthouse01_core as core
from . import nuthouse01_pmx_parser as pmxlib
from . import nuthouse01_pmx_struct as pmxstruct
from . import model_shift
from . import morph_scale
except ImportError as eee:
try:
# these imports work if running from double-click on THIS script
import nuthouse01_core as core
import nuthouse01_pmx_parser as pmxlib
import nuthouse01_pmx_struct as pmxstruct
import model_shift
import morph_scale
except ImportError as eee:
print(eee.__class__.__name__, eee)
print("ERROR: failed to import some of the necessary files, all my scripts must be together in the same folder!")
print("...press ENTER to exit...")
input()
exit()
core = pmxlib = model_shift = morph_scale = pmxstruct = None
# when debug=True, disable the catchall try-except block. this means the full stack trace gets printed when it crashes,
# but if launched in a new window it exits immediately so you can't read it.
DEBUG = False
helptext = '''=================================================
model_scale:
Scale the entire model around 0,0,0 by some X,Y,Z value.
This also scales all vertex and bone morphs by the same amount, so you don't need to do that separately.
Output: PMX file '[modelname]_scale.pmx'
'''
def main(moreinfo=True):
# prompt PMX name
core.MY_PRINT_FUNC("Please enter name of PMX input file:")
input_filename_pmx = core.MY_FILEPROMPT_FUNC(".pmx")
pmx = pmxlib.read_pmx(input_filename_pmx, moreinfo=moreinfo)
# to shift the model by a set amount:
# first, ask user for X Y Z
# create the prompt popup
scale_str = core.MY_GENERAL_INPUT_FUNC(
lambda x: (model_shift.is_3float(x) is not None),
["Enter the X,Y,Z amount to scale this model by:",
"Three decimal values separated by commas.",
"Empty input will quit the script."])
# if empty, quit
if scale_str == "":
core.MY_PRINT_FUNC("quitting")
return None
# use the same func to convert the input string
scale = model_shift.is_3float(scale_str)
uniform_scale = (scale[0] == scale[1] == scale[2])
if not uniform_scale:
core.MY_PRINT_FUNC("Warning: when scaling by non-uniform amounts, rigidbody sizes will not be modified")
####################
# what does it mean to scale the entire model?
# scale vertex position, sdef params
# ? scale vertex normal vectors, then normalize? need to convince myself of this interaction
# scale bone position, tail offset
# scale fixedaxis and localaxis vectors, then normalize
# scale vert morph, bone morph
# scale rigid pos, size
# scale joint pos, movelimits
for v in pmx.verts:
# vertex position
for i in range(3):
v.pos[i] *= scale[i]
# vertex normal
for i in range(3):
if scale[i] != 0:
v.norm[i] /= scale[i]
else:
v.norm[i] = 100000
# then re-normalize the normal vector
v.norm = core.normalize_distance(v.norm)
# c, r0, r1 params of every SDEF vertex
# these correspond to real positions in 3d space so they need to be modified
if v.weighttype == pmxstruct.WeightMode.SDEF:
for param in v.weight_sdef:
for i in range(3):
param[i] *= scale[i]
for b in pmx.bones:
# bone position
for i in range(3):
b.pos[i] *= scale[i]
# bone tail if using offset mode
if not b.tail_usebonelink:
for i in range(3):
b.tail[i] *= scale[i]
# scale fixedaxis and localaxis vectors, then normalize
if b.has_fixedaxis:
for i in range(3):
b.fixedaxis[i] *= scale[i]
# then re-normalize
b.fixedaxis = core.normalize_distance(b.fixedaxis)
# scale fixedaxis and localaxis vectors, then normalize
if b.has_localaxis:
for i in range(3):
b.localaxis_x[i] *= scale[i]
for i in range(3):
b.localaxis_z[i] *= scale[i]
# then re-normalize
b.localaxis_x = core.normalize_distance(b.localaxis_x)
b.localaxis_z = core.normalize_distance(b.localaxis_z)
for m in pmx.morphs:
# vertex morph and bone morph (only translate, not rotate)
if m.morphtype in (pmxstruct.MorphType.VERTEX, pmxstruct.MorphType.BONE):
morph_scale.morph_scale(m, scale, bone_mode=1)
for rb in pmx.rigidbodies:
# rigid body position
for i in range(3):
rb.pos[i] *= scale[i]
# rigid body size
# NOTE: rigid body size is a special conundrum
# spheres have only one dimension, capsules have two, and only boxes have 3
# what's the "right" way to scale a sphere by 1,5,1? there isn't a right way!
# boxes and capsules can be rotated and stuff so their axes dont line up with world axes, too
# is it at least possible to rotate bodies so they are still aligned with their bones?
# eh, why even bother with any of that. 95% of the time full-model scale will be uniform scaling.
# only scale the rigidbody size if doing uniform scaling: that is guaranteed to be safe!
if uniform_scale:
for i in range(3):
rb.size[i] *= scale[i]
for j in pmx.joints:
# joint position
for i in range(3):
j.pos[i] *= scale[i]
# joint min slip
for i in range(3):
j.movemin[i] *= scale[i]
# joint max slip
for i in range(3):
j.movemax[i] *= scale[i]
# that's it? that's it!
# write out
output_filename_pmx = input_filename_pmx[0:-4] + "_scale.pmx"
output_filename_pmx = core.get_unused_file_name(output_filename_pmx)
pmxlib.write_pmx(output_filename_pmx, pmx, moreinfo=moreinfo)
core.MY_PRINT_FUNC("Done!")
return None
if __name__ == '__main__':
print(_SCRIPT_VERSION)
if DEBUG:
# print info to explain the purpose of this file
core.MY_PRINT_FUNC(helptext)
core.MY_PRINT_FUNC("")
main()
core.pause_and_quit("Done with everything! Goodbye!")
else:
try:
# print info to explain the purpose of this file
core.MY_PRINT_FUNC(helptext)
core.MY_PRINT_FUNC("")
main()
core.pause_and_quit("Done with everything! Goodbye!")
except (KeyboardInterrupt, SystemExit):
# this is normal and expected, do nothing and die normally
pass
except Exception as ee:
# if an unexpected error occurs, catch it and print it and call pause_and_quit so the window stays open for a bit
print(ee)
core.pause_and_quit("ERROR: something truly strange and unexpected has occurred, sorry, good luck figuring out what tho")
|
python
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name="sio2puppetMaster",
version="0.3",
packages=find_packages(),
install_requires=[],
scripts=['scripts/sio2pm-spawndocker'],
license="MIT",
author="Mateusz Żółtak",
author_email="[email protected]"
)
|
python
|
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
import re, os
if not hasattr(__builtins__, 'cmp'):
def cmp(a, b):
return (a > b) - (a < b)
from collections import OrderedDict, namedtuple as NamedTuple
from functools import wraps
from pkg_resources import parse_version as pkg_resources_parse_version # type: ignore
from pkg_resources.extern.packaging.version import Version as PkgResourcesVersion
from clu.version.read_version import read_version_file
FIELDS = ('major', 'minor', 'patch',
'pre', 'build')
# The `namedtuple` ancestor,
# from which our VersionInfo struct inherits:
VersionAncestor = NamedTuple('VersionAncestor', FIELDS) # type: ignore
# sets, for various comparisons and checks:
fields = frozenset(FIELDS)
string_types = { type(lit) for lit in ('', u'', r'') }
byte_types = { bytes, bytearray } - string_types # On py2, bytes == str
dict_types = { dict, OrderedDict }
comparable = dict_types | { VersionAncestor }
# utility conversion functions:
def intify(arg):
if arg is None:
return None
return int(arg)
def strify(arg):
if arg is None:
return None
if isinstance(arg, tuple(string_types)):
return arg
if isinstance(arg, tuple(byte_types)):
return arg.decode('UTF-8') # type: ignore
return str(arg)
def dictify(arg):
if arg is None:
return None
if hasattr(arg, '_asdict'):
return arg._asdict()
if hasattr(arg, 'to_dict'):
return arg.to_dict()
if isinstance(arg, tuple(dict_types)):
return arg
return dict(arg)
# compare version information by dicts:
def compare_keys(dict1, dict2):
""" Blatantly based on code from “semver”: https://git.io/fhb98 """
for key in ('major', 'minor', 'patch'):
result = cmp(dict1.get(key), dict2.get(key))
if result:
return result
pre1, pre2 = dict1.get('pre'), dict2.get('pre')
if pre1 is None and pre2 is None:
return 0
if pre1 is None:
pre1 = '<unknown>'
elif pre2 is None:
pre2 = '<unknown>'
preresult = cmp(pre1, pre2)
if not preresult:
return 0
if not pre1:
return 1
elif not pre2:
return -1
return preresult
# comparison-operator method decorator:
def comparator(operator):
""" Wrap a VersionInfo binary op method in a typechecker """
@wraps(operator)
def wrapper(self, other):
if not isinstance(other, tuple(comparable)):
return NotImplemented
return operator(self, other)
return wrapper
# the VersionInfo class:
class VersionInfo(VersionAncestor):
""" NamedTuple-descendant class allowing for convenient
and reasonably sane manipulation of semantic-version
(née “semver”) string-triple numberings, or whatever
the fuck is the technical term for them, erm. Yes!
"""
SEPARATORS = '..-+'
UNKNOWN = '‽'
NULL_VERSION = f"{UNKNOWN}.{UNKNOWN}.{UNKNOWN}"
REG = re.compile(r'(?P<major>[\d‽]+)\.' \
r'(?P<minor>[\d‽]+)' \
r'(?:\.(?P<patch>[\d‽]+)' \
r'(?:\-(?P<pre>[\w‽]+)' \
r'(?:[\+\-](?P<build>g?[0-9a-f‽]+))?)?)?',
re.IGNORECASE)
@classmethod
def from_string(cls, version_string):
""" Instantiate a VersionInfo with a semver string """
result = cls.REG.search(version_string)
if result:
return cls.from_dict(result.groupdict())
return cls.from_dict({ field : cls.UNKNOWN for field in FIELDS })
@classmethod
def from_dict(cls, version_dict):
""" Instantiate a VersionInfo with a dict of related values
(q.v. FIELD string names supra.)
"""
for field in FIELDS[:2]: # major, minor
assert field in version_dict
assert frozenset(version_dict.keys()).issubset(fields)
return cls(**version_dict)
def to_string(self):
""" Return the VersionInfo data as a semver string """
if not bool(self):
return type(self).NULL_VERSION
SEPARATORS = type(self).SEPARATORS
out = f"{self.major or 0}{SEPARATORS[0]}{self.minor or 0}"
if self.patch is not None:
out += f"{SEPARATORS[1]}{self.patch}"
if self.pre:
out += f"{SEPARATORS[2]}{self.pre}"
if self.build:
out += f"{SEPARATORS[3]}{self.build}"
return out
def to_dict(self):
""" Returns what you think it returns """
out = {} # type: dict
for field in FIELDS:
if getattr(self, field, None) is not None:
out[field] = getattr(self, field)
return out
def to_tuple(self):
""" Return a complete tuple (as in, including “pre” and “build” fields) """
return (self.major, self.minor, self.patch,
self.pre, self.build)
def to_packaging_version(self):
""" aka an instance of `pkg_resources.extern.packaging.version.Version` """
return pkg_resources_parse_version(self.to_string())
def __new__(cls, from_value=None, major='‽', minor='‽',
patch='‽', pre='‽',
build=0):
""" Instantiate a VersionInfo, populating its fields per args """
if from_value is not None:
if type(from_value) in string_types:
return cls.from_string(from_value)
elif type(from_value) is PkgResourcesVersion:
return cls.from_string(str(from_value))
elif type(from_value) in byte_types:
return cls.from_string(from_value.decode('UTF-8'))
elif type(from_value) in dict_types:
return cls.from_dict(from_value)
elif type(from_value) is cls:
return cls.from_dict(from_value.to_dict())
if cls.UNKNOWN in str(major):
major = None
if cls.UNKNOWN in str(minor):
minor = None
if cls.UNKNOWN in str(patch):
patch = None
if cls.UNKNOWN in str(pre):
pre = None
if cls.UNKNOWN in str(build):
build = 0
instance = super(VersionInfo, cls).__new__(cls, intify(major),
intify(minor),
intify(patch),
strify(pre),
strify(build))
return instance
def __str__(self):
""" Stringify the VersionInfo (q.v. “to_string(…)” supra.) """
return self.to_string()
def __repr__(self):
""" Repr-cize the VersionInfo (q.v. “to_string(…)” supra.) """
return self.to_string()
def __bytes__(self):
""" Bytes-ify the VersionInfo (q.v. “to_string(…)” supra.) """
return bytes(self.to_string(), encoding='UTF-8')
def __hash__(self):
""" Hash the VersionInfo, using its tuplized value """
return hash(self.to_tuple())
def __bool__(self):
""" An instance of VersionInfo is considered Falsey if its “major”,
“minor”, and “patch” fields are all set to None; otherwise it’s
a Truthy value in boolean contexts
"""
return not (self.major is None and \
self.minor is None and \
self.patch is None)
# Comparison methods also lifted from “semver”: https://git.io/fhb9i
@comparator
def __eq__(self, other):
return compare_keys(self._asdict(), dictify(other)) == 0
@comparator
def __ne__(self, other):
return compare_keys(self._asdict(), dictify(other)) != 0
@comparator
def __lt__(self, other):
return compare_keys(self._asdict(), dictify(other)) < 0
@comparator
def __le__(self, other):
return compare_keys(self._asdict(), dictify(other)) <= 0
@comparator
def __gt__(self, other):
return compare_keys(self._asdict(), dictify(other)) > 0
@comparator
def __ge__(self, other):
return compare_keys(self._asdict(), dictify(other)) >= 0
# Get the project version tag without importing:
BASEPATH = os.path.dirname(os.path.dirname(__file__))
__version__ = read_version_file(BASEPATH)
version_info = VersionInfo(__version__)
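# A minimal usage sketch (construction directly from semver strings; the
# rich comparisons delegate to compare_keys(...) above):
#
#     assert VersionInfo('1.2.3') < VersionInfo('1.3.0')
#     assert VersionInfo('1.2.3-alpha') != VersionInfo('1.2.3')
#     assert str(VersionInfo('1.2.3-alpha')) == '1.2.3-alpha'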
|
python
|
#!/usr/bin/env python
##
# omnibus - deadbits
# fullcontact.com
##
from http import get
from common import get_apikey
class Plugin(object):
def __init__(self, artifact):
self.artifact = artifact
self.artifact['data']['fullcontact'] = None
self.api_key = get_apikey('fullcontact')
self.headers = {
'X-FullContact-APIKey': self.api_key,
'User-Agent': 'OSINT Omnibus (https://github.com/InQuest/Omnibus)'
}
def run(self):
try:
status, response = get('https://api.fullcontact.com/v2/person.json?email=%s' % self.artifact['name'],
headers=self.headers)
if status:
self.artifact['data']['fullcontact'] = response.json()
if 'socialProfiles' in self.artifact['data']['fullcontact'].keys():
for profile in self.artifact['data']['fullcontact']['socialProfiles']:
child = {
'type': 'user',
'name': profile['username'],
'source': 'fullcontact',
'subtype': profile['type']
}
self.artifact['children'].append(child)
        except Exception:
pass
def main(artifact):
plugin = Plugin(artifact)
plugin.run()
return plugin.artifact
|
python
|
NAME='console_broadcast'
GCC_LIST=['broadcast']
|
python
|
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql import SparkSession
from pyspark.sql import Row
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from pyspark.ml.fpm import FPGrowth
if __name__ == "__main__":
sc = SparkContext('local', 'arules')
sqlContext = SQLContext(sc)
spark = SparkSession\
.builder\
.appName("arules")\
.getOrCreate()
#dataset = sc.textFile("./data/retail.txt")
df = spark.createDataFrame([
(0, [1, 2, 5]),
(1, [1, 2, 3, 5]),
(2, [1, 2])
], ["id", "items"])
fpGrowth = FPGrowth(itemsCol="items", minSupport=0.5, minConfidence=0.6)
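    # minSupport=0.5 keeps only itemsets appearing in at least half of the
    # transactions; minConfidence=0.6 keeps a rule X=>Y only if P(Y|X) >= 0.6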
model = fpGrowth.fit(df)
#display frequent itemsets
model.freqItemsets.show()
#display generated association rules
model.associationRules.show()
#apply transform
model.transform(df).show()
|
python
|
#coding:utf-8
# KDD99 dataset preprocessing
# 39 features are used in total; features 20 and 21 of the original dataset are dropped
import numpy as np
import pandas as pd
import csv
from datetime import datetime
from sklearn import preprocessing  # data standardization
# Convert the KDD99 string-typed features into numeric features
def char2num(sourceFile, handledFile):
    print('START: converting string features to numeric features')
    data_file=open(handledFile,'w',newline='')  # newline='' (Python 3.x) keeps the output file free of extra blank lines
    global dataCnt
    with open(sourceFile, 'r') as data_source:
        csv_reader=csv.reader(data_source)
        csv_writer=csv.writer(data_file)
        dataCnt=0  # number of data rows, initialized to 0
        for row in csv_reader:
            temp_line=np.array(row)  # store the current row in the temp_line array
            temp_line[1]=handleProtocol(row)  # map the 3 protocol types to numeric labels
            temp_line[2]=handleService(row)  # map the 70 network service types to numeric labels
            temp_line[3]=handleFlag(row)  # map the 11 connection flags to numeric labels
            temp_line[41]=handleLabel(row)  # map the 23 attack types to numeric labels
            csv_writer.writerow(temp_line)
            dataCnt+=1
    # print the post-modification state of each row
    data_file.close()
    print('FINISH: string-to-numeric feature conversion complete\n')
# Convert the corresponding non-numeric types into numeric identifiers, i.e. turn symbolic data into numeric data
def find_index(x,y):
return [i for i in range(len(y)) if y[i]==x]
# Map the 3 protocol types in a source row to numeric labels
def handleProtocol(input):
protocol_list=['tcp','udp','icmp']
if input[1] in protocol_list:
return find_index(input[1],protocol_list)[0]
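# e.g. handleProtocol(['0', 'udp', ...]) returns 1, the index of 'udp' in protocol_list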
# Map the 70 network service types in a source row to numeric labels
def handleService(input):
service_list=['aol','auth','bgp','courier','csnet_ns','ctf','daytime','discard','domain','domain_u',
'echo','eco_i','ecr_i','efs','exec','finger','ftp','ftp_data','gopher','harvest','hostnames',
'http','http_2784','http_443','http_8001','imap4','IRC','iso_tsap','klogin','kshell','ldap',
'link','login','mtp','name','netbios_dgm','netbios_ns','netbios_ssn','netstat','nnsp','nntp',
'ntp_u','other','pm_dump','pop_2','pop_3','printer','private','red_i','remote_job','rje','shell',
'smtp','sql_net','ssh','sunrpc','supdup','systat','telnet','tftp_u','tim_i','time','urh_i','urp_i',
'uucp','uucp_path','vmnet','whois','X11','Z39_50']
if input[2] in service_list:
return find_index(input[2],service_list)[0]
# Map the 11 network connection flags in a source row to numeric labels
def handleFlag(input):
flag_list=['OTH','REJ','RSTO','RSTOS0','RSTR','S0','S1','S2','S3','SF','SH']
if input[3] in flag_list:
return find_index(input[3],flag_list)[0]
# Map the attack types in a source row to numeric labels (22 attack types + 1 "normal", i.e. no attack)
def handleLabel(input):
global label_list
label_list = ['normal.', # normal
'back.', 'land.', 'neptune.', 'pod.', 'smurf.', 'teardrop.', # DOS
'ipsweep.', 'nmap.', 'portsweep.', 'satan.', # PROBE
'ftp_write.', 'guess_passwd.', 'imap.', 'multihop.', 'phf.', 'spy.', 'warezclient.', 'warezmaster.', # R2L
'buffer_overflow.', 'loadmodule.', 'perl.', 'rootkit.'] # U2R
if input[41] in label_list:
return find_index(input[41], label_list)[0]
else:
label_list.append(input[41])
return find_index(input[41], label_list)[0]
def standardize(inputFile):
    import warnings
    # Ignore "UserWarning: Numerical issues were encountered when centering the data and might not be solved. Dataset may contain too large values. You may need to prescale your features."
    warnings.filterwarnings("ignore", message="Numerical issues were encountered ")
    print('START: standardizing data')
    dataMatrix = np.loadtxt(open(inputFile,"rb"),delimiter=",",skiprows=0)  # load the data
    labelColumn = dataMatrix[:,-1]
    result = preprocessing.scale(dataMatrix[:,:-1])  # the label column is excluded from scaling
    print('FINISH: data standardization complete\n')
    return result, labelColumn
def normalize(inMatrix):
    print('START: normalizing data')
    np.seterr(divide='ignore',invalid='ignore')  # ignore 0/0 errors
minVals = inMatrix.min(0)
maxVals = inMatrix.max(0)
ranges = maxVals - minVals
# normData = np.zeros(np.shape(inMatrix))
m = inMatrix.shape[0]
normData = inMatrix - np.tile(minVals, (m, 1))
normData = normData/np.tile(ranges, (m, 1))
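    # Column-wise min-max scaling: x' = (x - min) / (max - min), with the
    # per-column min and range broadcast across all m rows via np.tile.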
    # the empty columns in the data are removed later, in exportData
    print('FINISH: data normalization complete\n')
return normData, ranges, minVals
def exportData(npData, outputFile):
pd_data = pd.DataFrame(npData, columns=['duration', 'protocol_type', 'service', 'flag', 'src_bytes', 'dst_bytes', 'land', 'wrong_fragment',
'urgent', 'hot', 'num_failed_logins', 'logged_in', 'num_compromised', 'root_shell', 'su_attempted',
'num_root', 'num_file_creations', 'num_shells', 'num_access_files', 'num_outbound_cmds',
'is_host_login', 'is_guest_login', 'count', 'srv_count', 'serror_rate', 'srv_serror_rate',
'rerror_rate', 'srv_rerror_rate', 'same_srv_rate', 'diff_srv_rate', 'srv_diff_host_rate',
'dst_host_count', 'dst_host_srv_count', 'dst_host_same_srv_rate', 'dst_host_diff_srv_rate',
'dst_host_same_src_port_rate', 'dst_host_srv_diff_host_rate', 'dst_host_serror_rate',
'dst_host_srv_serror_rate', 'dst_host_rerror_rate', 'dst_host_srv_rerror_rate'])
    pd_data.drop('num_outbound_cmds', axis=1, inplace=True)  # drop a column containing empty values
    pd_data.drop('is_host_login', axis=1, inplace=True)  # drop a column containing empty values
pd_data.to_csv(outputFile, header=None, index=None)
def run(source,temp):
    char2num(source, temp)  # convert string features to numeric features
stdData, labelColumn = standardize(temp)
normData, _, _ = normalize(stdData)
    # shuffle the dataset
np.random.seed(116)
np.random.shuffle(normData)
np.random.seed(116)
np.random.shuffle(labelColumn)
    # split into training, validation and test sets at a 6:2:2 ratio
n_data=len(labelColumn)
split_ind1 = int(n_data * 0.6)
split_ind2 = int(n_data * 0.8)
train_data=normData[:split_ind1,:]
train_label = labelColumn[:split_ind1]
val_data=normData[split_ind1:split_ind2,:]
val_label = labelColumn[split_ind1:split_ind2]
test_data=normData[split_ind2:,:]
test_label = labelColumn[split_ind2:]
label = pd.DataFrame(train_label,columns=["attack_type"])
label.to_csv(".//dataset//"+"train_label.csv", header=None, index=None)
label = pd.DataFrame(val_label, columns=["attack_type"])
label.to_csv(".//dataset//"+"val_label.csv", header=None, index=None)
label = pd.DataFrame(test_label, columns=["attack_type"])
label.to_csv(".//dataset//"+"test_label.csv", header=None, index=None)
    print('START: exporting data')
exportData(train_data, ".//dataset//"+"train_data.csv")
exportData(val_data, ".//dataset//"+"val_data.csv")
exportData(test_data, ".//dataset//"+"test_data.csv")
    print(f'FINISH: data export complete\n{dataCnt} rows exported in total')
if __name__=='__main__':
start_time=datetime.now()
sourceFile= './/dataset//kddcup.data_10_percent_corrected'
deCharFile = './/dataset//decharedData.csv'
run(sourceFile,deCharFile)
end_time=datetime.now()
print("运行时间 ",(end_time-start_time),'s') #输出程序运行时间
|
python
|
import matplotlib.pyplot as plt
import os
from pathlib import Path
from typing import Union
import bilby
import redback.get_data
from redback.likelihoods import GaussianLikelihood, PoissonLikelihood
from redback.model_library import all_models_dict
from redback.result import RedbackResult
from redback.utils import logger
from redback.transient.afterglow import Afterglow
from redback.transient.prompt import PromptTimeSeries
from redback.transient.transient import OpticalTransient
dirname = os.path.dirname(__file__)
def fit_model(
transient: redback.transient.transient.Transient, model: Union[callable, str], outdir: str = None,
label: str = None, sampler: str = "dynesty", nlive: int = 2000, prior: dict = None, walks: int = 200,
truncate: bool = True, use_photon_index_prior: bool = False, truncate_method: str = "prompt_time_error",
resume: bool = True, save_format: str = "json", model_kwargs: dict = None, plot=True, **kwargs)\
-> redback.result.RedbackResult:
"""
:param transient: The transient to be fitted
:param model: Name of the model to fit to data or a function.
:param outdir: Output directory. Will default to a sensible structure if not given.
:param label: Result file labels. Will use the model name if not given.
:param sampler: The sampling backend. Nested samplers are encouraged to allow evidence calculation.
(Default value = 'dynesty')
:param nlive: Number of live points.
:param prior: Priors to use during sampling. If not given, we use the default priors for the given model.
:param walks: Number of `dynesty` random walks.
:param truncate: Flag to confirm whether to truncate the prompt emission data
:param use_photon_index_prior: flag to turn off/on photon index prior and fits according to the curvature effect
:param truncate_method: method of truncation
:param resume: Whether to resume the run from a checkpoint if available.
    :param save_format: The format to save the result in. (Default value = 'json')
:param model_kwargs: Additional keyword arguments for the model.
:param kwargs: Additional parameters that will be passed to the sampler
:param plot: If True, create corner and lightcurve plot
:return: Redback result object, transient specific data object
"""
if isinstance(model, str):
model = all_models_dict[model]
if transient.data_mode in ["flux_density", "magnitude"]:
if model_kwargs["output_format"] != transient.data_mode:
raise ValueError(
f"Transient data mode {transient.data_mode} is inconsistent with "
f"output format {model_kwargs['output_format']}. These should be the same.")
prior = prior or bilby.prior.PriorDict(filename=f"{dirname}/Priors/{model}.prior")
outdir = outdir or f"{transient.directory_structure.directory_path}/{model.__name__}"
Path(outdir).mkdir(parents=True, exist_ok=True)
label = label or transient.name
if isinstance(transient, Afterglow):
return _fit_grb(
transient=transient, model=model, outdir=outdir, label=label, sampler=sampler, nlive=nlive, prior=prior,
walks=walks, use_photon_index_prior=use_photon_index_prior, resume=resume, save_format=save_format,
model_kwargs=model_kwargs, truncate=truncate, truncate_method=truncate_method, plot=plot, **kwargs)
elif isinstance(transient, PromptTimeSeries):
return _fit_prompt(
transient=transient, model=model, outdir=outdir, label=label, sampler=sampler, nlive=nlive, prior=prior,
walks=walks, resume=resume, save_format=save_format, model_kwargs=model_kwargs, plot=plot, **kwargs)
elif isinstance(transient, OpticalTransient):
return _fit_optical_transient(
transient=transient, model=model, outdir=outdir, label=label, sampler=sampler, nlive=nlive, prior=prior,
walks=walks, truncate=truncate, use_photon_index_prior=use_photon_index_prior,
truncate_method=truncate_method, resume=resume, save_format=save_format, model_kwargs=model_kwargs,
plot=plot, **kwargs)
else:
raise ValueError(f'Source type {transient.__class__.__name__} not known')
def _fit_grb(transient, model, outdir, label, sampler='dynesty', nlive=3000, prior=None, walks=1000,
use_photon_index_prior=False, resume=True, save_format='json', model_kwargs=None, plot=True, **kwargs):
if use_photon_index_prior:
label += '_photon_index'
if transient.photon_index < 0.:
            logger.info('photon index for GRB %s is negative. Using default prior on alpha_1', transient.name)
prior['alpha_1'] = bilby.prior.Uniform(-10, -0.5, 'alpha_1', latex_label=r'$\alpha_{1}$')
else:
prior['alpha_1'] = bilby.prior.Gaussian(mu=-(transient.photon_index + 1), sigma=0.1,
latex_label=r'$\alpha_{1}$')
if transient.flux_density_data or transient.magnitude_data:
x, x_err, y, y_err = transient.get_filtered_data()
else:
x, x_err, y, y_err = transient.x, transient.x_err, transient.y, transient.y_err
likelihood = \
kwargs.get('likelihood', GaussianLikelihood(x=x, y=y, sigma=y_err, function=model, kwargs=model_kwargs))
meta_data = dict(model=model.__name__, transient_type=transient.__class__.__name__.lower())
transient_kwargs = {k.lstrip("_"): v for k, v in transient.__dict__.items()}
meta_data.update(transient_kwargs)
meta_data['model_kwargs'] = model_kwargs
result = None
if not kwargs.get("clean", False):
try:
result = redback.result.read_in_result(
outdir=outdir, label=label, extension=kwargs.get("extension", "json"), gzip=kwargs.get("gzip", False))
plt.close('all')
return result
except Exception:
pass
result = result or bilby.run_sampler(
likelihood=likelihood, priors=prior, label=label, sampler=sampler, nlive=nlive,
outdir=outdir, plot=plot, use_ratio=False, walks=walks, resume=resume,
maxmcmc=10 * walks, result_class=RedbackResult, meta_data=meta_data,
nthreads=4, save_bounds=False, nsteps=nlive, nwalkers=walks, save=save_format, **kwargs)
plt.close('all')
if plot:
result.plot_lightcurve(model=model)
return result
def _fit_optical_transient(transient, model, outdir, label, sampler='dynesty', nlive=3000, prior=None,
walks=1000, resume=True, save_format='json', model_kwargs=None, plot=True, **kwargs):
if transient.flux_density_data or transient.magnitude_data:
x, x_err, y, y_err = transient.get_filtered_data()
else:
x, x_err, y, y_err = transient.x, transient.x_err, transient.y, transient.y_err
likelihood = kwargs.get(
'likelihood', GaussianLikelihood(x=x, y=y, sigma=y_err, function=model, kwargs=model_kwargs))
meta_data = dict(model=model.__name__, transient_type=transient.__class__.__name__.lower())
transient_kwargs = {k.lstrip("_"): v for k, v in transient.__dict__.items()}
meta_data.update(transient_kwargs)
meta_data['model_kwargs'] = model_kwargs
result = None
if not kwargs.get("clean", False):
try:
result = redback.result.read_in_result(
outdir=outdir, label=label, extension=kwargs.get("extension", "json"), gzip=kwargs.get("gzip", False))
plt.close('all')
return result
except Exception:
pass
result = result or bilby.run_sampler(
likelihood=likelihood, priors=prior, label=label, sampler=sampler, nlive=nlive,
outdir=outdir, plot=plot, use_ratio=False, walks=walks, resume=resume,
maxmcmc=10 * walks, result_class=RedbackResult, meta_data=meta_data,
nthreads=4, save_bounds=False, nsteps=nlive, nwalkers=walks, save=save_format, **kwargs)
plt.close('all')
if plot:
result.plot_lightcurve(model=model)
return result
def _fit_prompt(transient, model, outdir, label, integrated_rate_function=True, sampler='dynesty', nlive=3000,
prior=None, walks=1000, resume=True, save_format='json',
model_kwargs=None, plot=True, **kwargs):
likelihood = PoissonLikelihood(
time=transient.x, counts=transient.y, dt=transient.bin_size, function=model,
integrated_rate_function=integrated_rate_function, kwargs=model_kwargs)
meta_data = dict(model=model.__name__, transient_type=transient.__class__.__name__.lower())
transient_kwargs = {k.lstrip("_"): v for k, v in transient.__dict__.items()}
meta_data.update(transient_kwargs)
meta_data['model_kwargs'] = model_kwargs
result = None
if not kwargs.get("clean", False):
try:
result = redback.result.read_in_result(
outdir=outdir, label=label, extension=kwargs.get("extension", "json"), gzip=kwargs.get("gzip", False))
plt.close('all')
return result
except Exception:
pass
result = result or bilby.run_sampler(
likelihood=likelihood, priors=prior, label=label, sampler=sampler, nlive=nlive,
outdir=outdir, plot=False, use_ratio=False, walks=walks, resume=resume,
maxmcmc=10 * walks, result_class=RedbackResult, meta_data=meta_data,
nthreads=4, save_bounds=False, nsteps=nlive, nwalkers=walks, save=save_format, **kwargs)
plt.close('all')
if plot:
result.plot_lightcurve(model=model)
return result
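# Hedged usage sketch (illustration only, not part of this module): these
# private fitters are reached through redback's public entry point. The
# transient loader and model name below are assumptions for illustration.
#
# import redback
# kilonova = redback.transient.Kilonova.from_open_access_catalogue(name="at2017gfo")
# priors = redback.priors.get_priors(model="one_component_kilonova_model")
# result = redback.fit_model(transient=kilonova, model="one_component_kilonova_model",
#                            prior=priors, sampler="dynesty", nlive=500)
# result.plot_lightcurve(random_models=100)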
|
python
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v4/proto/errors/conversion_action_error.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v4/proto/errors/conversion_action_error.proto',
package='google.ads.googleads.v4.errors',
syntax='proto3',
serialized_options=_b('\n\"com.google.ads.googleads.v4.errorsB\032ConversionActionErrorProtoP\001ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v4/errors;errors\242\002\003GAA\252\002\036Google.Ads.GoogleAds.V4.Errors\312\002\036Google\\Ads\\GoogleAds\\V4\\Errors\352\002\"Google::Ads::GoogleAds::V4::Errors'),
serialized_pb=_b('\nBgoogle/ads/googleads_v4/proto/errors/conversion_action_error.proto\x12\x1egoogle.ads.googleads.v4.errors\x1a\x1cgoogle/api/annotations.proto\"\x8b\x03\n\x19\x43onversionActionErrorEnum\"\xed\x02\n\x15\x43onversionActionError\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\x12\n\x0e\x44UPLICATE_NAME\x10\x02\x12\x14\n\x10\x44UPLICATE_APP_ID\x10\x03\x12\x37\n3TWO_CONVERSION_ACTIONS_BIDDING_ON_SAME_APP_DOWNLOAD\x10\x04\x12\x31\n-BIDDING_ON_SAME_APP_DOWNLOAD_AS_GLOBAL_ACTION\x10\x05\x12)\n%DATA_DRIVEN_MODEL_WAS_NEVER_GENERATED\x10\x06\x12\x1d\n\x19\x44\x41TA_DRIVEN_MODEL_EXPIRED\x10\x07\x12\x1b\n\x17\x44\x41TA_DRIVEN_MODEL_STALE\x10\x08\x12\x1d\n\x19\x44\x41TA_DRIVEN_MODEL_UNKNOWN\x10\t\x12\x1a\n\x16\x43REATION_NOT_SUPPORTED\x10\nB\xf5\x01\n\"com.google.ads.googleads.v4.errorsB\x1a\x43onversionActionErrorProtoP\x01ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v4/errors;errors\xa2\x02\x03GAA\xaa\x02\x1eGoogle.Ads.GoogleAds.V4.Errors\xca\x02\x1eGoogle\\Ads\\GoogleAds\\V4\\Errors\xea\x02\"Google::Ads::GoogleAds::V4::Errorsb\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_CONVERSIONACTIONERRORENUM_CONVERSIONACTIONERROR = _descriptor.EnumDescriptor(
name='ConversionActionError',
full_name='google.ads.googleads.v4.errors.ConversionActionErrorEnum.ConversionActionError',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DUPLICATE_NAME', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DUPLICATE_APP_ID', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TWO_CONVERSION_ACTIONS_BIDDING_ON_SAME_APP_DOWNLOAD', index=4, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BIDDING_ON_SAME_APP_DOWNLOAD_AS_GLOBAL_ACTION', index=5, number=5,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATA_DRIVEN_MODEL_WAS_NEVER_GENERATED', index=6, number=6,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATA_DRIVEN_MODEL_EXPIRED', index=7, number=7,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATA_DRIVEN_MODEL_STALE', index=8, number=8,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATA_DRIVEN_MODEL_UNKNOWN', index=9, number=9,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CREATION_NOT_SUPPORTED', index=10, number=10,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=163,
serialized_end=528,
)
_sym_db.RegisterEnumDescriptor(_CONVERSIONACTIONERRORENUM_CONVERSIONACTIONERROR)
_CONVERSIONACTIONERRORENUM = _descriptor.Descriptor(
name='ConversionActionErrorEnum',
full_name='google.ads.googleads.v4.errors.ConversionActionErrorEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_CONVERSIONACTIONERRORENUM_CONVERSIONACTIONERROR,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=133,
serialized_end=528,
)
_CONVERSIONACTIONERRORENUM_CONVERSIONACTIONERROR.containing_type = _CONVERSIONACTIONERRORENUM
DESCRIPTOR.message_types_by_name['ConversionActionErrorEnum'] = _CONVERSIONACTIONERRORENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ConversionActionErrorEnum = _reflection.GeneratedProtocolMessageType('ConversionActionErrorEnum', (_message.Message,), dict(
DESCRIPTOR = _CONVERSIONACTIONERRORENUM,
__module__ = 'google.ads.googleads_v4.proto.errors.conversion_action_error_pb2'
,
__doc__ = """Container for enum describing possible conversion action errors.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v4.errors.ConversionActionErrorEnum)
))
_sym_db.RegisterMessage(ConversionActionErrorEnum)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
python
|
#!/usr/bin/env python3
import sys
import os
import time
import docker
def error(msg):
print(msg, file=sys.stderr)
exit(1)
def main():
if len(sys.argv) != 2:
error(f"{sys.argv[0]} <container_name>")
container_name = sys.argv[1]
client = docker.from_env()
try:
container = client.containers.get(container_name)
except docker.errors.NotFound:
error("No active challenge session; start a challenge!")
original_command = os.getenv("SSH_ORIGINAL_COMMAND", "/bin/bash")
ssh_tty = os.getenv("SSH_TTY") is not None
if not ssh_tty:
global print
print = lambda *args, **kwargs: None
attempts = 0
while attempts < 30:
try:
container = client.containers.get(container_name)
status = container.status
except docker.errors.NotFound:
status = "uninitialized"
if status == "running":
attempts = 0
print("\r", " " * 80, "\rConnected!")
else:
attempts += 1
print("\r", " " * 80, f"\rConnecting -- instance status: {status}", end="")
time.sleep(1)
continue
if not os.fork():
os.execve(
"/usr/bin/docker",
[
"docker",
"exec",
"-it" if ssh_tty else "-i",
"--user=hacker",
container_name,
"/bin/bash",
"-c",
original_command,
],
{
"HOME": os.environ["HOME"],
},
)
else:
_, status = os.wait()
if status == 0:
break
print()
print("\r", " " * 80, "\rConnecting", end="")
time.sleep(0.5)
else:
print("\r", " " * 80, "\rError: failed to connect!")
if __name__ == "__main__":
main()
|
python
|
# -------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# -------------------------------------------------------------------------------------------
import json
import sys
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, TypeVar
import yaml
from azure.cosmosdb.table.tableservice import TableService
from azure.storage.blob import BlobClient
from flask import Flask, Response, jsonify, request
from werkzeug.http import HTTP_STATUS_CODES
ROOT_DIR = Path(__file__).parent.parent.parent.parent
sys.path.append(str(ROOT_DIR))
PYBCKG_DIR = ROOT_DIR / "libraries" / "PyBCKG"
sys.path.append(str(PYBCKG_DIR))
SPECS_DIR = ROOT_DIR / "libraries/ABEX/tests/data/specs"
from abex.optimizers.optimizer_base import OptimizerBase # noqa: E402
from abex.settings import load_config_from_path_or_name, load_resolutions # noqa: E402 # type: ignore # auto
from libraries.PyBCKG.pyBCKG.azurestorage.api import from_connection_string # noqa: E402
from libraries.PyBCKG.pyBCKG.utils import HttpRequestMethod # noqa: E402
app = Flask(__name__)
@app.route("/")
@app.route("/get-experiment-options", methods=["GET"])
def get_experiments():
connection_string = request.headers.get("storageConnectionString")
az_conn = from_connection_string(connection_string)
query = "experiments()"
# queryfilter = az_conn._queryfilter('deprecated', "False")
# queryfilter = f"?$filter=Deprecated eq '{False}'"
queryfilter = ""
expt_json = az_conn.query_table(HttpRequestMethod.GET, query, queryfilter)
experiments = expt_json["value"]
return Response(json.dumps(experiments), mimetype="application/json")
@app.route("/get-aml-runs", methods=["GET"])
def get_aml_runs():
connection_string = request.headers.get("storageConnectionString")
az_conn = from_connection_string(connection_string)
query = "azuremlruns()"
queryfilter = ""
aml_runs_json = az_conn.query_table(HttpRequestMethod.GET, query, queryfilter)
aml_runs = aml_runs_json["value"]
return Response(json.dumps(aml_runs), mimetype="application/json")
@app.route("/get-experiment-result", methods=["GET"])
def get_experiment_results():
# connection_string = request.headers.get("storageConnectionString")
experiment_name = request.args.get("experimentName")
print(f"experiment name: {experiment_name}")
# TODO: Download AML run and construct IExperimentResult object
experiment_results = [
{
"id": 1,
"description": "",
"samples": [{}],
"signals": [{}],
"type": "",
"timestamp": "",
"deprecated": "",
"name": "experiment1",
"iterations": ["1"],
"folds": ["1", "2", "3"],
"imageFolders": ["/abex-results/tutorial-intro"],
"imageNames": [
"slice1d_",
"slice2d_",
"acquisition1d_",
"train_test_",
"bo_distance",
"bo_experiment",
],
"suggestedExperiments": [
{"x": 5.0, "y": 8.4},
{"x": 5.6, "y": 8.3},
{"x": 5.3, "y": 8.5},
{"x": 5.7, "y": 8.8},
{"x": 5.4, "y": 8.2},
],
},
{
"id": 2,
"description": "",
"samples": [{}],
"signals": [{}],
"type": "",
"timestamp": "",
"deprecated": "",
"name": "experiment2",
"iterations": ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
"folds": ["1", "2", "3", "4", "5", "6", "7", "8", "9"],
"imageFolders": ["/abex-results/synthetic_3input_batch5"],
"imageNames": [
"slice1d_",
"slice2d_",
"acquisition1d_",
"train_only",
"bo_distance",
"bo_experiment",
],
"suggestedExperiments": [
{"ARA": 1.0, "ATC": 3.3, "C_on": 1},
{"ARA": 1.1, "ATC": 3.5, "C_on": 1},
{"ARA": 1.1, "ATC": 3.6, "C_on": 1},
{"ARA": 1.1, "ATC": 3.8, "C_on": 1},
{"ARA": 1.1, "ATC": 3.5, "C_on": 1},
],
},
]
# This will be the real result once we are storing these storing results in BCKG
# expt_result =[ex for ex in experiment_results if ex.name == experiment_name][0]
expt_result = experiment_results[0]
return Response(json.dumps(expt_result), mimetype="application/json")
@app.route("/get-config-options", methods=["GET"])
def get_configs():
configs = [{"id": "1", "name": "config1"}, {"id": "2", "name": "config2"}]
return Response(json.dumps(configs), mimetype="application/json")
@app.route("/get-dataset-options", methods=["GET"])
def get_datasets():
datasets = [
{
"id": "1",
"name": "dataset1",
"dateCreated": "17 Feb 2021",
"dataRecords": [
{
"SampleId": "S1",
"ObservationId": "O3",
"Arabinose": 4.67,
"C6": 345.23,
"C12": 12334.34,
"EYFP": 187982.23,
"ECFP": 23445.4,
"MRFP": 765.67,
},
{
"SampleId": "S2",
"ObservationId": "O1",
"Arabinose": 6.54,
"C6": 234.63,
"C12": 3243.98,
"EYFP": 87668.34,
"ECFP": 72726.21,
"MRFP": 7725.43,
},
],
},
{
"id": "2",
"name": "dataset2",
"dateCreated": "01 March 2021",
"dataRecords": [
{"A": "a", "B": 2, "C": "e", "D": 4},
{"A": "b", "B": 1, "C": "f", "D": 1},
],
},
]
return Response(json.dumps(datasets), mimetype="application/json")
def parse_binary_to_dict(data: bytes) -> Dict[str, Any]:
header_dict = json.loads(data)
data_dict = header_dict.get("headers")
return data_dict
def get_file_name(data_dict: Dict[str, Any]):
config_name = data_dict.get("fileName")
return config_name
def get_config_data(data_dict: Dict[str, Any]):
config_data = data_dict.get("config")
config_json = yaml.safe_load(config_data) # type: ignore
return config_json
T = TypeVar("T")
def get_csv_data(data_dict: Dict[str, T]) -> T:
observation_data = data_dict.get("observations")
print("observation data: ", observation_data)
# TODO: PARSE CSV
observations = observation_data
assert observations is not None # since non-None return is assumed by some callers
return observations
def get_connection_str_from_binary(data: bytes):
header_dict = json.loads(data)
data_dict = header_dict.get("headers")
connection_str = data_dict.get("storageConnectionString")
return connection_str
def http_response(status_code, message, error_code=None, response=None, reason=None):
response_status_code = HTTP_STATUS_CODES.get(status_code, "Unknown error")
    if status_code == 409 and "blob already exists" in message:
message += " If you are sure this is new data, try renaming the file."
response = jsonify(
error=response_status_code,
error_code=error_code,
reason=reason,
message=message,
)
response.status_code = status_code
return response
def upload_to_blob_storage(yaml_data: yaml.YAMLObject, connection_string: str, blob_name: str):
blob = BlobClient.from_connection_string(
conn_str=connection_string, container_name="testfiles", blob_name=blob_name
)
# upload blob
try:
blob.upload_blob(json.dumps(yaml_data), overwrite=False)
return http_response(200, "Success")
except Exception as e:
# TODO: specify the type of the exception more exactly, so we can be sure it has the fields assumed here.
response = http_response(
e.status_code, # type: ignore
e.message, # type: ignore
error_code=e.error_code, # type: ignore
response=e.response, # type: ignore
reason=e.reason, # type: ignore
)
print(response)
return response
# List the blobs in the container
# blob_list_after = container_client.list_blobs()
def insert_config_table_entry(connection_string: str, config_name: str, config_path: str):
table_conn = TableService(connection_string=connection_string)
new_entry = {
"PartitionKey": "app",
"RowKey": config_name,
"Timestamp": datetime.now(),
"ConfigName": config_name,
"PathToBlob": config_path,
}
table_conn.insert_entity("abexconfigs", new_entry)
def insert_observation_table_entry(connection_string: str, file_name: str, file_path: str):
table_conn = TableService(connection_string=connection_string)
new_entry = {
"PartitionKey": "app",
"RowKey": file_name,
"Timestamp": datetime.now(),
"FileName": file_name,
"PathToBlob": file_path,
}
table_conn.insert_entity("abexObservations", new_entry)
@app.route("/upload-config-data", methods=["GET", "POST"])
def upload_config_data():
"""
Parse data into yaml and then upload to blob storage, as well as creating table entry
"""
data = request.get_data()
data_dict = parse_binary_to_dict(data)
config_name: str = get_file_name(data_dict) # type: ignore # auto
config_data = get_config_data(data_dict) # type: ignore # auto
blob_name = config_name.split(".")[0]
blob_path = "testfiles/" + blob_name
# Upload the file to blob storage
connection_string = get_connection_str_from_binary(data)
upload_blob_response = upload_to_blob_storage(config_data, connection_string, blob_name)
if upload_blob_response.status_code != 200:
return upload_blob_response
# TODO: move this once specs folders fixed
# copy into abex specs folder
new_spec_path = SPECS_DIR / config_name
print("saving new spec to: ", new_spec_path)
with open(new_spec_path, "w+") as f_path:
yaml.dump(config_data, f_path)
assert new_spec_path.is_file()
# Add storage table entry
insert_config_table_entry(connection_string, blob_name, blob_path)
return {"filePath": config_name}
@app.route("/upload-observation-data", methods=["GET", "POST"])
def upload_observation_data():
"""
Upload observations
"""
data = request.get_data()
data_dict = parse_binary_to_dict(data)
print(f"data dict: {data_dict}")
file_name = get_file_name(data_dict) # type: ignore # auto
csv_data: yaml.YAMLObject = get_csv_data(data_dict) # type: ignore # auto
blob_name = file_name.split(".")[0]
blob_path = "testfiles/" + blob_name
# Upload the file to blob storage
connection_string = get_connection_str_from_binary(data)
upload_blob_response = upload_to_blob_storage(csv_data, connection_string, blob_name)
if upload_blob_response.status_code != 200:
return upload_blob_response
# Add storage table entry
insert_observation_table_entry(connection_string, blob_name, blob_path)
return {"filePath": file_name}
@app.route("/login/<string:connection_string>", methods=["GET"])
def login(connection_string: str):
conn = from_connection_string(connection_string)
    if conn:
        print("conn successful")
        return {"success": True}
    return {"success": False}
@app.route("/submit-new-experiment", methods=["GET", "POST"])
def submit_new_experiment():
# TODO: start new experiment track
"""
Submit a new experiment action.
1. Retrieve the config from user's config table
2. Retrieve the csv to user's csv table
x. Create ABEX Config
y. Submit the ABEX experiment
"""
data = request.get_data()
print(f"Data sent to submit-new-experiment: {data}")
data_dict = parse_binary_to_dict(data)
print(f"\ndata dict: {data_dict}")
config_path = data_dict.get("configPath") # type: ignore # auto
# config_name = config_path.split('.')[0]
print(f"config path: {config_path}")
# observation_path = data_dict.get("observationsPath")
yaml_file_path, config_dict = load_config_from_path_or_name(config_path)
print(f"yaml file path: {yaml_file_path}")
print(f"config dict: {config_dict}")
for pair_list in load_resolutions(config_path):
for _, config in pair_list:
# Decide which optimization strategy should be used
print(f"\nConfig: {config}")
optimizer = OptimizerBase.from_strategy(config, config.optimization_strategy)
optimizer.run()
return data
@app.route("/submit-iteration", methods=["GET", "POST"])
def submit_iteration_form():
# TODO: kick off new iteration
data = request.get_data()
print(data)
return data
@app.route("/submit-clone", methods=["GET", "POST"])
def submit_cloneform():
# TODO: kick off clone of previous experiment
data = request.get_data()
print(data)
return data
|
python
|
import json
from carla.driving_benchmark.results_printer import print_summary
with open('../_benchmarks_results/outputs-27/metrics.json') as json_file:
metrics_summary = json.load(json_file)
train_weathers = [1, 3, 6, 8]
test_weathers = [4, 14]
print("Printing Data\n")
print_summary(metrics_summary, test_weathers, None)
print("\nDone Printing Data")
|
python
|
import json
import os.path as osp
from glob import glob
import numpy as np
from nms_cpu import nms_cpu as nms
'''
load annotation from BDD format json files
'''
def load_annos_bdd(path, folder=True, json_name='*/*_final.json'):
print("Loading GT file {} ...".format(path))
if folder:
jsonlist = sorted(glob(osp.join(path, json_name)))
else:
jsonlist = json.load(open(path, 'r'))
assert len(jsonlist) > 0, "{} has no files".format(path)
anno = []
for idx, trackjson in enumerate(jsonlist):
if folder:
trackinfo = json.load(open(trackjson, 'r'))
else:
trackinfo = trackjson
annotations = {}
annotations.update({
'name': [],
'truncated': [],
'occluded': [],
'alpha': [],
'bbox': [],
'dimensions': [],
'location': [],
'orientation': []
})
for obj in trackinfo['labels']:
if not obj['attributes']['ignore']:
annotations['name'].append('Car')
annotations['truncated'].append(obj['attributes']['truncated'])
annotations['occluded'].append(obj['attributes']['occluded'])
annotations['bbox'].append(
[obj['box2d']['x1'], obj['box2d']['y1'],
obj['box2d']['x2'], obj['box2d']['y2']])
annotations['alpha'].append(obj['box3d']['alpha'])
annotations['dimensions'].append(obj['box3d']['dimension'])
annotations['location'].append(obj['box3d']['location'])
annotations['orientation'].append(obj['box3d']['orientation'])
annotations['name'] = np.array(annotations['name'])
annotations['truncated'] = np.array(annotations['truncated'])
annotations['occluded'] = np.array(annotations['occluded'])
annotations['alpha'] = np.array(annotations['alpha']).astype(
'float')
annotations['bbox'] = np.array(annotations['bbox']).astype(
'float').reshape(-1, 4)
annotations['dimensions'] = np.array(
annotations['dimensions']).reshape(-1, 3)[:, [2, 0, 1]]
annotations['location'] = np.array(annotations['location']).reshape(
-1, 3)
annotations['orientation'] = np.array(
annotations['orientation']).reshape(-1)
anno.append(annotations)
return anno
def load_preds_bdd(path, use_nms=True, folder=False, json_name='*bdd_3d.json'):
print("Loading PD file {} ...".format(path))
    # Flag indicating whether to use the kitti (bottom center) or gta (3D box center) location format
use_kitti_location = 'kitti' in path
if folder:
jsonlists = sorted(glob(osp.join(path, json_name)))
jsonlist = [itm for ji in jsonlists for itm in json.load(open(ji, 'r'))]
else:
jsonlist = json.load(open(path, 'r'))
assert len(jsonlist) > 0, "{} has no files".format(path)
anno = []
for idx, trackinfo in enumerate(jsonlist):
annotations = {}
annotations.update({
'name': [],
'truncated': [],
'occluded': [],
'alpha': [],
'bbox': [],
'score': [],
'dimensions': [],
'location': [],
'orientation': []
})
for obj in trackinfo['prediction']:
if not obj['attributes']['ignore']:
annotations['name'].append('Car')
annotations['truncated'].append(0)
annotations['occluded'].append(0)
annotations['bbox'].append(
[obj['box2d']['x1'], obj['box2d']['y1'],
obj['box2d']['x2'], obj['box2d']['y2']])
annotations['score'].append(obj['box2d']['confidence'])
annotations['alpha'].append(obj['box3d']['alpha'])
annotations['dimensions'].append(obj['box3d']['dimension'])
annotations['location'].append(obj['box3d']['location'])
annotations['orientation'].append(obj['box3d']['orientation'])
name = np.array(annotations['name'])
truncated = np.array(annotations['truncated'])
occluded = np.array(annotations['occluded'])
box = np.array(annotations['bbox']).astype('float').reshape(-1, 4)
score = np.array(annotations['score']).astype('float').reshape(-1)
dim = np.array(annotations['dimensions']).reshape(-1, 3)[:, [2, 0, 1]]
alpha = np.array(annotations['alpha']).astype('float')
loc = np.array(annotations['location']).reshape(-1, 3)
if use_kitti_location:
# Bottom center of a 3D object, instead of 3D box center
loc[:, 1] += dim[:, 2] / 2
rot_y = np.array(annotations['orientation']).reshape(-1)
if use_nms:
# print("Using NMS to suppress number of bounding box")
keep = nms(np.hstack([box, score.reshape(-1, 1)]), 0.3)
name = name[keep]
truncated = truncated[keep]
occluded = occluded[keep]
box = box[keep]
score = score[keep]
dim = dim[keep].reshape(-1, 3)
alpha = alpha[keep]
loc = loc[keep].reshape(-1, 3)
rot_y = rot_y[keep].reshape(-1)
annotations['name'] = name
annotations['truncated'] = truncated
annotations['occluded'] = occluded
annotations['alpha'] = alpha
annotations['bbox'] = box
annotations['dimensions'] = dim
annotations['location'] = loc
annotations['orientation'] = rot_y
annotations['score'] = score
anno.append(annotations)
return anno
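# Hedged usage sketch (paths below are placeholders, not files from this repo):
#
# gts = load_annos_bdd('data/bdd/labels', folder=True)
# pds = load_preds_bdd('output/kitti_bdd_3d.json', use_nms=True)
# assert len(gts) == len(pds)  # one annotation dict per frame on both sides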
|
python
|
import context
import sys
import pytest
from dice import dice
from score import scoreCalculate
from game.Player import Player
from game.score_bracket import score_bracket
@pytest.fixture(scope='session')
def load_normal_player():
one = score_bracket(250,350,4)
two = score_bracket(400,500,3)
three = score_bracket(550,850,2)
four = score_bracket(900,99999,1)
p = Player('test',350,3,2)
p.passToBrackets.append(one)
p.passToBrackets.append(two)
p.passToBrackets.append(three)
p.passToBrackets.append(four)
pytest.normal_player = p
def test_judge_normal_150(load_normal_player):
result = pytest.normal_player.player_judge_accept_passed(150, 3)
assert result == False
def test_judge_normal_300(load_normal_player):
result = pytest.normal_player.player_judge_accept_passed(300, 3)
assert result == False
def test_judge_normal_350(load_normal_player):
result = pytest.normal_player.player_judge_accept_passed(350, 4)
assert result == True
def test_judge_normal_400(load_normal_player):
result = pytest.normal_player.player_judge_accept_passed(400, 3)
assert result == True
def test_judge_normal_550(load_normal_player):
result = pytest.normal_player.player_judge_accept_passed(550, 2)
assert result == True
def test_judge_normal_550_One(load_normal_player):
result = pytest.normal_player.player_judge_accept_passed(550, 1)
assert result == False
def test_judge_normal_900(load_normal_player):
result = pytest.normal_player.player_judge_accept_passed(900, 2)
assert result == True
def test_judge_normal_1100(load_normal_player):
result = pytest.normal_player.player_judge_accept_passed(1100, 1)
assert result == True
def test_judge_normal_200_2(load_normal_player):
result = pytest.normal_player.player_judge_accept_passed(200, 2)
assert result == False
def test_judge_normal_300_2(load_normal_player):
result = pytest.normal_player.player_judge_accept_passed(300, 2)
assert result == False
def test_judge_normal_2000_1(load_normal_player):
result = pytest.normal_player.player_judge_accept_passed(2000, 1)
assert result == True
def test_judge_normal_2000_2(load_normal_player):
result = pytest.normal_player.player_judge_accept_passed(2000, 2)
assert result == True
def test_judge_normal_1100_1(load_normal_player):
result = pytest.normal_player.player_judge_accept_passed(1100, 1)
assert result == True
|
python
|
import os
import random
import re
import sys
DAMPING = 0.85
SAMPLES = 10000
def main():
if len(sys.argv) != 2:
sys.exit("Usage: python pagerank.py corpus")
corpus = crawl(sys.argv[1])
ranks = sample_pagerank(corpus, DAMPING, SAMPLES)
print(f"PageRank Results from Sampling (n = {SAMPLES})")
for page in sorted(ranks):
print(f" {page}: {ranks[page]:.4f}")
ranks = iterate_pagerank(corpus, DAMPING)
print(f"PageRank Results from Iteration")
for page in sorted(ranks):
print(f" {page}: {ranks[page]:.4f}")
def crawl(directory):
"""
Parse a directory of HTML pages and check for links to other pages.
Return a dictionary where each key is a page, and values are
a list of all other pages in the corpus that are linked to by the page.
"""
pages = dict()
# Extract all links from HTML files
for filename in os.listdir(directory):
if not filename.endswith(".html"):
continue
with open(os.path.join(directory, filename)) as f:
contents = f.read()
links = re.findall(r"<a\s+(?:[^>]*?)href=\"([^\"]*)\"", contents)
pages[filename] = set(links) - {filename}
# Only include links to other pages in the corpus
for filename in pages:
pages[filename] = set(
link for link in pages[filename]
if link in pages
)
return pages
def transition_model(corpus, page, damping_factor):
"""
Return a probability distribution over which page to visit next,
given a current page.
With probability `damping_factor`, choose a link at random
linked to by `page`. With probability `1 - damping_factor`, choose
a link at random chosen from all pages in the corpus.
"""
pagePointers = set()
for x, y in corpus.items():
if x == page:
pagePointers = y
break
chances = dict()
if len(pagePointers) == 0:
chance = 1/len(corpus)
for x in corpus:
chances[x] = chance
return chances
chanceAmortecimento = (1-damping_factor)/(len(corpus))
chanceComum = (damping_factor/len(pagePointers))
for x in corpus:
if x in pagePointers:
chances[x] = chanceAmortecimento + chanceComum
else:
chances[x] = chanceAmortecimento
return chances
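# Worked example for transition_model (hypothetical 3-page corpus): with
# corpus = {"1.html": {"2.html"}, "2.html": {"1.html", "3.html"}, "3.html": set()},
# transition_model(corpus, "1.html", 0.85) returns
# {"1.html": 0.05, "2.html": 0.9, "3.html": 0.05}:
# (1 - 0.85) / 3 = 0.05 goes to every page, and 0.85 / 1 to the one outgoing link.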
def sample_pagerank(corpus, damping_factor, n):
"""
Return PageRank values for each page by sampling `n` pages
according to transition model, starting with a page at random.
Return a dictionary where keys are page names, and values are
their estimated PageRank value (a value between 0 and 1). All
PageRank values should sum to 1.
"""
visitedPages = []
paginaAtual = random.choice(list(corpus.keys()))
for i in range(n):
chances = transition_model(corpus, paginaAtual, damping_factor)
pageList = []
pageChances = []
for x, y in chances.items():
pageList.append(x)
pageChances.append(y)
paginaAtual = random.choices(pageList, pageChances)[0]
visitedPages.append(paginaAtual)
oneFactor = 1/n
pageRanks = dict()
for page in corpus:
pageRanks[page] = visitedPages.count(page) * oneFactor
return pageRanks
def iterate_pagerank(corpus, damping_factor):
"""
Return PageRank values for each page by iteratively updating
PageRank values until convergence.
Return a dictionary where keys are page names, and values are
their estimated PageRank value (a value between 0 and 1). All
PageRank values should sum to 1.
"""
prs = dict()
initialValue = 1/len(corpus)
for x in corpus:
prs[x] = initialValue
altaDiferenca = True
while altaDiferenca:
altaDiferenca = False
for x in prs:
if recursive_pagerank(corpus, x, damping_factor, prs):
altaDiferenca = True
return prs
def recursive_pagerank(corpus, page, damping_factor, prs):
somatorio = 0
for i, links in corpus.items():
if page in links:
somatorio += prs[i]/len(links)
pr = ((1-damping_factor)/len(corpus)) + damping_factor*somatorio
if -0.001 < pr - prs[page] < 0.001:
prs[page] = pr
return False
else:
prs[page] = pr
return True
if __name__ == "__main__":
main()
|
python
|
import logging
import pandas as pd
import numpy as np
import glob_utils.file.utils
logger = logging.getLogger(__name__)
################################################################################
# Save/Load csv files
################################################################################
def save_as_csv(file_path:str, data:dict)->None:
"""Save data in a csv-file
Args:
file_path (str): saving path
data (dict): The values of dict should be ndarray or list. And it will be converted to 1-D array for saving
csv-file. The length of data should be the same.
"""
if not isinstance(data, dict):
logger.error(f'Saving of {data=} in csv file - failed, data should be a dict')
return
# convert list to 1-D nparray if values of dict is list or nested list
for key, values in data.items():
if isinstance(values, list):
data[key]= np.hstack(values)
# convert to 1-D array if dim not 1
data = {k: v.flatten() for k,v in data.items()}
file_path= glob_utils.file.utils.append_extension(file_path, glob_utils.file.utils.FileExt.csv)
df = pd.DataFrame.from_dict(data)
df.to_csv(file_path, index = False, header=True,)
def load_csv(file_path:str) -> dict:
"""Load a csv-file.
All variables contained in a csv-file (except the private var) are
return in a dictionnary
Args:
file_path (str): path of csv to load
Returns:
dict: variables contained in the csv-file
"""
if not glob_utils.file.utils.check_file(file_path, glob_utils.file.utils.FileExt.csv):
return None
return pd.read_csv(file_path).to_dict('list')
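# Minimal round-trip sketch (assumes glob_utils is importable and the path is
# writable; names here are illustrative only):
#
# import numpy as np
# data = {"x": np.arange(3), "y": [[1.0, 2.0], [3.0]]}  # nested list gets flattened
# save_as_csv("example", data)                          # writes example.csv
# loaded = load_csv("example.csv")                      # {'x': [0, 1, 2], 'y': [1.0, 2.0, 3.0]}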
|
python
|
# Generated by Django 3.1.4 on 2021-05-07 01:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('TFS', '0002_auto_20210507_0656'),
]
operations = [
migrations.AlterField(
model_name='customerdetail',
name='email',
field=models.EmailField(max_length=30),
),
migrations.AlterField(
model_name='transectiondetail',
name='email',
field=models.EmailField(max_length=30),
),
]
|
python
|
#-*-coding:utf-8-*-
import sys
import time
from flask import Flask,jsonify
app = Flask(__name__)
appJSON={
"controller":{
"title":"Speed Dial Squared Up",
"rightButton":{
"title":"请求",
"click":"${()=>{$dispatch('fetchData')}}"
}
},
"lifeCircle":{
"viewDidMount":"${()=>{$dispatch('fetchData')}}",
},
"components":[
{
"id":"header",
"type":"View",
"style":{
"position":"`{{2,0},{${UI.screenW-2},200}}`",
"backgroundColor":"rgb(255,255,255)",
},
"components":[
{
"id":"nameLabel",
"type":"TextView",
"style":{
"position":"`{{6,6},{${UI.screenW-12},264}}`",
"backgroundColor":"`rgb(255,255,255)`",
"text":" 相信大家肯定有这样的烦恼,因为各式各样的需求,每次app更新版本都需要编码->测试->打包->提交app store 审核,这样花费的时间太多,又或者你会想到这不是有现成的解决方案嘛,ReactNative就可以,并且可以热更新(不同于JSPatch)。是的,但是我不想因为简单的需求就引入那么多文件,更增加了app体积。JSONRenderKit核心就只有几个.h.m文件。\n\n 同时一个非常好玩的库,不用重新command+R,只要改动JSON就可以看到新的UI效果。之所以说他好玩,因为它可以做很多彩蛋,给用户惊喜。\n\n并且我也已经正式放进项目-(”掌上理工大 “app store 可以搜索)里面使用啦,并且抽出代码,详细注释做成了Demo放在github。你也可以修改源代码并扩展新组件后放进的你自己的项目,重要的是你也可以参考JavaScript是怎样和OC进行交互的,代码里面有详细的注释。要是有兴趣,你可以自己将他打造成为一个有用的工具。",
"textColor":"rgb(132,132,132)",
"cornerRadius":4,
"borderColor":"rgb(244,244,244)",
"borderWidth":1,
"allowEdit":0,
"fontSize":14.0,
},
},
]
},
{
"id":"listbox",
"type":"ListView",
"style":{
"position":"`{{0,270},{${UI.screenW},270}}`",
"itemSize":"`{${UI.screenW/3},90}`",
"itemVMarign":0,
"itemHMarign":0,
"scrollDirection":"vertical",
},
"itemStyle":{
"separatorDirection":"right|bottom",
"separatorColor":"rgb(240,240,240)",
},
"clickItem":"${(index)=>{UI.alert('you pressed',$props.apps[index].name)}}",
"item":[
{
"id":"appname",
"type":"Label",
"style":{
"position":"`{{0,60},{${UI.screenW/3},30}}`",
"text":"标签",
"align":"center",
"fontSize":14,
"textColor":"rgb(130,130,130)",
},
},
{
"id":"appicon",
"type":"ImageView",
"style":{
"position":"{{48,30},{26,26}}",
"image":"https://static.wutnews.net/icon/calendar/2x.png?2",
"imageMode":"aspectfit",
},
},
],
}
],
"props":{
"testIndex":1,
"apps":[],
},
"actions":{
"fetchData":{
"viewId":"listbox",
"URLRequest":{
"type":"GET",
"url":"http://palmwhut.sinaapp.com/member/get_app?timestamp=0&platform=ios",
"check":"${(json)=>{return json.status == '200';}}",
"failure":"${(desc)=>{UI.alert('信息获取失败',desc)}}",
"extractData":"${(json)=>{$props.apps=$props.apps.concat(json.data);return json.data;}}",
"itemToStyleTemplate":{
"subStyles":[
{
"viewId":"appname",
"style":{
"text":"`${item.name}`",
}
},
{
"viewId":"appicon",
"style":{
"image":"`${item.icon['3x']}`",#item.icon.3x 语法格式不正确
}
},
]
},
"render":"${(data)=>{$getView('listbox').setDataArray([]).addDatas(data).reloadData()}}",
},
},
}
}
translation={
"controller":{
"title":"translation",
},
"components":[
{
"id":"searchField",
"type":"TextField",
"style":{
"position":"`{{6,6},{${UI.screenW-80},36}}`",
"textColor":"rgb(80,80,80)",
"borderStyle":"none",
"backgroundColor":"rgb(240,240,240)",
"cornerRadius":4,
"placeholder":"请输入你要翻译的单词",
}
},
{
"id":"searchButton",
"type":"Button",
"style":{
"position":"`{{${UI.screenW-68},4},{68,40}}`",
"titleColor":"rgb(40,40,40)",
"title":"search"
},
"click":"${()=>{$dispatch('transAction');}}"
},
{
"id":"resultList",
"type":"ListView",
"style":{
"position":"`{{0,60},{${UI.screenW},${UI.screenH-60}}}`",
"itemSize":"`{${UI.screenW},54}`",
"itemVMarign":0,
"scrollDirection":"vertical",
},
"itemStyle":{
"separatorDirection":"bottom",
"separatorColor":"rgb(240,240,240)",
},
"item":[
{
"id":"fixLabel",
"type":"Label",
"style":{
"position":"`{{13,8},{${UI.screenW-13},20}}`",
"fontSize":12,
"textColor":"rgb(200,200,200)",
}
},
{
"id":"descLabel",
"type":"Label",
"style":{
"position":"`{{13,30},{${UI.screenW-13},20}}`",
"fontSize":14,
"textColor":"rgb(80,80,80)",
}
},
],
"dataArray":[
{
"subStyles":[
{
"viewId":"fixLabel",
"style":{
"text":"译文",
},
},
{
"viewId":"descLabel",
"style":{
"text":"Who am I?",
},
},
]
},
{
"subStyles":[
{
"viewId":"fixLabel",
"style":{
"text":"译文",
},
},
{
"viewId":"descLabel",
"style":{
"text":"Who are you?",
},
},
]
},
]
}
],
"props":{
"transResults":[]
},
"actions":{
"transAction":{
"viewId":"resultList",
"URLRequest":{
"type":"get",
"url":"`https://dict.bing.com.cn/api/http/v2/4154AA7A1FC54ad7A84A0236AA4DCAF3/zh-cn/en-us/?q=${$getView('searchField').text}&format=application/json`",
"check":"${(json)=>{return true}}", #进行检查 必须返回一个布尔值
"failure":"${(desc)=>{UI.alert('信息获取失败',desc)}}",
# "success":"${(data)=>{$props.apps=$props.apps.concat(data);}}", #将结果进行保存
"extractData":"${(json)=>{return json.LEX.C_DEF[1].SEN}}",
"willRender":"",
"willSend":"${()=>{UI.showIndicator('gray');}}",
"itemToStyleTemplate":{
"subStyles":[
{
"viewId":"descLabel",
"style":{
"text":"`${item.D}`",#item.icon.3x 语法格式不正确
}
},
{
"viewId":"fixLabel",
"style":{
"text":"译文",#item.icon.3x 语法格式不正确
}
},
]
},
"render":"${(renderData)=>{$getView('resultList').setDataArray([]).addDatas(renderData).reloadData()}}",
"didRender":"${()=>{UI.hideIndicatorDelay(0.618)}}",
},
},
}
}
todo={
"controller":{
"title":"todo",
},
"components":[
{
"id":"addField",
"type":"TextField",
"style":{
"position":"`{{6,6},{${UI.screenW-80},36}}`",
"textColor":"rgb(80,80,80)",
"borderStyle":"none",
"backgroundColor":"rgb(240,240,240)",
"cornerRadius":4,
"placeholder":"请输入项目",
}
},
{
"id":"addButton",
"type":"Button",
"style":{
"position":"`{{${UI.screenW-68},4},{68,40}}`",
"titleColor":"rgb(40,40,40)",
"title":"add"
},
"click":"${()=>{$dispatch('addToDo');}}"
},
{
"id":"todoList",
"type":"ListView",
"style":{
"position":"`{{0,60},{${UI.screenW},${UI.screenH-60-64}}}`",
"itemSize":"`{${UI.screenW},60}`",
"itemVMarign":0,
"scrollDirection":"vertical",
},
"itemStyle":{
"separatorDirection":"bottom",
"separatorColor":"rgb(240,240,240)",
"itemHighlightColor":"rgb(220,220,220)",
},
"item":[
{
"id":"nameLabel",
"type":"Label",
"style":{
"position":"`{{13,8},{${UI.screenW/2-13},20}}`",
"fontSize":18,
"textColor":"rgb(20,20,20)",
"adjustTextFont":1,
}
},
{
"id":"deleteLabel",
"type":"Label",
"style":{
"position":"`{{13,30},{${UI.screenW/2-13},20}}`",
"fontSize":14,
"textColor":"rgb(90,90,90)",
"adjustTextFont":1,
}
},
],
"clickItem":"${(index)=>{$props.selectedIndex=index;$dispatch('removeToDo')}}",
"dataArray":[
{
"subStyles":[
{
"viewId":"nameLabel",
"style":{
"text":"apple",
},
},
{
"viewId":"deleteLabel",
"style":{
"text":"tap to delete",
},
},
]
},
{
"subStyles":[
{
"viewId":"nameLabel",
"style":{
"text":"orange",
},
},
{
"viewId":"deleteLabel",
"style":{
"text":"tap to delete",
},
},
]
},
]
}],
"props":{
"selectedIndex":-1,
"toDoItemStyle":{
"subStyles":[
{
"viewId":"nameLabel",
"style":{
"text":"`${$getView('addField').text}`",
},
},
{
"viewId":"deleteLabel",
"style":{
"text":"tap to delete",
},
},
]
},
},
"actions":{
"removeToDo":{
"viewId":"todoList",
"valKey":"dataArray",
"reduce":"${(oldVal)=>{return oldVal.removeAtIndex($props.selectedIndex);}}",
},
"addToDo":{
"viewId":"todoList",
"valKey":"dataArray",
"reduce":"${(oldVal)=>{oldVal.push($props.getCopy('toDoItemStyle'));return oldVal;}}",
}
}
}
newApi = {
"controller":{
"title":"swipe the banner",
},
"components":[
{
"id":"imageLists",
"type":"ListView",
"style":{
"position":"`{{0,0},{${UI.screenW},${220}}}`",
"itemSize":"`{${UI.screenW},220}`",
"itemHMarign":0,
"itemVMarign":0,
"scrollDirection":"horizontal",
"backgroundColor":"rgb(255,255,255)",
"splitPage":1,
"infiniteScroll":1,
"showHBar":0
},
"clickItem":"${(index)=>{UI.log(index)}}",
"item":[
{
"id":"image",
"type":"ImageView",
"style":{
"position":"`{{0,0},{${UI.screenW},220}}`",
}
},
],
"dataArray":[
{
"subStyles":[
{
"viewId":"image",
"style":{
"image":"`https://unsplash.it/${UI.screenW*2}/440/?image=210`",
},
},
]
},
{
"subStyles":[
{
"viewId":"image",
"style":{
"image":"`https://unsplash.it/${UI.screenW*2}/440/?image=211`",
},
},
]
},
{
"subStyles":[
{
"viewId":"image",
"style":{
"image":"`https://unsplash.it/${UI.screenW*2}/440/?image=223`",
},
},
]
},
]
},
{
"id":"UIToolLists",
"type":"ListView",
"style":{
"position":"`{{0,220},{${UI.screenW},${UI.screenH-220-64}}}`",
"itemSize":"`{${UI.screenW},50}`",
"itemVMarign":0,
"scrollDirection":"vertical",
"backgroundColor":"rgb(255,255,255)",
},
"clickItem":"${(index)=>{$props.runFuncs(index)}}",
"itemStyle":{
"separatorDirection":"bottom",
"separatorColor":"rgb(220,220,220)",
},
"item":[
{
"id":"toolLabel",
"type":"Label",
"style":{
"position":"`{{14,8},{${UI.screenW-14},40}}`",
"fontSize":18,
"textColor":"rgb(20,20,20)",
}
},
],
"dataArray":[
{
"subStyles":[
{
"viewId":"toolLabel",
"style":{
"text":"showActionSheet",
},
},
]
},
{
"subStyles":[
{
"viewId":"toolLabel",
"style":{
"text":"showActionAlert",
},
},
]
},
{
"subStyles":[
{
"viewId":"toolLabel",
"style":{
"text":"alertDeviceScreenSize",
},
},
]
},
{
"subStyles":[
{
"viewId":"toolLabel",
"style":{
"text":"showIndicator",
},
},
]
},
{
"subStyles":[
{
"viewId":"toolLabel",
"style":{
"text":"hideIndicatorDelay",
},
},
]
},
]
},
],
"props":{
"$runFunctions":[
"${()=>{UI.showSheetView('sheetView标题','点击非标题列表按钮,index=-1',['apple','orange','litchi'],(index)=>{UI.log('你点击了'+index)});}}",
"${()=>{UI.alertTitles('alert标题','点击非标题列表按钮,index=-1',['banana','grape'],(index)=>{UI.log('你点击了'+index)});}}",
"${()=>{let info = `设备宽:${UI.screenW} \n 设备高:${UI.screenH}`;UI.alert('设备屏幕详情',info);}}",
"${()=>{UI.showIndicator();}}",
"${()=>{UI.hideIndicatorDelay(0);}}"
]
}
}
@app.route('/')
def hello_world():
return 'JSON render Hello World!'
@app.route('/appjson', methods=['GET'])
def get_appjson():
return jsonify(appJSON)
@app.route('/trans', methods=['GET'])
def get_translation():
return jsonify(translation)
@app.route('/todo', methods=['GET'])
def get_todo():
return jsonify(todo)
@app.route('/newApi', methods=['GET'])
def get_newApi():
return jsonify(newApi)
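# Usage sketch: with the server running on Flask's default port, each screen
# definition can be fetched as JSON, e.g.
#   curl http://127.0.0.1:5000/appjson
#   curl http://127.0.0.1:5000/todo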
if __name__ == '__main__':
app.run()
|
python
|
import tensorflow as tf
import numpy as np
interpreter = tf.lite.Interpreter('models/mobilefacenet.tflite')
interpreter.allocate_tensors()
def set_input_tensor_face(input):
input_details = interpreter.get_input_details()[0]
tensor_index = input_details['index']
input_tensor = interpreter.tensor(tensor_index)()[0]
input_tensor[:, :] = np.float32((input-127.5)/127.5)
def get_embeddings(input):
set_input_tensor_face(input)
interpreter.invoke()
output_details = interpreter.get_output_details()[0]
output = interpreter.get_tensor(output_details['index'])
return output
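# Hedged usage sketch: MobileFaceNet-style models usually take a 112x112 RGB
# crop; the input size and the distance threshold below are assumptions for
# illustration, not values taken from this file.
#
# import cv2
# img1 = cv2.resize(cv2.imread('face1.jpg')[..., ::-1], (112, 112))
# img2 = cv2.resize(cv2.imread('face2.jpg')[..., ::-1], (112, 112))
# e1, e2 = get_embeddings(img1)[0], get_embeddings(img2)[0]
# cos_dist = 1 - np.dot(e1, e2) / (np.linalg.norm(e1) * np.linalg.norm(e2))
# print('same person' if cos_dist < 0.8 else 'different people')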
|
python
|
from typing import Sequence
from torch import nn
class Model(nn.Module):
def __init__(
self,
n_features: int,
n_classes: int,
        units: Sequence[int] = (512,),
dropout: float = 0.5,
):
super().__init__()
sizes = [n_features] + list(units)
self.hidden = nn.Sequential(
*(
nn.Sequential(
nn.Linear(sizes[i], sizes[i + 1]), nn.ReLU(), nn.Dropout(dropout)
)
for i in range(len(units))
)
)
self.final = nn.Linear(units[-1], n_classes)
def forward(self, x):
return self.final(self.hidden(x))
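# Quick smoke test with illustrative sizes: a batch of 8 ten-dimensional
# feature vectors through a 10 -> 512 -> 4 MLP should print torch.Size([8, 4]).
if __name__ == "__main__":
    import torch

    model = Model(n_features=10, n_classes=4)
    model.eval()  # disable dropout for a deterministic forward pass
    x = torch.randn(8, 10)
    print(model(x).shape)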
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2019-02-12 23:07
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
def forwards_func(apps, schema_editor):
Menu = apps.get_model("das", "Menu")
db_alias = schema_editor.connection.alias
Menu.objects.using(db_alias).bulk_create([
Menu(menu_name=u'默认右侧边栏', menu_type="tool")
])
def reverse_func(apps, schema_editor):
Menu = apps.get_model("das", "Menu")
db_alias = schema_editor.connection.alias
Menu.objects.using(db_alias).filter(menu_name=u'默认右侧边栏').delete()
dependencies = [
('das', '0033_auto_20190212_2241'),
]
operations = [
migrations.RunPython(forwards_func, reverse_func),
]
|
python
|
"""Unit tests for the `cf_predict` package."""
|
python
|
import torch
import argparse
from Transformer import Transformer
parser = argparse.ArgumentParser(description='Test transformer')
parser.add_argument('--src_len', '-s', type=int, default=5, help='the source sequence length')
parser.add_argument('--batch_size', '-bs', type=int, default=2, help='batch size')
parser.add_argument('--dmodel', '-d', type=int, default=512, help='embedding dimension')
parser.add_argument('--tgt_len', '-t', type=int, default=6, help='the target sequence length')
parser.add_argument('--num_head', '-nh', type=int, default=8, help='the number of head')
parser.add_argument('--num_encoder_layers', '-ne', type=int, default=6,
help='the number of encoder layers')
parser.add_argument('--num_decoder_layers', '-nd', type=int, default=6,
help='the number of decoder layers')
parser.add_argument('--dim_feedforward', '-df', type=int, default=2048,
help='the dimension of the feedforward network model')
args = parser.parse_args()
if __name__ == '__main__':
print(args)
transformer = Transformer(d_model=args.dmodel,
nhead=args.num_head,
num_encoder_layers=args.num_encoder_layers,
num_decoder_layers=args.num_decoder_layers,
dim_feedforward=args.dim_feedforward
)
src = torch.rand((args.src_len, args.batch_size, args.dmodel)) # shape: [src_len, batch_size, embed_dim]
# src_mask = transformer.generate_square_subsequent_mask(args.src_len)
src_key_padding_mask = torch.tensor([[True, True, True, False, False],
[True, True, True, True, False]]) # shape: [batch_size, src_len]
tgt = torch.rand((args.tgt_len, args.batch_size, args.dmodel)) # shape: [tgt_len, batch_size, embed_dim]
tgt_mask = transformer.generate_square_subsequent_mask(args.tgt_len)
tgt_key_padding_mask = torch.tensor([[True, True, True, False, False, False],
[True, True, True, True, False, False]]) # shape: [batch_size, tgt_len]
out = transformer(src=src,
tgt=tgt,
tgt_mask=tgt_mask,
src_key_padding_mask=src_key_padding_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=src_key_padding_mask
)
print(out.shape)
|
python
|
import FWCore.ParameterSet.Config as cms
#
#
#------------------
#Preshower clustering:
#------------------
from RecoEcal.EgammaClusterProducers.multi5x5SuperClustersWithPreshower_cfi import *
# producer for endcap SuperClusters including preshower energy
from RecoEcal.EgammaClusterProducers.correctedMulti5x5SuperClustersWithPreshower_cfi import *
# producer for preshower cluster shapes
from RecoEcal.EgammaClusterProducers.multi5x5PreshowerClusterShape_cfi import *
# create sequence for preshower clustering
multi5x5PreshowerClusteringSequence = cms.Sequence(correctedMulti5x5SuperClustersWithPreshower*
multi5x5PreshowerClusterShape*
uncleanedOnlyMulti5x5SuperClustersWithPreshower*
uncleanedOnlyCorrectedMulti5x5SuperClustersWithPreshower)
|
python
|
# Process PET from worldclim temperature
# Peter Uhe
# 25/3/2019
#
# This script uses python3, set up using conda environment gdal_env2
import os,sys,glob
import gdal
import numpy as np
import datetime,calendar
import netCDF4
sys.path.append('/home/pu17449/gitsrc/PyETo')
import pyeto
inpath = '/home/pu17449/data2/worldclim_precip/'
outfile = '/home/pu17449/data2/worldclim_precip/pet_yearmean_v3.nc'
#f_tiffout = '/home/pu17449/data2/worldclim_precip/pet_yearmean_v2.tif'
# Construct lat and lon arrays (centrepoints)
step = 0.25/30.
lons = np.arange(-180,180,step)+step/2.
lats = np.arange(90,-90,-1*step)-step/2.
nlats = len(lats)
nlons = len(lons)
lats_rad = pyeto.deg2rad(lats)
#lat2D = np.repeat(lats_rad[:,np.newaxis],len(lons),axis=1)
# Data array to store time/lon slices of PET
pet_timeslice = np.zeros([12,nlons],dtype=np.float32)
pet = np.zeros([nlons],dtype=np.float32) #yearly mean
###############################################################################
# Write output file for modsim:
# Follows format of variables needed in 'domain' file for metsim e.g. '/home/bridge/pu17449/src/MetSim/metsim/data/domain.nc'
with netCDF4.Dataset(outfile,'w') as f_out:
f_out.createDimension('lat',nlats)
    f_out.createVariable('lat',np.float64,('lat'))
f_out.variables['lat'].standard_name = "latitude"
f_out.variables['lat'].long_name = "latitude"
f_out.variables['lat'].units = "degrees_north"
f_out.variables['lat'].axis = "Y"
f_out.variables['lat'][:] = lats
f_out.createDimension('lon',nlons)
    f_out.createVariable('lon',np.float64,('lon'))
f_out.variables['lon'].standard_name = "longitude"
f_out.variables['lon'].long_name = "longitude"
f_out.variables['lon'].units = "degrees_east"
f_out.variables['lon'].axis = "X"
f_out.variables['lon'][:] = lons
#f_out.createDimension('time',None)
#f_out.createVariable('time',np.float,('time'))
#f_out.variables['time'].standard_name = "time"
#f_out.variables['time'].long_name = "time"
#f_out.variables['time'].units = "days since 2000-01-01"
#f_out.variables['time'].axis = "T"
#f_out.variables['time'][:] = [15]
f_out.createVariable('pet',np.float32,('lat','lon'))
f_out.variables['pet'].long_name = 'Yearly average Potential Evapotranspiration'
f_out.variables['pet'].comment = 'Calculated by Hargraeves Method, using WorldClim2 monthly data and pyeto (https://pyeto.readthedocs.io)'
f_out.variables['pet'].units = 'mm/day'
# Try writing out to geotiff (currently not working)
#gtiffDriver = gdal.GetDriverByName('GTiff')
#f_template = os.path.join(inpath,'wc2.0_30s_tavg','wc2.0_30s_tavg_01.tif')
#f1 = gdal.Open(f_template)
#f2 = gtiffDriver.CreateCopy(f_tiffout,f1,strict=0,options=["COMPRESS=DEFLATE"])
#rasterband = f2.GetRasterBand(1)
# Monthdays: Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec
monthdays = [31, 28.25, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
for i,lat in enumerate(lats_rad):
print(i)
#pet = np.zeros([12,nlons],dtype=np.float32)
for month in range(1,13):
# Test for Jan (this is a climatology project, just use date of year 1999)
# choose (approx) middle of month
tmp,day_in_month = calendar.monthrange(1999,month)
day_of_year = datetime.date(1999,month,(day_in_month+1)//2).timetuple().tm_yday
sol_dec = pyeto.sol_dec(day_of_year) # Solar declination
sha = pyeto.sunset_hour_angle(lat, sol_dec)
ird = pyeto.inv_rel_dist_earth_sun(day_of_year)
et_rad = pyeto.et_rad(lat, sol_dec, sha, ird) # Extraterrestrial radiation
# Load data files
f_tavg = os.path.join(inpath,'wc2.0_30s_tavg','wc2.0_30s_tavg_'+str(month).zfill(2)+'.tif')
#print('Loading tavg:',f_tavg)
f=gdal.Open(f_tavg)
rasterband = f.GetRasterBand(1)
tavg = rasterband.ReadAsArray(yoff=i,win_ysize=1)
#print(tavg.shape)
#f.close()
f_tmax = os.path.join(inpath,'wc2.0_30s_tmax','wc2.0_30s_tmax_'+str(month).zfill(2)+'.tif')
#print('Loading tmax:',f_tmax)
f=gdal.Open(f_tmax)
rasterband = f.GetRasterBand(1)
tmax = rasterband.ReadAsArray(yoff=i,win_ysize=1)
#f.close()
f_tmin = os.path.join(inpath,'wc2.0_30s_tmin','wc2.0_30s_tmin_'+str(month).zfill(2)+'.tif')
#print('Loading tmin:',f_tmax)
f=gdal.Open(f_tmin)
rasterband = f.GetRasterBand(1)
tmin = rasterband.ReadAsArray(yoff=i,win_ysize=1)
#f.close()
# pyeto gives mm/day, multiply by monthdays to get mm/month
pet_timeslice[month-1,:] = pyeto.hargreaves(tmin,tmax,tavg, et_rad)*monthdays[month-1]
# Sum over months to get mm/year, and write out
pet = pet_timeslice.sum(0,keepdims=True)
f_out.variables['pet'][i,:] = pet[0,:]
#print(rasterband)
# Also write data out to tiff format
#rasterband.WriteArray(pet,yoff=i)
# Close tif datasets
#f1 = None
#f2 = None
print('done')
|
python
|
"""
Created on Wed Nov 7 13:06:08 2018
@author: david
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import PolynomialFeatures
import pandas as pd
#from astropy.convolution import Gaussian2DKernel,convolve
#import scipy.linalg as linalg
#from mpl_toolkits.mplot3d import Axes3D
DATA=pd.read_csv("SIGCOL_RUN.txt",delimiter=" ",header=None)
x=DATA[2].values
y=DATA[3].values
names=DATA[0].values
#plt.scatter(x,y)
ii=x>np.amin(x)
x=x[ii]
y=y[ii]
n_bins=15
tam_x=np.amax(x)-np.amin(x)
l_bin=tam_x/n_bins
n_x=np.zeros(n_bins)
n_y=np.zeros(n_bins)
for i in range(n_bins):
n_x[i]=np.amin(x)+i*l_bin
ii=np.logical_and(x>=n_x[i],x<n_x[i]+l_bin)
n_y[i]=0
if(len(y[ii])!=0):
n_y[i]=np.mean(y[ii])
deg=5
poly = PolynomialFeatures(degree=deg)
X = poly.fit_transform(n_x.reshape(-1,1))
pred_x=poly.fit_transform(x.reshape(-1,1))
act_y=poly.fit_transform(y.reshape(-1,1))
Y=poly.fit_transform(n_y.reshape(-1,1))
clf = linear_model.LinearRegression()
clf.fit(X,Y)
pred_y=clf.predict(pred_x)
TH=2
rev=1
ii=TH*pred_y[:,1]<=act_y[:,1]
jj=np.logical_not(ii)
indexes=np.where(ii)[0]
#variabs=names[ii]
fileout=open("VAR_"+str(deg)+".txt","w")
for i in indexes:
fileout.write(names[i]+" "+str(x[i])+" " +str(y[i]))
fileout.write("\n")
fileout.close()
fileout=open("VAR_"+str(deg)+"_names.txt","w")
for i in indexes:
fileout.write(names[i])
fileout.write("\n")
fileout.close()
plt.figure(figsize=[10,10])
plt.title("Numero de candidatos = "+str(len(x[ii])))
plt.scatter(x[ii],y[ii],c="g",alpha=0.2)
print(len(x[ii]), " estrellas candidatas.")
plt.scatter(x[jj],y[jj],c="r",alpha=0.2)
t="Y = "
for i in range (len(clf.coef_[rev])):
t=t+str(round(clf.coef_[rev][i]*10**1,2))+"x $10^{-10}$ "+"$X^{"+str(i+1)+"}$ + "
t=t+str(round(clf.intercept_[rev]*10**1,2))+"x $10^{-10}$ "
plt.title(t,fontsize=int(65/len(clf.coef_[rev])))
plt.scatter(x,TH*pred_y[:,rev],s=2)
plt.savefig("FIT_"+str(deg)+"sigc.png")
#plt.scatter(x,y)
|
python
|
import autodisc as ad
from autodisc.gui.gui import DictTableGUI, BaseFrame
import collections
try:
import tkinter as tk
except:
import Tkinter as tk
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
class StatisticTableGUI(DictTableGUI):
@staticmethod
def default_gui_config():
def_config = DictTableGUI.default_gui_config()
def_config.dialog.title = 'Statistics'
def_config.statistics = ad.Config()
return def_config
def display_exploration_data(self, exploration_data, run_id):
self.display_data(exploration_data.runs[run_id].statistics)
def display_data(self, statistics):
if not isinstance(statistics, collections.abc.Sequence):
self.statistics = [statistics]
else:
self.statistics = statistics
# get the data from the stats that should be displayed
data = collections.OrderedDict()
for stat_config in self.gui_config.statistics:
stat_name = stat_config['name']
disp_name = stat_config.get('disp_name', stat_name)
stats_idx = stat_config.get('stats_idx', 0)
format_str = stat_config.get('format', None)
stat_data = self.statistics[stats_idx][stat_name]
if format_str:
stat_data = format_str.format(stat_data)
data[disp_name] = stat_data
super().display_data(data)
class StatisticLineGUI(BaseFrame):
@staticmethod
def default_gui_config():
default_config = BaseFrame.default_gui_config()
default_config.figure = {'figsize':(6,5), 'dpi':100}
default_config.legend = {'loc': 'upper right'}
default_config.statistics = ad.Config()
return default_config
def __init__(self, master=None, gui_config=None, **kwargs):
super().__init__(master=master, gui_config=gui_config, **kwargs)
self.create_gui()
def create_gui(self):
        # make the figure canvas in the frame resizable
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
self.figure = plt.Figure(**self.gui_config['figure'])
self.ax = self.figure.add_subplot(111)
self.legend = None
self.figure_canvas = FigureCanvasTkAgg(self.figure, master=self)
self.figure_canvas.get_tk_widget().grid(sticky=tk.NSEW)
self.master.update_idletasks()
def display_exploration_data(self, exploration_data, run_id):
self.display_data(exploration_data.runs[run_id].statistics)
def display_data(self, statistics):
if not isinstance(statistics, collections.abc.Sequence):
self.statistics = [statistics]
else:
self.statistics = statistics
self.ax.clear()
for stat_config in self.gui_config.statistics:
stat_name = stat_config['name']
disp_name = stat_config.get('disp_name', stat_name)
stats_idx = stat_config.get('stats_idx', 0)
stat_data = self.statistics[stats_idx][stat_name]
self.ax.plot(stat_data, label=disp_name)
self.legend = self.ax.legend(**self.gui_config.legend)
self.figure_canvas.draw_idle()
class StatisticBarGUI(BaseFrame):
@staticmethod
def default_gui_config():
default_config = BaseFrame.default_gui_config()
default_config.figure = {'figsize':(6,5), 'dpi':100}
default_config.legend = {'loc': 'upper right'}
default_config.statistics = ad.Config()
return default_config
def __init__(self, master=None, gui_config=None, **kwargs):
super().__init__(master=master, gui_config=gui_config, **kwargs)
self.create_gui()
def create_gui(self):
        # make the figure canvas in the frame resizable
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
self.figure = plt.Figure(**self.gui_config['figure'])
self.ax = self.figure.add_subplot(111)
self.legend = None
self.figure_canvas = FigureCanvasTkAgg(self.figure, master=self)
self.figure_canvas.get_tk_widget().grid(sticky=tk.NSEW)
self.master.update_idletasks()
def display_exploration_data(self, exploration_data, run_id):
self.display_data(exploration_data.runs[run_id].statistics)
def display_data(self, statistics):
if not isinstance(statistics, collections.abc.Sequence):
self.statistics = [statistics]
else:
self.statistics = statistics
self.ax.clear()
stat_names = []
for stat_config_idx in range(len(self.gui_config.statistics)):
stat_config = self.gui_config.statistics[stat_config_idx]
stat_name = stat_config['name']
disp_name = stat_config.get('disp_name', stat_name)
stats_idx = stat_config.get('stats_idx', 0)
stat_data = self.statistics[stats_idx][stat_name]
stat_names.append(disp_name)
self.ax.bar(stat_config_idx, stat_data, align='center')
self.ax.set_xticks(range(len(stat_names)))
self.ax.set_xticklabels(stat_names)
self.figure_canvas.draw_idle()
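# --- Usage sketch (illustrative only; assumes BaseFrame wraps a tk.Frame and
# --- that gui_config.statistics accepts a list of plain dicts, which is not
# --- shown in this file; the stat names are hypothetical) ---
#
#   import tkinter as tk
#   root = tk.Tk()
#   config = StatisticLineGUI.default_gui_config()
#   config.statistics = [
#       {'name': 'reward', 'disp_name': 'Episode reward'},
#       {'name': 'loss'},
#   ]
#   gui = StatisticLineGUI(master=root, gui_config=config)
#   gui.grid(sticky=tk.NSEW)
#   gui.display_data({'reward': [1, 2, 3], 'loss': [0.9, 0.5, 0.2]})
#   root.mainloop()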
|
python
|
from gf.models.account import Account
from time import time
from uuid import uuid4
import multiprocessing as mp
import gf.lib.db.db_utils as db
#db.get_row_by_id("users", uuid4())
#db.get_row_by_id("shit", uuid4())
Account(id=uuid4(), username="hello", name="John", email="shit").commit()
# quit()  # early exit disabled so the benchmark code below actually runs
init_id = uuid4()
acc = Account(id=init_id, name='Matt Morse',
username='matt',
email='[email protected]',
login_count=80,
email_authorized=True,
grade='14',
city_name='Buffalo',
bio_text='THE MOST BADASS STEM STUDENT IN TOWN',
tags=[uuid4(), uuid4(), uuid4(), uuid4(), uuid4()],
subscribed=[uuid4(), uuid4()],
subscribers=[uuid4(), uuid4()],
projects=[uuid4()],
following_projects=[uuid4(),uuid4()],
trophies=[uuid4(),uuid4(),uuid4()])
acc.commit()
# quit()  # early exit disabled so the serial and parallel benchmarks below run
n = 100
avg_write = 0.0
avg_read = 0.0
for i in range(n):
start = time()
acc.commit()
avg_write += time() - start
start = time()
Account.get_by_lookup('matt')
avg_read += time() - start
avg_read /= float(n)
avg_write /= float(n)
print "serial test:"
print "Average read speed:", avg_read
print "Average write speed:", avg_write
def threaded_test(filler):
    print('starting')
    n = 100
    avg_write = 0.0
    avg_read = 0.0
    for i in range(n):
        print('committing')
        start = time()
        acc.commit()
        avg_write += time() - start
        print('committed!')
        print('looking up')
        start = time()
        Account.get_by_lookup('matt')
        avg_read += time() - start
        print('looked up!')
    avg_read /= float(n)
    avg_write /= float(n)
    return avg_read, avg_write
num_threads = 4
pool = mp.Pool(num_threads)
results = pool.map(threaded_test, [1,1,1,1])
avg_write = 0.0
avg_read = 0.0
for read, write in results:
avg_write += write
avg_read += read
avg_read /= float(num_threads)
avg_write /= float(num_threads)
print "parallel test:"
print "Average read speed:", avg_read
print "Average write speed:", avg_write
|
python
|
from collections import deque
import json
from kafka import KafkaProducer, KafkaConsumer
from anonymizer import Anonymizer
CACHE_SIZE = 5

consumer = KafkaConsumer(bootstrap_servers="localhost:9092", value_deserializer=json.loads)
consumer.subscribe(["unanon"])
# serialize outgoing values as JSON so dicts yielded by the anonymizer can be sent directly
producer = KafkaProducer(bootstrap_servers="localhost:9092",
                         value_serializer=lambda v: json.dumps(v).encode("utf-8"))

anonymizer = Anonymizer({
    "drop": {"keys": ["something-unimportant"]},
    "mean": {"keys": ["some-number"]}
})

def anonymize(consumer):
    cache = deque()
    for msg in consumer:
        print(msg)
        cache.append(msg)
        if len(cache) > CACHE_SIZE:
            output = anonymizer.process([{**m.value} for m in cache])
            cache.clear()  # flush the batch so messages are not anonymized twice
            if isinstance(output, list):
                for item in output:
                    yield item
            else:
                yield output

for anon_msg in anonymize(consumer):
    producer.send("anon", anon_msg)
|
python
|
import click
import pprint
@click.command()
@click.argument('type', default='all')
@click.option('--check/--no-check', help="Just check (no changes)", default=False)
@click.option('--update/--no-update', help="Update if exists", default=False)
@click.option('--push/--no-push', help="Push update (when specified with --update)", default=False)
@click.option('--diff/--no-diff', help="Show Diffs", default=False)
@click.option('--file', '-f', help="Input file name", required=True)
# @click.option('--type',
# help="Device type [vedges, controllers]",
# type=click.Choice(['vedges', 'controllers']))
@click.pass_context
def policy(ctx, type, file, update, check, diff, push):
"""
Import policy from file
"""
vmanage_session = ctx.obj
pp = pprint.PrettyPrinter(indent=2)
click.echo(f"{'Checking' if check else 'Importing'} policy from {file}")
result = vmanage_session.import_policy_from_file(file, update=update, check_mode=check, push=push)
print(f"Policy List Updates: {len(result['policy_list_updates'])}")
if diff:
for diff_item in result['policy_list_updates']:
click.echo(f"{diff_item['name']}:")
pp.pprint(diff_item['diff'])
print(f"Policy Definition Updates: {len(result['policy_definition_updates'])}")
if diff:
for diff_item in result['policy_definition_updates']:
click.echo(f"{diff_item['name']}:")
pp.pprint(diff_item['diff'])
print(f"Central Policy Updates: {len(result['central_policy_updates'])}")
if diff:
for diff_item in result['central_policy_updates']:
click.echo(f"{diff_item['name']}:")
pp.pprint(diff_item['diff'])
print(f"Local Policy Updates: {len(result['local_policy_updates'])}")
if diff:
for diff_item in result['local_policy_updates']:
click.echo(f"{diff_item['name']}:")
pp.pprint(diff_item['diff'])
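# --- Usage sketch (illustrative; "sdwan" and the "import" group are hypothetical
# --- names for the click group this command would be registered under) ---
#   $ sdwan import policy --file policies.json --check --diff
#   $ sdwan import policy --file policies.json --update --push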
|
python
|
import pytest
import yaml
from utils import utils
class TestConvertReiwaToYear:
@pytest.fixture(scope="function")
def data_fixture(self):
return ((1, 2019), (2, 2020), (3, 2021))
    def test_convert(self, data_fixture):
        # the fixture pairs each Reiwa year with the expected Gregorian year
        for reiwa, expected in data_fixture:
            assert utils.convert_reiwa_to_year(reiwa) == expected
class TestLoadSettings:
@pytest.fixture(scope="function")
def yml_data(self, tmp_path):
_path = tmp_path / "settings_test.yml"
_expected = {"line": {"token": "test"}}
with _path.open(mode="w") as f:
yaml.safe_dump(_expected, f)
return _path, _expected
def test_load_settings(self, yml_data):
path, expected = yml_data
actual = utils.load_settings(str(path))
assert actual == expected
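# --- Sketch: the same table-driven check written with pytest.mark.parametrize,
# --- which reports each (reiwa, expected) pair as its own test case ---
@pytest.mark.parametrize("reiwa, expected", [(1, 2019), (2, 2020), (3, 2021)])
def test_convert_reiwa_to_year_parametrized(reiwa, expected):
    assert utils.convert_reiwa_to_year(reiwa) == expected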
|
python
|
"""
FactSet ESG API
FactSet ESG (powered by FactSet Truvalue Labs) applies machine learning to uncover risks and opportunities from companies' Environmental, Social and Governance (ESG) behavior, which are aggregated and categorized into continuously updated, material ESG scores. The service focuses on company ESG behavior from external sources and includes both positive and negative events that go beyond traditional sources of ESG risk data.<p> FactSet ESG extracts, analyzes, and generates scores from millions of documents each month collected from more than 100,000 data sources in over 13 languages. Sources include news, trade journals, NGOs, watchdog groups, trade blogs, industry reports and social media. Products deliver investable insights by revealing value and risk factors from unstructured data at the speed of current events.</p> # noqa: E501
The version of the OpenAPI document: 1.3.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.FactSetESG.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.FactSetESG.exceptions import ApiAttributeError
class SdgCategories(ModelSimple):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'value': ([str],),
}
@cached_property
def discriminator():
return None
attribute_map = {}
read_only_vars = set()
_composed_schemas = None
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
"""SdgCategories - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] ([str]): The SDG Categories specified for the Truvalue Scores being requested. To specify select categories returned in the response, provide a comma-separated list of the scores using the description below. |**SDG Category Input**|**Description**| |---|---| |**IMPACT**|**Impact** - The aggregate SDG score for each company is simply named \"Impact.\" The SDG Impact Score is produced using a weighted average of individual category scores, where the weight utilized is the category score volume.| |**IMPACTARTICLES**|**Impact Articles** - The All Goals Category Volume measures the total number of times any of the 16 goals received a score over a trailing twelve-month (TTM) period of time. **( Data wil be returned only for `TTM_VOLUME` score type)** | |**GOAL1NOPOVERTY**|**No Poverty** - Goal 1 focuses on poverty in all its manifestations and also aims to ensure social protection for the poor and vulnerable, increase access to basic services and support people harmed by climate related extreme events and other economic, social and environmental shocks and disasters. <p>**Company-Level Issue Examples** *- Financial services access and affordability, Underserved groups,Unethical pricing.*| |**GOAL2ZEROHUNGER**|**Zero Hunger** - Goal 2 aims to end hunger and all forms of malnutrition and commits to universal access to safe, nutritious and sufficient food at all times of the year, particularly for the poor and people in vulnerable situations (e.g., infants). This will require sustainable food production systems and resilient agricultural practices, equal access to land, technology, and markets and international cooperation on investments in infrastructure and technology to boost agricultural productivity. <p>**Company-Level Issue Examples** *- Sustainable agricultural practices, Agricultural ingredients sourcing and certifications, Food safety concerns, Animal welfare.*| |**GOAL3GOODHEALTHANDWELLBEING**|**Good Health and Wellbeing** - Goal 3 seeks to ensure health and wellbeing for all, at every stage of life and addresses all major health priorities, including reproductive, maternal, and child health; communicable, noncommunicable and environmental diseases; universal health coverage; and access for all to safe, effective, quality, and affordable medicines and vaccines.<p> **Company-Level Issue Examples** *- Harmful Chemicals in Products, Product Recalls, Healthcare Access and Affordability.*| |**GOAL4QUALITYEDUCATION**|**Quality Education** - Goal 4 addresses access and affordability of education and skills development starting from childhood development and continuing through adulthood, including for girls, persons with disabilities, indigenous peoples and children in vulnerable situations, Improvements to the access to education it hopes to ensure that all youth and a substantial proportion of adults achieve literacy and numeracy. It also seeks to build and upgrade education facilities and to increase the supply of qualified teachers.<p>**Company-Level Issue Examples** *- Mentorship and training, Education company quality, Education company ethics.*| |**GOAL5GENDEREQUALITY**|**Gender Equality** - Goal 5 emphasizes eliminating discrimination and violence against women and girls. The Goal emphasizes ensuring women's full and effective participation and equal opportunities for leadership at all levels of decision-making in political, economic and public life. 
Access to sexual and reproductive health and reproductive rights and access to economic resources (e.g., land ownership, financial services) are also emphasized.<p>**Company-Level Issue Examples** *- Board Diversity, Gender Discrimination, Sexual Harassment.*| |**GOAL6CLEANWATERANDSANITATION**|**Clean Water and Sanitation** - Goal 6 not only addresses issues relating to drinking water, sanitation and hygiene, but also the quality and sustainability of water resources worldwide. It strives to achieve universal and equitable access to safe and affordable drinking water for all. It also focuses on adequate and equitable sanitation and hygiene and reducing pollution, minimizing release of hazardous chemicals and materials, and protection of water-related ecosystems. It also highlights increasing water-use efficiency across all sectors, recycling, and ensuring sustainable withdrawals and supply of freshwater.<p>**Company-Level Issue Examples** *- Water Pollution, Water Recycling and Stewardship, Water Infrastructure.*| |**GOAL7AFFORDABLEANDCLEANENERGY**|**Goal 7 Affordable and Clean Energy** - Goal 7 seeks to ensure access to affordable, reliable, and modern energy services for all. It aims to increase renewable energy in the global energy mix and improve energy efficiency significantly. It also calls for more access to clean energy research, technology, and infrastructure for renewable energy, energy efficiency, and advanced and cleaner fossil-fuel technology, and promoting investment in energy infrastructure and clean energy technology.<p>**Company-Level Issue Examples** *- Green Buildings, Renewable Energy, Unethical Utility Pricing.*| |**GOAL8DECENTWORKANDECONOMICGROWTH**|**Decent Work and Economic Growth** - Goal 8 focuses on economic productivity and supports policies for entrepreneurship, creativity and innovation that assist micro, small, and medium-sized enterprises. The Goal also seeks to reduce unemployment, the proportion of youth not working or in school, child labor, and forced labor. Also covered are the protection of labor rights, migrant workers, safe and secure working environments, sustainable tourism, and increasing the capacity of domestic financial institutions in regards to access to banking, insurance, and financial services.<p>**Company-Level Issue Examples** *- Job Creation, Labor Exploitation, Employee Health and Safety, Workplace Turnover, Supplier Transparency.*| |**GOAL9INDUSTRYINNOVATIONANDINFRASTRUCTURE**|**Industry Innovation and Infrastructure** - Goal 9 focuses on three important aspects of sustainable development, infrastructure, industrialization and innovation, including considerations for resiliency, equity, quality, reliability, access and affordability, and regional and transborder infrastructure. The Goal focuses on infrastructure upgrades and retrofitting of industries with increased resource-use efficiency and clean and environmentally sound technologies and industrial processes.<p>**Company-Level Issue Examples** *- Digital Divide, ESG integration in financial services, Engineering Structural Integrity.*| |**GOAL10REDUCEDINEQUALITIES**|**Reduced Inequalities** - Goal 10 calls for reducing inequalities in income as well as those based on age, sex, disability, race, ethnicity, origin, religion, or economic or other status within a country. The Goal addresses inequalities among countries, including those related to representation, migration, and development assistance. It aims to empower and promote social, economic, and political inclusion of all. 
The Goal stresses regulation and monitoring of global financial markets and institutions.<p>**Company-Level Issue Examples** *- Responsible Lending, Worker Discrimination, CEO Pay Gap, Worker Pay Gap, Workplace Diversity and Inclusion.*| |**GOAL11SUSTAINABLECITIESANDCOMMUNITIES**|**Sustainable Cities and Communities** - Goal 11 seeks to ensure access for all to adequate, safe, and affordable housing and basic services, and green and public spaces, and to upgrade slums. It focuses on improving transportation, air quality and municipal and other waste management, and creating inclusive and sustainable urbanization through participatory urban planning. The Goal also supports safeguarding the world's cultural and natural heritage, while aiming to increase the number of cities and human settlements adopting and implementing integrated policies and plans towards inclusion, resource efficiency, mitigation and adaptation to climate change, and resilience to disasters.<p>**Company-Level Issue Examples** *- Air Pollution, Environmental Justice, Human Rights Violations, Affordable Housing.*| |**GOAL12RESPONSIBLECONSUMPTIONANDPRODUCTION**|**Responsible Consumption and Production** - Goal 12 aims to achieve the sustainable management and efficient use of natural resources through both the public and private sector. It specifically addresses global food waste in consumption, production, and distribution, sustainable tourism, waste and chemicals management. Goal 12 encourages sustainability reporting in the private sector, while in the public sector it encourages restructuring taxation and subsidies for fossil fuels and promoting sustainable public procurement practices.<p>**Company-Level Issue Examples** *- Sustainability Reporting, Circular Economy, Hazardous Waste Management, Waste Reduction.*| |**GOAL13CLIMATEACTION**|**Climate Action** - While Goal 13 is focused on actions by countries towards climate mitigation and adaptation, the private sector can also play a role in these areas. The goal seeks to strengthen resilience and adaptive capacity to climate-related hazards and natural disasters in all countries. It calls for integrating climate change measures, including those related to climate resilience and low GHG development, into national policies, strategies, and planning. It aims to improve education and awareness of climate change mitigation, adaptation, impact reduction, and early warning.<p>**Company-Level Issue Examples** *- GHG Emissions, Sustainable Transportation, Physical Climate Impacts.*| |**GOAL14LIFEBELOWWATER**|**Life Below Water** - Goal 14 focuses on preventing marine pollution of all kinds, particularly from land-based activities, and to minimize and address the impacts of ocean acidification. The Goal also aims to achieve sustainable yields in fisheries, through regulation of harvesting, controlling subsidies, and ending overfishing. It seeks to sustainably manage and protect marine and coastal ecosystems to avoid significant adverse impacts, including by strengthening their resilience, and take action for their restoration in order to achieve healthy and productive oceans.<p>**Company-Level Issue Examples** *- Impacts on water-related endangered species and habitats, Oil Spills, Seafood Sourcing.*| |**GOAL15LIFEONLAND**|**Life On Land** - Goal 15 seeks to ensure the conservation, restoration, and sustainable use of terrestrial and inland freshwater ecosystems and their services, in order to preserve biodiversity. 
It focuses specifically on sustainably managing forests, halting deforestation, restoring degraded lands and successfully combating desertification, reducing degraded natural habitats and ending biodiversity loss, with an emphasis on threatened species and invasive alien species.<p>**Company-Level Issue Examples** *- Impacts on land-related endangered species and habitats, Sustainable forestry practices and certifications, Project lifecycle environmental impacts.*| |**GOAL16PEACEJUSTICEANDSTRONGINSTITUTIONS**|**Peace, Justice, and Strong Institutions** - Goal 16 aims to significantly reduce all forms of violence, and also focuses specifically on reducing violence against children in the forms of abuse, exploitation, trafficking, and torture. It also aims to significantly reduce illicit financial and arms flows and to substantially reduce corruption and bribery in all their forms. The Goal also emphasizes effective and transparent institutions at all levels, inclusive and participatory decision-making, ensuring public access to information, and protection of fundamental freedoms.<p>**Company-Level Issue Examples** *- Tax Avoidance, Anti-Competitive Behavior, Cyber Security, Corruption, ESG Resolutions.*| . if omitted defaults to ["IMPACT"] # noqa: E501
Keyword Args:
value ([str]): The SDG Categories specified for the Truvalue Scores being requested. To specify select categories returned in the response, provide a comma-separated list of the scores using the description below. |**SDG Category Input**|**Description**| |---|---| |**IMPACT**|**Impact** - The aggregate SDG score for each company is simply named \"Impact.\" The SDG Impact Score is produced using a weighted average of individual category scores, where the weight utilized is the category score volume.| |**IMPACTARTICLES**|**Impact Articles** - The All Goals Category Volume measures the total number of times any of the 16 goals received a score over a trailing twelve-month (TTM) period of time. **( Data wil be returned only for `TTM_VOLUME` score type)** | |**GOAL1NOPOVERTY**|**No Poverty** - Goal 1 focuses on poverty in all its manifestations and also aims to ensure social protection for the poor and vulnerable, increase access to basic services and support people harmed by climate related extreme events and other economic, social and environmental shocks and disasters. <p>**Company-Level Issue Examples** *- Financial services access and affordability, Underserved groups,Unethical pricing.*| |**GOAL2ZEROHUNGER**|**Zero Hunger** - Goal 2 aims to end hunger and all forms of malnutrition and commits to universal access to safe, nutritious and sufficient food at all times of the year, particularly for the poor and people in vulnerable situations (e.g., infants). This will require sustainable food production systems and resilient agricultural practices, equal access to land, technology, and markets and international cooperation on investments in infrastructure and technology to boost agricultural productivity. <p>**Company-Level Issue Examples** *- Sustainable agricultural practices, Agricultural ingredients sourcing and certifications, Food safety concerns, Animal welfare.*| |**GOAL3GOODHEALTHANDWELLBEING**|**Good Health and Wellbeing** - Goal 3 seeks to ensure health and wellbeing for all, at every stage of life and addresses all major health priorities, including reproductive, maternal, and child health; communicable, noncommunicable and environmental diseases; universal health coverage; and access for all to safe, effective, quality, and affordable medicines and vaccines.<p> **Company-Level Issue Examples** *- Harmful Chemicals in Products, Product Recalls, Healthcare Access and Affordability.*| |**GOAL4QUALITYEDUCATION**|**Quality Education** - Goal 4 addresses access and affordability of education and skills development starting from childhood development and continuing through adulthood, including for girls, persons with disabilities, indigenous peoples and children in vulnerable situations, Improvements to the access to education it hopes to ensure that all youth and a substantial proportion of adults achieve literacy and numeracy. It also seeks to build and upgrade education facilities and to increase the supply of qualified teachers.<p>**Company-Level Issue Examples** *- Mentorship and training, Education company quality, Education company ethics.*| |**GOAL5GENDEREQUALITY**|**Gender Equality** - Goal 5 emphasizes eliminating discrimination and violence against women and girls. The Goal emphasizes ensuring women's full and effective participation and equal opportunities for leadership at all levels of decision-making in political, economic and public life. 
Access to sexual and reproductive health and reproductive rights and access to economic resources (e.g., land ownership, financial services) are also emphasized.<p>**Company-Level Issue Examples** *- Board Diversity, Gender Discrimination, Sexual Harassment.*| |**GOAL6CLEANWATERANDSANITATION**|**Clean Water and Sanitation** - Goal 6 not only addresses issues relating to drinking water, sanitation and hygiene, but also the quality and sustainability of water resources worldwide. It strives to achieve universal and equitable access to safe and affordable drinking water for all. It also focuses on adequate and equitable sanitation and hygiene and reducing pollution, minimizing release of hazardous chemicals and materials, and protection of water-related ecosystems. It also highlights increasing water-use efficiency across all sectors, recycling, and ensuring sustainable withdrawals and supply of freshwater.<p>**Company-Level Issue Examples** *- Water Pollution, Water Recycling and Stewardship, Water Infrastructure.*| |**GOAL7AFFORDABLEANDCLEANENERGY**|**Goal 7 Affordable and Clean Energy** - Goal 7 seeks to ensure access to affordable, reliable, and modern energy services for all. It aims to increase renewable energy in the global energy mix and improve energy efficiency significantly. It also calls for more access to clean energy research, technology, and infrastructure for renewable energy, energy efficiency, and advanced and cleaner fossil-fuel technology, and promoting investment in energy infrastructure and clean energy technology.<p>**Company-Level Issue Examples** *- Green Buildings, Renewable Energy, Unethical Utility Pricing.*| |**GOAL8DECENTWORKANDECONOMICGROWTH**|**Decent Work and Economic Growth** - Goal 8 focuses on economic productivity and supports policies for entrepreneurship, creativity and innovation that assist micro, small, and medium-sized enterprises. The Goal also seeks to reduce unemployment, the proportion of youth not working or in school, child labor, and forced labor. Also covered are the protection of labor rights, migrant workers, safe and secure working environments, sustainable tourism, and increasing the capacity of domestic financial institutions in regards to access to banking, insurance, and financial services.<p>**Company-Level Issue Examples** *- Job Creation, Labor Exploitation, Employee Health and Safety, Workplace Turnover, Supplier Transparency.*| |**GOAL9INDUSTRYINNOVATIONANDINFRASTRUCTURE**|**Industry Innovation and Infrastructure** - Goal 9 focuses on three important aspects of sustainable development, infrastructure, industrialization and innovation, including considerations for resiliency, equity, quality, reliability, access and affordability, and regional and transborder infrastructure. The Goal focuses on infrastructure upgrades and retrofitting of industries with increased resource-use efficiency and clean and environmentally sound technologies and industrial processes.<p>**Company-Level Issue Examples** *- Digital Divide, ESG integration in financial services, Engineering Structural Integrity.*| |**GOAL10REDUCEDINEQUALITIES**|**Reduced Inequalities** - Goal 10 calls for reducing inequalities in income as well as those based on age, sex, disability, race, ethnicity, origin, religion, or economic or other status within a country. The Goal addresses inequalities among countries, including those related to representation, migration, and development assistance. It aims to empower and promote social, economic, and political inclusion of all. 
The Goal stresses regulation and monitoring of global financial markets and institutions.<p>**Company-Level Issue Examples** *- Responsible Lending, Worker Discrimination, CEO Pay Gap, Worker Pay Gap, Workplace Diversity and Inclusion.*| |**GOAL11SUSTAINABLECITIESANDCOMMUNITIES**|**Sustainable Cities and Communities** - Goal 11 seeks to ensure access for all to adequate, safe, and affordable housing and basic services, and green and public spaces, and to upgrade slums. It focuses on improving transportation, air quality and municipal and other waste management, and creating inclusive and sustainable urbanization through participatory urban planning. The Goal also supports safeguarding the world's cultural and natural heritage, while aiming to increase the number of cities and human settlements adopting and implementing integrated policies and plans towards inclusion, resource efficiency, mitigation and adaptation to climate change, and resilience to disasters.<p>**Company-Level Issue Examples** *- Air Pollution, Environmental Justice, Human Rights Violations, Affordable Housing.*| |**GOAL12RESPONSIBLECONSUMPTIONANDPRODUCTION**|**Responsible Consumption and Production** - Goal 12 aims to achieve the sustainable management and efficient use of natural resources through both the public and private sector. It specifically addresses global food waste in consumption, production, and distribution, sustainable tourism, waste and chemicals management. Goal 12 encourages sustainability reporting in the private sector, while in the public sector it encourages restructuring taxation and subsidies for fossil fuels and promoting sustainable public procurement practices.<p>**Company-Level Issue Examples** *- Sustainability Reporting, Circular Economy, Hazardous Waste Management, Waste Reduction.*| |**GOAL13CLIMATEACTION**|**Climate Action** - While Goal 13 is focused on actions by countries towards climate mitigation and adaptation, the private sector can also play a role in these areas. The goal seeks to strengthen resilience and adaptive capacity to climate-related hazards and natural disasters in all countries. It calls for integrating climate change measures, including those related to climate resilience and low GHG development, into national policies, strategies, and planning. It aims to improve education and awareness of climate change mitigation, adaptation, impact reduction, and early warning.<p>**Company-Level Issue Examples** *- GHG Emissions, Sustainable Transportation, Physical Climate Impacts.*| |**GOAL14LIFEBELOWWATER**|**Life Below Water** - Goal 14 focuses on preventing marine pollution of all kinds, particularly from land-based activities, and to minimize and address the impacts of ocean acidification. The Goal also aims to achieve sustainable yields in fisheries, through regulation of harvesting, controlling subsidies, and ending overfishing. It seeks to sustainably manage and protect marine and coastal ecosystems to avoid significant adverse impacts, including by strengthening their resilience, and take action for their restoration in order to achieve healthy and productive oceans.<p>**Company-Level Issue Examples** *- Impacts on water-related endangered species and habitats, Oil Spills, Seafood Sourcing.*| |**GOAL15LIFEONLAND**|**Life On Land** - Goal 15 seeks to ensure the conservation, restoration, and sustainable use of terrestrial and inland freshwater ecosystems and their services, in order to preserve biodiversity. 
It focuses specifically on sustainably managing forests, halting deforestation, restoring degraded lands and successfully combating desertification, reducing degraded natural habitats and ending biodiversity loss, with an emphasis on threatened species and invasive alien species.<p>**Company-Level Issue Examples** *- Impacts on land-related endangered species and habitats, Sustainable forestry practices and certifications, Project lifecycle environmental impacts.*| |**GOAL16PEACEJUSTICEANDSTRONGINSTITUTIONS**|**Peace, Justice, and Strong Institutions** - Goal 16 aims to significantly reduce all forms of violence, and also focuses specifically on reducing violence against children in the forms of abuse, exploitation, trafficking, and torture. It also aims to significantly reduce illicit financial and arms flows and to substantially reduce corruption and bribery in all their forms. The Goal also emphasizes effective and transparent institutions at all levels, inclusive and participatory decision-making, ensuring public access to information, and protection of fundamental freedoms.<p>**Company-Level Issue Examples** *- Tax Avoidance, Anti-Competitive Behavior, Cyber Security, Corruption, ESG Resolutions.*| . if omitted defaults to ["IMPACT"] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
# required up here when default value is not given
_path_to_item = kwargs.pop('_path_to_item', ())
if 'value' in kwargs:
value = kwargs.pop('value')
elif args:
args = list(args)
value = args.pop(0)
else:
value = ["IMPACT"]
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs):
"""SdgCategories - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] ([str]): The SDG Categories specified for the Truvalue Scores being requested. To specify select categories returned in the response, provide a comma-separated list of the scores using the description below. |**SDG Category Input**|**Description**| |---|---| |**IMPACT**|**Impact** - The aggregate SDG score for each company is simply named \"Impact.\" The SDG Impact Score is produced using a weighted average of individual category scores, where the weight utilized is the category score volume.| |**IMPACTARTICLES**|**Impact Articles** - The All Goals Category Volume measures the total number of times any of the 16 goals received a score over a trailing twelve-month (TTM) period of time. **( Data wil be returned only for `TTM_VOLUME` score type)** | |**GOAL1NOPOVERTY**|**No Poverty** - Goal 1 focuses on poverty in all its manifestations and also aims to ensure social protection for the poor and vulnerable, increase access to basic services and support people harmed by climate related extreme events and other economic, social and environmental shocks and disasters. <p>**Company-Level Issue Examples** *- Financial services access and affordability, Underserved groups,Unethical pricing.*| |**GOAL2ZEROHUNGER**|**Zero Hunger** - Goal 2 aims to end hunger and all forms of malnutrition and commits to universal access to safe, nutritious and sufficient food at all times of the year, particularly for the poor and people in vulnerable situations (e.g., infants). This will require sustainable food production systems and resilient agricultural practices, equal access to land, technology, and markets and international cooperation on investments in infrastructure and technology to boost agricultural productivity. <p>**Company-Level Issue Examples** *- Sustainable agricultural practices, Agricultural ingredients sourcing and certifications, Food safety concerns, Animal welfare.*| |**GOAL3GOODHEALTHANDWELLBEING**|**Good Health and Wellbeing** - Goal 3 seeks to ensure health and wellbeing for all, at every stage of life and addresses all major health priorities, including reproductive, maternal, and child health; communicable, noncommunicable and environmental diseases; universal health coverage; and access for all to safe, effective, quality, and affordable medicines and vaccines.<p> **Company-Level Issue Examples** *- Harmful Chemicals in Products, Product Recalls, Healthcare Access and Affordability.*| |**GOAL4QUALITYEDUCATION**|**Quality Education** - Goal 4 addresses access and affordability of education and skills development starting from childhood development and continuing through adulthood, including for girls, persons with disabilities, indigenous peoples and children in vulnerable situations, Improvements to the access to education it hopes to ensure that all youth and a substantial proportion of adults achieve literacy and numeracy. It also seeks to build and upgrade education facilities and to increase the supply of qualified teachers.<p>**Company-Level Issue Examples** *- Mentorship and training, Education company quality, Education company ethics.*| |**GOAL5GENDEREQUALITY**|**Gender Equality** - Goal 5 emphasizes eliminating discrimination and violence against women and girls. The Goal emphasizes ensuring women's full and effective participation and equal opportunities for leadership at all levels of decision-making in political, economic and public life. 
Access to sexual and reproductive health and reproductive rights and access to economic resources (e.g., land ownership, financial services) are also emphasized.<p>**Company-Level Issue Examples** *- Board Diversity, Gender Discrimination, Sexual Harassment.*| |**GOAL6CLEANWATERANDSANITATION**|**Clean Water and Sanitation** - Goal 6 not only addresses issues relating to drinking water, sanitation and hygiene, but also the quality and sustainability of water resources worldwide. It strives to achieve universal and equitable access to safe and affordable drinking water for all. It also focuses on adequate and equitable sanitation and hygiene and reducing pollution, minimizing release of hazardous chemicals and materials, and protection of water-related ecosystems. It also highlights increasing water-use efficiency across all sectors, recycling, and ensuring sustainable withdrawals and supply of freshwater.<p>**Company-Level Issue Examples** *- Water Pollution, Water Recycling and Stewardship, Water Infrastructure.*| |**GOAL7AFFORDABLEANDCLEANENERGY**|**Goal 7 Affordable and Clean Energy** - Goal 7 seeks to ensure access to affordable, reliable, and modern energy services for all. It aims to increase renewable energy in the global energy mix and improve energy efficiency significantly. It also calls for more access to clean energy research, technology, and infrastructure for renewable energy, energy efficiency, and advanced and cleaner fossil-fuel technology, and promoting investment in energy infrastructure and clean energy technology.<p>**Company-Level Issue Examples** *- Green Buildings, Renewable Energy, Unethical Utility Pricing.*| |**GOAL8DECENTWORKANDECONOMICGROWTH**|**Decent Work and Economic Growth** - Goal 8 focuses on economic productivity and supports policies for entrepreneurship, creativity and innovation that assist micro, small, and medium-sized enterprises. The Goal also seeks to reduce unemployment, the proportion of youth not working or in school, child labor, and forced labor. Also covered are the protection of labor rights, migrant workers, safe and secure working environments, sustainable tourism, and increasing the capacity of domestic financial institutions in regards to access to banking, insurance, and financial services.<p>**Company-Level Issue Examples** *- Job Creation, Labor Exploitation, Employee Health and Safety, Workplace Turnover, Supplier Transparency.*| |**GOAL9INDUSTRYINNOVATIONANDINFRASTRUCTURE**|**Industry Innovation and Infrastructure** - Goal 9 focuses on three important aspects of sustainable development, infrastructure, industrialization and innovation, including considerations for resiliency, equity, quality, reliability, access and affordability, and regional and transborder infrastructure. The Goal focuses on infrastructure upgrades and retrofitting of industries with increased resource-use efficiency and clean and environmentally sound technologies and industrial processes.<p>**Company-Level Issue Examples** *- Digital Divide, ESG integration in financial services, Engineering Structural Integrity.*| |**GOAL10REDUCEDINEQUALITIES**|**Reduced Inequalities** - Goal 10 calls for reducing inequalities in income as well as those based on age, sex, disability, race, ethnicity, origin, religion, or economic or other status within a country. The Goal addresses inequalities among countries, including those related to representation, migration, and development assistance. It aims to empower and promote social, economic, and political inclusion of all. 
The Goal stresses regulation and monitoring of global financial markets and institutions.<p>**Company-Level Issue Examples** *- Responsible Lending, Worker Discrimination, CEO Pay Gap, Worker Pay Gap, Workplace Diversity and Inclusion.*| |**GOAL11SUSTAINABLECITIESANDCOMMUNITIES**|**Sustainable Cities and Communities** - Goal 11 seeks to ensure access for all to adequate, safe, and affordable housing and basic services, and green and public spaces, and to upgrade slums. It focuses on improving transportation, air quality and municipal and other waste management, and creating inclusive and sustainable urbanization through participatory urban planning. The Goal also supports safeguarding the world's cultural and natural heritage, while aiming to increase the number of cities and human settlements adopting and implementing integrated policies and plans towards inclusion, resource efficiency, mitigation and adaptation to climate change, and resilience to disasters.<p>**Company-Level Issue Examples** *- Air Pollution, Environmental Justice, Human Rights Violations, Affordable Housing.*| |**GOAL12RESPONSIBLECONSUMPTIONANDPRODUCTION**|**Responsible Consumption and Production** - Goal 12 aims to achieve the sustainable management and efficient use of natural resources through both the public and private sector. It specifically addresses global food waste in consumption, production, and distribution, sustainable tourism, waste and chemicals management. Goal 12 encourages sustainability reporting in the private sector, while in the public sector it encourages restructuring taxation and subsidies for fossil fuels and promoting sustainable public procurement practices.<p>**Company-Level Issue Examples** *- Sustainability Reporting, Circular Economy, Hazardous Waste Management, Waste Reduction.*| |**GOAL13CLIMATEACTION**|**Climate Action** - While Goal 13 is focused on actions by countries towards climate mitigation and adaptation, the private sector can also play a role in these areas. The goal seeks to strengthen resilience and adaptive capacity to climate-related hazards and natural disasters in all countries. It calls for integrating climate change measures, including those related to climate resilience and low GHG development, into national policies, strategies, and planning. It aims to improve education and awareness of climate change mitigation, adaptation, impact reduction, and early warning.<p>**Company-Level Issue Examples** *- GHG Emissions, Sustainable Transportation, Physical Climate Impacts.*| |**GOAL14LIFEBELOWWATER**|**Life Below Water** - Goal 14 focuses on preventing marine pollution of all kinds, particularly from land-based activities, and to minimize and address the impacts of ocean acidification. The Goal also aims to achieve sustainable yields in fisheries, through regulation of harvesting, controlling subsidies, and ending overfishing. It seeks to sustainably manage and protect marine and coastal ecosystems to avoid significant adverse impacts, including by strengthening their resilience, and take action for their restoration in order to achieve healthy and productive oceans.<p>**Company-Level Issue Examples** *- Impacts on water-related endangered species and habitats, Oil Spills, Seafood Sourcing.*| |**GOAL15LIFEONLAND**|**Life On Land** - Goal 15 seeks to ensure the conservation, restoration, and sustainable use of terrestrial and inland freshwater ecosystems and their services, in order to preserve biodiversity. 
It focuses specifically on sustainably managing forests, halting deforestation, restoring degraded lands and successfully combating desertification, reducing degraded natural habitats and ending biodiversity loss, with an emphasis on threatened species and invasive alien species.<p>**Company-Level Issue Examples** *- Impacts on land-related endangered species and habitats, Sustainable forestry practices and certifications, Project lifecycle environmental impacts.*| |**GOAL16PEACEJUSTICEANDSTRONGINSTITUTIONS**|**Peace, Justice, and Strong Institutions** - Goal 16 aims to significantly reduce all forms of violence, and also focuses specifically on reducing violence against children in the forms of abuse, exploitation, trafficking, and torture. It also aims to significantly reduce illicit financial and arms flows and to substantially reduce corruption and bribery in all their forms. The Goal also emphasizes effective and transparent institutions at all levels, inclusive and participatory decision-making, ensuring public access to information, and protection of fundamental freedoms.<p>**Company-Level Issue Examples** *- Tax Avoidance, Anti-Competitive Behavior, Cyber Security, Corruption, ESG Resolutions.*| . if omitted defaults to ["IMPACT"] # noqa: E501
Keyword Args:
value ([str]): The SDG Categories specified for the Truvalue Scores being requested. To specify select categories returned in the response, provide a comma-separated list of the scores using the description below. |**SDG Category Input**|**Description**| |---|---| |**IMPACT**|**Impact** - The aggregate SDG score for each company is simply named \"Impact.\" The SDG Impact Score is produced using a weighted average of individual category scores, where the weight utilized is the category score volume.| |**IMPACTARTICLES**|**Impact Articles** - The All Goals Category Volume measures the total number of times any of the 16 goals received a score over a trailing twelve-month (TTM) period of time. **( Data wil be returned only for `TTM_VOLUME` score type)** | |**GOAL1NOPOVERTY**|**No Poverty** - Goal 1 focuses on poverty in all its manifestations and also aims to ensure social protection for the poor and vulnerable, increase access to basic services and support people harmed by climate related extreme events and other economic, social and environmental shocks and disasters. <p>**Company-Level Issue Examples** *- Financial services access and affordability, Underserved groups,Unethical pricing.*| |**GOAL2ZEROHUNGER**|**Zero Hunger** - Goal 2 aims to end hunger and all forms of malnutrition and commits to universal access to safe, nutritious and sufficient food at all times of the year, particularly for the poor and people in vulnerable situations (e.g., infants). This will require sustainable food production systems and resilient agricultural practices, equal access to land, technology, and markets and international cooperation on investments in infrastructure and technology to boost agricultural productivity. <p>**Company-Level Issue Examples** *- Sustainable agricultural practices, Agricultural ingredients sourcing and certifications, Food safety concerns, Animal welfare.*| |**GOAL3GOODHEALTHANDWELLBEING**|**Good Health and Wellbeing** - Goal 3 seeks to ensure health and wellbeing for all, at every stage of life and addresses all major health priorities, including reproductive, maternal, and child health; communicable, noncommunicable and environmental diseases; universal health coverage; and access for all to safe, effective, quality, and affordable medicines and vaccines.<p> **Company-Level Issue Examples** *- Harmful Chemicals in Products, Product Recalls, Healthcare Access and Affordability.*| |**GOAL4QUALITYEDUCATION**|**Quality Education** - Goal 4 addresses access and affordability of education and skills development starting from childhood development and continuing through adulthood, including for girls, persons with disabilities, indigenous peoples and children in vulnerable situations, Improvements to the access to education it hopes to ensure that all youth and a substantial proportion of adults achieve literacy and numeracy. It also seeks to build and upgrade education facilities and to increase the supply of qualified teachers.<p>**Company-Level Issue Examples** *- Mentorship and training, Education company quality, Education company ethics.*| |**GOAL5GENDEREQUALITY**|**Gender Equality** - Goal 5 emphasizes eliminating discrimination and violence against women and girls. The Goal emphasizes ensuring women's full and effective participation and equal opportunities for leadership at all levels of decision-making in political, economic and public life. 
Access to sexual and reproductive health and reproductive rights and access to economic resources (e.g., land ownership, financial services) are also emphasized.<p>**Company-Level Issue Examples** *- Board Diversity, Gender Discrimination, Sexual Harassment.*| |**GOAL6CLEANWATERANDSANITATION**|**Clean Water and Sanitation** - Goal 6 not only addresses issues relating to drinking water, sanitation and hygiene, but also the quality and sustainability of water resources worldwide. It strives to achieve universal and equitable access to safe and affordable drinking water for all. It also focuses on adequate and equitable sanitation and hygiene and reducing pollution, minimizing release of hazardous chemicals and materials, and protection of water-related ecosystems. It also highlights increasing water-use efficiency across all sectors, recycling, and ensuring sustainable withdrawals and supply of freshwater.<p>**Company-Level Issue Examples** *- Water Pollution, Water Recycling and Stewardship, Water Infrastructure.*| |**GOAL7AFFORDABLEANDCLEANENERGY**|**Goal 7 Affordable and Clean Energy** - Goal 7 seeks to ensure access to affordable, reliable, and modern energy services for all. It aims to increase renewable energy in the global energy mix and improve energy efficiency significantly. It also calls for more access to clean energy research, technology, and infrastructure for renewable energy, energy efficiency, and advanced and cleaner fossil-fuel technology, and promoting investment in energy infrastructure and clean energy technology.<p>**Company-Level Issue Examples** *- Green Buildings, Renewable Energy, Unethical Utility Pricing.*| |**GOAL8DECENTWORKANDECONOMICGROWTH**|**Decent Work and Economic Growth** - Goal 8 focuses on economic productivity and supports policies for entrepreneurship, creativity and innovation that assist micro, small, and medium-sized enterprises. The Goal also seeks to reduce unemployment, the proportion of youth not working or in school, child labor, and forced labor. Also covered are the protection of labor rights, migrant workers, safe and secure working environments, sustainable tourism, and increasing the capacity of domestic financial institutions in regards to access to banking, insurance, and financial services.<p>**Company-Level Issue Examples** *- Job Creation, Labor Exploitation, Employee Health and Safety, Workplace Turnover, Supplier Transparency.*| |**GOAL9INDUSTRYINNOVATIONANDINFRASTRUCTURE**|**Industry Innovation and Infrastructure** - Goal 9 focuses on three important aspects of sustainable development, infrastructure, industrialization and innovation, including considerations for resiliency, equity, quality, reliability, access and affordability, and regional and transborder infrastructure. The Goal focuses on infrastructure upgrades and retrofitting of industries with increased resource-use efficiency and clean and environmentally sound technologies and industrial processes.<p>**Company-Level Issue Examples** *- Digital Divide, ESG integration in financial services, Engineering Structural Integrity.*| |**GOAL10REDUCEDINEQUALITIES**|**Reduced Inequalities** - Goal 10 calls for reducing inequalities in income as well as those based on age, sex, disability, race, ethnicity, origin, religion, or economic or other status within a country. The Goal addresses inequalities among countries, including those related to representation, migration, and development assistance. It aims to empower and promote social, economic, and political inclusion of all. 
The Goal stresses regulation and monitoring of global financial markets and institutions.<p>**Company-Level Issue Examples** *- Responsible Lending, Worker Discrimination, CEO Pay Gap, Worker Pay Gap, Workplace Diversity and Inclusion.*| |**GOAL11SUSTAINABLECITIESANDCOMMUNITIES**|**Sustainable Cities and Communities** - Goal 11 seeks to ensure access for all to adequate, safe, and affordable housing and basic services, and green and public spaces, and to upgrade slums. It focuses on improving transportation, air quality and municipal and other waste management, and creating inclusive and sustainable urbanization through participatory urban planning. The Goal also supports safeguarding the world's cultural and natural heritage, while aiming to increase the number of cities and human settlements adopting and implementing integrated policies and plans towards inclusion, resource efficiency, mitigation and adaptation to climate change, and resilience to disasters.<p>**Company-Level Issue Examples** *- Air Pollution, Environmental Justice, Human Rights Violations, Affordable Housing.*| |**GOAL12RESPONSIBLECONSUMPTIONANDPRODUCTION**|**Responsible Consumption and Production** - Goal 12 aims to achieve the sustainable management and efficient use of natural resources through both the public and private sector. It specifically addresses global food waste in consumption, production, and distribution, sustainable tourism, waste and chemicals management. Goal 12 encourages sustainability reporting in the private sector, while in the public sector it encourages restructuring taxation and subsidies for fossil fuels and promoting sustainable public procurement practices.<p>**Company-Level Issue Examples** *- Sustainability Reporting, Circular Economy, Hazardous Waste Management, Waste Reduction.*| |**GOAL13CLIMATEACTION**|**Climate Action** - While Goal 13 is focused on actions by countries towards climate mitigation and adaptation, the private sector can also play a role in these areas. The goal seeks to strengthen resilience and adaptive capacity to climate-related hazards and natural disasters in all countries. It calls for integrating climate change measures, including those related to climate resilience and low GHG development, into national policies, strategies, and planning. It aims to improve education and awareness of climate change mitigation, adaptation, impact reduction, and early warning.<p>**Company-Level Issue Examples** *- GHG Emissions, Sustainable Transportation, Physical Climate Impacts.*| |**GOAL14LIFEBELOWWATER**|**Life Below Water** - Goal 14 focuses on preventing marine pollution of all kinds, particularly from land-based activities, and to minimize and address the impacts of ocean acidification. The Goal also aims to achieve sustainable yields in fisheries, through regulation of harvesting, controlling subsidies, and ending overfishing. It seeks to sustainably manage and protect marine and coastal ecosystems to avoid significant adverse impacts, including by strengthening their resilience, and take action for their restoration in order to achieve healthy and productive oceans.<p>**Company-Level Issue Examples** *- Impacts on water-related endangered species and habitats, Oil Spills, Seafood Sourcing.*| |**GOAL15LIFEONLAND**|**Life On Land** - Goal 15 seeks to ensure the conservation, restoration, and sustainable use of terrestrial and inland freshwater ecosystems and their services, in order to preserve biodiversity. 
It focuses specifically on sustainably managing forests, halting deforestation, restoring degraded lands and successfully combating desertification, reducing degraded natural habitats and ending biodiversity loss, with an emphasis on threatened species and invasive alien species.<p>**Company-Level Issue Examples** *- Impacts on land-related endangered species and habitats, Sustainable forestry practices and certifications, Project lifecycle environmental impacts.*| |**GOAL16PEACEJUSTICEANDSTRONGINSTITUTIONS**|**Peace, Justice, and Strong Institutions** - Goal 16 aims to significantly reduce all forms of violence, and also focuses specifically on reducing violence against children in the forms of abuse, exploitation, trafficking, and torture. It also aims to significantly reduce illicit financial and arms flows and to substantially reduce corruption and bribery in all their forms. The Goal also emphasizes effective and transparent institutions at all levels, inclusive and participatory decision-making, ensuring public access to information, and protection of fundamental freedoms.<p>**Company-Level Issue Examples** *- Tax Avoidance, Anti-Competitive Behavior, Cyber Security, Corruption, ESG Resolutions.*| . if omitted defaults to ["IMPACT"] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                    composed schema that is traveled through is
                    added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
# required up here when default value is not given
_path_to_item = kwargs.pop('_path_to_item', ())
self = super(OpenApiModel, cls).__new__(cls)
if 'value' in kwargs:
value = kwargs.pop('value')
elif args:
args = list(args)
value = args.pop(0)
else:
value = ["IMPACT"]
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
return self
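
# Illustrative sketch (hypothetical classes, not part of the generated client):
# how a visited-classes tuple breaks discriminator cycles during allOf
# composition, mirroring the Animal/Dog example in the docstring above.
class Animal:
    discriminator = {"Dog": "Dog"}

class Dog(Animal):
    discriminator = {}

REGISTRY = {"Dog": Dog}

def pick_class(cls, data, visited=()):
    # Once a composed class is in `visited`, its discriminator is skipped,
    # which is what stops the Animal -> Dog -> Animal round trip.
    if cls in visited or not cls.discriminator:
        return cls
    target = REGISTRY.get(cls.discriminator.get(data.get("petType")), cls)
    if target is cls:
        return cls
    return pick_class(target, data, visited + (cls,))

assert pick_class(Animal, {"petType": "Dog"}) is Dog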
|
python
|
#SPDX-License-Identifier: MIT
from augur.application import Application
from augur.augurplugin import AugurPlugin
from augur import logger
class FacadePlugin(AugurPlugin):
"""
This plugin serves as an example as to how to load plugins into Augur
"""
def __init__(self, augur):
self.__facade = None
# _augur will be set by the super init
super().__init__(augur)
def __call__(self):
from .facade import Facade
if self.__facade is None:
logger.debug('Initializing Facade')
self.__facade = Facade(
user=self._augur.read_config('Facade', 'user', 'AUGUR_FACADE_DB_USER', 'root'),
password=self._augur.read_config('Facade', 'pass', 'AUGUR_FACADE_DB_PASS', ''),
host=self._augur.read_config('Facade', 'host', 'AUGUR_FACADE_DB_HOST', '127.0.0.1'),
port=self._augur.read_config('Facade', 'port', 'AUGUR_FACADE_DB_PORT', '3306'),
dbname=self._augur.read_config('Facade', 'name', 'AUGUR_FACADE_DB_NAME', 'facade'),
projects=self._augur.read_config('Facade', 'projects', None, [])
)
return self.__facade
def create_routes(self, flask_app):
"""
Responsible for adding this plugin's data sources to the API
"""
from .routes import create_routes
create_routes(flask_app)
FacadePlugin.augur_plugin_meta = {
'name': 'facade',
'datasource': True
}
Application.register_plugin(FacadePlugin)
__all__ = ['FacadePlugin']
|
python
|
from .currencies import Currencies
from .rates import Rates
|
python
|
import pandas as pd
import numpy as np
import datetime
import time
import requests
from splinter import Browser
from bs4 import BeautifulSoup as bs
from selenium import webdriver
from flask import Flask, render_template
import pymongo
def Scraper():
executable_path = {'executable_path': 'chromedriver.exe'}
browser = Browser('chrome', **executable_path, headless=False)
url="https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest"
browser.visit(url)
html=browser.html
soup=bs(html,'html.parser')
print(soup.prettify())
news_title=soup.find('div',class_='content_title').get_text()
news_paragraph=soup.find('div',class_='article_teaser_body').get_text()
# **JPL FEATURED IMAGE**
executable_path = {'executable_path': 'chromedriver.exe'}
browser = Browser('chrome', **executable_path, headless=False)
url="https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
browser.visit(url)
html=browser.html
soup=bs(html,'html.parser')
images=soup.footer.find('a',class_='button fancybox')['data-fancybox-href']
url2='https://www.jpl.nasa.gov'
actual_url=url2+images
# **MARS WEATHER TWEETS**
executable_path={'executable_path':'chromedriver.exe'}
browser=Browser('chrome',**executable_path,headless=False)
url='https://twitter.com/marswxreport?lang=en'
browser.visit(url)
html=browser.html
soup=bs(html,'html.parser')
relevant_tweets=soup.find_all('p',class_='TweetTextSize TweetTextSize--normal js-tweet-text tweet-text')
weather_tweet=relevant_tweets[7].get_text()
# **MARS FACTS**
url='https://space-facts.com/mars/'
tables=pd.read_html(url)
    df = tables[0]
    df = df.rename(columns={0: 'Characteristic', 1: 'Value'})
    df = df.set_index('Characteristic')
    final_fact_table = df.to_html(classes='Striped-table')
# **MARS HEMISPHERES**
executable_path={'executable_path':'chromedriver.exe'}
browser=Browser('chrome',**executable_path,headless=False)
url='https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
browser.visit(url)
html=browser.html
soup=bs(html,'html.parser')
titles=[]
hemi_titles=soup.find_all('h3')
for i in hemi_titles:
x=i.get_text()
titles.append(x)
links=[]
for j in titles:
browser.visit(url)
browser.click_link_by_partial_text(j)
browser.click_link_by_text('Sample')
html=browser.html
soup=bs(html,'html.parser')
link=soup.find('div',class_='downloads').find('a')['href']
links.append(link)
# mars_hemis={}
# for m,k in zip(titles,links):
# mars_hemis[m]=k
mars_hemis=[]
for m,k in zip(titles,links):
mars_hemis.append({"title":m,"link":k})
data={"news_title":news_title,
"news_paragraph":news_paragraph,
"actual_url":actual_url,
"weather_tweet":weather_tweet,
"final_fact_table":final_fact_table,
"mars_hemis":mars_hemis}
# data={"Latest Mars News Headline":news_title,
# "Latest Mars News":news_paragraph,
# "Featured Image":image_url,
# "Latest Mars Weather Update":weather_tweet,
# "Mars Fun Facts":final_fact_table,
# "Mars Hemispheres":mars_hemis}
return data
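
# Usage sketch: persist the scraped dict with pymongo, which is imported above
# but otherwise unused. Connection string, database and collection names here
# are illustrative assumptions, not part of the original module.
if __name__ == "__main__":
    data = Scraper()
    client = pymongo.MongoClient("mongodb://localhost:27017")
    client.mars_db.mars_data.replace_one({}, data, upsert=True)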
|
python
|
import os, matplotlib
class FileSaver:
def save_figure(self, selected_month, plt, charttype):
DIR = (selected_month)
CHECK_FOLDER = os.path.isdir(f"output/{DIR}")
# If folder doesn't exist, then create it.
if not CHECK_FOLDER:
os.makedirs(f"output/{DIR}")
plt.savefig(f"output/{selected_month}/{charttype}", bbox_inches='tight')
print(f"Your {charttype} Chart has been saved!")
|
python
|
from django.apps import AppConfig
class books_repoConfig(AppConfig):
name = 'books_repo'
|
python
|
import discord
from discord.ext import commands, tasks
import os
import pickle
import config
import asyncio
import typing
import re
import traceback
from enum import Enum
from collections import OrderedDict
import datetime
import pytz
from apiclient import discovery
from google.oauth2 import service_account
class GPQ(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.scopes = ["https://www.googleapis.com/auth/spreadsheets"]
self.spreadsheetId = config.GPQ_SHEET_ID
self.sheet = None
self.gpqMessage = None
self.filePath = "{0}/gpq.pickle".format(os.getcwd())
self.fileLock = asyncio.Lock()
self.currentPageId = 0
self.currentPageName = None
self.nicknamePageId = 0
self.nicknamePageName = "Nicknames"
self.loadSheets()
self.loadMessage()
self.getActivePage()
self.getNicknamePage()
self.reactLoop.start()
@tasks.loop(seconds=300)
async def reactLoop(self):
if(self.gpqMessage):
try:
ch = self.bot.get_channel(self.gpqMessage["ch"])
msg = await ch.fetch_message(self.gpqMessage["id"])
for reaction in msg.reactions:
users = await reaction.users().flatten()
if(reaction.emoji == "✅"):
await self.updateSheet(users, self.Attendance.YES)
elif(reaction.emoji == "❌"):
await self.updateSheet(users, self.Attendance.NO)
elif(reaction.emoji == "❔"):
await self.updateSheet(users, self.Attendance.MAYBE)
else:
try:
await reaction.clear()
except:
pass ## Didn't have permissions probably
except discord.errors.NotFound as e:
pass
except Exception as e:
print(e)
traceback.print_exc()
@reactLoop.before_loop
async def beforeReactLoop(self):
print("reactLoop waiting...")
await self.bot.wait_until_ready()
@commands.command()
@commands.check_any(commands.has_guild_permissions(manage_guild=True), commands.is_owner())
async def postGpq(self, ctx, hours : int = 3, minutes : int = 45):
today = datetime.date.today()
friday = today + datetime.timedelta( (5-today.weekday()) % 7 )
dt = datetime.datetime.combine(friday, datetime.time())
gpqTime = dt + datetime.timedelta(hours = hours, minutes = minutes)
pstTZ = pytz.timezone('US/Pacific')
pst = pytz.utc.localize(gpqTime).astimezone(pstTZ)
cstTZ = pytz.timezone('US/Central')
cst = pytz.utc.localize(gpqTime).astimezone(cstTZ)
estTZ = pytz.timezone('US/Eastern')
est = pytz.utc.localize(gpqTime).astimezone(estTZ)
bstTZ = pytz.timezone('Europe/London')
bst = pytz.utc.localize(gpqTime).astimezone(bstTZ)
aestTZ = pytz.timezone('Australia/Melbourne')
aest = pytz.utc.localize(gpqTime).astimezone(aestTZ)
gpqText = """
<@&795087707046543370> This week's GPQ will be Friday Reset+{0}. Check below for your time zone and react if you can/can't make it.
{1} {3} PST / {4} CST / {5} EST
[ {2} {6} BST / {7} AEST ]
React with :white_check_mark: if you are able to make it, :x: if you are not, :grey_question:if you don't know/want to fill.
"""
plusTime = "{0}:{1}".format(hours, minutes)
d = int(pst.strftime("%d"))
d2 = int(bst.strftime("%d"))
suffix1 = 'th' if 11<=d<=13 else {1:'st',2:'nd',3:'rd'}.get(d%10, 'th')
weekday1 = pst.strftime("%A %B %d{0}".format(suffix1))
suffix2 = 'th' if 11<=d2<=13 else {1:'st',2:'nd',3:'rd'}.get(d2%10, 'th')
weekday2 = bst.strftime("%A %B %d{0}".format(suffix2))
time1 = pst.strftime("%I:%M %p")
time2 = cst.strftime("%I:%M %p")
time3 = est.strftime("%I:%M %p")
time4 = bst.strftime("%I:%M %p")
time5 = aest.strftime("%I:%M %p")
ch = self.bot.get_channel(794753791153012788)
msg = await ch.send(gpqText.format(plusTime, weekday1, weekday2, time1, time2, time3, time4, time5))
await self.gpq(ctx, msg, 0)
###<@&795087707046543370>
@commands.command()
@commands.check_any(commands.has_guild_permissions(manage_guild=True), commands.is_owner())
async def gpq(self, ctx, u : typing.Union[discord.Message, discord.TextChannel, int, None], c : typing.Optional[int]):
if(u):
msg = None
if(isinstance(u, discord.Message)):
msg = u
elif(isinstance(u, int) and c):
ch = self.bot.get_channel(c)
msg = await ch.fetch_message(u)
else:
print("Getting latest message in channel {0}".format(u.name))
messages = await u.history(limit=1).flatten()
msg = messages[0]
print("Setting GPQ message to {0}".format(msg.id))
await self.updateMessage(msg)
##Add Yes and no Reactions
await msg.add_reaction("✅")
await msg.add_reaction("❌")
await msg.add_reaction("❔")
await ctx.send("Tracking GPQ attendance")
else:
await ctx.send("Closing GPQ attendance")
await self.updateMessage(None)
## updates the gpq message id and saves to pickle, all under lock
async def updateMessage(self, msg):
async with self.fileLock:
self.gpqMessage = None
if(msg):
self.gpqMessage = { "id" : msg.id, "ch" : msg.channel.id }
with open(self.filePath, "wb") as f:
pickle.dump(self.gpqMessage, f)
## Not under lock because we only call this in init
def loadMessage(self):
if os.path.exists(self.filePath):
with open(self.filePath, "rb") as f:
self.gpqMessage = pickle.load(f)
## Load Sheets with service account
def loadSheets(self):
credFile = os.path.join(os.getcwd(), config.CRED_FILE)
creds = service_account.Credentials.from_service_account_file(credFile, scopes=self.scopes)
service = discovery.build('sheets', 'v4', credentials=creds)
self.sheet = service.spreadsheets()
## updates the sheet based on the latest reactions
async def updateSheet(self, users, attendance):
self.getActivePage()
self.getNicknamePage()
        nicks = OrderedDict([x for x in self.getNicknameMapping() if len(x) != 0])
arr = []
for user in users:
if(not user == self.bot.user):
                ## Not using setdefault() here because we want to avoid unnecessary interaction with the discord API
if not str(user.id) in nicks:
nicks[str(user.id)] = await self.getNickOrIgn(user.id)
arr.append([nicks[str(user.id)]])
self.updateNicknameMapping(list(nicks.items()))
self.updateAttendance(arr, attendance)
## Gets the Nickname page, and if it does not exist, creates it
def getNicknamePage(self):
if(not self.sheet == None):
metadata = self.sheet.get(spreadsheetId=self.spreadsheetId).execute()
sheets = metadata['sheets']
for sheet in sheets:
props = sheet["properties"]
if(props["title"] == self.nicknamePageName):
self.nicknamePageId = props["sheetId"]
return
## If we get here there was no nickname sheet
body = {
'requests' : [
{
'addSheet' : {
'properties' : {
'title' : self.nicknamePageName,
}
}
}
]
}
reply = self.sheet.batchUpdate(spreadsheetId=self.spreadsheetId, body=body).execute()
self.nicknamePageId = reply.get("replies")[0].get("addSheet").get("properties").get("sheetId")
body = {
"values" : [
["ID (DO NOT CHANGE)", "Nickname"]
]
}
r1 = "{0}!A1".format(self.nicknamePageName)
reply = self.sheet.values().update(spreadsheetId=self.spreadsheetId, range=r1, body=body, valueInputOption="RAW").execute()
## Gets the current nickname mapping
def getNicknameMapping(self):
if(not self.sheet == None):
## First row is headers
r1 = "{0}!A:B".format(self.nicknamePageName)
reply = self.sheet.values().get(spreadsheetId=self.spreadsheetId, range=r1).execute()
return(reply["values"])
## Clears the nickname mapping, then adds back the new mapping
def updateNicknameMapping(self, v):
if(not self.sheet == None):
r1 = "{0}!A:B".format(self.nicknamePageName)
body= {
"values" : v
}
reply = self.sheet.values().clear(spreadsheetId=self.spreadsheetId, range=r1).execute()
reply = self.sheet.values().update(spreadsheetId=self.spreadsheetId, range=r1, valueInputOption='RAW', body=body).execute()
## Gets active GPQ party page
def getActivePage(self):
if(not self.sheet == None):
metadata = self.sheet.get(spreadsheetId=self.spreadsheetId).execute()
sheets = metadata.get('sheets', '')
for sheet in sheets:
props = sheet["properties"]
if(props["index"] == 0):
self.currentPageId = props["sheetId"]
self.currentPageName = props["title"]
return
## Clears the attending column, then adds back everyone that is still attending
def updateAttendance(self, v, attendance):
if(not self.sheet == None):
r1 = "{0}!{1}3:{1}".format(self.currentPageName, attendance.value)
body = {
"values" : v
}
reply = self.sheet.values().clear(spreadsheetId=self.spreadsheetId, range=r1).execute()
reply = self.sheet.values().update(spreadsheetId = self.spreadsheetId, range=r1, valueInputOption="RAW", body=body).execute()
## display_name is not always accurate it appears (perhaps just in reaction lists)
## Also will extract text within parenthesis if available, assuming that is an IGN
async def getNickOrIgn(self, i):
ch = self.bot.get_channel(self.gpqMessage["ch"])
g = ch.guild
user = await g.fetch_member(i)
res = re.search(r"\((.*)\)", user.display_name)
if(res):
return(res.group(1))
else:
return(user.display_name)
## converts a number to the column string
def cs(self, n):
n += 1
s = ""
while n > 0:
n, r = divmod(n - 1, 26)
s = chr(65 + r) + s
return(s)
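    ## Worked examples (added): cs(0) -> "A", cs(25) -> "Z", cs(26) -> "AA";
    ## i.e. 0-indexed column numbers map to spreadsheet column letters.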
class Attendance(Enum):
YES = 'Z'
NO = 'AC'
MAYBE = 'AE'
def setup(bot):
bot.add_cog(GPQ(bot))
|
python
|
'''
Created on 16 de nov de 2020
@author: klaus
'''
import jsonlines
from folders import DATA_DIR, SUBMISSIONS_DIR
import os
from os import path
import pandas as pd
import numpy as np
import igraph as ig
from input.read_input import read_item_data, get_mappings, NUM_DOMS
from nn import domain_string_identifier
from nn.domain_string_identifier import predict_model
from input.create_ratio import get_ratio
def create_graph_domain():
"""
Creates graph linking (domain searched, domain bought)
"""
"""
Fetch data
"""
df = read_item_data()
df['item_id'] = df.index
dct_title = df['title'].to_dict()
dct_domain = df['domain_id'].to_dict()
dct_cat= df['category_id'].to_dict()
dct_price = df['price'].to_dict()
""" Ratio stuff """
from input.create_ratio import get_ratio
dct_ratio_dom = get_ratio(which='domain_id')
ratio_df = get_ratio(which='item_id',full=True)
ratio_df['popularity'] = 100.0*ratio_df['bought'] + ratio_df['searched']
dct_ratio_item_b = ratio_df['popularity'].to_dict()
"""
JSON
"""
check = lambda x: x <= np.round(413163*0.8).astype(np.int32)
DATA_PATH = path.join(DATA_DIR,'train_dataset.jl')
line_i = 0
"""
Create graph vertices
"""
g = ig.Graph()
counter, f_map_func, r_map_func = get_mappings()
num_items = df.shape[0]
for k in dct_title.keys():
g.add_vertex(value=k,deg=dct_ratio_item_b[k],domain_id=dct_domain[k],price=dct_price[k],cat='item_id')
""" ['item_id','domain_id','category_id','product_id'] """
for k in pd.unique(df['domain_id']):
g.add_vertex(value=k,cat='domain_id')
for k in pd.unique(df['category_id']):
g.add_vertex(value=k,cat='category_id')
for k in pd.unique(df['product_id']):
g.add_vertex(value=k,cat='product_id')
"""
Create edges
"""
E1 = []
E2 = []
with jsonlines.open(DATA_PATH) as reader:
for line_i, obj in enumerate(reader):
if check(line_i):
print(line_i)
L = []
for h in obj['user_history']:
if h['event_type'] == 'view':
#print("Viewed {}".format(dct[h['event_info']]))
L.append(h['event_info'])
elif h['event_type'] == 'search':
#print("Searched {}".format(h['event_info']))
pass
L_domain = [dct_domain[k] for k in L]
L_domain = pd.unique(L_domain)
L_cat = [dct_cat[k] for k in L]
L_cat = pd.unique(L_cat)
for i in range(len(L)):
E1.append(L[i])
E2.append(obj['item_bought'] )
E1 = f_map_func['item_id'](E1)
E2 = f_map_func['item_id'](E2)
E = pd.Series(list(zip(E1,E2))).value_counts()
g.add_edges(E.index)
g.es["weight"] = E.values
g.write_pickle(fname=path.join(DATA_DIR,'graph_item_to_item.pkl'))
def deg_matrix(W, pwr=1, flat=False, NA_replace_val=1.0):
    """Return a diagonal matrix with the column-wise sums of a matrix W, raised to the power `pwr`."""
    import scipy.sparse
    ws = W.sum(axis=0) if scipy.sparse.issparse(W) else np.sum(W, axis=0)
D_flat = np.reshape(np.asarray(ws),(-1,))
D_flat = np.power(D_flat,np.abs(pwr))
is_zero = (D_flat == 0)
if pwr < 0:
D_flat[np.logical_not(is_zero)] = np.reciprocal(D_flat[np.logical_not(is_zero)])
D_flat[is_zero] = NA_replace_val
if scipy.sparse.issparse(W):
if flat:
return D_flat
else:
row = np.asarray([i for i in range(W.shape[0])])
col = np.asarray([i for i in range(W.shape[0])])
coo = scipy.sparse.coo_matrix((D_flat, (row, col)), shape=(W.shape[0], W.shape[0]))
return coo.tocsr()
else:
if flat:
return D_flat
else:
return(np.diag(D_flat))
def fit_RNN():
import tensorflow as tf
from tensorflow import keras
import tf_geometric as tfg
"""
Create graph
"""
df = read_item_data()
NUM_ITEMS = read_item_data().shape[0]
NUM_FEATURES = 1
counter, f_map_func, r_map_func = get_mappings()
NUM_DOMS = pd.unique(df['domain_id']).shape[0]
""" Load graph """
G = ig.Graph.Read_Pickle(path.join(DATA_DIR,'graph_item_to_item.pkl'))
#weights = np.log(1+np.array(G.es["weight"]))
weights = np.array(G.es["weight"])
indices = np.array([ np.array(e.tuple) for e in G.es])
indices = np.transpose(indices)
""" Create sparse matrix W """
from scipy.sparse import coo_matrix
import scipy.sparse
row = indices[0,:]
col = indices[1,:]
W = coo_matrix((weights, (row, col)),shape=(NUM_ITEMS,NUM_ITEMS))
""" Normalize rows """
#W = deg_matrix(W,pwr=-1) @ W
W = W.transpose()
W = scipy.sparse.csr_matrix(W)
assert scipy.sparse.issparse(W)
@tf.function
def smooth_labels(labels, factor=0.001):
# smooth the labels
labels = tf.cast(labels,tf.float32)
labels *= (1 - factor)
labels += (factor / tf.cast(tf.shape(labels)[1],tf.float32))
# returned the smoothed labels
return labels
@tf.function
def compute_loss(labels,logits):
logits = tf.reshape(logits,(-1,NUM_ITEMS))
labels = tf.reshape(labels,(-1,NUM_ITEMS))
#logits = tf.nn.softmax(logits)
#print(logits)
logits = smooth_labels(logits)
labels = smooth_labels(labels)
        # standard cross-entropy: -sum(y_true * log(y_pred)) per sample
        losses = -tf.reduce_sum(labels * tf.math.log(logits), axis=1)
return tf.reduce_mean(losses)
@tf.function
def evaluate(labels,logits):
logits = tf.reshape(logits,(-1,NUM_ITEMS))
labels = tf.reshape(labels,(-1,NUM_ITEMS))
#logits = tf.nn.softmax(logits)
#print(logits)
logits = smooth_labels(logits)
labels = smooth_labels(labels)
acc = tf.metrics.categorical_accuracy(labels,logits)
return tf.reduce_mean(acc)
"""
Read data, yadda yadda
"""
from input.create_ratio import get_ratio
ratio_df = get_ratio(which='item_id',full=True)
ratio_df['popularity'] = 100.0*ratio_df['bought'] + ratio_df['searched']
dct_ratio_item_b = ratio_df['popularity'].to_dict()
dct = df['title'].to_dict()
dct_domain = df['domain_id'].to_dict()
dct_cat = df['category_id'].to_dict()
dct_price = df['price'].to_dict()
""" Ratio stuff """
from input.create_ratio import get_ratio
category_df = get_ratio(which='category_id',full=True)
domain_df = get_ratio(which='domain_id', full = True)
feat_1, feat_2, feat_3 = domain_df['searched'].to_dict(), domain_df['bought'].to_dict(), domain_df['rat'].to_dict()
feat_1,feat_2,feat_3 = [ [X[dct_domain[k]] for k in df.index] for X in [feat_1,feat_2,feat_3]]
feat_1_1, feat_2_1, feat_3_1 = category_df['searched'].to_dict(), category_df['bought'].to_dict(), category_df['rat'].to_dict()
feat_1_1,feat_2_1,feat_3_1 = [ [X[dct_cat[k]] for k in df.index] for X in [feat_1_1,feat_2_1,feat_3_1]]
def standardize(x):
return (x - np.min(x)) / (np.max(x)+1e-06 - np.min(x))
feat_1, feat_2, feat_3 = [standardize(x) for x in [feat_1,feat_2,feat_3]]
feat_1_1, feat_2_1, feat_3_1 = [standardize(x) for x in [feat_1_1,feat_2_1,feat_3_1]]
del df
del domain_df
del category_df
del G
#dom_ratios = np.array([dct_ratio_dom[k] for k in pd.unique(df['domain_id'].values)])
#dom_ratios = (dom_ratios - np.mean(dom_ratios)) / np.std(dom_ratios)
from nn.domain_string_identifier import load_model
domain_prediction_model = load_model()
def my_generator(mode='train'):
if mode == 'train':
check = lambda x: x <= np.round(413163*0.8).astype(np.int32)
elif mode == 'val':
check = lambda x: x > np.round(413163*0.8).astype(np.int32)
else:
check = lambda x: True
DATA_PATH = path.join(DATA_DIR,'test_dataset.jl' if mode == 'test' else 'train_dataset.jl')
print("Reading....")
X = np.zeros((NUM_ITEMS,10)).astype(np.float32)
with jsonlines.open(DATA_PATH) as reader:
for line_i, obj in enumerate(reader):
if check(line_i):
L = []
S = []
C =[]
IDS = []
for h in obj['user_history']:
if h['event_type'] == 'view':
L.append(dct_domain[h['event_info']])
C.append(dct_cat[h['event_info']])
IDS.append(h['event_info'])
elif h['event_type'] == 'search':
S.append(h['event_info'])
if obj['item_bought'] in L:
continue
L = f_map_func['domain_id'](L)
C = f_map_func['category_id'](C)
IDS_map = f_map_func['item_id'](IDS)
""" Adjust graph """
Y = np.zeros((NUM_ITEMS,1)).astype(np.float32)
"""
X[:,0] = feat_1
X[:,1] = feat_2
X[:,2] = feat_3
X[:,6] = feat_1_1
X[:,7] = feat_2_1
X[:,8] = feat_3_1
#if len(S) > 0:
# X[:,8] = np.mean(predict_model(domain_prediction_model,S,return_numeric=True),axis=0)
"""
target_id = f_map_func['item_id']( [ obj['item_bought'] ] )[0]
if not mode == 'test':
Y[ target_id,0 ] = 1.0
"""
for i,k in enumerate(IDS_map):
X[k,3] += 1
X[k,4] += dct_ratio_item_b[IDS[i]]/len(C)
X[k,5] = dct_price[IDS[i]]
#W[target_id,:] = (np.clip(np.array(W[target_id,:].todense())-1,a_min=0.0,a_max=None))
X[:,9] = np.reshape(np.asarray(W @ X[:,3]),(-1,))
X[:,9] = X[:,8] * X[:,2]
#X[:,:8] = 0
for i in range(10):
X[:,i] = (X[:,i] - np.min(X[:,i])) / (1e-06+ np.max(X[:,i]) - np.min(X[:,i]))
"""
                    #X = X -0.5
                    yield X, Y
                    # Clear the target label only after yielding, so the one-hot
                    # target reaches the consumer intact.
                    if not mode == 'test':
                        Y[target_id, 0] = 0.0
"""
Optimize
"""
BS = 2
step = 0
def batch_generator(mode, loop =True,batch_size=BS):
BATCH_X = []
BATCH_Y = []
i = 0
while True:
for x,y in my_generator(mode):
BATCH_X.append(x[None,:,:])
BATCH_Y.append(y[None,:,:])
i+= 1
if i % batch_size == 0:
yield np.concatenate(BATCH_X,axis=0), np.concatenate(BATCH_Y,axis=0)
BATCH_X = []
BATCH_Y = []
i = 0
if loop == False:
yield np.concatenate(BATCH_X,axis=0), np.concatenate(BATCH_Y,axis=0)
break
"""
Define train_model
"""
import tensorflow.keras as keras
import tensorflow.keras.layers as layers
inp_x = keras.Input((NUM_ITEMS,10))
x = layers.Dense(32,activation='relu')(inp_x)
x = layers.Dense(32,activation='relu')(x)
x = layers.Dense(1)(x)
x = layers.Flatten()(x)
x = layers.Softmax(axis=-1)(x)
train_model = keras.Model(inputs=[inp_x],outputs=[x])
print(train_model.summary())
lr_schedule = keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=0.5*1e-2,
decay_steps=1000,
decay_rate=0.9)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.2*1e-2)
train_model.compile(optimizer=optimizer,loss=compute_loss,metrics=[evaluate])
from functools import partial
from input.read_input import TRAIN_LINES
train_model.fit_generator(batch_generator('train',True),
steps_per_epoch=TRAIN_LINES//BS,
epochs=1
)
ITEM_PATH = path.join(DATA_DIR,'train_model','item_classifier.h5')
train_model.save_weights(ITEM_PATH)
def predict(mode):
PREDS = []
CONFS = []
NUM_SELECT = 10
batch_size = 1
for batch_id, X in enumerate(batch_generator(mode,batch_size=batch_size,loop=False)):
x = X[0]
print("Predicting {} - Batch {}".format(mode,batch_id))
pred = train_model.predict_on_batch(x)
if batch_id == 0:
print(pred)
PREDS.append(tf.argsort(pred,axis=-1)[:,-NUM_SELECT:])
CONFS.append(tf.sort(pred,axis=-1)[:,-NUM_SELECT:])
PREDS = np.concatenate(PREDS,axis=0)
CONFS = np.concatenate(CONFS,axis=0)
#PREDS = np.concatenate([PREDS,CONFS],axis=1)
cols = ['pred_{}'.format(k) for k in range(NUM_SELECT)]
fname = os.path.join(DATA_DIR,'item_pred_{}.csv'.format(mode))
pd.DataFrame(PREDS,index=range(PREDS.shape[0]),columns=cols).to_csv(fname)
predict('train')
predict('val')
predict('test')
#############################################################################################################################################################
if __name__ == "__main__":
#create_graph_domain()
fit_RNN()
|
python
|
import sys
import torchvision
import torch
from torch.utils.data import Dataset
from .episodic_dataset import EpisodicDataset, FewShotSampler
import json
import os
import numpy as np
import cv2
import pickle as pkl
# Inherit order is important, FewShotDataset constructor is prioritary
class EpisodicTieredImagenet(EpisodicDataset):
tasks_type = "clss"
name = "tiered-imagenet"
split_paths = {"train":"train", "test":"test", "valid": "val"}
c = 3
h = 84
w = 84
def __init__(self, data_root, split, sampler, size, transforms):
self.data_root = data_root
self.split = split
img_path = os.path.join(self.data_root, "%s_images_png.pkl" %(split))
label_path = os.path.join(self.data_root, "%s_labels.pkl" %(split))
with open(img_path, 'rb') as infile:
self.features = pkl.load(infile, encoding="bytes")
with open(label_path, 'rb') as infile:
labels = pkl.load(infile, encoding="bytes")[b'label_specific']
super().__init__(labels, sampler, size, transforms)
    def sample_images(self, indices):
        # cv2.imdecode returns BGR; the ::-1 slice flips channels to RGB.
        return [cv2.imdecode(self.features[i], cv2.IMREAD_COLOR)[:, :, ::-1] for i in indices]
def __iter__(self):
return super().__iter__()
if __name__ == '__main__':
import sys
from torch.utils.data import DataLoader
from tools.plot_episode import plot_episode
sampler = FewShotSampler(5, 5, 15, 0)
transforms = torchvision.transforms.Compose([torchvision.transforms.ToPILImage(),
torchvision.transforms.Resize((84,84)),
torchvision.transforms.ToTensor(),
])
    # data_root is a placeholder; point it at the tiered-imagenet pickle directory.
    dataset = EpisodicTieredImagenet("./data/tiered-imagenet", "train", sampler, 10, transforms)
loader = DataLoader(dataset, batch_size=1, collate_fn=lambda x: x)
for batch in loader:
plot_episode(batch[0], classes_first=False)
|
python
|
# Test the difference in computation time between a set of functions that calculate all prime numbers in a given range
import time
import math
import matplotlib.pyplot as plt
# Methods for finding all prime numbers up to max value 'n'
# Slow method using a basic check
def slow_method(n: int) -> list[int]:
    if n <= 2:  # no primes below 2
        return []
    results = [2]
for i in range(3, n, 2):
is_prime = True
for j in range(3, i):
if i % j == 0:
is_prime = False
break
if is_prime:
results.append(i)
return results
# Fast method using squares
def fast_method(n: int) -> list[int]:
    if n <= 2:  # no primes below 2
        return []
    results = [2]
for i in range(3, n, 2):
is_prime = True
for j in range(3, int(math.sqrt(i) + 1)):
if i % j == 0:
is_prime = False
break
if is_prime:
results.append(i)
return results
# Faster method using previous results
def faster_method(n: int) -> list[int]:
    if n <= 2:  # no primes below 2
        return []
    results = [2]
for i in range(3, n, 2):
is_prime = True
limit = math.sqrt(i)
for j in results:
if j > limit:
break
if i % j == 0:
is_prime = False
break
if is_prime:
results.append(i)
return results
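
# Added sketch for comparison (not one of the benchmarked originals): the Sieve
# of Eratosthenes avoids per-candidate trial division entirely. Append it to the
# `methods` list in main() to include it in the timing plot.
def sieve_method(n: int) -> list[int]:
    if n <= 2:
        return []
    is_prime = [True] * n
    is_prime[0] = is_prime[1] = False
    for i in range(2, int(math.sqrt(n)) + 1):
        if is_prime[i]:
            # every multiple of i from i*i upward is composite
            for j in range(i * i, n, i):
                is_prime[j] = False
    return [i for i, p in enumerate(is_prime) if p]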
def main():
# Graph the methods against each other to show the speed over time
# Add methods to test into the 'methods' list
methods = [fast_method, faster_method]
times = [[] for method in methods]
# Test all 'n' values from 0 to 100000 in increments of 1000
n_values = range(0, 100000, 1000)
for n in n_values:
# Test each method and record the elapsed time
for method in methods:
start_time = time.perf_counter()
results = method(n)
elapsed_time = time.perf_counter() - start_time
times[methods.index(method)].append(elapsed_time)
print(f"Method '{method.__name__}' found \t{len(results)} primes in \t{elapsed_time} seconds")
# Graph elapsed time for each method
for method in methods:
plt.plot(n_values, times[methods.index(method)], label=method.__name__)
plt.legend()
plt.xlabel("n")
plt.ylabel("seconds")
plt.show()
if __name__ == '__main__':
main()
|
python
|
from typing import List, Optional
from fastapi.datastructures import UploadFile
from pydantic.main import BaseModel
from enum import Enum
class TextSpan(BaseModel):
text: str
start: int
end: int
class NamedEntity(TextSpan):
label: str # The (predicted) label of the text span
definition: Optional[str] = None # A short description
class NounChunk(TextSpan):
definition: Optional[str] = None
class Lemma(BaseModel):
text: str
is_stopword: Optional[bool] = None
class Sentence(TextSpan):
text: str
lemmatized_text: Optional[str]
score: Optional[float] = None
#
# Request Models (= schema for API requests)
#
class BaseRequest(BaseModel):
verbose: Optional[bool] = None
metadata: Optional[dict] = None
class NLPBaseRequest(BaseRequest):
language: Optional[str] = None
model: Optional[str] = None
text: str
class SentenceRequest(NLPBaseRequest):
pass
class AnalyzeRequest(NLPBaseRequest):
num_sentences: Optional[int] = 3
class Config:
schema_extra = {
"example": {
"text": "Breast cancer most commonly presents as a lump that feels different from the rest of the breast tissue.\
More than 80% of cases are discovered when a person detects such a lump with the fingertips.",
}
}
#
# Response Models (= schema for API responses)
#
class BaseResponse(BaseModel):
error: Optional[str] = None
class TranslateResponse(BaseResponse):
"""
Response for a text translation request
"""
from_language: str
from_text: str
to_language: str
to_text: str
class ExtractResponse(BaseResponse):
sourceUrl: str
text: str
document_class: str
language: str
mediatype: str
metadata: Optional[dict] = None
class NLPBaseResponse(BaseResponse):
language: Optional[str]
model: Optional[str]
text: Optional[str]
class AnalyzeResponse(NLPBaseResponse):
entities: Optional[List[NamedEntity]] = None
health_entities: Optional[List[NamedEntity]] = None
# entities_text: Optional[List[str]]
noun_chunks: Optional[List[NounChunk]] = None
# noun_chunks_text: Optional[List[str]]
sentences: Optional[List[Sentence]] = None
top_sentences: Optional[List[Sentence]] = None
# lemma: Optional[List[Lemma]]
# lemmatized_text: Optional[str]
class Config:
schema_extra = {
"example": {
"language": "en",
"model": "core_web_sm",
"text": "Hello again! The quick brown fox jumps over the lazy dog. A second time !",
"entities": [],
"health_entities": [{"text": "..."}],
"sentences": [{"text": "Hello again!"}],
}
}
class ImmersiveReaderTokenResponse(BaseResponse):
token: str
subdomain: str
class TermDefinition(BaseModel):
id: str
term: str
type: str
text: str
class DefinitionResponse(BaseResponse):
term: str
definitions: Optional[List[TermDefinition]]
class RenderRequest(AnalyzeResponse):
"""
    Request to render the result of a previous "analyze" response into an output format, such as HTML/...
"""
format: Optional[str] = "html"
class Page(BaseModel):
"""
A related webpage
- **title** Title of the web page
- **text** Text (usually a summary)
- **url** The web address
"""
title: str
text: str
url: str
class Image(BaseModel):
"""
An Image Url
- **text** Short text/ description of the image
- **url** The web address
- **hostPageUrl** The web address of the webpage that hosts the image
    - **webSearchUrl** The web address to do a Microsoft Bing "similarity" search
"""
text: str
url: str
hostPageUrl: str
thumbnailUrl: str
webSearchUrl: str
class Video(BaseModel):
"""
An Video Url
- **text** Short text/ description of the video
- **url** The web address
- **hostPageUrl** The web address of the webpage that hosts the video
    - **webSearchUrl** The web address to do a Microsoft Bing "similarity" search
"""
text: str
url: str
hostPageUrl: str
thumbnailUrl: str
webSearchUrl: str
class SearchResponse(BaseModel):
"""
Search responses, contain pages, images and videos
- **pages** List of (web) pages
- **images** List of images
- **videos** List of videos
"""
pages: Optional[List[Page]]
images: Optional[List[Image]]
videos: Optional[List[Video]]
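
if __name__ == "__main__":
    # Minimal sketch (illustrative, assuming pydantic v1): models validate on
    # construction, so a request can be built and serialized directly.
    req = AnalyzeRequest(text="The quick brown fox jumps over the lazy dog.")
    print(req.json(exclude_none=True))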
|
python
|
import numpy as np
from sklearn.metrics import confusion_matrix
def test_data_index(true_label,pred_label,class_num):
M = 0
C = np.zeros((class_num+1,class_num+1))
c1 = confusion_matrix(true_label, pred_label)
C[0:class_num,0:class_num] = c1
C[0:class_num,class_num] = np.sum(c1,axis=1)
C[class_num,0:class_num] = np.sum(c1,axis=0)
N = np.sum(np.sum(c1,axis=1))
    C[class_num,class_num] = N  # total number of pixels
OA = np.trace(C[0:class_num,0:class_num])/N
every_class = np.zeros((class_num+3,))
for i in range(class_num):
acc = C[i,i]/C[i,class_num]
M = M + C[class_num,i] * C[i,class_num]
every_class[i] = acc
kappa = (N * np.trace(C[0:class_num,0:class_num]) - M) / (N*N - M)
AA = np.sum(every_class,axis=0)/class_num
every_class[class_num] = OA
every_class[class_num+1] = AA
every_class[class_num+2] = kappa
return every_class, C
def caculate_index(true_label,pred_label,class_num):
our_label = pred_label
confusion_matrix = np.zeros((class_num+2,class_num+1))
for i in range(our_label.shape[0]):
for j in range(our_label.shape[1]):
x = our_label[i,j]
y = true_label[i,j]
if y==0:
continue
confusion_matrix[int(x)-1,int(y)-1] += 1
confusion_matrix[class_num,:]=np.sum(confusion_matrix[0:class_num,:], axis=0)
confusion_matrix[0:class_num,class_num]=np.sum(confusion_matrix[0:class_num,0:class_num], axis=1)
confusion_matrix[class_num+1,0]= confusion_matrix[0,0]/confusion_matrix[class_num,0]
confusion_matrix[class_num+1,1]= confusion_matrix[1,1]/confusion_matrix[class_num,1]
confusion_matrix[class_num+1,2]= confusion_matrix[2,2]/confusion_matrix[class_num,2]
M = 0
N = our_label.shape[0] * our_label.shape[1] - np.sum(true_label==0)
for i in range(class_num):
M = M + confusion_matrix[class_num,i] * confusion_matrix[i,class_num]
kappa = (N * np.trace(confusion_matrix[0:class_num,0:class_num]) - M) / (N*N - M)
every_class = confusion_matrix[class_num+1,0:class_num]
OA = np.trace(confusion_matrix[0:class_num,0:class_num])/N
AA = np.sum(confusion_matrix[class_num+1,0:class_num])/class_num
return OA,AA,kappa,every_class
def generate_image(label):
    classification_result_img = np.zeros((label.shape[0], label.shape[1], 3))
    # Map class 1 -> red, class 2 -> green, class 3 -> blue.
    a, b = np.where(label == 1)
    classification_result_img[a, b, 0] = 255
    a, b = np.where(label == 2)
    classification_result_img[a, b, 1] = 255
    a, b = np.where(label == 3)
    classification_result_img[a, b, 2] = 255
    return classification_result_img
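
if __name__ == "__main__":
    # Minimal sketch (toy data, illustrative only): score a 3-class prediction.
    y_true = np.array([0, 0, 1, 1, 2, 2])
    y_pred = np.array([0, 1, 1, 1, 2, 0])
    every_class, C = test_data_index(y_true, y_pred, class_num=3)
    print(every_class)  # per-class accuracy, then OA, AA, kappa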
|
python
|
# Check whether the string contains non-overlapping occurrences of "AB" and "BA",
# in either order. Two passes: first look for "AB" then a later "BA", then reverse.
l = input()
n = len(l)
f = 0      # set to 1 once the first pattern of the current pass is found
prev = 0   # the only start index at which the second pattern would overlap the first
found = 0
for i in range(n-1):
if l[i:i+2]=='AB':
if f==0:
prev=i+1
f=1
else:
continue
elif l[i:i+2]=='BA':
if f==1:
if i!=prev:
found=1
else:
continue
prev=0
f=0
for i in range(n-1):
if l[i:i+2]=='BA':
if f==0:
prev=i+1
f=1
else:
continue
elif l[i:i+2]=='AB':
if f==1:
if i!=prev:
found=1
else:
continue
if found:
print('YES')
else:
print('NO')
|
python
|
"""
A Python library for generating Atom feeds for podcasts.
Uses the specification described at
http://www.atomenabled.org/developers/syndication/
"""
from xml.etree import ElementTree as ET
from xml.dom import minidom
import copy
from datetime import datetime, timedelta
from uuid import UUID
def parse_datetime(dt):
"""Return RFC 3339 compliant datetime."""
return dt.isoformat() + 'Z' if hasattr(dt, 'isoformat') else dt
def parse_timedelta(td):
"""Return time offset as HH:MM:SS.sss."""
if isinstance(td, timedelta):
microseconds_per_millisecond = 1000
seconds_per_minute = 60
minutes_per_hour = 60
seconds_per_hour = seconds_per_minute * minutes_per_hour
hours_per_day = 24
hours = int(td.seconds / seconds_per_hour) + hours_per_day * td.days
minutes = int(td.seconds / seconds_per_minute) % minutes_per_hour
seconds = td.seconds % seconds_per_minute
milliseconds = int(td.microseconds / microseconds_per_millisecond)
return '%02i:%02i:%02i.%03i' % (hours, minutes, seconds, milliseconds)
else:
return td
def parse_id(id):
"""If id is a UUID, prefix it with "urn:uuid:"."""
if isinstance(id, UUID):
return 'urn:uuid:%s' % id
else:
return id
class Element(ET.Element):
"""Base class of all elements which are added to the feed."""
    def __init__(self, *args, **kwargs):
        # Iterate over a copy of the keys: deleting from kwargs while iterating
        # over kwargs.items() raises RuntimeError on Python 3.
        for kw in [kw for kw, kwval in kwargs.items() if kwval is None]:
            del kwargs[kw]
        super(Element, self).__init__(*args, **kwargs)
self.subelement_names = []
def tostring(self, pretty=False):
rough_string = ET.tostring(self.tree(), 'utf-8')
if not pretty:
return '<?xml version="1.0" encoding="utf-8"?>' + rough_string
else:
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent=' ', encoding='utf-8')
def tree(self):
el = copy.copy(self)
for subelement_name in self.subelement_names:
if subelement_name in dir(self):
subelement = self.__getattribute__(subelement_name)
if subelement is None:
continue
if isinstance(subelement, ElementList):
el.extend(subelement.tree())
elif isinstance(subelement, list):
for subelement_item in subelement:
if subelement_item is None:
continue
el.extend([subelement_item.tree()])
else:
el.extend([subelement.tree()])
return el
def add_custom_element(self, tag, content=None, **kwargs):
el = Element(tag, **kwargs)
if content is not None:
el.text = content
self.__setattr__(tag, el)
self.subelement_names.append(tag)
class ElementList(list):
"""A list of elements. Intended for subclassing to overwrite tree method."""
def __init__(self, values=None):
if values is None:
values = []
super(ElementList, self).__init__()
for value in values:
self.append(value)
def tree(self):
return self.tree_elements()
def tree_elements(self):
return [el.tree() for el in self]
class Person(Element):
"""
<author> and <contributor> describe a person, corporation, or
similar entity. It has one required element, name, and two optional
elements: uri, email.
* name: conveys a human-readable name for the person.
* uri: contains a home page for the person.
* email: contains an email address for the person.
"""
def __init__(self, tag, name, email=None, uri=None):
super(Person, self).__init__(tag)
self.subelement_names = ['name', 'email', 'uri']
# Required
self.name = Element('name')
self.name.text = name
# Optional
if email is not None:
self.email = Element('email')
self.email.text = email
if uri is not None:
self.uri = Element('uri')
self.uri.text = uri
class Author(Person):
"""Creates a <author> element. See Person class for more information."""
def __init__(self, name, email=None, uri=None):
super(Author, self).__init__('author', name, email, uri)
class Contributor(Person):
"""Creates a <contributor> element. See Person class for more information."""
def __init__(self, name, email=None, uri=None):
super(Contributor, self).__init__('contributor', name, email, uri)
class Link(Element):
"""
Creates a <link> element to a web page or resource. Has one required
attribute, href, and five optional attributes: rel, type, hreflang,
title, and length.
Required:
* href: the URI of the referenced resource (typically a Web page)
Optional:
* rel: a single link relationship type. It can be a full URI (see
extensibility), or one of the following predefined values
(default=alternate):
- alternate: an alternate representation of the entry or feed,
for example a permalink to the html version of the entry, or
the front page of the weblog.
- enclosure: a related resource which is potentially large in
size and might require special handling, for example an audio
or video recording.
          - related: a document related to the entry or feed.
          - self: the feed itself.
          - via: the source of the information provided in the entry.
        * type: the media type of the resource.
* hreflang: the language of the referenced resource.
* title: human readable information about the link, typically for
display purposes.
* length: the length of the resource, in bytes.
"""
def __init__(self, href, rel=None, type=None, hreflang=None,
title=None, length=None):
super(Link, self).__init__('link', href=href, rel=rel, type=type,
hreflang=hreflang, title=title,
length=length)
class Category(Element):
"""
Creates a <category> element. Has one required attribute, term, and
two optional attributes, scheme and label.
Required:
* term: identifies the category.
Optional:
* scheme: identifies the categorization scheme via a URI.
* label: provides a human-readable label for display.
"""
def __init__(self, term, scheme=None, label=None):
super(Category, self).__init__('category', term=term,
scheme=scheme, label=label)
class Chapter(Element):
"""
Podcast chapters as described at http://podlove.org/simple-chapters/
Required:
* start: a single point in time relative to the beginning of the
media file.
* title: the name of the chapter.
Optional:
* href: a hypertext reference as an extension of the title that
refers to a resource that provides related information.
* image: an URL pointing to an image to be associated with the
chapter.
"""
def __init__(self, start, title, href=None, image=None):
start = parse_timedelta(start)
super(Chapter, self).__init__('psc:chapter', start=start,
title=title, href=href,
image=image)
class ChapterList(ElementList):
"""Chapters should be encapsulated in this class."""
def __init__(self, values=None):
if values is None:
values = []
super(ChapterList, self).__init__(values)
def tree(self):
if len(self) == 0:
return []
el = Element('psc:chapters', version="1.1")
el.set('xmlns:psc', 'http://podlove.org/simple-chapters')
el.extend(self.tree_elements())
return [el]
class Entry(Element):
"""Generates an entry element to be added to the elements array
in the Feed class.
Required:
* id: Identifies the entry using a universally unique and
permanent URI. Two entries in a feed can have the same value
for id if they represent the same entry at different points
in time.
* title: Contains a human readable title for the entry. This
value should not be blank.
* updated: Indicates the last time the entry was modified in a
significant way. This value need not change after a typo is
fixed, only after a substantial modification. Generally,
different entries in a feed will have different updated
timestamps.
Recommended:
* author: Names one author of the entry. An entry may have
multiple authors. An entry must contain at least one author
element unless there is an author element in the enclosing
feed, or there is an author element in the enclosed source
element. See Author class.
* content: Contains or links to the complete content of the
entry. Content must be provided if there is no alternate link,
and should be provided if there is no summary.
* link: Identifies a related Web page. The type of relation is
defined by the rel attribute. An entry is limited to one
alternate per type and hreflang. An entry must contain an
alternate link if there is no content element. See Link class.
* summary: Conveys a short summary, abstract, or excerpt of the
entry. Summary should be provided if there either is no content
provided for the entry, or that content is not inline (i.e.,
contains a src attribute), or if the content is encoded in
base64.
Optional:
* category: Specifies a category that the entry belongs to. A
entry may have multiple category elements. See Category class.
* contributor: Names one contributor to the entry. An entry may
have multiple contributor elements. See Contributor class.
* published: Contains the time of the initial creation or first
availability of the entry.
* source: If an entry is copied from one feed into another feed,
then the source feed's metadata (all child elements of feed
other than the entry elements) should be preserved if the
source feed contains any of the child elements author,
contributor, rights, or category and those child elements are
not present in the source entry.
* rights: Conveys information about rights, e.g. copyrights, held
in and over the entry.
* chapters: Podcast chapters as described at
http://podlove.org/simple-chapters/. See Chapter class.
"""
def __init__(self, id, title, updated, authors=None, content=None,
links=None, summary=None, categories=None, contributors=None,
published=None, source=None, rights=None, chapters=None):
if authors is None:
authors = []
if links is None:
links = []
if categories is None:
categories = []
if contributors is None:
contributors = []
if chapters is None:
chapters = []
super(Entry, self).__init__('entry')
self.subelement_names = [
'id',
'title',
'updated',
'authors',
'content',
'links',
'summary',
'categories',
'contributors',
'published',
'source',
'rights',
'chapters'
]
self.id = Element('id')
self.id.text = parse_id(id)
self.title = Element('title')
self.title.text = title
self.updated = Element('updated')
self.updated.text = parse_datetime(updated)
self.authors = authors
if content:
self.content = Element('content')
self.content.text = content
self.links = links
if summary:
self.summary = Element('summary')
self.summary.text = summary
self.categories = categories
self.contributors = contributors
if published is not None:
self.published = Element('published')
self.published.text = parse_datetime(published)
self.source = source # Should be an Entry
if rights:
self.rights = Element('rights')
self.rights.text = rights
self.chapters = ChapterList(chapters)
class Feed(Element):
"""Generates an Atom feed based on the specification described at
http://www.atomenabled.org/developers/syndication/
Required elements:
* id: Identifies the feed using a universally unique and
permanent URI. If you have a long-term, renewable lease on
your Internet domain name, then you can feel free to use
your website's address.
* title: Contains a human readable title for the feed. Often the
same as the title of the associated website. This value should
not be blank.
* updated: Indicates the last time the feed was modified in a
significant way.
Recommended elements:
* author: Names one author of the feed. A feed may have multiple
author elements. A feed must contain at least one author
element unless all of the entry elements contain at least one
author element. See Author class.
* link: Identifies a related Web page. The type of relation is
defined by the rel attribute. A feed is limited to one
alternate per type and hreflang. A feed should contain a link
back to the feed itself. See Link class.
Optional elements:
* category: Specifies a category that the feed belongs to. A
feed may have multiple category elements. See Category class.
* contributor: Names one contributor to the feed. An feed may
have multiple contributor elements.
* generator: Identifies the software used to generate the feed,
for debugging and other purposes. Both the uri and version
attributes are optional.
* icon: Identifies a small image which provides iconic visual
identification for the feed. Icons should be square.
* logo: Identifies a larger image which provides visual
identification for the feed. Images should be twice as wide as
they are tall.
* rights: Conveys information about rights, e.g. copyrights, held
in and over the feed.
* subtitle: Contains a human-readable description or subtitle for
the feed.
"""
def __init__(self, id, title, updated=None, authors=None, links=None,
categories=None, contributors=None, generator=None,
icon=None, logo=None, rights=None, subtitle=None, entries=None,
**kwargs):
if authors is None:
authors = []
if links is None:
links = []
if categories is None:
categories = []
if contributors is None:
contributors = []
if entries is None:
entries = []
if 'xmlns' not in kwargs:
kwargs['xmlns'] = 'http://www.w3.org/2005/Atom'
super(Feed, self).__init__('feed', **kwargs)
if updated is None:
updated = datetime.now()
self.subelement_names = [
'id',
'title',
'updated',
'authors',
'links',
'categories',
'contributors',
'generator',
'icon',
'logo',
'rights',
'subtitle',
'entries'
]
# Required
self.id = Element('id')
self.id.text = parse_id(id)
self.title = Element('title')
self.title.text = title
self.updated = Element('updated')
self.updated.text = parse_datetime(updated)
# Recommended
self.authors = authors
self.links = links
# Optional
self.categories = categories
self.contributors = contributors # List of Category objects
if generator:
self.generator = Element('generator')
self.generator.text = generator
if icon:
self.icon = Element('icon')
self.icon.text = icon
if logo:
self.logo = Element('logo')
self.logo.text = logo
if rights:
self.rights = Element('rights')
self.rights.text = rights
if subtitle:
self.subtitle = Element('subtitle')
self.subtitle.text = subtitle
self.entries = entries
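
if __name__ == "__main__":
    # Minimal usage sketch (names and URLs are illustrative): build and print a
    # one-entry feed with the classes defined above.
    feed = Feed(id="http://example.com/feed", title="Example Feed",
                authors=[Author("Jane Doe")],
                links=[Link("http://example.com/feed", rel="self")])
    feed.entries.append(Entry(id="http://example.com/1", title="First post",
                              updated=datetime.now(), summary="Hello, world."))
    print(feed.tostring(pretty=True))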
|
python
|
from datetime import timedelta
import json
import sys
import click
from humanize import naturaldelta
from . import __version__
from .click_timedelta import TIME_DELTA
from .watson_overtime import watson_overtime
def _build_work_diff_msg(diff: timedelta) -> str:
msg = f"You are {naturaldelta(diff)} "
if diff < timedelta(0):
msg += "behind"
else:
msg += "ahead of"
msg += " schedule."
return msg
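# Worked examples (added, assuming humanize renders 2-hour deltas as "2 hours"):
#   _build_work_diff_msg(timedelta(hours=-2)) -> "You are 2 hours behind schedule."
#   _build_work_diff_msg(timedelta(hours=3))  -> "You are 3 hours ahead of schedule."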
@click.command()
@click.version_option(version=__version__)
@click.option(
"--watson-report",
"-r",
help="Read json style of watson report",
type=click.File("r"),
default=sys.stdin,
)
@click.option(
"--working-hours",
"-w",
help="Amount of planned working time",
type=TIME_DELTA,
default="40 hours",
)
@click.option(
"--period",
"-p",
help="Duration in which the amount of planned working time should be achieved",
type=TIME_DELTA,
default="1 week",
)
def main(
watson_report: click.File, working_hours: timedelta, period: timedelta
) -> None:
watson_report = json.load(watson_report)
diff = watson_overtime(watson_report, working_hours, period)
click.echo(_build_work_diff_msg(diff))
if __name__ == "__main__":
main()
|
python
|
def dump_locals(lcls):
print('|' + ('='*78) + '|')
print("|Locals:".ljust(79) + '|')
print('|' + ('- -'*(79//3)) + '|')
for (k, v) in lcls.items():
print("| {} => {}".format(k, v).ljust(79) + '|')
print('|' + ('='*78) + '|')
def dump_obj(name, obj):
print('|' + ('='*78) + '|')
    print('|dump of {} (type: {})'.format(name, type(obj).__name__).ljust(79) + '|')
print('|' + (' - ' * (79//3)) + "|")
for (k, v) in obj.__dict__.items():
print("| {} => {}".format(k, v.__repr__()).ljust(79) + '|')
print('|' + ('='*78) + '|')
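
if __name__ == "__main__":
    # Minimal sketch (toy class, illustrative only): exercise both helpers.
    class Point:
        def __init__(self):
            self.x, self.y = 1, 2
    dump_obj("point", Point())
    dump_locals(locals())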
|
python
|
"""
Settings file
You can set the database settings in here at DATABASES['default']
You can also set the supported languages with SUPPORTED_LANGUAGES
Before you launch the site, you should probably set DATABASE password and the SECRET_KEY to something that
is not in a public git repository...
"""
from random import SystemRandom
from django.utils.translation import ugettext_lazy as _
from os.path import join, dirname, exists
import string
import sys
SUPPORTED_LANGUAGES = (
# the first is language key / database value (max 8 chars, don't change), the second is the display name
('en-gb', _('English (British)')),
('zh-cn', _('Chinese (simplified Mandarin)')),
('he', _('Hebrew')),
('de', _('German')),
('nl', _('Dutch')),
)
#SUPPORTED_LANGUAGES = DEFAULT_LANGUAGES # replace this if you want to limit the available languages
DEFAULT_KNOWN_LANGUAGE = 'en-gb'
DEFAULT_LEARN_LANGUAGE = 'zh-cn'
# Build paths inside the project like this: join(BASE_DIR, ...)
BASE_DIR = dirname(dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=)z3(+)z!jaizz^$ggqme0q)49vy2qs-9g+5@h&340qopx^$4w'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
AUTH_USER_MODEL = 'learners.Learner'
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'haystack',
'basics',
'learners',
'phrasebook',
'lists',
'study',
'opinions',
'importing',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.locale.LocaleMiddleware', # should be after SessionMiddleware
'basics.middleware.SetLearningLanguage' # should be after LocaleMiddleWare
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
'basics.context.statistics',
)
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
ROOT_URLCONF = 'urls'
WSGI_APPLICATION = 'wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': join(BASE_DIR, 'data', 'database.sqlite3'),
},
    ## replace with this to use MySQL (make sure the database is UTF8):
#'default': {
# 'ENGINE': 'django.db.backends.mysql',
# 'NAME': 'words_database',
# 'USER': 'werner',
# 'PASSWORD': 'Wx3G4fqABzwZZAz',
# 'HOST': '127.0.0.1',
# 'PORT': '',
#}
}
LOGIN_URL = '/learner/login/'
LOGIN_REDIRECT_URL = '/learner/profile/'
PREPEND_WWW = False # I like turning this on for real sites, but it doesn't work if you access the site using an IP (like 127.0.0.1), so turn it on after testing
APPEND_SLASH = True
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGES = SUPPORTED_LANGUAGES
LANGUAGE_CODE = DEFAULT_LEARN_LANGUAGE
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOCALE_PATHS = (join(BASE_DIR, 'locale'),)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = join(BASE_DIR, 'static')
MEDIA_URL = '/media/'
MEDIA_ROOT = join(BASE_DIR, 'media')
TEMPLATE_DIRS = (
join(BASE_DIR, 'templates'),
)
# search engine
# http://django-haystack.readthedocs.org/en/latest/tutorial.html
# should probably be changed once performance becomes an issue
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
'PATH': join(BASE_DIR, 'data', 'searchindex.whoosh'),
},
}
HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor'
try:
if not exists(join(BASE_DIR, 'source', 'local.py')):
with open(join(BASE_DIR, 'source', 'local.py'), 'w+') as fh:
fh.write('"""\nLocal (machine specific) settings that overwrite the general ones.\n"""\n\n')
fh.write('from os.path import join, realpath, dirname\n\n\n')
fh.write('BASE_DIR = dirname(dirname(realpath(__file__)))\n\n')
fh.write('DATABASES = {\'default\': {\n\t\'ENGINE\': \'django.db.backends.sqlite3\',\n\t\'NAME\': join(BASE_DIR, \'data\', \'aqua.db\'),\n}}\n\n')
fh.write('ALLOWED_HOSTS = [\'localhost\', \'.localhost.markv.nl\',]\n\n')
fh.write('SECRET_KEY = "{0:s}"\n\n'.format(''.join(SystemRandom().choice(string.ascii_letters + string.digits + '#$%&()*+,-./:;?@[]^_`{|}~') for _ in range(50))))
fh.write('NOTIFICATION_PATH = join(BASE_DIR, \'notification.html\')\n\n')
fh.write('TEMPLATE_DEBUG = DEBUG = True\n\n\n')
except Exception as err:
print('could not create local.py settings file: {0:s}'.format(str(err)))
from local import *
|
python
|
# Counts the number of lines in an external file
# (one-liner adapted from Stack Overflow)
# @program 3
# @author unobatbayar
# @date 03-10-2018
with open('out.txt') as f:
    num_lines = sum(1 for line in f)
print(num_lines)
|
python
|
#!/usr/bin/env python
import requests
import traceback
import sys
from bs4 import BeautifulSoup
import csv
## getHTML
def getHTML():
url = "http://mcc-mnc.com/"
html = ""
try :
r = requests.get(url)
        html = r.text
except:
traceback.print_exc()
return html
return html
## end
## getHeaders
def getHeaders(table):
## Find thead
thead = table.find("thead")
if thead is None:
print "Didn't find thead tag in table, cannot proceed!!!"
sys.exit(1)
return [header.text.strip() for header in thead.find_all("th")]
## end
## getRows
def getRows(table):
## Find tbody
tbody = table.find("tbody")
if tbody is None:
print "Didn't find tbody tag in table, cannot proceed!!!"
sys.exit(1)
rows= []
for row in tbody.find_all("tr"):
rows.append([val.text.strip() for val in row.find_all("td")])
return rows
## end
html = getHTML()
if html == "":
print "HTML retrieve is empty, cannot proceed!!!"
sys.exit(1)
soup = BeautifulSoup(html, "html.parser")
table = soup.find("table", attrs={"id": "mncmccTable"})
headers = getHeaders(table)
rows = getRows(table)
outputFileName = "mcc-mnc.csv"
try:
    with open(outputFileName, "w", newline="") as f:
        writer = csv.writer(f, delimiter=",", quoting=csv.QUOTE_MINIMAL)
        writer.writerow(headers)
        writer.writerows(rows)
except:
    traceback.print_exc()
    sys.exit(1)
|
python
|
from user import User
from db import Base, Session
from sqlalchemy import *
from sqlalchemy.orm import relation, sessionmaker
from datetime import datetime, date
from attendee import Attendee
from werkzeug.security import generate_password_hash, check_password_hash
from flask import json
from sqlalchemy import exc
from event import Event
import organization
class OrgMember(User):
__tablename__ = "orgmembers"
__mapper_args__ = {'polymorphic_identity': 'orgmember'}
id = Column(Integer, ForeignKey('users.id'), primary_key=True, nullable=False)
# the OrgMember will have all User fields
org = Column(Integer, ForeignKey('organizations.id'), nullable=False) # object or id?
poc = Column(Boolean, nullable=False)
@classmethod
def fromdict(cls, d):
allowed = ('name', 'email', 'passwordhash', 'phone', 'last_active', 'birthdate',
'bio', 'gender', 'org', 'poc')
df = {k: v for k, v in d.items() if k in allowed}
return cls(**df)
def asdict(self):
dict_ = {}
for key in self.__mapper__.c.keys():
result = getattr(self, key)
if isinstance(result, date):
dict_[key] = str(result)
else:
dict_[key] = result
return dict_
def __init__(self, name, email, passwordhash, phone, poc, org, birthdate=None,
bio=None, gender=None):
self.name = name
self.email = email
self.set_password(passwordhash)
        if len(phone) > 15:
            raise ValueError("phone number is too long")
        elif len(phone) < 10:
            raise ValueError("phone number is too short")
        elif not phone.isdigit():
            raise ValueError("phone number must be a string of digits")
        else:
            self.phone = phone
self.poc = poc
self.last_activity = datetime.now()
self.birthdate = birthdate
self.bio = bio
self.gender = gender
self.org = org
def set_password(self, password):
self.passwordhash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.passwordhash, password)
    # look up an existing OrgMember by id
def getOrgMember(self, id):
s = Session()
content = s.query(OrgMember).filter_by(id=id).first()
s.close()
if content:
return content
else:
raise ValueError("user does not exist")
    def confirmAttendee(self, event, user):
        s = Session()
        # filter_by takes keyword arguments; 'event' and 'user' are assumed to
        # be the Attendee foreign-key columns
        attendee = s.query(Attendee).filter_by(event=event, user=user).first()
        if attendee:
            attendee.confirmed = True
            s.commit()
            s.close()
            return True
        else:
            s.close()
            return False
    def validateHour(self, event, user):
        s = Session()
        attendee = s.query(Attendee).filter_by(event=event, user=user).first()
        if attendee:
            attendee.hoursValidated = True
            s.commit()
            s.close()
            return True
        else:
            s.close()
            return False
def deleteSelf(self, session):
s = session
try:
s.delete(self)
except:
            raise exc.SQLAlchemyError("failed to delete orgMember {}".format(self.id))
def link_org(orgmember):
s = Session()
o2_org = orgmember.org
org_m = s.query(OrgMember).filter_by(email=orgmember.email).first()
s.close()
if org_m:
org_id = org_m.id
else :
print (exc.InvalidRequestError("query failed"))
return False
json2 = json.dumps({'poc': org_id})
organization.updateOrg(o2_org, json2)
return True
def createMember(json):
o = OrgMember.fromdict(json)
s = Session()
try:
s.add(o)
s.commit()
except:
return False
finally:
s.close()
o2 = OrgMember.fromdict(json)
if link_org(o2):
return True
else:
return False
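# A minimal usage sketch (not part of the original module): createMember expects
# a dict shaped like the 'allowed' fields in OrgMember.fromdict. All field
# values below are hypothetical; 'passwordhash' carries the plaintext password,
# which __init__ hashes via set_password.
def _example_create_member():
    payload = {
        'name': 'Jane Doe',
        'email': 'jane@example.org',
        'passwordhash': 'plain-password-to-be-hashed',
        'phone': '5551234567',
        'poc': True,
        'org': 1,
    }
    return createMember(payload)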
|
python
|
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
import random
import time
# Using the Python Device SDK for IoT Hub:
# https://github.com/Azure/azure-iot-sdk-python
# The sample connects to a device-specific MQTT endpoint on your IoT Hub.
from azure.iot.device import IoTHubDeviceClient, Message
import requests
import os
from datetime import datetime
lat="45.657974"
lon="25.601198"
key="3c8ed3ac65cb597f0d30d0c07259b6ff"
CONNECTION_STRING = "HostName=WeatherData.azure-devices.net;DeviceId=DataSender;SharedAccessKey=FChK/5Ls4g4a37TaMp8jhQdsXjyQwILDFHAhieM+Ymw="
def call_api():
#complete_api_link = "https://api.openweathermap.org/data/2.5/onecall?lat="+lat+"&lon="+lon+"&appid="+key
complete_api_link = "https://api.openweathermap.org/data/2.5/weather?q=brasov&appid="+key
api_link = requests.get(complete_api_link)
api_data = api_link.json()
return api_data
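# Hedged sketch (not part of the Microsoft sample): for the /weather endpoint
# the temperature usually sits under api_data['main']['temp'] (in Kelvin), so
# a trimmed payload could be built like this before sending. The field names
# are assumptions about the OpenWeatherMap response shape.
def build_trimmed_payload(api_data):
    main = api_data.get('main', {})
    return {
        'temp_k': main.get('temp'),
        'humidity': main.get('humidity'),
        'sampled_at': datetime.now().isoformat(),
    }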
def iothub_client_init():
# Create an IoT Hub client
client = IoTHubDeviceClient.create_from_connection_string(CONNECTION_STRING)
return client
def iothub_client_telemetry_sample_run():
try:
client = iothub_client_init()
print ( "IoT Hub device sending periodic messages, press Ctrl-C to exit" )
while True:
# Build the message with simulated telemetry values.
api_data=call_api()
message = Message(str(api_data))
# Send the message.
print( "Sending message: {}".format(message) )
client.send_message(message)
print ( "Message successfully sent" )
time.sleep(20)
except KeyboardInterrupt:
print ( "IoTHubClient sample stopped" )
if __name__ == '__main__':
print ( "IoT Hub Quickstart #1 - Simulated device" )
print ( "Press Ctrl-C to exit" )
iothub_client_telemetry_sample_run()
|
python
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, www.privaz.io Valletech AB
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
# OpenNebula common documentation
DOCUMENTATION = r'''
options:
api_url:
description:
- The ENDPOINT URL of the XMLRPC server.
- If not specified then the value of the ONE_URL environment variable, if any, is used.
type: str
aliases:
- api_endpoint
api_username:
description:
- The name of the user for XMLRPC authentication.
- If not specified then the value of the ONE_USERNAME environment variable, if any, is used.
type: str
api_password:
description:
- The password or token for XMLRPC authentication.
- If not specified then the value of the ONE_PASSWORD environment variable, if any, is used.
type: str
aliases:
- api_token
validate_certs:
description:
- Whether to validate the SSL certificates or not.
- This parameter is ignored if PYTHONHTTPSVERIFY environment variable is used.
type: bool
default: yes
wait_timeout:
description:
- Time to wait for the desired state to be reached before timeout, in seconds.
type: int
default: 300
'''
|
python
|
#!/usr/bin/env python3
import os
from electroncash.util import json_decode
from tkinter import filedialog
from tkinter import *
Tk().withdraw()
wallet = filedialog.askopenfilename(initialdir = "~/.electron-cash/wallets",title = "Select wallet")
coins = os.popen("electron-cash -w "+wallet+" listunspent").read()
coins = json_decode(coins)
#print(coins)
addrValues = {}
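# Group the value of every unspent output under its address so that addresses
# holding more than one coin can be inspected below.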
for c in coins:
addrValues.setdefault(c['address'], []).append(c['value'])
for ad in addrValues:
numVals = len(addrValues[ad])
if numVals > 1:
numDust = 0
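        # 0.00000547 BCH (546 satoshis) is the classic dust-limit output size;
        # count such outputs so they can be excluded from the report below.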
for i in range(numVals):
if addrValues[ad][i] == '0.00000547':
numDust += 1
if numVals - numDust > 1:
print(ad, numVals, addrValues[ad])
|
python
|
from unittest import mock
from django.conf import settings
from django.contrib.auth.models import User
from django.test import Client, TestCase
from .models import Lamp
from .views import LampControlForm
class LoginTests(TestCase):
def setUp(self):
self.client = Client()
def test_login_view(self):
response = self.client.get(settings.LOGIN_URL)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Log in')
def checkLoginRedirect(self, path):
response = self.client.get(path)
self.assertRedirects(response, f'{settings.LOGIN_URL}?next={path}')
def test_lamp_list_no_auth(self):
self.checkLoginRedirect('/lamps/')
def test_lamp_detail_no_auth(self):
self.checkLoginRedirect('/lamps/1')
def test_lamp_control_no_auth(self):
self.checkLoginRedirect('/lamps/1/control')
class LampsSiteViewsTests(TestCase):
def setUp(self):
self.client = Client()
user = User.objects.create_user('testuser')
self.client.force_login(user)
def test_root_redirect(self):
response = self.client.get('/')
self.assertRedirects(response, '/lamps/')
def test_list(self):
lamp = Lamp.objects.create(name='lamp1')
response = self.client.get('/lamps/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'lights/lamp_list.html')
self.assertIn(lamp, response.context['lamp_list'])
def test_details(self):
lamp = Lamp.objects.create(name='lamp1')
response = self.client.get(f'/lamps/{lamp.pk}')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'lights/lamp_detail.html')
self.assertEqual(lamp, response.context['lamp'])
def test_control_get(self):
lamp = Lamp.objects.create(name='lamp1')
response = self.client.get(f'/lamps/{lamp.pk}/control')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'lights/lamp_control.html')
self.assertEqual(lamp, response.context['lamp'])
def test_control_get_status_off(self):
lamp = Lamp.objects.create(name='lamp1')
response = self.client.get(f'/lamps/{lamp.pk}/control')
self.assertEqual(response.status_code, 200)
form = response.context['form']
self.assertEqual(form['status'].value(), LampControlForm.STATUS_OFF)
def test_control_post_action(self):
lamp = Lamp.objects.create(name='lamp1')
new_brightness = 70
self.client.post(f'/lamps/{lamp.pk}/control', {
'brightness': str(new_brightness),
'status': 'on'})
lamp.refresh_from_db()
self.assertEqual(lamp.brightness, new_brightness)
self.assertTrue(lamp.is_on)
def test_control_post_redirect(self):
lamp = Lamp.objects.create(name='lamp1')
response = self.client.post(f'/lamps/{lamp.pk}/control',
{'brightness': '20',
'status': 'on'})
self.assertRedirects(response, f'/lamps/{lamp.pk}')
@mock.patch('lights.views.lamp_service', autospec=True)
def test_control_service_call(self, mock_lamp_service):
lamp = Lamp.objects.create(name='lamp1')
new_brightness = 75
self.client.post(f'/lamps/{lamp.pk}/control', {
'brightness': str(new_brightness),
'status': 'on'})
mock_lamp_service.set_lamp_mode.assert_called_once_with(
lamp,
on=True,
brightness=new_brightness)
def test_control_get_404(self):
        response = self.client.get('/lamps/1/control')
self.assertEqual(response.status_code, 404)
def test_control_post_404(self):
        response = self.client.post('/lamps/1/control')
self.assertEqual(response.status_code, 404)
def test_control_validation_error_response(self):
lamp = Lamp.objects.create(name='lamp1')
response = self.client.post(f'/lamps/{lamp.pk}/control', {
'brightness': 101,
'status': 'on'})
self.assertEqual(response.status_code, 200)
@mock.patch('lights.views.lamp_service', autospec=True)
def test_control_validation_error_action(self, mock_lamp_service):
lamp = Lamp.objects.create(name='lamp1')
self.client.post(f'/lamps/{lamp.pk}/control', {
'brightness': 101,
'status': 'on'})
mock_lamp_service.assert_not_called()
|
python
|
# -*- coding: utf-8 -*-
# Copyright © 2018 by IBPort. All rights reserved.
# @Author: Neal Wong
# @Email: [email protected]
from setuptools import setup
setup(
name='scrapy_autoproxy',
version='1.0.0',
description='Machine learning proxy picker',
long_description=open('README.rst').read(),
keywords='scrapy proxy web-scraping',
license='MIT License',
author="Dan Chrostowski",
author_email='[email protected]',
url='https://streetscrape.com',
packages=[
'scrapy_autoproxy',
],
package_dir={'scrapy_autoproxy': 'scrapy_autoproxy'},
package_data={'scrapy_autoproxy/data': ['docker-compose.yml','.env','config/autoproxy.cfg','database/','init_sql/1-schema.sql','init_sql/2-seeds.sql']},
install_requires=[
'redis',
'psycopg2-binary',
'docker'
],
)
|
python
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from argcomplete.completers import FilesCompleter
from knack.arguments import CLIArgumentType
from azure.cli.core.commands.parameters import (get_location_type,
file_type,
get_resource_name_completion_list,
get_enum_type,
get_three_state_flag,
tags_type)
from azure.mgmt.iotcentral.models import AppSku
from azure.mgmt.iothub.models import IotHubSku
from azure.mgmt.iothubprovisioningservices.models import (IotDpsSku,
AllocationPolicy,
AccessRightsDescription)
from azure.cli.command_modules.iot.shared import (EndpointType,
RouteSourceType,
EncodingFormat,
RenewKeyType,
AuthenticationType)
from .custom import KeyType, SimpleAccessRights
from ._validators import (validate_policy_permissions,
validate_retention_days,
validate_fileupload_notification_max_delivery_count,
validate_fileupload_notification_ttl,
validate_fileupload_sas_ttl,
validate_feedback_ttl,
validate_feedback_lock_duration,
validate_feedback_max_delivery_count,
validate_c2d_max_delivery_count,
validate_c2d_ttl)
hub_name_type = CLIArgumentType(
completer=get_resource_name_completion_list('Microsoft.Devices/IotHubs'),
help='IoT Hub name.')
dps_name_type = CLIArgumentType(
options_list=['--dps-name'],
completer=get_resource_name_completion_list('Microsoft.Devices/ProvisioningServices'),
help='IoT Provisioning Service name')
app_name_type = CLIArgumentType(
completer=get_resource_name_completion_list('Microsoft.IoTCentral/IoTApps'),
help='IoT Central application name.')
def load_arguments(self, _): # pylint: disable=too-many-statements
# Arguments for IoT DPS
with self.argument_context('iot dps') as c:
c.argument('dps_name', dps_name_type, options_list=['--name', '-n'], id_part='name')
c.argument('tags', tags_type)
with self.argument_context('iot dps create') as c:
c.argument('location', get_location_type(self.cli_ctx),
help='Location of your IoT Provisioning Service. Default is the location of target resource group.')
c.argument('sku', arg_type=get_enum_type(IotDpsSku),
help='Pricing tier for the IoT provisioning service.')
c.argument('unit', help='Units in your IoT Provisioning Service.', type=int)
for subgroup in ['access-policy', 'linked-hub', 'certificate']:
with self.argument_context('iot dps {}'.format(subgroup)) as c:
c.argument('dps_name', options_list=['--dps-name'], id_part=None)
with self.argument_context('iot dps access-policy') as c:
c.argument('access_policy_name', options_list=['--access-policy-name', '--name', '-n'],
help='A friendly name for DPS access policy.')
with self.argument_context('iot dps access-policy create') as c:
c.argument('rights', options_list=['--rights', '-r'], nargs='+',
arg_type=get_enum_type(AccessRightsDescription),
help='Access rights for the IoT provisioning service. Use space-separated list for multiple rights.')
c.argument('primary_key', help='Primary SAS key value.')
c.argument('secondary_key', help='Secondary SAS key value.')
with self.argument_context('iot dps access-policy update') as c:
c.argument('rights', options_list=['--rights', '-r'], nargs='+',
arg_type=get_enum_type(AccessRightsDescription),
help='Access rights for the IoT provisioning service. Use space-separated list for multiple rights.')
c.argument('primary_key', help='Primary SAS key value.')
c.argument('secondary_key', help='Secondary SAS key value.')
with self.argument_context('iot dps linked-hub') as c:
c.argument('linked_hub', options_list=['--linked-hub'], help='Host name of linked IoT Hub.')
with self.argument_context('iot dps linked-hub create') as c:
c.argument('connection_string', help='Connection string of the IoT hub.')
c.argument('location', get_location_type(self.cli_ctx),
help='Location of the IoT hub.')
c.argument('apply_allocation_policy',
help='A boolean indicating whether to apply allocation policy to the IoT hub.',
arg_type=get_three_state_flag())
c.argument('allocation_weight', help='Allocation weight of the IoT hub.')
with self.argument_context('iot dps linked-hub update') as c:
c.argument('apply_allocation_policy',
                   help='A boolean indicating whether to apply allocation policy to the IoT hub.',
arg_type=get_three_state_flag())
c.argument('allocation_weight', help='Allocation weight of the IoT hub.')
with self.argument_context('iot dps allocation-policy update') as c:
c.argument('allocation_policy', options_list=['--policy', '-p'], arg_type=get_enum_type(AllocationPolicy),
help='Allocation policy for the IoT provisioning service.')
with self.argument_context('iot dps certificate') as c:
c.argument('certificate_path', options_list=['--path', '-p'], type=file_type,
completer=FilesCompleter([".cer", ".pem"]), help='The path to the file containing the certificate.')
c.argument('certificate_name', options_list=['--certificate-name', '--name', '-n'],
help='A friendly name for the certificate.')
c.argument('etag', options_list=['--etag', '-e'], help='Entity Tag (etag) of the object.')
# Arguments for IoT Hub
with self.argument_context('iot hub') as c:
c.argument('hub_name', hub_name_type, options_list=['--name', '-n'], id_part='name')
c.argument('etag', options_list=['--etag', '-e'], help='Entity Tag (etag) of the object.')
c.argument('sku', arg_type=get_enum_type(IotHubSku),
help='Pricing tier for Azure IoT Hub. '
'Note that only one free IoT hub instance (F1) is allowed in each '
'subscription. Exception will be thrown if free instances exceed one.')
c.argument('unit', help='Units in your IoT Hub.', type=int)
c.argument('partition_count',
help='The number of partitions of the backing Event Hub for device-to-cloud messages.', type=int)
c.argument('retention_day', options_list=['--retention-day', '--rd'],
type=int, validator=validate_retention_days,
help='Specifies how long this IoT hub will maintain device-to-cloud events, between 1 and 7 days.')
c.argument('c2d_ttl', options_list=['--c2d-ttl', '--ct'],
type=int, validator=validate_c2d_ttl,
help='The amount of time a message is available for the device to consume before it is expired'
' by IoT Hub, between 1 and 48 hours.')
c.argument('c2d_max_delivery_count', options_list=['--c2d-max-delivery-count', '--cdd'],
type=int, validator=validate_c2d_max_delivery_count,
help='The number of times the IoT hub will attempt to deliver a cloud-to-device'
' message to a device, between 1 and 100.')
c.argument('feedback_ttl', options_list=['--feedback-ttl', '--ft'],
type=int, validator=validate_feedback_ttl,
help='The period of time for which the IoT hub will maintain the feedback for expiration'
' or delivery of cloud-to-device messages, between 1 and 48 hours.')
c.argument('feedback_lock_duration', options_list=['--feedback-lock-duration', '--fld'],
type=int, validator=validate_feedback_lock_duration,
help='The lock duration for the feedback queue, between 5 and 300 seconds.')
c.argument('feedback_max_delivery_count', options_list=['--feedback-max-delivery-count', '--fd'],
type=int, validator=validate_feedback_max_delivery_count,
help='The number of times the IoT hub attempts to'
' deliver a message on the feedback queue, between 1 and 100.')
c.argument('enable_fileupload_notifications', options_list=['--fileupload-notifications', '--fn'],
arg_type=get_three_state_flag(),
help='A boolean indicating whether to log information about uploaded files to the'
' messages/servicebound/filenotifications IoT Hub endpoint.')
c.argument('fileupload_notification_max_delivery_count', type=int,
options_list=['--fileupload-notification-max-delivery-count', '--fnd'],
validator=validate_fileupload_notification_max_delivery_count,
help='The number of times the IoT hub will attempt to deliver a file notification message,'
' between 1 and 100.')
c.argument('fileupload_notification_ttl', options_list=['--fileupload-notification-ttl', '--fnt'],
type=int, validator=validate_fileupload_notification_ttl,
help='The amount of time a file upload notification is available for the service to'
' consume before it is expired by IoT Hub, between 1 and 48 hours.')
c.argument('fileupload_storage_connectionstring',
options_list=['--fileupload-storage-connectionstring', '--fcs'],
help='The connection string for the Azure Storage account to which files are uploaded.')
c.argument('fileupload_storage_authentication_type',
options_list=['--fileupload-storage-auth-type', '--fsa'],
                   help='The authentication type for the Azure Storage account to which files are uploaded.'
                        ' Possible values are keyBased and identityBased.')
c.argument('fileupload_storage_container_uri',
options_list=['--fileupload-storage-container-uri', '--fcu'],
help='The container URI for the Azure Storage account to which files are uploaded.')
c.argument('fileupload_storage_container_name',
options_list=['--fileupload-storage-container-name', '--fc'],
help='The name of the root container where you upload files. The container need not exist but'
' should be creatable using the connectionString specified.')
c.argument('fileupload_sas_ttl', options_list=['--fileupload-sas-ttl', '--fst'],
type=int, validator=validate_fileupload_sas_ttl,
help='The amount of time a SAS URI generated by IoT Hub is valid before it expires,'
' between 1 and 24 hours.')
c.argument('min_tls_version', options_list=['--min-tls-version', '--mintls'],
                   type=str, help='Specify the minimum TLS version to support for this hub. Can be set to'
                                  ' "1.2" so that clients using a TLS version below 1.2 are rejected.')
for subgroup in ['consumer-group', 'policy', 'certificate', 'routing-endpoint', 'route']:
with self.argument_context('iot hub {}'.format(subgroup)) as c:
c.argument('hub_name', options_list=['--hub-name'])
with self.argument_context('iot hub route') as c:
c.argument('route_name', options_list=['--route-name', '--name', '-n'], help='Name of the Route.')
c.argument('endpoint_name', options_list=['--endpoint-name', '--endpoint', '--en'],
help='Name of the routing endpoint.')
c.argument('condition', options_list=['--condition', '-c'],
help='Condition that is evaluated to apply the routing rule.')
c.argument('enabled', options_list=['--enabled', '-e'], arg_type=get_three_state_flag(),
                   help='A boolean indicating whether to enable the route on the IoT hub.')
c.argument('source_type', arg_type=get_enum_type(RouteSourceType),
options_list=['--source-type', '--type', '--source', '-s'], help='Source of the route.')
with self.argument_context('iot hub route test') as c:
c.argument('body', options_list=['--body', '-b'], help='Body of the route message.')
c.argument('app_properties', options_list=['--app-properties', '--ap'],
help='App properties of the route message.')
c.argument('system_properties', options_list=['--system-properties', '--sp'],
help='System properties of the route message.')
with self.argument_context('iot hub routing-endpoint') as c:
c.argument('endpoint_name', options_list=['--endpoint-name', '--name', '-n'],
help='Name of the Routing Endpoint.')
c.argument('endpoint_resource_group', options_list=['--endpoint-resource-group', '--erg', '-r'],
                   help='Resource group of the Endpoint resource.')
c.argument('endpoint_subscription_id', options_list=['--endpoint-subscription-id', '-s'],
help='SubscriptionId of the Endpoint resource.')
c.argument('connection_string', options_list=['--connection-string', '-c'],
help='Connection string of the Routing Endpoint.')
c.argument('container_name', options_list=['--container-name', '--container'],
help='Name of the storage container.')
c.argument('endpoint_type', arg_type=get_enum_type(EndpointType),
options_list=['--endpoint-type', '--type', '-t'], help='Type of the Routing Endpoint.')
c.argument('encoding', options_list=['--encoding'], arg_type=get_enum_type(EncodingFormat),
help='Encoding format for the container. The default is AVRO. '
'Note that this field is applicable only for blob container endpoints.')
c.argument('endpoint_uri', options_list=['--endpoint-uri'],
help='The uri of the endpoint resource.')
c.argument('entity_path', options_list=['--entity-path'],
help='The entity path of the endpoint resource.')
with self.argument_context('iot hub routing-endpoint create') as c:
c.argument('batch_frequency', options_list=['--batch-frequency', '-b'], type=int,
help='Request batch frequency in seconds. The maximum amount of time that can elapse before data is'
' written to a blob, between 60 and 720 seconds.')
c.argument('chunk_size_window', options_list=['--chunk-size', '-w'], type=int,
help='Request chunk size in megabytes(MB). The maximum size of blobs, between 10 and 500 MB.')
c.argument('file_name_format', options_list=['--file-name-format', '--ff'],
help='File name format for the blob. The file name format must contain {iothub},'
' {partition}, {YYYY}, {MM}, {DD}, {HH} and {mm} fields. All parameters are'
' mandatory but can be reordered with or without delimiters.')
c.argument('authentication_type', options_list=['--auth-type'], arg_type=get_enum_type(AuthenticationType),
help='Authentication type for the endpoint. The default is keyBased.')
with self.argument_context('iot hub certificate') as c:
c.argument('certificate_path', options_list=['--path', '-p'], type=file_type,
completer=FilesCompleter([".cer", ".pem"]), help='The path to the file containing the certificate.')
c.argument('certificate_name', options_list=['--name', '-n'], help='A friendly name for the certificate.')
with self.argument_context('iot hub consumer-group') as c:
c.argument('consumer_group_name', options_list=['--name', '-n'], id_part='child_name_2',
help='Event hub consumer group name.')
c.argument('event_hub_name', id_part='child_name_1', help='Event hub endpoint name.')
with self.argument_context('iot hub policy') as c:
c.argument('policy_name', options_list=['--name', '-n'], id_part='child_name_1',
help='Shared access policy name.')
permission_values = ', '.join([x.value for x in SimpleAccessRights])
c.argument('permissions', nargs='*', validator=validate_policy_permissions, type=str.lower,
help='Permissions of shared access policy. Use space-separated list for multiple permissions. '
'Possible values: {}'.format(permission_values))
with self.argument_context('iot hub policy renew-key') as c:
c.argument('regenerate_key', options_list=['--renew-key', '--rk'], arg_type=get_enum_type(RenewKeyType),
help='Regenerate keys')
with self.argument_context('iot hub create') as c:
c.argument('hub_name', completer=None)
c.argument('location', get_location_type(self.cli_ctx),
help='Location of your IoT Hub. Default is the location of target resource group.')
with self.argument_context('iot hub show-connection-string') as c:
        c.argument('show_all', options_list=['--all'], help='Show all shared access policies.')
c.argument('hub_name', options_list=['--hub-name', '--name', '-n'])
c.argument('policy_name', help='Shared access policy to use.')
c.argument('key_type', arg_type=get_enum_type(KeyType), options_list=['--key'], help='The key to use.')
# Arguments for Message Enrichments
with self.argument_context('iot hub message-enrichment') as c:
c.argument('key', options_list=['--key', '-k'], help='The enrichment\'s key.')
c.argument('value', options_list=['--value', '-v'], help='The enrichment\'s value.')
c.argument('endpoints', options_list=['--endpoints', '-e'], nargs='*',
help='Endpoint(s) to apply enrichments to. Use a space-separated list for multiple endpoints.')
with self.argument_context('iot central app') as c:
c.argument('app_name', app_name_type, options_list=['--name', '-n'])
with self.argument_context('iot central app create') as c:
c.argument('app_name', completer=None,
                   help='Give your IoT Central app a unique name so you can find it later.'
                        ' This will be used as the resource name in the Azure portal and CLI.'
                        ' Avoid special characters; instead, use lower case letters (a-z),'
                        ' numbers (0-9), and dashes (-).')
c.argument('location', get_location_type(self.cli_ctx),
help='Where your app\'s info and resources are stored. We will default to the location'
' of the target resource group. See documentation for a full list of supported locations.')
c.argument('sku', arg_type=get_enum_type(AppSku), options_list=['--sku', '-p'],
help='Pricing plan for IoT Central application.')
c.argument('subdomain', options_list=['--subdomain', '-s'],
help='Enter a unique URL. Your app will be accessible via https://<subdomain>.azureiotcentral.com/.'
                        ' Avoid special characters; instead, use lower'
                        ' case letters (a-z), numbers (0-9), and dashes (-).')
c.argument('template', options_list=['--template', '-t'],
help='IoT Central application template name. Default is "Custom application". See documentation for'
' a list of available templates.')
c.argument('display_name', options_list=['--display-name', '-d'],
help='Custom display name for the IoT Central app. This will be used in the IoT Central application'
' manager to help you identify your app. Default value is the resource name.')
|
python
|
def bin_count(x):
return bin(x).count('1')
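# Reading of logic() below: it prints a two-layer threshold network realizing
# an arbitrary boolean function of m inputs given by its truth table y.
#   - Layer 1 has one neuron per input pattern idx: weight 1.0 on bits that are
#     set in idx, a large negative weight (-1e7) on bits that must be 0, and
#     bias 0.5 - popcount(idx), so the neuron fires only on exactly that pattern.
#   - Layer 2 ORs the patterns whose truth-table entry is 1 (weights y, bias -0.5).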
def logic(n, m, y):
print(2)
print(n, 1)
if n == 0:
print(1)
print(1)
for i in range(m):
print(-1.0, end=' ')
print(-1.0)
return
for idx in range(n):
for binary in range(m):
if idx & (2 ** binary) > 0:
print(1.0, end=' ')
else:
print(-1e7, end=' ')
print(0.5 - bin_count(idx))
print(*y)
print(-0.5)
if __name__ == '__main__':
m = int(input())
y = list()
for _ in range(2 ** m):
y.append(int(input()))
n = 2 ** m
logic(n, m, y)
|
python
|
import remi.gui as gui
import remi.server
import collections
class DraggableItem(gui.EventSource):
def __init__(self, container, **kwargs):
gui.EventSource.__init__(self)
self.container = container
self.refWidget = None
self.parent = None
self.active = False
self.origin_x = -1
self.origin_y = -1
self.snap_grid_size = 1
def setup(self, refWidget, newParent):
#refWidget is the target widget that will be resized
#newParent is the container
if self.parent:
try:
self.parent.remove_child(self)
except:
pass
        if newParent is None:
return
self.parent = newParent
self.refWidget = refWidget
try:
self.parent.append(self)
except:
pass
self.update_position()
def start_drag(self, emitter, x, y):
self.active = True
self.container.onmousemove.connect(self.on_drag)
self.container.onmouseup.connect(self.stop_drag)
self.container.onmouseleave.connect(self.stop_drag, 0, 0)
self.origin_x = -1
self.origin_y = -1
@gui.decorate_event
def stop_drag(self, emitter, x, y):
self.active = False
self.update_position()
return ()
def on_drag(self, emitter, x, y):
pass
def update_position(self):
pass
def set_snap_grid_size(self, value):
self.snap_grid_size = value
def round_grid(self, value):
return int(value/self.snap_grid_size)*self.snap_grid_size
class ResizeHelper(gui.Widget, DraggableItem):
def __init__(self, container, **kwargs):
super(ResizeHelper, self).__init__(**kwargs)
DraggableItem.__init__(self, container, **kwargs)
self.style['float'] = 'none'
self.style['position'] = 'absolute'
self.style['left']='0px'
self.style['top']='0px'
self.onmousedown.connect(self.start_drag)
def setup(self, refWidget, newParent):
if type(refWidget) in [gui.Widget, gui.Button, gui.GridBox, gui.VBox, gui.HBox,
gui.ListView, gui.DropDown, gui.Label, gui.Image, gui.Link,
                              gui.TableWidget, gui.TextInput, gui.CheckBox,
gui.CheckBoxLabel, gui.Slider, gui.SpinBox, gui.ColorPicker,
gui.Svg, gui.VideoPlayer, gui.Progress]:
DraggableItem.setup(self, refWidget, newParent)
def on_drag(self, emitter, x, y):
if self.active:
if self.origin_x == -1:
self.origin_x = float(x)
self.origin_y = float(y)
self.refWidget_origin_w = gui.from_pix(self.refWidget.style['width'])
self.refWidget_origin_h = gui.from_pix(self.refWidget.style['height'])
else:
self.refWidget.style['width'] = gui.to_pix( self.round_grid( self.refWidget_origin_w + float(x) - self.origin_x ) )
self.refWidget.style['height'] = gui.to_pix( self.round_grid( self.refWidget_origin_h + float(y) - self.origin_y ) )
self.update_position()
def update_position(self):
self.style['position']='absolute'
if self.refWidget:
if 'left' in self.refWidget.style and 'top' in self.refWidget.style:
self.style['left']=gui.to_pix(gui.from_pix(self.refWidget.style['left']) + gui.from_pix(self.refWidget.style['width']) )
self.style['top']=gui.to_pix(gui.from_pix(self.refWidget.style['top']) + gui.from_pix(self.refWidget.style['height']) )
class DragHelper(gui.Widget, DraggableItem):
def __init__(self, container, **kwargs):
super(DragHelper, self).__init__(**kwargs)
DraggableItem.__init__(self, container, **kwargs)
self.style['float'] = 'none'
self.style['position'] = 'absolute'
self.style['left']='0px'
self.style['top']='0px'
self.onmousedown.connect(self.start_drag)
def setup(self, refWidget, newParent):
if type(refWidget) in [gui.Widget, gui.Button, gui.GridBox, gui.VBox, gui.HBox,
gui.ListView, gui.DropDown, gui.Label, gui.Image, gui.Link,
                              gui.TableWidget, gui.TextInput, gui.CheckBox,
gui.CheckBoxLabel, gui.Slider, gui.SpinBox, gui.ColorPicker,
gui.Svg, gui.VideoPlayer, gui.Progress]:
DraggableItem.setup(self, refWidget, newParent)
def on_drag(self, emitter, x, y):
if self.active:
if self.origin_x == -1:
self.origin_x = float(x)
self.origin_y = float(y)
self.refWidget_origin_x = gui.from_pix(self.refWidget.style['left'])
self.refWidget_origin_y = gui.from_pix(self.refWidget.style['top'])
else:
self.refWidget.style['left'] = gui.to_pix( self.round_grid( self.refWidget_origin_x + float(x) - self.origin_x ) )
self.refWidget.style['top'] = gui.to_pix( self.round_grid( self.refWidget_origin_y + float(y) - self.origin_y ) )
self.update_position()
def update_position(self):
self.style['position']='absolute'
if self.refWidget:
if 'left' in self.refWidget.style and 'top' in self.refWidget.style:
self.style['left']=gui.to_pix(gui.from_pix(self.refWidget.style['left'])-gui.from_pix(self.style['width']))
self.style['top']=gui.to_pix(gui.from_pix(self.refWidget.style['top'])-gui.from_pix(self.style['width']))
class SvgComposedPoly(gui.SvgGroup):
""" A group of polyline and circles
"""
def __init__(self, x, y, maxlen, stroke, color, **kwargs):
super(SvgComposedPoly, self).__init__(x, y, **kwargs)
self.maxlen = maxlen
self.plotData = gui.SvgPolyline(self.maxlen)
self.append(self.plotData)
self.set_stroke(stroke, color)
self.set_fill(color)
self.circle_radius = stroke
self.circles_list = list()
self.x_factor = 1.0
self.y_factor = 1.0
def add_coord(self, x, y):
""" Adds a coord to the polyline and creates another circle
"""
x = x*self.x_factor
y = y*self.y_factor
self.plotData.add_coord(x, y)
#self.circles_list.append(gui.SvgCircle(x, y, self.circle_radius))
#self.append(self.circles_list[-1])
#if len(self.circles_list) > self.maxlen:
# self.remove_child(self.circles_list[0])
# del self.circles_list[0]
def scale(self, x_factor, y_factor):
self.x_factor = x_factor/self.x_factor
self.y_factor = y_factor/self.y_factor
self.plotData.attributes['points'] = ""
tmpx = collections.deque()
tmpy = collections.deque()
for c in self.circles_list:
self.remove_child(c)
self.circles_list = list()
while len(self.plotData.coordsX)>0:
tmpx.append( self.plotData.coordsX.popleft() )
tmpy.append( self.plotData.coordsY.popleft() )
while len(tmpx)>0:
self.add_coord(tmpx.popleft(), tmpy.popleft())
self.x_factor = x_factor
self.y_factor = y_factor
class SvgPlot(gui.Svg):
def __init__(self, width, height):
super(SvgPlot, self).__init__(width, height)
self.width = width
self.height = height
self.polyList = []
self.font_size = 15
self.plot_inner_border = self.font_size
self.textYMin = gui.SvgText(0, self.height + self.font_size, "min")
self.textYMax = gui.SvgText(0, 0, "max")
self.textYMin.style['font-size'] = gui.to_pix(self.font_size)
self.textYMax.style['font-size'] = gui.to_pix(self.font_size)
self.append([self.textYMin, self.textYMax])
def append_poly(self, polys):
for poly in polys:
self.append(poly)
self.polyList.append(poly)
poly.textXMin = gui.SvgText(0, 0, "actualValue")
poly.textXMax = gui.SvgText(0, 0, "actualValue")
poly.textYVal = gui.SvgText(0, 0, "actualValue")
poly.textYVal.style['font-size'] = gui.to_pix(self.font_size)
poly.lineYValIndicator = gui.SvgLine(0, 0, 0, 0)
poly.lineXMinIndicator = gui.SvgLine(0, 0, 0, 0)
poly.lineXMaxIndicator = gui.SvgLine(0, 0, 0, 0)
self.append([poly.textXMin, poly.textXMax, poly.textYVal, poly.lineYValIndicator,
poly.lineXMinIndicator, poly.lineXMaxIndicator])
def remove_poly(self, poly):
self.remove_child(poly)
self.polyList.remove(poly)
self.remove_child(poly.textXMin)
self.remove_child(poly.textXMax)
self.remove_child(poly.textYVal)
def render(self):
self.set_viewbox(-self.plot_inner_border, -self.plot_inner_border, self.width + self.plot_inner_border * 2,
self.height + self.plot_inner_border * 2)
if len(self.polyList) < 1:
return
minX = min(self.polyList[0].plotData.coordsX)
maxX = max(self.polyList[0].plotData.coordsX)
minY = min(self.polyList[0].plotData.coordsY)
maxY = max(self.polyList[0].plotData.coordsY)
for poly in self.polyList:
minX = min(minX, min(poly.plotData.coordsX))
maxX = max(maxX, max(poly.plotData.coordsX))
minY = min(minY, min(poly.plotData.coordsY))
maxY = max(maxY, max(poly.plotData.coordsY))
self.textYMin.set_text("min:%s" % minY)
self.textYMax.set_text("max:%s" % maxY)
i = 1
for poly in self.polyList:
scaledTranslatedYpos = (-poly.plotData.coordsY[-1] + maxY + (self.height-(maxY-minY))/2.0)
textXpos = self.height / (len(self.polyList) + 1) * i
poly.textXMin.set_text(str(min(poly.plotData.coordsX)))
poly.textXMin.set_fill(poly.attributes['stroke'])
poly.textXMin.set_position(-textXpos, (min(poly.plotData.coordsX) - minX) )
poly.textXMin.attributes['transform'] = "rotate(%s)" % (-90)
poly.textXMax.set_text(str(max(poly.plotData.coordsX)))
poly.textXMax.set_fill(poly.attributes['stroke'])
poly.textXMax.set_position(-textXpos, (max(poly.plotData.coordsX) - minX) )
poly.textXMax.attributes['transform'] = "rotate(%s)" % (-90)
poly.textYVal.set_text(str(poly.plotData.coordsY[-1]))
poly.textYVal.set_fill(poly.attributes['stroke'])
poly.textYVal.set_position(0, scaledTranslatedYpos)
poly.lineYValIndicator.set_stroke(1, poly.attributes['stroke'])
poly.lineXMinIndicator.set_stroke(1, poly.attributes['stroke'])
poly.lineXMaxIndicator.set_stroke(1, poly.attributes['stroke'])
poly.lineYValIndicator.set_coords(0, scaledTranslatedYpos, self.width, scaledTranslatedYpos)
poly.lineXMinIndicator.set_coords((min(poly.plotData.coordsX) - minX), 0,
(min(poly.plotData.coordsX) - minX), self.height)
poly.lineXMaxIndicator.set_coords((max(poly.plotData.coordsX) - minX), 0,
(max(poly.plotData.coordsX) - minX), self.height)
poly.attributes['transform'] = ('translate(%s,%s)' % (-minX, maxY + (self.height-(maxY-minY))/2.0) +
' scale(%s,%s)' % ((1.0), -(1.0)))
i = i + 1
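# A minimal usage sketch (not part of the original module), assuming these
# classes are driven from a remi App as usual; constructor signatures are taken
# from the definitions above.
def _example_plot():
    plot = SvgPlot(600, 200)
    # SvgComposedPoly(x, y, maxlen, stroke, color)
    poly = SvgComposedPoly(0, 0, 100, 2.0, 'rgba(60,120,220,1.0)')
    plot.append_poly([poly])
    for x in range(50):
        poly.add_coord(x, (x * x) % 37)  # arbitrary demo data
    plot.render()
    return plot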
|
python
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'EA.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(1130, 807)
self.verticalLayout = QtWidgets.QVBoxLayout(Form)
self.verticalLayout.setContentsMargins(2, 2, 2, 2)
self.verticalLayout.setSpacing(20)
self.verticalLayout.setObjectName("verticalLayout")
self.fgdc_eainfo = QtWidgets.QTabWidget(Form)
self.fgdc_eainfo.setTabPosition(QtWidgets.QTabWidget.West)
self.fgdc_eainfo.setObjectName("fgdc_eainfo")
self.tab_instructions = QtWidgets.QWidget()
self.tab_instructions.setObjectName("tab_instructions")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.tab_instructions)
self.verticalLayout_4.setContentsMargins(25, 15, 0, 25)
self.verticalLayout_4.setSpacing(20)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.label_5 = QtWidgets.QLabel(self.tab_instructions)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_5.sizePolicy().hasHeightForWidth())
self.label_5.setSizePolicy(sizePolicy)
self.label_5.setMinimumSize(QtCore.QSize(15, 0))
self.label_5.setMaximumSize(QtCore.QSize(16777215, 20))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(9)
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.label_5.setFont(font)
self.label_5.setTextFormat(QtCore.Qt.RichText)
self.label_5.setScaledContents(False)
self.label_5.setAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignTop)
self.label_5.setIndent(0)
self.label_5.setObjectName("label_5")
self.verticalLayout_4.addWidget(self.label_5)
self.label_2 = QtWidgets.QLabel(self.tab_instructions)
self.label_2.setWordWrap(True)
self.label_2.setObjectName("label_2")
self.verticalLayout_4.addWidget(self.label_2)
self.label_4 = QtWidgets.QLabel(self.tab_instructions)
self.label_4.setObjectName("label_4")
self.verticalLayout_4.addWidget(self.label_4)
spacerItem = QtWidgets.QSpacerItem(20, 419, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_4.addItem(spacerItem)
self.widget = QtWidgets.QWidget(self.tab_instructions)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget.sizePolicy().hasHeightForWidth())
self.widget.setSizePolicy(sizePolicy)
self.widget.setMinimumSize(QtCore.QSize(700, 150))
self.widget.setObjectName("widget")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.widget)
self.verticalLayout_5.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.groupBox = QtWidgets.QGroupBox(self.widget)
self.groupBox.setStyleSheet("QGroupBox{ \n"
"font: 75 10pt \"Arial\";\n"
"border: 1px solid black;\n"
"border-radius: 3px;\n"
"background: QLinearGradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #eef, stop: 1 #ccf);\n"
"} ")
self.groupBox.setObjectName("groupBox")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.groupBox)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.label_34 = QtWidgets.QLabel(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_34.sizePolicy().hasHeightForWidth())
self.label_34.setSizePolicy(sizePolicy)
self.label_34.setStyleSheet("font: italic;")
self.label_34.setWordWrap(True)
self.label_34.setObjectName("label_34")
self.verticalLayout_3.addWidget(self.label_34)
self.label_35 = QtWidgets.QLabel(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_35.sizePolicy().hasHeightForWidth())
self.label_35.setSizePolicy(sizePolicy)
self.label_35.setStyleSheet("font: italic;")
self.label_35.setWordWrap(True)
self.label_35.setObjectName("label_35")
self.verticalLayout_3.addWidget(self.label_35)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
spacerItem1 = QtWidgets.QSpacerItem(38, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem1)
self.btn_add_detailed = QtWidgets.QPushButton(self.groupBox)
self.btn_add_detailed.setObjectName("btn_add_detailed")
self.horizontalLayout_2.addWidget(self.btn_add_detailed)
spacerItem2 = QtWidgets.QSpacerItem(38, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem2)
self.verticalLayout_3.addLayout(self.horizontalLayout_2)
self.verticalLayout_5.addWidget(self.groupBox)
self.verticalLayout_4.addWidget(self.widget)
self.fgdc_eainfo.addTab(self.tab_instructions, "")
self.fgdc_overview = QtWidgets.QWidget()
self.fgdc_overview.setObjectName("fgdc_overview")
self.label = QtWidgets.QLabel(self.fgdc_overview)
self.label.setGeometry(QtCore.QRect(30, 30, 111, 16))
self.label.setObjectName("label")
self.fgdc_eaover = QtWidgets.QPlainTextEdit(self.fgdc_overview)
self.fgdc_eaover.setGeometry(QtCore.QRect(30, 50, 571, 192))
self.fgdc_eaover.setObjectName("fgdc_eaover")
self.label_6 = QtWidgets.QLabel(self.fgdc_overview)
self.label_6.setGeometry(QtCore.QRect(30, 250, 111, 16))
self.label_6.setObjectName("label_6")
self.fgdc_eadetcit = QtWidgets.QPlainTextEdit(self.fgdc_overview)
self.fgdc_eadetcit.setGeometry(QtCore.QRect(30, 270, 571, 192))
self.fgdc_eadetcit.setObjectName("fgdc_eadetcit")
self.fgdc_eainfo.addTab(self.fgdc_overview, "")
self.verticalLayout.addWidget(self.fgdc_eainfo)
self.retranslateUi(Form)
self.fgdc_eainfo.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Form"))
self.label_5.setToolTip(_translate("Form", "Required"))
self.label_5.setText(_translate("Form", "<html><head/><body><p><span style=\" font-size:12pt; font-style:italic; color:#55aaff;\">Provide Specific Information about the data content, organization, units, and values.</span></p></body></html>"))
self.label_2.setText(_translate("Form", "If this record describes a CSV, Shapefile, or other tabular or spatial dataset you need to fill out a Detailed section. Use the \'Browse to Dataset\' button on a detailed tab to autopopulate this section with the column labels and values. Then provide definitions for each column (attribute) and values. \n"
"\n"
"If this record describes multiple files or sheets, one Detailed section should be created for each, see below."))
self.label_4.setText(_translate("Form", "Use both the Overview and one or more Detailed sections if that provides more clarity to data users."))
self.groupBox.setTitle(_translate("Form", "Additional \'Detailed\' tabs"))
self.label_34.setText(_translate("Form", "In some cases more than one Detailed section is required. This could be the case if the metadata record is describing multiple CSV files, multiple worksheets in an Excel Workbook, or a data bundle consisting of multiple files."))
self.label_35.setText(_translate("Form", "Use the button below to add additional Detailed sections. These can be removed from their respective tab, using the \'Remove this Detailed\' Button"))
self.btn_add_detailed.setText(_translate("Form", "Add Detailed"))
self.fgdc_eainfo.setTabText(self.fgdc_eainfo.indexOf(self.tab_instructions), _translate("Form", "Instructions"))
self.label.setText(_translate("Form", "Overview Description"))
self.label_6.setText(_translate("Form", "Citation"))
self.fgdc_eainfo.setTabText(self.fgdc_eainfo.indexOf(self.fgdc_overview), _translate("Form", "Overview"))
|
python
|
from flask_appbuilder.security.sqla.apis.permission import PermissionApi # noqa: F401
from flask_appbuilder.security.sqla.apis.permission_view_menu import ( # noqa: F401
PermissionViewMenuApi,
)
from flask_appbuilder.security.sqla.apis.role import RoleApi # noqa: F401
from flask_appbuilder.security.sqla.apis.user import UserApi # noqa: F401
from flask_appbuilder.security.sqla.apis.view_menu import ViewMenuApi # noqa: F401
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2016 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function, unicode_literals, division, absolute_import
import os
import unittest
import shutil
import random
import time
import dxpy
from dxpy_testutil import (DXTestCase, temporary_project, run)
import dxpy_testutil as testutil
import pytest
CACHE_DIR = '/tmp/dx-docker-cache'
def create_file_in_project(fname, trg_proj_id, folder=None):
data = "foo"
if folder is None:
dxfile = dxpy.upload_string(data, name=fname, project=trg_proj_id, wait_on_close=True)
else:
dxfile = dxpy.upload_string(data, name=fname, project=trg_proj_id, folder=folder, wait_on_close=True)
return dxfile.get_id()
def create_project():
project_name = "test_dx_cp_" + str(random.randint(0, 1000000)) + "_" + str(int(time.time() * 1000))
return dxpy.api.project_new({'name': project_name})['id']
def rm_project(proj_id):
dxpy.api.project_destroy(proj_id, {"terminateJobs": True})
def create_folder_in_project(proj_id, path):
dxpy.api.project_new_folder(proj_id, {"folder": path})
@unittest.skipUnless(testutil.TEST_DX_DOCKER,
'skipping tests that would run dx-docker')
class TestDXDocker(DXTestCase):
@classmethod
def tearDownClass(cls):
shutil.rmtree(CACHE_DIR)
@classmethod
def setUpClass(cls):
run("docker pull ubuntu:14.04")
run("docker pull busybox")
def test_dx_docker_pull(self):
run("dx-docker pull ubuntu:14.04")
self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, 'ubuntu%3A14.04.aci')))
run("dx-docker pull ubuntu:15.04")
self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, 'ubuntu%3A15.04.aci')))
def test_dx_docker_pull_silent(self):
dx_docker_out = run("dx-docker pull -q busybox").strip()
self.assertEqual(dx_docker_out, '')
def test_dx_docker_pull_quay(self):
run("dx-docker pull quay.io/ucsc_cgl/samtools")
self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, 'quay.io%2Fucsc_cgl%2Fsamtools.aci')))
def test_dx_docker_pull_hash_or_not(self):
run("dx-docker pull dnanexus/testdocker")
self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, 'dnanexus%2Ftestdocker.aci')))
repo = "dnanexus/testdocker@sha256:4f983c07e762f5afadf9c45ccd6a557e1a414460e769676826b01c99c4ccb1cb"
run("dx-docker pull {}".format(repo))
sanit='dnanexus%2Ftestdocker%40sha256%3A4f983c07e762f5afadf9c45ccd6a557e1a414460e769676826b01c99c4ccb1cb.aci'
self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, sanit)))
def test_dx_docker_pull_failure(self):
with self.assertSubprocessFailure(exit_code=1, stderr_regexp='Failed to obtain image'):
run("dx-docker pull busyboxasdf")
@pytest.mark.TRACEABILITY_MATRIX
@testutil.update_traceability_matrix(["DNA_CLI_APP_RUN_DOCKER_CONTAINERS"])
def test_dx_docker_basic_commands(self):
run("dx-docker run ubuntu:14.04 ls --color")
run("dx-docker run ubuntu:15.04 ls")
def test_dx_docker_run_from_hash(self):
repo = "dnanexus/testdocker@sha256:4f983c07e762f5afadf9c45ccd6a557e1a414460e769676826b01c99c4ccb1cb"
run("dx-docker run {}".format(repo))
def test_dx_docker_run_error_codes(self):
with self.assertSubprocessFailure(exit_code=1):
run("dx-docker run ubuntu:14.04 false")
run("dx-docker run ubuntu:14.04 true")
def test_dx_docker_volume(self):
os.makedirs('dxdtestdata')
run("dx-docker run -v dxdtestdata:/data-host ubuntu:14.04 touch /data-host/newfile.txt")
self.assertTrue(os.path.isfile(os.path.join('dxdtestdata', 'newfile.txt')))
shutil.rmtree('dxdtestdata')
def test_dx_docker_entrypoint_cmd(self):
docker_out = run("docker run dnanexus/testdocker /bin")
dx_docker_out = run("dx-docker run -q dnanexus/testdocker /bin")
self.assertEqual(docker_out, dx_docker_out)
def test_dx_docker_home_dir(self):
run("dx-docker run julia:0.5.0 julia -E 'println(\"hello world\")'")
def test_dx_docker_run_rm(self):
run("dx-docker run --rm ubuntu ls")
def test_dx_docker_set_env(self):
dx_docker_out = run("dx-docker run --env HOME=/somethingelse busybox env")
self.assertTrue(dx_docker_out.find("HOME=/somethingelse") != -1)
def test_dx_docker_run_canonical(self):
run("dx-docker run quay.io/ucsc_cgl/samtools --help")
def test_dx_docker_add_to_applet(self):
os.makedirs('tmpapp')
run("docker pull busybox")
with self.assertSubprocessFailure(exit_code=1, stderr_regexp='does not appear to have a dxapp.json that parses'):
run("dx-docker add-to-applet busybox tmpapp")
with open('tmpapp/dxapp.json', 'w') as dxapp:
dxapp.write("[]")
run("dx-docker add-to-applet busybox tmpapp")
self.assertTrue(os.path.isfile(os.path.join('tmpapp', 'resources/tmp/dx-docker-cache/busybox.aci')))
shutil.rmtree('tmpapp')
def test_dx_docker_create_asset(self):
with temporary_project(select=True) as temp_project:
test_projectid = temp_project.get_id()
run("docker pull ubuntu:14.04")
run("dx-docker create-asset ubuntu:14.04")
self.assertEqual(run("dx ls ubuntu\\\\:14.04").strip(), 'ubuntu:14.04')
create_folder_in_project(test_projectid, '/testfolder')
run("dx-docker create-asset busybox -o testfolder")
ls_out = run("dx ls /testfolder").strip()
self.assertEqual(ls_out, 'busybox')
ls_out = run("dx ls testfolder\\/busybox.tar.gz").strip()
self.assertEqual(ls_out, 'busybox.tar.gz')
def test_dx_docker_create_asset_with_short_imageid(self):
with temporary_project(select=True) as temp_project:
test_projectid = temp_project.get_id()
run("docker pull ubuntu:14.04")
short_id = run("docker images -q ubuntu:14.04").strip()
create_folder_in_project(test_projectid, '/testfolder')
run("dx-docker create-asset {short_id} -o testfolder".format(short_id=short_id))
ls_out = run("dx ls /testfolder").strip()
self.assertEqual(ls_out, short_id)
def test_dx_docker_create_asset_with_long_imageid(self):
with temporary_project(select=True) as temp_project:
test_projectid = temp_project.get_id()
run("docker pull ubuntu:14.04")
long_id = run("docker images --no-trunc -q ubuntu:14.04").strip()
create_folder_in_project(test_projectid, '/testfolder')
run("dx-docker create-asset {long_id} -o testfolder".format(long_id=long_id))
ls_out = run("dx ls /testfolder").strip()
self.assertEqual(ls_out, long_id)
def test_dx_docker_create_asset_with_image_digest(self):
with temporary_project(select=True) as temp_project:
test_projectid = temp_project.get_id()
run("docker pull ubuntu:14.04")
create_folder_in_project(test_projectid, '/testfolder')
image_digest = run("docker inspect ubuntu:14.04 | jq -r '.[] | .RepoDigests[0]'").strip()
run("dx-docker create-asset {image_digest} -o testfolder".format(image_digest=image_digest))
ls_out = run("dx ls /testfolder").strip()
self.assertEqual(ls_out, image_digest)
def test_dx_docker_additional_container(self):
run("dx-docker run busybox ls")
def test_dx_docker_working_dir_override(self):
run("dx-docker run -v $PWD:/tmp -w /tmp quay.io/ucsc_cgl/samtools faidx test.fa")
def test_complex_quote(self):
run('dx-docker run python:2-slim /bin/sh -c "echo \'{"foo": {"location": "file:///"}}\' > /dev/stdout"')
|
python
|
#
# The XBUILD builder.
#
# (c) 2012 The XTK Developers <[email protected]>
#
import datetime
import os
import stat
import sys
import subprocess
import config
from _cdash import CDash
from _colors import Colors
from _jsfilefinder import JSFileFinder
from _licenser import Licenser
#
#
#
class Builder( object ):
    '''
    Builds the compiled release of the software with the Closure builder/compiler.
    '''
def run( self, options=None ):
        '''
        Performs the build: collects the .js sources, runs the Closure compiler,
        logs warnings and errors for CDash, timestamps the output, and attaches
        the license.
        '''
print 'Building ' + config.SOFTWARE_SHORT + '...'
# grab all js files
filefinder = JSFileFinder()
jsfiles = filefinder.run()
arguments = []
# add js files
for j in jsfiles:
arguments.extend( ['-i', j] )
# add the project root
arguments.extend( ['--root', config.SOFTWARE_PATH] )
# set the output mode to compiled
arguments.extend( ['-o', 'compiled'] )
# configure the compiler path
arguments.extend( ['-c', config.CLOSURECOMPILER_PATH] )
# configure the output file
arguments.extend( ['--output_file', config.BUILD_OUTPUT_PATH] )
# configure additional compiler arguments
arguments.extend( [ '-f', '--warning_level=VERBOSE'] ) # verbose
arguments.extend( [ '-f', '--compilation_level=ADVANCED_OPTIMIZATIONS'] ) # advanced compilation
arguments.extend( [ '-f', '--jscomp_warning=missingProperties'] ) # enable strict mode 1
arguments.extend( [ '-f', '--jscomp_warning=checkTypes'] ) # enable strict mode 2
arguments.extend( ['-f', '--summary_detail_level=3'] ) # always show summary
        arguments.extend( [ '-f', '--define=goog.DEBUG=false'] ) # turn off closure library debugging
# add the goog/deps.js file from closure according to
# https://code.google.com/p/closure-library/wiki/FrequentlyAskedQuestions#When_I_compile_with_type-checking_on,_I_get_warnings_about_unkno
arguments.extend( [ '-f', '--js=' + config.CLOSURELIBRARY_DEPS_PATH] )
# if enabled, set debug options
if options.debug:
arguments.extend( ['-f', '--debug'] )
arguments.extend( ['-f', '--formatting=PRETTY_PRINT'] )
#
# call the compiler (through the closure builder)
#
# make sure the closurebuilder is executable
st = os.stat( config.CLOSUREBUILDER_PATH )
os.chmod( config.CLOSUREBUILDER_PATH, st.st_mode | stat.S_IEXEC )
command = [config.CLOSUREBUILDER_PATH]
command.extend( arguments )
process = subprocess.Popen( command, bufsize=0, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
# ignore the next X lines
ignoreNext = 0
# save warnings and errors in a log
logActive = False
log = []
# fancy displaying using ANSI colors
for line in process.stdout:
if ignoreNext > 0:
# we ignore this line
ignoreNext -= 1
continue
line = line.strip( '\n' )
color = Colors._CLEAR # default is no color
# colorize depending on line content
if line.find( 'Scanning' ) != -1:
color = Colors.YELLOW
elif line.find( 'scanned' ) != -1:
color = Colors.YELLOW
elif line.find( 'Building' ) != -1:
color = Colors.PURPLE
elif line.find( 'Compiling' ) != -1:
color = Colors.PURPLE
# start logging now
logActive = True
elif line.find( 'ERROR' ) != -1:
color = Colors.RED
elif line.find( 'WARNING' ) != -1:
# this is a warning, only display these if verbose mode is on
if not options.verbose:
                ignoreNext = 3 # and ignore the next 3 lines
continue
color = Colors.ORANGE
if logActive:
# log this line if we are in logging mode
log.append( line )
# print colored line
print color + line + Colors._CLEAR
# we have errors and warnings logged now
log = log[1:-1] # remove first and last log entries since they are additional information
# now we create a dashboard submission file
cdasher = CDash()
xmlfile = cdasher.run( ['Build', log, True] )
with open( os.path.join( config.TEMP_PATH, config.SOFTWARE_SHORT + '_Build.xml' ), 'w' ) as f:
f.write( xmlfile )
# and add a timestamp to the compiled file
with open( config.BUILD_OUTPUT_PATH, 'r' ) as f:
content = f.read() # read everything in the file
now = datetime.datetime.now()
content_with_timestamp = content.replace( '###TIMESTAMP###', now.strftime( '%Y-%m-%d %H:%M:%S' ) )
with open( config.BUILD_OUTPUT_PATH, 'w' ) as f:
f.write( content_with_timestamp ) # write the new stuff
# and attach the license
licenser = Licenser()
licenser.run()
print Colors.ORANGE + 'Compiled file ' + Colors.CYAN + config.BUILD_OUTPUT_PATH + Colors.ORANGE + ' written. ' + Colors._CLEAR
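# Example invocation (sketch; assumes an argparse-style options object with
# 'debug' and 'verbose' attributes, as the surrounding build scripts provide):
#
#   class _Options( object ):
#       debug = False
#       verbose = False
#
#   Builder().run( _Options() )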
|
python
|
import logging
from src.backup.datastore.backup_finder import BackupFinder
from src.restore.list.backup_list_restore_service import \
BackupListRestoreRequest, BackupItem, BackupListRestoreService
from src.restore.status.restoration_job_status_service import \
RestorationJobStatusService
class TableRestoreService(object):
@classmethod
def restore(cls, table_reference, target_project_id, target_dataset_id,
create_disposition, write_disposition, restoration_datetime):
backup = BackupFinder.for_table(table_reference, restoration_datetime)
restore_request = BackupListRestoreRequest([BackupItem(backup.key)],
target_project_id,
target_dataset_id,
create_disposition,
write_disposition)
restoration_job_id = BackupListRestoreService().restore(restore_request)
logging.info("Scheduled restoration job: %s", restoration_job_id)
return {
'restorationJobId': restoration_job_id,
'restorationStatusEndpoint': RestorationJobStatusService.get_status_endpoint(restoration_job_id),
'restorationWarningsOnlyStatusEndpoint': RestorationJobStatusService.get_warnings_only_status_endpoint(restoration_job_id)
}
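# Example call (sketch; the project/dataset IDs and dispositions below are
# illustrative, not taken from the original module):
#
#   TableRestoreService.restore(table_reference, 'target-project-id',
#                               'target_dataset_id', 'CREATE_IF_NEEDED',
#                               'WRITE_EMPTY', restoration_datetime)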
|
python
|
from typing import List


class Solution:
    def findMaxForm(self, strs: List[str], m: int, n: int) -> int:
        # Treat this as a two-dimensional 0/1 knapsack: dp[i][j] is the largest
        # subset size using at most i zeros and j ones.
        dp = [[0] * (n + 1) for _ in range(m + 1)]
        for s in strs:
            zeroNum = 0
            oneNum = 0
            # Count the zeros and ones in each string -- the "weight" of this item.
            for ch in s:
                if ch == '0':
                    zeroNum += 1
                else:
                    oneNum += 1
            # Iterate capacities backwards so each string is used at most once.
            for i in range(m, zeroNum - 1, -1):
                for j in range(n, oneNum - 1, -1):
                    dp[i][j] = max(dp[i][j], dp[i - zeroNum][j - oneNum] + 1)
        return dp[-1][-1]
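# Quick sanity check (hypothetical driver, not part of the original solution):
# with strs = ["10", "0001", "111001", "1", "0"], m = 5, n = 3 the answer is 4,
# since {"10", "0001", "1", "0"} uses exactly 5 zeros and 3 ones.
if __name__ == '__main__':
    print(Solution().findMaxForm(["10", "0001", "111001", "1", "0"], 5, 3))  # expected: 4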
|
python
|
import sys
assert sys.version_info >= (3,9), "This script requires at least Python 3.9"
world = {
"uuid": "1A507EF7-87D8-4EBA-865E-C5D36673C916",
"name": "YELLOWSTONE",
"creator": "Twine",
"creatorVersion": "2.3.14",
"schemaName": "Harlowe 3 to JSON",
"schemaVersion": "0.0.6",
"createdAtMs": 1631210521173,
"passages": [
{
"name": "WELCOME TO YELLOWSTONE",
"tags": "",
"id": "1",
"text": "You are at the entrance to Yellowstone, you have the option to advance to the Cabin or the fire pit. \n[[CABIN -> Cabin]]\n[[FIRE PIT -> Fire pit]]",
"links": [
{
"linkText": "CABIN",
"passageName": "Cabin",
"original": "[[CABIN -> Cabin]]"
},
{
"linkText": "FIRE PIT",
"passageName": "Fire pit",
"original": "[[FIRE PIT -> Fire pit]]"
}
],
"hooks": [],
"cleanText": "You are at the entrance to Yellowstone, you have the option to advance to the Cabin or the fire pit."
},
{
"name": "Cabin",
"tags": "",
"id": "2",
"text": "You enter the cabin to find no one inside, but you find a note on the desk that says (text-style:\"bold\",\"italic\",\"smear\")[\"HELP FIND US, HE CAN'T BE TRUSTED\"] You now have the option to retreat to the entrance, or advance to the fire pit or the forest.\n[[ENTRANCE ->WELCOME TO YELLOWSTONE]] \n[[FIRE PIT -> Fire pit]]\n[[FOREST -> Forest]]",
"links": [
{
"linkText": "ENTRANCE",
"passageName": "WELCOME TO YELLOWSTONE",
"original": "[[ENTRANCE ->WELCOME TO YELLOWSTONE]]"
},
{
"linkText": "FIRE PIT",
"passageName": "Fire pit",
"original": "[[FIRE PIT -> Fire pit]]"
},
{
"linkText": "FOREST",
"passageName": "Forest",
"original": "[[FOREST -> Forest]]"
}
],
"hooks": [
{
"hookText": "\"HELP FIND US, HE CAN'T BE TRUSTED\"",
"original": "side, but you find a note on the desk that says (text-style:\"bold\",\"italic\",\"smear\")"
}
],
"cleanText": "You enter the cabin to find no one in but you find a note that says 'HELP FIND US, HE WILL KILL US ALL' You now have the option to retreat to the entrance, or advance to the fire pit or the forest."
},
{
"name": "Fire pit",
"tags": "",
"id": "3",
"text": "You are at the fire pit, and you find a log with the information and photos of three missing Park Rangers: Melissa, Joshua, and Francis. You are determined to find them, but worrisome about what caused them to go missing. You have the option to go to cabin, the forest, or back to the entrance.\n[[ENTRANCE ->WELCOME TO YELLOWSTONE]] \n[[CABIN -> Cabin]]\n[[FOREST -> Forest]]",
"links": [
{
"linkText": "ENTRANCE",
"passageName": "WELCOME TO YELLOWSTONE",
"original": "[[ENTRANCE ->WELCOME TO YELLOWSTONE]]"
},
{
"linkText": "CABIN",
"passageName": "Cabin",
"original": "[[CABIN -> Cabin]]"
},
{
"linkText": "FOREST",
"passageName": "Forest",
"original": "[[FOREST -> Forest]]"
}
],
"hooks": [],
"cleanText": "You are at the fire pit, and you find a log with the information and photos of three missing Park Rangers: Melissa, Joshua, and Francis. You are determined to find them, but worrisome about what caused them to go missing. You have the option to go to cabin, the forest, or back to the entrance."
},
{
"name": "Forest",
"tags": "",
"id": "4",
"text": "You have now entered the forest, you see three pathways, one leads to a ledge, the other leads down a narrow path but with visible foot prints heading down the path, and finally the third path leads into a cave with a bloody handprint on the outter stone. You have three options: ledge, narrow path, cave.\n[[ENTRANCE ->WELCOME TO YELLOWSTONE]] \n[[LEDGE -> Ledge]]\n[[NARROW PATH -> Narrow Path]]\n[[CAVE -> Cave]]",
"links": [
{
"linkText": "ENTRANCE",
"passageName": "WELCOME TO YELLOWSTONE",
"original": "[[ENTRANCE ->WELCOME TO YELLOWSTONE]]"
},
{
"linkText": "LEDGE",
"passageName": "Ledge",
"original": "[[LEDGE -> Ledge]]"
},
{
"linkText": "NARROW PATH",
"passageName": "Narrow Path",
"original": "[[NARROW PATH -> Narrow Path]]"
},
{
"linkText": "CAVE",
"passageName": "Cave",
"original": "[[CAVE -> Cave]]"
}
],
"hooks": [],
"cleanText": "You have now entered the forest, you see three pathways, one leads to a ledge, the other leads down a narrow path but with visible foot prints heading down the path, and finally the third path leads into a cave with a bloody handprint on the outter stone. You have three options: ledge, narrow path, cave."
},
{
"name": "Ledge",
"tags": "",
"id": "5",
"score":10,
"text": "As you walk up to the ledge something feels off, you look down and about 80 feet down a body lays there that fits the description of Joshua. You cross off his name and begin to panic and try to contact the authorities, but there is no cell service. You have the option to return to the forest.\n[[FOREST -> Forest]]",
"links": [
{
"linkText": "FOREST",
"passageName": "Forest",
"original": "[[FOREST -> Forest]]"
}
],
"hooks": [],
"cleanText": "As you walk up to the ledge something feels off, you look down and about 80 feet down a body lays there that fits the description of Joshua. You cross off his name and begin to panic and try to contact the authorities, but there is no cell service. You have the option to return to the forest."
},
{
"name": "Narrow Path",
"tags": "",
"id": "6",
"text": "You continue down the narrow path until you find a fork in the path, leading off in two directions. There is a sign that states going left will take you to the lake, and going right will take you to the equestrian park. You have two options: Lake, or Equestrian.\n[[LAKE -> Lake]]\n[[EQUESTRIAN -> Equestrian Park]]",
"links": [
{
"linkText": "LAKE",
"passageName": "Lake",
"original": "[[LAKE -> Lake]]"
},
{
"linkText": "EQUESTRIAN",
"passageName": "Equestrian Park",
"original": "[[EQUESTRIAN -> Equestrian Park]]"
}
],
"hooks": [],
"cleanText": "You continue down the narrow path until you find a fork in the path, leading off in two directions. There is a sign that states going left will take you to the lake, and going right will take you to the equestrian park. You have two options: Lake or Equestrian."
},
{
"name": "Cave",
"tags": "",
"id": "7",
"text": "As you approach the cave you investigate the bloody handprint. You use your phones flashlight so you can see in the cave, and written in blood on the wall you see [BEWARE OF THE STABLES] frightened you retreat out of the cave. Your only option is to retreat to the forest.\n[[FOREST -> Forest]]",
"links": [
{
"linkText": "CAVE",
"passageName": "Cave",
"original": "[[CAVE -> Cave]]"
},
{
"linkText": "FOREST",
"passageName": "Forest",
"original": "[[FOREST -> Forest]]"
}
],
"hooks": [],
"cleanText": "As you approach the cave you investigate the bloody handprint. You use your phones flashlight so you can see in the cave, and written in blood on the wall you see [BEWARE OF THE STABLES] frightened you retreat out of the cave. Your only option is to retreat to the forest."
},
{
"name": "Lake",
"tags": "",
"id": "8",
"text": "Once you get to the lake you find nothing of importance, but out of nowhere you hear the sound of startled horses. You have the option to retreat back down the narrow path to head to the equestrian park, or head to the beach off the side of the lake to investigate further. Two options: beach, or equestrian park. \n[[BEACH -> Beach off the side of the lake]]\n[[NARROW PATH -> Narrow Path]]",
"links": [
{
"linkText": "BEACH",
"passageName": "Beach off the side of the lake",
"original": "[[BEACH -> Beach off the side of the lake]]"
},
{
"linkText": "NARROW PATH",
"passageName": "Narrow Path",
"original": "[[NARROW PATH -> Narrow Path]]"
}
],
"hooks": [],
"cleanText": "Once you get to the lake you find nothing of importance, but out of nowhere you hear the sound of startled horses. You have the option to retreat back down the narrow path to head to the equestrian park, or head to the beach off the side of the lake to investigate further. Two options: beach, or equestrian park."
},
{
"name": "Equestrian Park",
"tags": "",
"id": "9",
"text": "Once you arrive at the equestrian park there are two signs one pointing to the left that reads [STABLES] and another pointing to the right that reads [MAINTENANCE SHED]. You have the options to go back to the narrow path, stable, or maintenance shed.\n[[NARROW PATH -> Narrow Path]] \n[[STABLES -> Stables]]\n[[MAINTENANCE SHED -> Maintenance shed]]",
"links": [
{
"linkText": "STABLES",
"passageName": "Stables",
"original": "[[STABLES -> Stables]]"
},
{
"linkText": "MAINTENANCE SHED",
"passageName": "Maintenance shed",
"original": "[[MAINTENANCE SHED -> Maintenance shed]]"
}
],
"hooks":[],
"cleanText": "Once you arrive at the equestrian park there are two signs one pointing left that reads [STABLES] and another pointing to the right that reads [MAINTENANCE SHED]. You have the options to go back to the narrow path, stable, or maintenance shed."
},
{
"name": "Beach off the side of the lake",
"tags": "",
"id": "10",
"text": "Nothing here besides a shell that you find interest in so you put it in your pocket for later. If there will be a later. Return back to narrow path.\n[[NARROW PATH -> Narrow Path]]",
"links": [
{
"linkText": "NARROW PATH",
"passageName": "Narrow Path",
"original": "[[NARROW PATH -> Narrow Path]]"
}
],
"hooks": [],
"cleanText": "Nothing here besides a shell that you find interest in so you put it in your pocket for later. If there will be a later. Return back to narrow path."
},
{
"name": "Stables",
"tags": "",
"id": "11",
"text": "You should have looked for the cautions against going to the stables, a deranged man on a horse covered in blood rides up to you and..... game over ehad back to the entrance to try again.\n[[ENTRANCE ->WELCOME TO YELLOWSTONE]]",
"links": [
{
"linkText": "ENTRANCE",
"passageName": "WELCOME TO YELLOWSTONE",
"original": "[[ENTRANCE -> WELCOME TO YELLOWSTONE]]"
}
],
"hooks": [],
"cleanText": "You should have looked for the cautions against going to the stables, a deranged man on a horse covered in blood rides up to you and..... game over head back to the entrance to try again."
},
{
"name": "Maintenance shed",
"tags": "",
"id": "12",
"score":20,
"text": "As you approach the shed it seems oddly quiet, you become hesitant as you reach to open the door until you hear a light sob. You open the door to find Francis taking care of an injured Melissa. Francis breaks down as now he has help to carry Melissa to the entrance of the park to find help. Success! You avoided the deranged man at the stables! If you scored 30 then you found all of the missing rangers! If you only scored 20 then you never found the third, go back and see if you can find the third ranger. Type quit to end game.\n[[EQUESTRIAN PARK -> Equestrian Park]]",
"links": [
{
"linkText": "EQUESTRIAN",
"passageName": "Equestrian Park",
"original": "[[EQUESTRIAN -> Equestrian Park]]"
}
],
"hooks": [],
"cleanText": "As you approach the shed it seems oddly quiet, you become hesitant as you reach to open the door until you hear a light sob. You open the door to find Francis taking care of an injured Melissa. Francis breaks down as now he has help to carry Melissa to the entrance of the park to find help. Success! You avoided the deranged man at the stables! If you scored 30 then you found all of the missing rangers! If you only scored 20 then you never found the third, go back and see if you can find the third ranger. Type quit to end game."
}
]
}
def find_current_location(location_label):
if "passages" in world:
for passage in world["passages"]:
if location_label == passage["name"]:
return passage
return {}
# ----------------------------------------------------------------
def render(current_location, score, moves):
if "name" in current_location and "cleanText" in current_location:
print("Moves: " + str(moves) + ", Score: " + str(score))
print(current_location["cleanText"] + "\n")
def get_input():
response = input("What do you want to do? ")
response = response.upper().strip()
return response
def update(current_location, location_label, response):
if response == "":
return location_label
if "links" in current_location:
for link in current_location["links"]:
if link["linkText"] == response:
return link["passageName"]
print("I don't understand what you are trying to do. Try again.")
return location_label
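# For example, at "WELCOME TO YELLOWSTONE" a response of "CABIN" matches that
# passage's linkText "CABIN" and update() returns "Cabin"; any unrecognized
# response keeps the player at the current passage.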
location_label = "WELCOME TO YELLOWSTONE"
current_location = {}
response = ""
score = 0
moves = 0
while True:
if response == "QUIT":
break
moves += 1
location_label = update(current_location, location_label, response)
current_location = find_current_location(location_label)
if "score" in current_location:
score = score + current_location["score"]
render(current_location, score, moves)
response = get_input()
print("Thanks for playing!")
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
# See: https://docs.python.org/2/library/string.html#format-specification-mini-language
import math
def main():
# "{field_name:format_spec}".format(...)
#
# format_spec ::= [[fill]align][sign][#][0][width][,][.precision][type]
#
# fill ::= <any character>
# align ::= "<" | ">" | "=" | "^"
# sign ::= "+" | "-" | " "
# width ::= integer
# precision ::= integer
# type ::= "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"
#
# The '#' option is only valid for integers, and only for binary, octal, or
# hexadecimal output. If present, it specifies that the output will be
# prefixed by '0b', '0o', or '0x', respectively.
#
# The ',' option signals the use of a comma for a thousands separator. For
# a locale aware separator, use the 'n' integer presentation type instead.
# Fill and align:
# '<' : Forces the field to be left-aligned within the available space
# (this is the default for most objects).
# '>' : Forces the field to be right-aligned within the available space
# (this is the default for numbers).
# '=' : Forces the padding to be placed after the sign (if any) but before
# the digits. This is used for printing fields in the form ‘+000000120’.
# This alignment option is only valid for numeric types.
# '^' : Forces the field to be centered within the available space.
print("{:.<9}".format(3))
print("{:.<9}".format(11))
print("{:.>9}".format(3))
print("{:.>9}".format(11))
print("{:.=9}".format(3))
print("{:.=9}".format(11))
print("{:.^9}".format(3))
print("{:.^9}".format(11))
print()
# Sign:
# '+' : indicates that a sign should be used for both positive as well
# as negative numbers.
# '-' : indicates that a sign should be used only for negative numbers
# (this is the default behavior).
# space : indicates that a leading space should be used on positive
# numbers, and a minus sign on negative numbers.
print("{:+}".format(3))
print("{:+}".format(-3))
print("{:-}".format(3))
print("{:-}".format(-3))
print("{: }".format(3))
print("{: }".format(-3))
print()
# Width
print("{}".format(3))
print("{}".format(11))
print("{:3}".format(3))
print("{:3}".format(11))
print()
# Precision
print("{}".format(math.pi))
print("{:.2f}".format(math.pi))
print()
# Type:
# The available integer presentation types are:
# 'b' : Binary format. Outputs the number in base 2.
# 'c' : Character. Converts the integer to the corresponding unicode character before printing.
# 'd' : Decimal Integer. Outputs the number in base 10.
# 'o' : Octal format. Outputs the number in base 8.
# 'x' : Hex format. Outputs the number in base 16, using lower- case letters for the digits above 9.
# 'X' : Hex format. Outputs the number in base 16, using upper- case letters for the digits above 9.
# 'n' : Number. This is the same as 'd', except that it uses the current locale setting to insert the appropriate number separator characters.
# None : The same as 'd'.
print("{:}".format(21))
print("{:b}".format(21))
print("{:#b}".format(21))
#print("{:c}".format(21))
print("{:d}".format(21))
print("{:o}".format(21))
print("{:#o}".format(21))
print("{:x}".format(21))
print("{:X}".format(21))
print("{:#x}".format(21))
print("{:#X}".format(21))
print("{:n}".format(21))
print()
# Type:
# The available presentation types for floating point and decimal values are:
# 'e' : Exponent notation. Prints the number in scientific notation using
# the letter ‘e’ to indicate the exponent. The default precision is 6.
# 'E' : Exponent notation. Same as 'e' except it uses an upper case ‘E’
# as the separator character.
# 'f' : Fixed point. Displays the number as a fixed-point number. The
# default precision is 6.
# 'F' : Fixed point. Same as 'f'.
# 'g' : General format. For a given precision p >= 1, this rounds the
# number to p significant digits and then formats the result in
# either fixed-point format or in scientific notation, depending on
# its magnitude.
# The precise rules are as follows: suppose that the result
# formatted with presentation type 'e' and precision p-1 would have
# exponent exp. Then if -4 <= exp < p, the number is formatted with
# presentation type 'f' and precision p-1-exp. Otherwise, the
# number is formatted with presentation type 'e' and precision p-1.
# In both cases insignificant trailing zeros are removed from the
# significand, and the decimal point is also removed if there are
# no remaining digits following it.
# Positive and negative infinity, positive and negative zero, and
# nans, are formatted as inf, -inf, 0, -0 and nan respectively,
# regardless of the precision.
# A precision of 0 is treated as equivalent to a precision of 1.
# The default precision is 6.
# 'G' : General format. Same as 'g' except switches to 'E' if the number
# gets too large. The representations of infinity and NaN are
# uppercased, too.
# 'n' : Number. This is the same as 'g', except that it uses the current
# locale setting to insert the appropriate number separator
# characters.
# '%' : Percentage. Multiplies the number by 100 and displays in fixed
# ('f') format, followed by a percent sign.
# None : The same as 'g'.
print("{}".format(math.pi))
print("{:e}".format(math.pi))
print("{:E}".format(math.pi))
print("{:f}".format(math.pi))
print("{:F}".format(math.pi))
print("{:g}".format(math.pi))
print("{:G}".format(math.pi))
print("{:n}".format(math.pi))
print("{:%}".format(math.pi))
if __name__ == '__main__':
main()
|
python
|
import uuid
from yandex_checkout.domain.common.http_verb import HttpVerb
from yandex_checkout.client import ApiClient
from yandex_checkout.domain.request.webhook_request import WebhookRequest
from yandex_checkout.domain.response.webhook_response import WebhookResponse, WebhookList
class Webhook:
base_path = '/webhooks'
def __init__(self):
self.client = ApiClient()
"""
Get list of installed webhooks
:return: WebhookList
"""
@classmethod
def list(cls):
instance = cls()
path = cls.base_path
response = instance.client.request(HttpVerb.GET, path)
return WebhookList(response)
"""
Add webhook
:param params: data passed to API
:param idempotency_key:
:return: WebhookResponse
"""
@classmethod
def add(cls, params, idempotency_key=None):
instance = cls()
path = cls.base_path
if not idempotency_key:
idempotency_key = uuid.uuid4()
headers = {
'Idempotence-Key': str(idempotency_key)
}
if isinstance(params, dict):
params_object = WebhookRequest(params)
elif isinstance(params, WebhookRequest):
params_object = params
else:
raise TypeError('Invalid params value type')
response = instance.client.request(HttpVerb.POST, path, None, headers, params_object)
return WebhookResponse(response)
"""
Remove webhook
:param webhook_id:
:param idempotency_key:
:return: WebhookResponse
"""
@classmethod
def remove(cls, webhook_id, idempotency_key=None):
instance = cls()
path = cls.base_path + '/' + webhook_id
if not idempotency_key:
idempotency_key = uuid.uuid4()
headers = {
'Idempotence-Key': str(idempotency_key)
}
response = instance.client.request(HttpVerb.DELETE, path, None, headers)
return WebhookResponse(response)
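# Example usage (sketch; assumes the ApiClient is already configured with shop
# credentials, and the event/url values below are purely illustrative):
#
#   webhooks = Webhook.list()
#   created = Webhook.add({'event': 'payment.succeeded',
#                          'url': 'https://example.com/notification_url'})
#   Webhook.remove(created.id)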
|
python
|
from .cache import LRUCache
|
python
|
from kasa_device_manager import KasaDeviceManager
if __name__ == "__main__":
kasa_device_manager = KasaDeviceManager()
# Print all the discovered devices out to the console
devices = kasa_device_manager.get_all_devices()
print(devices)
# Toggle a devices power state
# kasa_device_manager.toggle_device_by_name("family room plug")
|
python
|