#!/usr/bin/env python
# coding=utf8
import logging
from logging import NullHandler
logging.getLogger(__name__).addHandler(NullHandler())
|
python
|
from typing import List

class Solution:
def minCostClimbingStairs(self, cost: List[int]) -> int:
n = len(cost)
dp = [0 for _ in range(n)]
dp[-1] = cost[-1]
dp[-2] = cost[-2]
for i in range(n - 3, -1, -1):
dp[i] = cost[i] + min(dp[i + 1], dp[i + 2])
return min(dp[0], dp[1])
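# Illustrative check (not part of the original solution): with the typing
# import in place, the DP can be exercised directly. For cost = [10, 15, 20]
# the table works out to dp = [25, 15, 20], so the cheapest start is step 1
# with a total cost of 15.
if __name__ == "__main__":
    print(Solution().minCostClimbingStairs([10, 15, 20]))  # -> 15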
|
python
|
from zipfile import ZipFile
import zipfile
import wget
import os
import subprocess
import pycountry
# define variables
path = '/path/ipvanish/'
url = 'https://www.ipvanish.com/software/configs/configs.zip'
filename = os.path.join(path, os.path.basename(url))
best_ping = 99999
# get user's choice
def get_choice():
print("1 - delete old configs, and download + unzip new config\n2 - check best server to desired country\n3 - exit")
choice = int(input("Enter number:\n"))
return (choice)
# get country from user input
def get_country():
print("Please enter the name of the country you would like to connect to")
country = str(input("Country: "))
return(country)
# convert user selection into 2-letter country notation
def get_country_code(country):
mapping = {country.name: country.alpha_2 for country in pycountry.countries}
code = mapping.get(country)
if type(code) is str:
return(code)
else:
return("Country not found!")
# deletes old *.zip & *.ovpn files, downloads new file and unzips it
def delete_and_renew_config():
for config_file in os.listdir(path):
if config_file.endswith(".ovpn") or config_file.endswith(".crt") or config_file == "configs.zip":
os.remove(path + config_file)
    wget.download(url, filename)
with zipfile.ZipFile(filename, 'r') as zip_ref:
zip_ref.extractall(path)
# returns array [host, avg ping to host]
def get_host_and_ping(conf_file):
    with open(path + conf_file) as file:
        for i, line in enumerate(file):
            if i == 3:
                host = line.split()[1]
                # os.system() only returns the exit status, so capture the ping
                # output and parse the avg round-trip time from the rtt summary
                out = subprocess.run(["ping", "-c", "1", host],
                                     capture_output=True, text=True).stdout
                ping = out.split("/")[-3] if "/" in out else best_ping
                return [host, ping]
# returns dictionary with the form: {"filename.ovpn" : [ "host", avgping]}
def get_servers(code):
srvs = {}
for config_file in os.listdir(path):
if config_file.startswith("ipvanish-" + code):
            # store [hostname, avg ping] for every matching config file
srvs[config_file] = get_host_and_ping(config_file)
return(srvs)
# returns the config file name of the server with the lowest average ping
def return_best_server(servers):
    best_srv = None
    lowest_ping = best_ping
    for server, host in servers.items():
        if float(host[1]) < float(lowest_ping):
            lowest_ping = float(host[1])
            best_srv = server
    return best_srv
# main while loop
while 1:
choice = get_choice()
if choice == 1:
delete_and_renew_config()
    elif choice == 2:
        country = get_country()
        code = get_country_code(country)
        servers = get_servers(code)
        best_srv = return_best_server(servers)
        if best_srv:
            print("The best server is: " + best_srv)
        else:
            print("No servers found for: " + country)
elif choice == 3:
break
|
python
|
from django.contrib import admin
from .models import (
Payment,
PaymentChoice
)
admin.site.register(Payment)
admin.site.register(PaymentChoice)
|
python
|
from gym_connect_four.envs.connect_four_env import ConnectFourEnv, ResultType
|
python
|
import requests
def coords_to_divisions(lat, lng):
url = f"https://v3.openstates.org/divisions.geo?lat={lat}&lng={lng}"
try:
data = requests.get(url).json()
return [d["id"] for d in data["divisions"]]
except Exception:
# be very resilient
return []
|
python
|
# Generated by Django 2.0.7 on 2018-10-25 11:18
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('hivs_cd', '0011_add_field_condomdistribution_purpose'),
]
operations = [
migrations.AlterField(
model_name='condomdistribution',
name='center',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='condom_distributions', to='hivs_cd.Center', verbose_name='distribution center'),
),
]
|
python
|
#!/home/bryanfeeney/anaconda3/bin/python3.6
#
# Simple script that uses the Microsoft Light Gradient-Boosted Machine-Learning (LightGBM)
# toolkit to make predictions *separately* for each value.
#
from datetime import date, timedelta, datetime
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import LabelEncoder
import lightgbm as lgb
import sys
import json
import psycopg2
FutureDaysToCalculate = 16
WeeksOfHistoryForMinTrainigData = 20
WeeksOfHistoryForFeature = 7
WeeksOfHistoryForFeatureOnValidate = 3
TrainingTimePeriodCount = 6
def load_data_csv (cumul_sales_path, cumul_sales_query_path, items_path, stores_path, query_start_date=None):
"""
Loads four datasets from the file-system in CSV format:
cumul_sale_path is the cumulative sales data, should be the last 12 months
cumul_sale_query_path enumerates the things to predict
items is item data
stores is store data
query_start_date if this is None, it's inferred from the first row of the cumul_sales_query_path documents. If
this is not None, then cumul_sales_query rows before this date are removed.
"""
cumul_sales = pd.read_csv(
cumul_sales_path,
usecols=[1, 2, 3, 4, 5],
dtype={'onpromotion': bool},
converters={'unit_sales': lambda u: np.log1p(float(u)) if float(u) > 0 else 0},
parse_dates=["date"]
)
if cumul_sales_query_path is not None:
cumul_sales_query = pd.read_csv(
cumul_sales_query_path,
usecols=[0, 1, 2, 3, 4],
dtype={'onpromotion': bool},
parse_dates=["date"],
)
if query_start_date is None:
query_start_date = str(cumul_sales_query.iloc[0,1]).split(" ")[0]
else:
cumul_sales_query = cumul_sales_query[cumul_sales_query.date >= query_start_date]
cumul_sales_query = cumul_sales_query.set_index(
['store_nbr', 'item_nbr', 'date']
)
    items = pd.read_csv(
        items_path,
    ).set_index("item_nbr")
    stores = pd.read_csv(
        stores_path
    ).set_index("store_nbr")
return cumul_sales, cumul_sales_query, query_start_date, items, stores
def load_data_sql (cumul_sales_path, cumul_sales_query_path, items_path, stores_path, query_start_date=None):
"""
    Loads four datasets from a PostgreSQL database (the *_path arguments are table names):
cumul_sale_path is the cumulative sales data, should be the last 12 months
cumul_sale_query_path enumerates the things to predict
items is item data
stores is store data
"""
with open('db.json') as f:
conf = json.load(f)
print (str(conf))
conn_str = "host={} dbname={} user={} password={}".format(conf['host'], conf['database'], conf['user'], conf['passw'])
conn = psycopg2.connect(conn_str)
cumul_sales_query = pd.DataFrame()
c = 1
for chunk in pd.read_sql("select * from " + cumul_sales_query_path + " where date > CURRENT_DATE and date < CURRENT_DATE + INTERVAL '16 days' order by date asc", con=conn, chunksize=100000):
print ("Appending chunk " + str(c) + " to future promotions")
c += 1
cumul_sales_query = cumul_sales_query.append(chunk)
cumul_sales_query.date = pd.to_datetime(cumul_sales_query.date)
if query_start_date is None:
query_start_date = str(cumul_sales_query.iloc[0,1]).split(" ")[0]
else:
cumul_sales_query = cumul_sales_query[cumul_sales_query.date >= query_start_date]
cumul_sales_query = cumul_sales_query.set_index(
['store_nbr', 'item_nbr', 'date']
)
print("Future promotions loaded")
cumul_sales = pd.DataFrame()
c = 1
for chunk in pd.read_sql("select * from " + cumul_sales_path + " where date > CURRENT_DATE - INTERVAL '6 months' order by date asc", con=conn, chunksize=100000):
cumul_sales = cumul_sales.append(chunk)
print ("Appending chunk " + str(c) + " to cumulative sales")
c += 1
cumul_sales.loc[:, 'unit_sales'] = cumul_sales.unit_sales.apply(lambda u: np.log1p(float(u)) if float(u) > 0 else 0)
cumul_sales.date = pd.to_datetime(cumul_sales.date)
print ("Cumulative sales loaded")
items = pd.DataFrame()
c = 1
for chunk in pd.read_sql("select * from " + items_path, con=conn, chunksize=5000):
print ("Appending chunk " + str(c) + " to items")
c += 1
items = items.append(chunk)
print ("Items loaded")
stores = pd.DataFrame()
c = 1
for chunk in pd.read_sql("select * from " + stores_path, con=conn, chunksize=5000):
print ("Appending chunk " + str(c) + " to stores")
c += 1
stores = stores.append(chunk)
items = items.set_index("item_nbr")
stores = stores.set_index("store_nbr")
print ("Stores loaded")
return cumul_sales, cumul_sales_query, query_start_date, items, stores
def generate_promo_variables_train_and_query(cumul_sales, cumul_sales_query):
"""
Generate a column for each of the next 16 days with 1 if there is a
store-item pair for which a promotion exist on that day, and 0 otherwise.
"""
promo_variables_train = cumul_sales.set_index(
["store_nbr", "item_nbr", "date"])[["onpromotion"]].unstack(
level=-1).fillna(False)
promo_variables_train.columns = promo_variables_train.columns.get_level_values(1)
promo_variables_test = cumul_sales_query[["onpromotion"]].unstack(level=-1).fillna(False)
promo_variables_test.columns = promo_variables_test.columns.get_level_values(1)
promo_variables_test = promo_variables_test.reindex(promo_variables_train.index).fillna(False)
promo_variables = pd.concat([promo_variables_train, promo_variables_test], axis=1)
del promo_variables_train, promo_variables_test
return promo_variables
def generate_item_and_store_variables(cumul_sales, items, stores):
encoder = LabelEncoder()
items_reindex = items.reindex(cumul_sales.index.get_level_values(1))
item_family = encoder.fit_transform(items_reindex['family'].values)
item_class = encoder.fit_transform(items_reindex['class'].values)
item_perish = items_reindex['perishable'].values
stores_reindex = stores.reindex(cumul_sales.index.get_level_values(0))
store_nbr = cumul_sales.reset_index().store_nbr.values - 1
store_cluster = stores_reindex['cluster'].values - 1
store_type = encoder.fit_transform(stores_reindex['type'].values)
item_group_mean = cumul_sales.groupby('item_nbr').mean()
store_group_mean = cumul_sales.groupby('store_nbr').mean()
cat_features = np.stack([item_family, item_class, item_perish, store_nbr, store_cluster, store_type], axis=1)
return cat_features, item_group_mean, store_group_mean
def generate_unit_sales_columns(cumul_sales):
"""
Rotate the dataset so it's not normalized any more, and is more pivot-table
styled, with a column for each of the days.
"""
cumul_sales = cumul_sales.set_index(
["store_nbr", "item_nbr", "date"])[["unit_sales"]].unstack(
level=-1).fillna(0)
cumul_sales.columns = cumul_sales.columns.get_level_values(1)
return cumul_sales
def get_timespan(dataset, dt, minus, periods, freq='D'):
return dataset[
pd.date_range(dt - timedelta(days=minus), periods=periods, freq=freq)
]
def prepare_dataset(cumul_sales, promos, start_date, is_train=True):
"""
Takes two dataframes and fuses them together to form a single features
matrix.
cumul_sales : Used to generate mean sales for the last three days, seven
days etc.
promos : Used to generate features to say is a promotion for a single
store/item pair available on a given day
"""
X = pd.DataFrame({
"day_1_recent": get_timespan(cumul_sales, start_date, 1, 1).values.ravel(),
"mean_3_recent": get_timespan(cumul_sales, start_date, 3, 3).mean(axis=1).values,
"mean_7_recent": get_timespan(cumul_sales, start_date, 7, 7).mean(axis=1).values,
"mean_14_recent": get_timespan(cumul_sales, start_date, 14, 14).mean(axis=1).values,
"mean_30_recent": get_timespan(cumul_sales, start_date, 30, 30).mean(axis=1).values,
"mean_60_recent": get_timespan(cumul_sales, start_date, 60, 60).mean(axis=1).values,
"promo_14_recent": get_timespan(promos, start_date, 14, 14).sum(axis=1).values,
"promo_60_recnet": get_timespan(promos, start_date, 60, 60).sum(axis=1).values,
})
# Autoregressive features - do daily flux over a week
for i in range(7):
X['mean_4_dow{}_recent'.format(i)] = get_timespan(cumul_sales, start_date, 28 - i, 4, freq='7D').mean(axis=1).values
# Promotions on/off for the next 16 days
for i in range(16):
X["promo_{}".format(i)] = promos[
start_date + timedelta(days=i)].values.astype(np.uint8)
if is_train:
y = cumul_sales[
pd.date_range(start_date, periods=16)
].values
return X, y
return X
def create_machine_learning_matrices(cumul_sales, items, stores, promos_train_and_query, start_date, current_date, validate_start_date):
"""
A dataset is trend prices over the last time-period.
There are three training sets:
train - for training the model
validate - a subset of the training data for validating the model, so we
know when to stop Training
query - the bit to actually predict.
While the feature extraction is for the last 2 weeks, there's nothing to stop
us repeating this process for other time periods, to accumulate more data.
The features are independent of the *actual* time, we're just predicting
future prices given the previous prices.
    You'll note there is no cumul_sales_query parameter; this is because the
necessary data is in the promos_train_and_query field, which lists, for
every date, store and item whether the item is on promotion or not.
"""
print("Preparing dataset...")
X_l, y_l = [], []
for i in range(TrainingTimePeriodCount):
delta = timedelta(days=7 * i)
X_tmp, y_tmp = prepare_dataset(
cumul_sales,
promos_train_and_query,
start_date + delta
)
X_l.append(X_tmp)
y_l.append(y_tmp)
X_train = pd.concat(X_l, axis=0)
y_train = np.concatenate(y_l, axis=0)
del X_l, y_l
X_validate, y_validate = prepare_dataset(cumul_sales, promos_train_and_query, validate_start_date)
X_query = prepare_dataset(cumul_sales, promos_train_and_query, current_date, is_train=False)
return X_train, y_train, X_validate, y_validate, X_query
def train_model(items, item_store_vars, X_train, y_train, X_validate, y_val, X_query, params=None, maxRounds=5000):
"""
    Train a model using LightGBM, specifically a
    gradient-boosted regression-tree.
Optimise the L2 norm of the MSE.
"""
if params is None:
params = {
'num_leaves': 31,
'objective': 'regression',
'min_data_in_leaf': 300,
'learning_rate': 0.1,
'feature_fraction': 0.8,
'bagging_fraction': 0.8,
'bagging_freq': 2,
'metric': 'l2',
'num_threads': 4
}
validate_pred = []
query_pred = []
cate_vars = []
for i in range(FutureDaysToCalculate):
print("=" * 50)
print("Future Day %d" % (i+1))
print("=" * 50)
dtrain = lgb.Dataset(
X_train, label=y_train[:, i],
categorical_feature=cate_vars,
weight=pd.concat([items["perishable"]] * TrainingTimePeriodCount) * 0.25 + 1
)
dval = lgb.Dataset(
X_validate, label=y_val[:, i], reference=dtrain,
weight=items["perishable"] * 0.25 + 1,
categorical_feature=cate_vars)
bst = lgb.train(
params, dtrain, num_boost_round=maxRounds,
valid_sets=[dtrain, dval], early_stopping_rounds=50, verbose_eval=50
)
print("\n".join(("%s: %.2f" % x) for x in sorted(
zip(X_train.columns, bst.feature_importance("gain")),
key=lambda x: x[1], reverse=True
)))
validate_pred.append(bst.predict(
X_validate, num_iteration=bst.best_iteration or maxRounds))
query_pred.append(bst.predict(
X_query, num_iteration=bst.best_iteration or maxRounds))
    validate_rmse = np.sqrt(mean_squared_error(np.expm1(y_val), np.expm1(np.array(validate_pred)).transpose()))
return query_pred, validate_rmse
def save_predictions(cumul_sales, cumul_sales_query, query_start_date, query_predictions, output_file):
"""
Save the predictions. The output matches the cumul_sales input, except it
does not have promotions included
"""
print("Saving predictions...")
    y_query = np.array(query_predictions).transpose()
df_preds = pd.DataFrame(
y_query, index=cumul_sales.index,
columns=pd.date_range(query_start_date, periods=16)
).stack().to_frame("unit_sales")
df_preds.to_csv(output_file)
def save_predictions_sql(cumul_sales, cumul_sales_query, query_start_date, query_predictions, output_file):
"""
Save the predictions. The output matches the cumul_sales input, except it
does not have promotions included
"""
print("Saving predictions...")
    y_query = np.array(query_predictions).transpose()
df_preds = pd.DataFrame(
y_query, index=cumul_sales.index,
columns=pd.date_range(query_start_date, periods=16)
).stack().to_frame("unit_sales")
df_preds.to_sql(output_file)
if __name__ == "__main__":
# TODO Get rid of the min_date requirement now that we have other info
(loc, min_date_str, cumul_sale_tbl, cumul_sale_future_tbl, items_tbl, stores_tbl, output_tbl) = sys.argv[1:]
(min_y, min_m, min_d) = min_date_str.split("-")
    min_date_time = datetime(int(min_y), int(min_m), int(min_d))
nowtime = datetime.now()
now = date(nowtime.year, nowtime.month, nowtime.day)
hist_data_start = now - timedelta(7 * WeeksOfHistoryForMinTrainigData) + timedelta(1)
hist_feature_start_date = now - timedelta(7 * TrainingTimePeriodCount + FutureDaysToCalculate)
validate_start_date = now - timedelta(7 * WeeksOfHistoryForFeatureOnValidate) + timedelta(1)
if loc == "csv":
cumul_sales, cumul_sales_query, query_start_date, items, stores = \
load_data_csv(cumul_sale_tbl, cumul_sale_future_tbl, items_tbl, stores_tbl, query_start_date=nowtime)
elif loc == "sql":
cumul_sales, cumul_sales_query, query_start_date, items, stores = \
load_data_sql(cumul_sale_tbl, cumul_sale_future_tbl, items_tbl, stores_tbl, query_start_date=nowtime)
else:
raise ValueError ("First argument must be format: csv or sql")
# TODO Why is this out of the loading functions?
cumul_sales = cumul_sales[cumul_sales.date.isin(
pd.date_range(hist_data_start, periods=7*WeeksOfHistoryForMinTrainigData)
)].copy()
cumul_sales.set_index(
['store_nbr', 'item_nbr', 'date']
)
print(str(cumul_sales.shape) + ", " + str(cumul_sales_query.shape))
promo_variables = generate_promo_variables_train_and_query(cumul_sales, cumul_sales_query)
cumul_sales = generate_unit_sales_columns(cumul_sales)
# Align the items info with our cumul sales info, so features can be extracted
items = items.reindex(cumul_sales.index.get_level_values(1))
# How far back to go to start generating trend features for demand
X_train, y_train, X_validate, y_validate, X_query = create_machine_learning_matrices(\
cumul_sales, items, stores, promo_variables, \
start_date=hist_feature_start_date, current_date=now, validate_start_date=validate_start_date)
item_store_vars, _, _ = generate_item_and_store_variables(cumul_sales, items, stores)
# Train a separate model for each of the next `FutureDaysToCalculate`
query_pred, validate_rmse = train_model(items, item_store_vars, X_train, y_train, X_validate, y_validate, X_query)
print ("Validation error is : " + str(validate_rmse))
save_predictions(cumul_sales, cumul_sales_query, query_start_date, query_pred, output_tbl)
|
python
|
__source__ = 'https://leetcode.com/problems/intersection-of-two-linked-lists/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/intersection-of-two-linked-lists.py
# Time: O(m + n)
# Space: O(1)
# LinkedList
#
# Description: Leetcode # 160. Intersection of Two Linked Lists
#
# Write a program to find the node at which the intersection of two singly linked lists begins.
#
# For example, the following two linked lists:
#
# A: a1 - a2
# \
# c1 - c2 - c3
# /
# B: b1 - b2 - b3
# begin to intersect at node c1.
#
#
# Notes:
#
# If the two linked lists have no intersection at all, return null.
# The linked lists must retain their original structure after the function returns.
# You may assume there are no cycles anywhere in the entire linked structure.
# Your code should preferably run in O(n) time and use only O(1) memory.
#
# Companies
# Amazon Microsoft Bloomberg Airbnb
# Related Topics
# Linked List
# Similar Questions
# Minimum Index Sum of Two Lists
#
import unittest
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
# @param two ListNodes
# @return the intersected ListNode
def getIntersectionNode(self, headA, headB):
curA, curB = headA, headB
tailA, tailB = None, None
        # idea: traversing A -> B and B -> A covers the same total length len(A)+len(B),
        # so the two pointers meet at the intersection (or both reach the end)
while curA and curB:
if curA == curB:
return curA
if curA.next:
curA = curA.next
elif tailA is None: #reach here when !curA.next
tailA = curA
curA = headB
else:
break
if curB.next:
curB = curB.next #reach here when !curB.next
elif tailB is None:
tailB = curB
curB = headA
else:
break
return None
# http://blog.csdn.net/lilong_dream/article/details/41683563
class SolutionOther:
# @param two ListNodes
# @return the intersected ListNode
def getIntersectionNode(self, headA, headB):
curA, curB = headA, headB
lenA, lenB = 0,0
# get the length of A, B
while curA is not None:
lenA += 1
curA = curA.next
while curB is not None:
lenB += 1
curB = curB.next
curA, curB = headA, headB
# move diff steps for longer list
if lenA > lenB:
for i in range(lenA - lenB):
curA = curA.next
elif lenB > lenA:
for i in range(lenB - lenA):
curB = curB.next
while curB != curA:
curB = curB.next
curA = curA.next
return curA
# test
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
test = SolutionOther()
headA = ListNode(1)
headB = ListNode(2)
h1 = ListNode(3) ; h2 = ListNode(5); h3 = ListNode(7) ; h4 = ListNode(9) ; h5 = ListNode(11) ;
h7 = ListNode(13) ; h8 = ListNode(15); h9 = ListNode(17) ; h10 = ListNode(19) ; h11 = ListNode(21) ;
headA.next = h1; h1.next = h2; h2.next = h3; h3.next = h4; h4.next = h5; h5.next = h7; h7.next = h8
h8.next = h9; h9.next = h10; h10.next = h11;
ans = test.getIntersectionNode(headA, headB)
print "None" if ans == None else ans.val
headA = ListNode(10)
headB = ListNode(20)
h1 = ListNode(11) ; h2 = ListNode(12); h3 = ListNode(30) ;h5 = ListNode(31) ;
h4 = ListNode(21) ;
headA.next = h1 ; h1.next = h2 ; h2.next = h3 ; h3.next = h5
headB.next = h4 ; h4.next = h3 ;
        print(Solution().getIntersectionNode(headA, headB))
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/intersection-of-two-linked-lists/solution/
/**
* Definition for singly-linked list.
* public class ListNode {
* int val;
* ListNode next;
* ListNode(int x) {
* val = x;
* next = null;
* }
* }
*/
1, Get the length of the two lists.
2, Align them to the same start point.
3, Move them together until finding the intersection point, or the end null
# 1ms 100%
class Solution {
public ListNode getIntersectionNode(ListNode headA, ListNode headB) {
int lenA = getLength(headA);
int lenB = getLength(headB);
while (lenA > lenB) {
headA = headA.next;
lenA--;
}
while (lenB > lenA) {
headB = headB.next;
lenB--;
}
while (headA != null) {
if (headA == headB) {
return headA;
} else {
headA = headA.next;
headB = headB.next;
}
}
return null;
}
private int getLength(ListNode head) {
int count = 0;
while (head != null) {
head = head.next;
count++;
}
return count;
}
}
Thought:
Maintain two pointers pA and pB initialized at the head of A and B, respectively.
Then let them both traverse through the lists, one node at a time.
When pA reaches the end of a list, then redirect it to the head of B (yes, B, that's right.);
similarly when pB reaches the end of a list, redirect it the head of A.
If at any point pA meets pB, then pA/pB is the intersection node.
# 2ms 38.83%
class Solution {
public ListNode getIntersectionNode(ListNode headA, ListNode headB) {
//boundary check
if(headA == null || headB == null) return null;
ListNode a = headA;
ListNode b = headB;
//if a & b have different len, then we will stop the loop after second iteration
while( a != b){
//for the end of first iteration, we just reset the pointer to the head of another linkedlist
a = a == null? headB : a.next;
b = b == null? headA : b.next;
}
return a;
}
}
'''
|
python
|
# Generated by Django 2.2.10 on 2020-08-23 11:08
from django.db import migrations, models
import django.db.models.deletion
import event.enums
class Migration(migrations.Migration):
dependencies = [("event", "0016_attachment_type")]
operations = [
migrations.CreateModel(
name="Schedule",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255)),
(
"type",
models.PositiveSmallIntegerField(
choices=[
(0, "GENERAL"),
(1, "CEREMONY"),
(2, "TALK"),
(3, "TEAM_BUILDING"),
(4, "MEAL"),
(5, "DEMO"),
(6, "EVENT_START"),
(7, "EVENT_END"),
],
default=event.enums.ScheduleType(0),
),
),
(
"description",
models.TextField(blank=True, max_length=1000, null=True),
),
("starts_at", models.DateTimeField()),
("ends_at", models.DateTimeField(blank=True, null=True)),
("created_at", models.DateTimeField(auto_now_add=True)),
("updated_at", models.DateTimeField(auto_now=True)),
(
"session",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
related_name="schedules",
to="event.Session",
),
),
],
)
]
|
python
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
Created on Mar 1, 2020
@author: Chengning Zhang
"""
import warnings
warnings.filterwarnings("ignore")
# imports needed by the cross-validation helper below
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.utils import check_X_y
def get_cv(cls,X,Y,M,n_splits=10,cv_type = "StratifiedKFold",verbose = True):
""" Cross validation to get CLL and accuracy and training time and precision and recall.
"""
if cv_type == "StratifiedKFold":
cv = StratifiedKFold(n_splits= n_splits, shuffle=True, random_state=42) # The folds are made by preserving the percentage of samples for each class.
else:
cv = KFold(n_splits=n_splits, shuffle=True, random_state=42)
model = cls()
X,Y = check_X_y(X,Y)
#binarizer = MultiLabelBinarizer() ## for using recall and precision score
#binarizer.fit(Y)
Accuracy = []
Precision = []
Recall = []
CLL = []
training_time = []
F1 = []
for folder, (train_index, val_index) in enumerate(cv.split(X, Y)): # X,Y are array, data is list
X_train,X_val = X[train_index],X[val_index]
y_train,y_val = Y[train_index],Y[val_index]
model.fit(X_train,y_train,M) # whether data is list or array does not matter, only thing matters is label has to be same.
training_time.append(model.training_time_)
Accuracy.append(accuracy_score(y_val, model.predict(X_val) ))
CLL.append(model.Conditional_log_likelihood_general(y_val,model.predict_proba(X_val), model.classes_ ) )
Precision.append(precision_score(y_val, model.predict(X_val), average='macro') )
Recall.append(recall_score(y_val, model.predict(X_val), average='macro') )
F1.append(f1_score(y_val, model.predict(X_val), average='macro') )
if verbose:
print("accuracy in %s fold is %s" % (folder+1,Accuracy[-1] ) )
print("CLL in %s fold is %s" % (folder+1,CLL[-1]))
print("precision in %s fold is %s" % (folder+1,Precision[-1]))
print("recall in %s fold is %s" % (folder+1,Recall[-1]))
print("f1 in %s fold is %s" % (folder+1,F1[-1]))
print("training time in %s fold is %s" % (folder+1,training_time[-1]))
print(10*'__')
return Accuracy, CLL, training_time,Precision,Recall,F1
|
python
|
#!/usr/bin/env python
#
# Public Domain 2014-2016 MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import time
import wiredtiger, wttest
from wtdataset import SimpleDataSet
# test_reconfig01.py
# Smoke-test the connection reconfiguration operations.
class test_reconfig01(wttest.WiredTigerTestCase):
def test_reconfig_shared_cache(self):
self.conn.reconfigure("shared_cache=(name=pool,size=300M)")
def test_reconfig_async(self):
# Async starts off. Reconfigure through all the various cases, each
# building from the previous state.
# Async is off, and remains off.
self.conn.reconfigure("async=(enabled=false)")
# Async is off, turn it on.
self.conn.reconfigure("async=(enabled=true)")
# Async is on, and remains on.
self.conn.reconfigure("async=(enabled=true)")
# Async is on, turn it off.
self.conn.reconfigure("async=(enabled=false)")
# Async is off, turn it on with ops_max and threads.
self.conn.reconfigure("async=(enabled=true,ops_max=512,threads=10)")
# Reconfigure and use same thread count. (no-op)
self.conn.reconfigure("async=(threads=10)")
# Reconfigure more threads.
self.conn.reconfigure("async=(threads=14)")
# Reconfigure fewer threads.
self.conn.reconfigure("async=(threads=8)")
# Reconfigure illegal ops_max (ignored).
self.conn.reconfigure("async=(ops_max=1024)")
# Turn async off.
self.conn.reconfigure("async=(enabled=false)")
# Async is off, turn it on. Should end up with the
# same ops_max of 512 and thread of 8.
self.conn.reconfigure("async=(enabled=true)")
def test_reconfig_eviction(self):
# Increase the max number of running threads (default 8).
self.conn.reconfigure("eviction=(threads_max=10)")
# Increase the min number of running threads (default 1).
self.conn.reconfigure("eviction=(threads_min=5)")
# Decrease the max number of running threads.
self.conn.reconfigure("eviction=(threads_max=7)")
# Decrease the min number of running threads.
self.conn.reconfigure("eviction=(threads_min=2)")
# Set min and max the same.
self.conn.reconfigure("eviction=(threads_min=6,threads_max=6)")
def test_reconfig_lsm_manager(self):
# We create and populate a tiny LSM so that we can start off with
# the LSM threads running and change the numbers of threads.
# Take all the defaults.
uri = "lsm:test_reconfig"
nrecs = 10
SimpleDataSet(self, uri, nrecs).populate()
# Sleep to make sure all threads are started.
time.sleep(2)
# Now that an LSM tree exists, reconfigure LSM manager threads.
# We start with the default, which is 4. Configure more threads.
self.conn.reconfigure("lsm_manager=(worker_thread_max=10)")
# Generate some work
nrecs = 20
SimpleDataSet(self, uri, nrecs).populate()
# Now reconfigure fewer threads.
self.conn.reconfigure("lsm_manager=(worker_thread_max=3)")
def test_reconfig_statistics(self):
self.conn.reconfigure("statistics=(all)")
self.conn.reconfigure("statistics=(fast)")
self.conn.reconfigure("statistics=(none)")
def test_reconfig_checkpoints(self):
self.conn.reconfigure("checkpoint=(wait=0)")
self.conn.reconfigure("checkpoint=(wait=5)")
self.conn.reconfigure("checkpoint=(log_size=0)")
self.conn.reconfigure("checkpoint=(log_size=1M)")
# Statistics logging: reconfigure the things we can reconfigure.
def test_reconfig_statistics_log_ok(self):
self.conn.reconfigure("statistics=[all],statistics_log=(wait=0)")
self.conn.reconfigure("statistics_log=(wait=0)")
self.conn.reconfigure("statistics_log=(wait=2,json=true)")
self.conn.reconfigure("statistics_log=(wait=0)")
self.conn.reconfigure("statistics_log=(wait=2,on_close=true)")
self.conn.reconfigure("statistics_log=(wait=0)")
self.conn.reconfigure("statistics_log=(wait=2,sources=[lsm:])")
self.conn.reconfigure("statistics_log=(wait=0)")
self.conn.reconfigure("statistics_log=(wait=2,timestamp=\"t%b %d\")")
self.conn.reconfigure("statistics_log=(wait=0)")
# Statistics logging: reconfigure the things we can't reconfigure.
def test_reconfig_statistics_log_fail(self):
msg = '/unknown configuration key/'
self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
lambda: self.conn.reconfigure("log=(path=foo)"), msg)
def test_file_manager(self):
self.conn.reconfigure("file_manager=(close_scan_interval=3)")
self.conn.reconfigure("file_manager=(close_idle_time=4)")
self.conn.reconfigure(
"file_manager=(close_idle_time=4,close_scan_interval=100)")
if __name__ == '__main__':
wttest.run()
|
python
|
from time import sleep
from progress.bar import Bar
with Bar('Processing...') as bar:
for i in range(100):
sleep(0.02)
bar.next()
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (
absolute_import, division, print_function, with_statement,
unicode_literals
)
class DataObj:
    def __init__(self, text, *args, **kwargs):
        self.docs = None
        self.span = None
        # keep the preprocessed text (and avoid shadowing the built-in `str`)
        self.text = self.preprocess(text)
    def preprocess(self, text):
        return text
class IdentifierBase:
def __init__(self, model, *args, **kwargs):
self.model = model
def predict_proba(self):
raise NotImplementedError
def decision_fn(self):
raise NotImplementedError
def __call__(self):
raise NotImplementedError
class RuleBase:
def __init__(self, *args, **kwargs):
raise NotImplementedError
def apply(self, x):
raise NotImplementedError
def __call__(self, x):
return self.apply(x)
class Sequential:
def __init__(self, rule_list):
assert len(rule_list) > 0
assert issubclass(type(rule_list[0]), RuleBase)
self.rules = rule_list
super(Sequential, self).__init__()
    def __call__(self, x):
        for rule in self.rules:
            x = rule(x)
        return x
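# A minimal usage sketch (assumed, not part of the original module): a concrete
# rule subclasses RuleBase, overrides __init__ and apply(), and rules are then
# chained through Sequential.
class LowercaseRule(RuleBase):
    def __init__(self):
        # override the base __init__, which only raises NotImplementedError
        pass
    def apply(self, x):
        return x.lower()

pipeline = Sequential([LowercaseRule()])
print(pipeline("Hello World"))  # -> "hello world"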
|
python
|
import pygame
import time
class Clock:
def __init__(self, profile: int, turbo: bool):
self.cycle = 0
self.frame = 0
self.pyclock = pygame.time.Clock()
self.start = time.time()
self.profile = profile
self.turbo = turbo
def tick(self) -> bool:
self.cycle += 1
# Do a whole frame's worth of sleeping at the start of each frame
if self.cycle % 17556 == 20:
# Sleep if we have time left over
if not self.turbo:
self.pyclock.tick(60)
# Exit if we've hit the frame limit
if self.profile != 0 and self.frame > self.profile:
duration = time.time() - self.start
print(
"Emulated %d frames in %.2fs (%.2ffps)\n"
% (self.profile, duration, self.profile / duration)
)
return False
self.frame += 1
return True
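# A minimal usage sketch (assumed, not part of the original module): the
# surrounding emulator loop keeps stepping while tick() returns True; with
# profile=60 the clock stops itself after roughly 60 frames and prints the
# achieved frame rate.
if __name__ == "__main__":
    pygame.init()
    clock = Clock(profile=60, turbo=False)
    while clock.tick():
        pass  # one emulated CPU cycle would go here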
|
python
|
from cascade_at.core.log import get_loggers
LOG = get_loggers(__name__)
class InputDataError(Exception):
"""These are errors that result from faults in the input data."""
class SettingsError(InputDataError):
def __init__(self, message, form_errors=None, form_data=None):
super().__init__(message)
self.form_errors = form_errors if form_errors else list()
self.form_data = form_data
class CascadeError(Exception):
"""Cascade base for exceptions."""
class DismodFileError(TypeError):
"""These are all Pandas data frames that don't match what Dismod expects."""
|
python
|
import logging
from functools import reduce  # used by get_plugin_count()

from pipelines.plugin.base_plugin import BasePlugin
from pipelines.plugin.exceptions import PluginError
from pipelines.plugin.utils import class_name
log = logging.getLogger('pipelines')
class PluginManager():
def __init__(self):
self.plugins = {}
def get_plugin(self, name):
return self.plugins.get(name)
def trigger(self, event_name, *args):
callbacks = self.plugins.get(event_name, [])
results = []
for cb in callbacks:
try:
ret = cb(*args)
results.append(ret)
except Exception:
log.error('Unknown error running callback {} hook {}, aborting.'.format(
cb.__name__, event_name)
)
raise
return results
def get_plugin_count(self, hook_name=None):
if hook_name is None:
return reduce(lambda counter, p: counter + len(p), self.plugins.values(), 0)
if hook_name in self.plugins:
return len(self.plugins[hook_name])
return 0
def register_plugin(self, plugin_class, conf_dict):
if not issubclass(plugin_class, BasePlugin):
raise PluginError('Trying to register plugin that is not extending BasePlugin: {}'.format(
class_name(plugin_class))
)
plugin = plugin_class.from_dict(conf_dict, self)
for k in ['hook_prefix', 'hooks']:
if not hasattr(plugin, k):
raise PluginError('Plugin is missing "{}" attribute.'.format(k))
prefix = '{}.'.format(plugin.hook_prefix) if plugin.hook_prefix else ''
for hook in plugin.hooks:
if not hasattr(plugin, hook):
raise PluginError('Plugin {} is missing {}-function'.format(
class_name(plugin), hook
))
hook_key = '{}{}'.format(prefix, hook)
if hook_key not in self.plugins:
self.plugins[hook_key] = []
self.plugins[hook_key].append(getattr(plugin, hook))
if __name__ == '__main__':
from pipelineplugins.dummy_executor import DummyExecutor
from pipelineplugins.stdout_logger import StdoutLogger
    m = PluginManager()
    # register_plugin() expects a config dict; an empty one is assumed here
    m.register_plugin(DummyExecutor, {})
    m.register_plugin(StdoutLogger, {})
    print(m.get_plugin_count())
    print(m.get_plugin_count('on_task_start'))
|
python
|
import json
import os
from typing import List, Optional
from dkron_python.api import Dkron, DkronException
import typer
app = typer.Typer()
get = typer.Typer(help="Fetch information about a resource")
apply = typer.Typer(help="Apply a resource")
delete = typer.Typer(help="Delete a resource")
app.add_typer(get, name="get")
app.add_typer(apply, name="apply")
app.add_typer(delete, name="delete")
_DKRON_ENV_NAME_HOSTS = "DKRON_HOSTS"
api = None
HostsOption = typer.Option(
None,
"-h",
"--hosts",
help="Dkron instance URLs, separated with commas",
envvar=_DKRON_ENV_NAME_HOSTS,
)
InsecureOption = typer.Option(
False,
"-k",
"--insecure",
help="Allow insecure connections when using SSL",
is_flag=True,
)
JobName = typer.Argument(..., help="Name of the job")
@app.callback()
def cli(hosts: Optional[str] = HostsOption, insecure: bool = InsecureOption):
"""
Command line interface client for Dkron
"""
global api
if not hosts:
print(
f"You must provide {_DKRON_ENV_NAME_HOSTS} environment variable OR --hosts option."
)
print("Check docs: https://github.com/centreon/dkron-python#cli-usage")
exit(1)
api = Dkron(hosts.split(","), verify=not insecure)
@get.command()
def status():
"""
Get system status
"""
try:
results = api.get_status()
except DkronException as ex:
print("Error while fetching: %s" % str(ex))
exit(1)
print(json.dumps(results))
@get.command()
def leader():
"""
Get system leader
"""
try:
results = api.get_leader()
except DkronException as ex:
print("Error while fetching: %s" % str(ex))
exit(1)
print(json.dumps(results))
@get.command()
def members():
"""
Get system members
"""
try:
results = api.get_members()
except DkronException as ex:
print("Error while fetching: %s" % str(ex))
exit(1)
print(json.dumps(results))
@get.command()
def jobs():
"""
Fetch all jobs
"""
try:
results = api.get_jobs()
except DkronException as ex:
print("Error while fetching: %s" % str(ex))
exit(1)
print(json.dumps(results))
@get.command(name="job")
def get_job(job_name: str = JobName):
"""
Fetch specific job
"""
try:
results = api.get_job(job_name)
except DkronException as ex:
print("Error while fetching: %s" % str(ex))
exit(1)
print(json.dumps(results))
@get.command()
def executions(job_name: str = JobName):
"""
Get system executions
"""
try:
results = api.get_executions(job_name)
except DkronException as ex:
print("Error while fetching: %s" % str(ex))
exit(1)
print(json.dumps(results))
@apply.command(name="job")
def apply_job(
json_file_path: List[str] = typer.Argument(..., help="Path to the json file")
):
"""
Create or update job(s)
"""
for file_path in json_file_path:
with open(file_path, "r") as json_file:
data = json.load(json_file)
try:
api.apply_job(data)
except DkronException as ex:
print("Error while applying %s: %s" % (file_path, str(ex)))
exit(1)
print("Processed: %s" % file_path)
@app.command()
def run(job_name: str = JobName):
"""
Execute job on demand
"""
try:
api.run_job(job_name)
except DkronException as ex:
print("Error while executing: %s" % str(ex))
exit(1)
@app.command(name="export")
def export(backup_dir: str = typer.Argument(..., help="Path to the backup directory")):
"""
Exports all jobs to json files
"""
try:
jobs = api.get_jobs()
for job in jobs:
filename = os.path.join(backup_dir, job["name"] + ".json")
            with open(filename, mode="w") as json_file:
                json.dump(job, json_file, indent=2)
except DkronException as ex:
print("Error while fetching: %s" % str(ex))
exit(1)
@delete.command(name="job")
def delete_job(job_name):
"""
Delete job
"""
try:
api.delete_job(job_name)
except DkronException as ex:
print("Error while deleteing: %s" % str(ex))
exit(1)
if __name__ == "__main__":
app()
|
python
|
class ServiceProvider():
wsgi = True
def __init__(self):
self.app = None
def boot(self):
pass
def register(self):
self.app.bind('Request', object)
def load_app(self, app):
self.app = app
return self
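# A minimal usage sketch (assumed, not part of the original module): the
# container below is a hypothetical stand-in exposing the bind() method that
# register() relies on.
class Container:
    def __init__(self):
        self.bindings = {}
    def bind(self, key, value):
        self.bindings[key] = value

provider = ServiceProvider().load_app(Container())
provider.register()   # binds 'Request' into the container
provider.boot()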
|
python
|
from dynabuffers.api.ISerializable import ISerializable, ByteBuffer
from dynabuffers.ast.ClassType import ClassType
from dynabuffers.ast.EnumType import EnumType
from dynabuffers.ast.UnionType import UnionType
from dynabuffers.ast.annotation.GreaterEquals import GreaterEquals
from dynabuffers.ast.annotation.GreaterThan import GreaterThan
from dynabuffers.ast.annotation.LowerEquals import LowerEquals
from dynabuffers.ast.annotation.LowerThan import LowerThan
from dynabuffers.ast.annotation.MaxLength import MaxLength
from dynabuffers.ast.annotation.MinLength import MinLength
from dynabuffers.ast.annotation.NotBlank import NotBlank
class DynabuffersEngine(object):
def __init__(self, tree: [ISerializable]):
self.tree = tree
self.listeners = []
def addListener(self, listener):
self.listeners.append(listener)
def getPrimaryClass(self) -> ClassType:
classes = list(filter(lambda x: isinstance(x, ClassType), self.tree))
for clazz in classes:
if clazz.options.options.isPrimary():
return clazz
return classes[0]
def serialize(self, map: dict) -> bytearray:
clazz = self.getPrimaryClass()
buffer = ByteBuffer(clazz.size(map, Registry(self.tree, self.listeners)))
clazz.serialize(map, buffer, Registry(self.tree, self.listeners))
return buffer.toBytes()
def deserialize(self, bytes: bytearray) -> dict:
clazz = self.getPrimaryClass()
return clazz.deserialize(ByteBuffer(len(bytes), bytes), Registry(self.tree, self.listeners))
class Registry(object):
def __init__(self, tree: [ISerializable], listeners):
self.tree = tree
self.listeners = listeners
def resolveAnnotation(self, name, args):
if name == "GreaterThan":
return GreaterThan(args)
if name == "GreaterEquals":
return GreaterEquals(args)
if name == "LowerThan":
return LowerThan(args)
if name == "LowerEquals":
return LowerEquals(args)
if name == "MaxLength":
return MaxLength(args)
if name == "MinLength":
return MinLength(args)
if name == "NotBlank":
return NotBlank(args)
raise ValueError("unknown annotation " + str(name))
def resolve(self, name: str):
for element in self.tree:
if isinstance(element, ClassType) and element.options.name == name:
return element
if isinstance(element, EnumType) and element.options.name == name:
return element
if isinstance(element, UnionType) and element.options.name == name:
return element
def addNotification(self, notification: str):
for listener in self.listeners:
listener(notification)
|
python
|
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\sims\suntan\suntan_ops.py
# Compiled at: 2019-05-09 01:16:48
# Size of source mod 2**32: 1473 bytes
from protocolbuffers import DistributorOps_pb2
from careers.prep_tasks.prep_tasks_tracker import protocol_constants
from distributor.ops import Op
from distributor.rollback import ProtocolBufferRollback
class SetTanLevel(Op):
def __init__(self, suntan_data):
super().__init__()
self._suntan_data = suntan_data
def write(self, msg):
tan_level = None
outfit_part_data_list = None
force_update = None
if self._suntan_data:
tan_level = self._suntan_data.tan_level
outfit_part_data_list = self._suntan_data.outfit_part_data_list
force_update = self._suntan_data.force_update
op = DistributorOps_pb2.SetTanLevel()
if tan_level is not None:
op.tan_level = tan_level
if outfit_part_data_list is not None:
for part_id, body_type in outfit_part_data_list:
with ProtocolBufferRollback(op.outfit_part_data_list) as (entry):
entry.id = part_id
entry.body_type = body_type
if force_update is not None:
op.force_update = force_update
self.serialize_op(msg, op, protocol_constants.SET_TAN_LEVEL)
|
python
|
from avatar2 import QemuTarget
from avatar2 import MemoryRange
from avatar2 import Avatar
from avatar2.archs import ARM
from avatar2.targets import Target, TargetStates
from avatar2.message import *
import tempfile
import os
import time
import intervaltree
import logging
from nose.tools import *
QEMU_EXECUTABLE = os.environ.get("QEMU_EXECUTABLE",
"targets/build/qemu/arm-softmmu/qemu-system-arm")
GDB_EXECUTABLE = os.environ.get("GDB_EXECUTABLE", "gdb-multiarch")
qemu = None
fake_target = None
class FakeTarget(object):
name = 'fake'
def __init__(self):
pass
def read_memory(*args, **kwargs):
return 0xdeadbeef
def write_memory(self, addr, size, val, *args, **kwargs):
self.fake_write_addr = addr
self.fake_write_size = size
self.fake_write_val = val
return True
def setup():
global qemu
global avatar
global fake_target
avatar = Avatar(output_directory='/tmp/testava')
qemu = QemuTarget(avatar, name='qemu_test',
firmware="./tests/binaries/qemu_arm_test",
gdb_executable=GDB_EXECUTABLE,
executable=QEMU_EXECUTABLE)
fake_target = FakeTarget()
dev1 = avatar.add_memory_range(0x101f2000, 0x1000, 'dev1', forwarded=True,
forwarded_to=fake_target,
qemu_name='avatar-rmemory')
mem1 = avatar.add_memory_range(0x8000000, 0x1000, 'mem1',
file='%s/tests/binaries/qemu_arm_test' %
os.getcwd())
def teardown():
global qemu
qemu.shutdown()
@with_setup(setup, teardown)
def test_initialization():
global qemu
qemu.init()
qemu.wait()
assert_equal(qemu.state, TargetStates.STOPPED)
@with_setup(setup, teardown)
def test_step():
global qemu
qemu.init()
qemu.wait()
qemu.regs.pc=0x08000000
qemu.step()
assert_equal(qemu.regs.pc, 0x08000004)
@with_setup(setup, teardown)
def test_memory_read():
global qemu
qemu.init()
qemu.wait()
mem = qemu.read_memory(0x08000000,4)
assert_equal(mem, 0xe3a0101e)
@with_setup(setup, teardown)
def test_memory_write():
global qemu
qemu.init()
qemu.wait()
qemu.write_memory(0x08000000,4, 0x41414141)
mem = qemu.read_memory(0x08000000,4)
assert_equal(mem, 0x41414141)
@with_setup(setup, teardown)
def test_remote_memory_write():
global qemu
global avatar
qemu.init()
qemu.wait()
remote_memory_write = qemu.write_memory(0x101f2000,4,0x41414141)
assert_equal(remote_memory_write, True)
assert_equal(fake_target.fake_write_addr, 0x101f2000)
assert_equal(fake_target.fake_write_size, 4)
assert_equal(fake_target.fake_write_val, 0x41414141)
@with_setup(setup, teardown)
def test_remote_memory_read():
global qemu
global avatar
qemu.init()
qemu.wait()
assert_equal(qemu.state, TargetStates.STOPPED)
remote_memory_read = qemu.read_memory(0x101f2000,4)
assert_equal(remote_memory_read, 0xdeadbeef)
if __name__ == '__main__':
setup()
#test_remote_memory()
    test_initialization()
teardown()
|
python
|
"""
This module contains our unit and functional tests for the "think_aloud" application.
"""
# Create your tests here.
|
python
|
"""Database objects."""
import sqlalchemy
from collections.abc import Mapping
from sqlalchemy.engine.url import make_url
from sqlalchemy.event import listen
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm.session import sessionmaker
from .exceptions import DatabaseAlreadyExists, DatabaseNotFound
from .sessions import Session
from .util import import_string
DATABASE_ALIASES = {
'postgresql': 'sqlalchemy_multidb.databases.PostgresDatabase',
}
class DatabaseManager(object):
"""
Provides a container of databases.
:param scope_func: optional function which defines the current scope.
"""
def __init__(self, scope_func=None):
self._databases = {}
self._scope_func = scope_func
self.Model = declarative_base()
@property
def databases(self):
"""
Gets the databases.
:return: The list with all databases.
"""
return self._databases.values()
def config_from_object(self, config):
"""
Loads the databases from the config.
:param config: The object containing the database config.
"""
for key in ('SQLALCHEMY_DATABASES', 'DATABASES', 'databases'):
databases = self._get_databases_from_object(key, config)
if databases is not None:
for name, url in databases.items():
self.add_database(name, url)
break
def close(self):
"""
Closes all databases.
"""
for database in self._databases.values():
database.close()
self._databases.clear()
def add_database(self, name, url):
"""
Adds a new database from the url.
:param str name: The name of the database.
:param str url: The connection string.
"""
name = name or 'default'
if not isinstance(name, str):
raise TypeError('Parameter name should be a str.')
if not isinstance(url, str):
raise TypeError('Parameter url should be a str.')
if name in self._databases:
raise DatabaseAlreadyExists(name)
self._databases[name] = self._create_database(name, url)
def get_database(self, name=None):
"""
Gets a database by the name.
:param str name: The database name.
:return Database: The database object.
"""
name = name or 'default'
database = self._databases.get(name)
if database:
return database
raise DatabaseNotFound(name)
def remove_database(self, name=None):
"""
Removes a database by the name.
:param name: The database name.
"""
name = name or 'default'
database = self._databases.pop(name, None)
if not database:
raise DatabaseNotFound(name)
database.close()
def session(self, database_name=None):
"""
Gets a new session for the specified database.
:param str database_name: The database name.
:return: The new session.
"""
database_name = database_name or 'default'
database = self._databases.get(database_name)
if database:
return database.session()
raise DatabaseNotFound(database_name)
def scoped_session(self, database_name=None):
"""
Gets a new scoped session for the specified database.
:param str database_name: The database name.
:return: The new scoped session.
"""
database_name = database_name or 'default'
database = self._databases.get(database_name)
if database:
return database.scoped_session()
raise DatabaseNotFound(database_name)
def _create_database(self, name, url):
"""
Creates a new database from the url.
:param str name: The database name.
:param str url: The connection string.
:return Database: A new instance of `Database`.
"""
uri = make_url(url)
class_name = DATABASE_ALIASES.get(uri.drivername)
if class_name is None:
database_cls = Database
else:
database_cls = import_string(class_name)
return database_cls(name, url, scope_func=self._scope_func)
def _get_databases_from_object(self, key, config):
"""
Get the databases from the give config object.
:param str key: The name of the attribute in the config object.
:param config: The config object.
:return dict: The map of databases.
"""
if isinstance(config, Mapping):
return config.get(key)
return getattr(config, key, None)
class Database(object):
"""
Provides methods to get sessions for a specific engine.
"""
def __init__(self, name, url, scope_func):
self._name = name
self._url, engine_params = self._parse_url(url)
self._engine = sqlalchemy.create_engine(self._url, **engine_params)
self._session_maker = sessionmaker(self.engine, class_=Session, expire_on_commit=False)
self._scoped_session_maker = scoped_session(self._session_maker, scopefunc=scope_func)
self.Model = declarative_base()
@property
def name(self):
"""
Gets the database name.
"""
return self._name
@property
def engine(self):
"""
Gets the database engine.
"""
return self._engine
@property
def session_maker(self):
"""
Gets the session maker.
"""
return self._session_maker
@property
def scoped_session_maker(self):
"""
Gets the scoped session maker.
"""
return self._scoped_session_maker
def close(self):
"""
Closes the engine and all its sessions opened.
"""
self._session_maker.close_all()
self._session_maker = None
self._scoped_session_maker = None
self._engine.dispose()
self._engine = None
def session(self):
"""
Gets a new session for the specified database.
"""
return self._session_maker()
def scoped_session(self):
"""
Gets a scoped session for the specified database.
"""
return self._scoped_session_maker()
@staticmethod
def _parse_url(url):
"""
Gets the parameters from the url.
"""
params_keys = {
'case_sensitive': bool,
'convert_unicode': bool,
'echo': bool,
'echo_pool': bool,
'encoding': str,
'isolation_level': str,
'module': str,
'pool_reset_on_return': str,
'strategy': str,
'paramstyle': str,
'logging_name': str,
'pool_logging_name': str,
'max_overflow': int,
'pool_size': int,
'pool_recycle': int,
'pool_timeout': int,
'label_length': int,
}
uri = make_url(url)
kwargs = {'connect_args': {}}
for key, value in uri.query.items():
param_type = params_keys.get(key)
if param_type:
kwargs[key] = param_type(value)
else:
kwargs['connect_args'][key] = value
uri.query.clear()
return str(uri), kwargs
class PostgresDatabase(Database):
"""
PostgreSQL implementation.
Provides support to search_path from the url.
"""
def __init__(self, name, url, scope_func=None):
uri = make_url(url)
self.__search_path = uri.query.pop('search_path', None)
super(PostgresDatabase, self).__init__(name, str(uri), scope_func)
if self.__search_path:
listen(self.engine, 'checkout', self.__on_checkout)
def __on_checkout(self, dbapi_connection, connection_record, connection_proxy):
"""Called when a new connection is open."""
cursor = dbapi_connection.cursor()
cursor.execute('SET search_path TO ' + self.__search_path)
cursor.close()
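# A minimal usage sketch (assumed, not part of the original module): register a
# database by URL, open a session and tear everything down again. The in-memory
# sqlite URL is only an example value.
if __name__ == '__main__':
    manager = DatabaseManager()
    manager.add_database('default', 'sqlite://')
    session = manager.session()   # equivalent to manager.session('default')
    session.close()
    manager.close()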
|
python
|
def find_next_square(sq):
    # Assumed behaviour (the original snippet is only a stub): if sq is a
    # perfect square, return the next perfect square; otherwise return -1.
    root = sq ** 0.5
    return int((root + 1) ** 2) if root.is_integer() else -1
|
python
|
#!/usr/bin/python3.7
########################################################################################
# pvt_collector/tedlar.py - Represents an tedlar layer within a PVT panel.
#
# Author: Ben Winchester
# Copyright: Ben Winchester, 2021
########################################################################################
"""
The tedlar module for the PV-T model.
This module represents a tedlar layer within a PV-T panel.
"""
from .__utils__ import MicroLayer
__all__ = ("Tedlar",)
class Tedlar(MicroLayer):
"""
Represents an Tedlar layer within the panel.
"""
|
python
|
import time
import random
import requests
from const import *
from util.logger import logger
class Session():
def __init__(self):
        # requests.adapters.DEFAULT_RETRIES = 5  # raise the retry count so stale connections are less of a problem
self.has_login = False
self.session = requests.Session()
self.session.headers = {
'User-Agent': make_ua(),
'Accept-Charset': 'utf-8;q=0.7,*;q=0.7',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Connection': 'keep-alive'
}
self.timeout = 20
def request(self, method, url, data=None,delay=0):
for i in range(RETRY_CNT):
try:
if delay:time.sleep(delay)
return self.session.request(
method,
url,
allow_redirects=False,
data=data,
timeout=self.timeout)
except (requests.HTTPError, requests.Timeout,requests.ConnectionError) as e:
logger.warning('Warning: {0}, retrying({1}) ...'.format(str(e), i))
pass
logger.error("can't get res: "+url)
return None
def make_ua():
    rrange = lambda a, b, c=1: c == 1 and random.randrange(a, b) or int(1.0 * random.randrange(a * c, b * c) / c)
    ua = 'Mozilla/%d.0 (Windows NT %d.%d) AppleWebKit/%d (KHTML, like Gecko) Chrome/%d.%d Safari/%d' % (
        rrange(4, 7, 10), rrange(5, 7), rrange(0, 3), rrange(535, 538, 10),
        rrange(21, 27, 10), rrange(0, 9999, 10), rrange(535, 538, 10)
    )
    return ua
|
python
|
# Requests may need to be installed for this script to work
import requests
import re
import config
# Here we pass our client id and secret token
auth = requests.auth.HTTPBasicAuth(config.client_id, config.secret_token)
# Here we pass our login method (password), username, and password
data = {'grant_type': 'password',
'username': config.username,
'password': config.password}
# Setup our header info, which gives reddit a brief description of our app
headers = {'User-Agent': config.botname + 'Bot/0.0.1'}
# Send our request for an OAuth token
res = requests.post('https://www.reddit.com/api/v1/access_token',
auth=auth, data=data, headers=headers)
# Convert response to JSON and pull access_token value
TOKEN = res.json()['access_token']
# Add authorization to our headers dictionary
headers = {**headers, **{'Authorization': f"bearer {TOKEN}"}}
# While the token is valid (~2 hours) we just add headers=headers to our requests
requests.get('https://oauth.reddit.com/api/v1/me', headers=headers)
# Pull results from desired subreddits
for subreddit in config.subreddits:
res = requests.get("https://oauth.reddit.com/r/" + subreddit + "/hot",
headers=headers,
params={'limit': '5'})
for post in res.json()['data']['children']:
print(post['data']['subreddit'])
print(post['data']['title'])
print(post['data']['permalink'])
# Working on a regex to filter out relevant content...
# ------------------------------------
#for post in res.json()['data']['children']:
# print(post['data']['subreddit'])
# print(post['data']['title'])
# print(post['data']['selftext'])
# print(post['data']['permalink'])
# print(post['data']['upvote_ratio'])
# print(post['data']['ups'])
# print(post['data']['downs'])
# print(post['data']['score'])
|
python
|
"""
# =============================================================================
# Simulating the double pendulum using Runge–Kutta method (RK4)
# =============================================================================
Created on Fri Jul 17 2020
@author: Ahmed Alkharusi
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
# =============================================================================
# globals
# =============================================================================
m1 = 1 #mass of the 1st pendulum
m2 = 1 #mass of the 2nd pendulum
g = 10 #gravity
r1 = 1 #length of the 1st pendulum
r2 = 1 #length of the 2nd pendulum
x = y = []
# =============================================================================
# Functions defn.
# =============================================================================
def angular_acc1(a1_arr,a2_arr):
"""Calculate the angular acceleration for the 1st pendulum
Inputs-> a1_arr: np.array([Initial angle, Initial angular velocity]);
a2_arr: np.array([Initial angle, Initial angular velocity]);"""
num = -g *(2*m1+m2)*np.sin(a1_arr[0]) - m2*g*np.sin(a1_arr[0]-2*a2_arr[0])- 2* m2*np.sin(a1_arr[0]-a2_arr[0]) * (r2*pow(a2_arr[1],2)+r1*pow(a1_arr[1],2)*np.cos(a1_arr[0]-a2_arr[0]))
den = r1*(2*m1+m2-m2 * np.cos(2*a1_arr[0]-2*a2_arr[0]))
return num/den
def angular_acc2(a1_arr,a2_arr):
"""Calculate the angular acceleration for the 2nd pendulum
Inputs-> a1_arr: np.array([Initial angle, Initial angular velocity]);
a2_arr: np.array([Initial angle, Initial angular velocity]);"""
temp = (2*np.sin(a1_arr[0]-a2_arr[0]))
num = temp * (r1*pow(a1_arr[1],2)*(m1+m2)+g*(m1+m2)*np.cos(a1_arr[0])+r2*pow(a2_arr[1],2)*m2*np.cos(a1_arr[0]-a2_arr[0]))
den = r2*(2*m1+m2-m2 * np.cos(2*a1_arr[0]-2*a2_arr[0]))
return num/den
def deriv_a1(a1_arr,a2_arr,t):
"""
Returns an array np.array([first derivative, 2nd derivative])
Inputs-> a1_arr: np.array([Initial angle, Initial angular velocity]);
a2_arr: np.array([Initial angle, Initial angular velocity]);
t: the dependent variable;
"""
return np.array([a1_arr[1],angular_acc1(a1_arr,a2_arr)])
def deriv_a2(a2_arr,a1_arr,t):
return np.array([a2_arr[1],angular_acc2(a1_arr,a2_arr)])
def rk4(deriv,func_i,func_i2, x_i,h):
"""
Implements the RK4 method
Inputs-> deriv: a function that takes two arguments;
func_i: the function to be calculated;
func_i2: this is just passed as an argument for func_i (see above deriv_a1 and deriv_a2);
x_i: the dependent variable of func_i;
h: the step size;
"""
k1 = deriv(func_i,func_i2,x_i)
k2 = deriv(func_i + h*k1/2, func_i2, x_i + h/2)
k3 = deriv(func_i + h*k2/2, func_i2, x_i + h/2)
k4 = deriv(func_i + h*k3, func_i2, x_i + h)
func = func_i + (1/6) * h * (k1 +2*k2+2*k3+k4)
x = x_i + h
return (x,func)
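# For reference, the textbook RK4 update for y' = f(y, t) with step size h
# (standard formula, independent of this script) is:
#   k1 = f(y_n, t_n)
#   k2 = f(y_n + h*k1/2, t_n + h/2)
#   k3 = f(y_n + h*k2/2, t_n + h/2)
#   k4 = f(y_n + h*k3,   t_n + h)
#   y_{n+1} = y_n + (h/6) * (k1 + 2*k2 + 2*k3 + k4)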
# =============================================================================
# def init(): #Uncomment these for the animation
# line.set_data([], [])
# time_text.set_text('')
# return line, time_text
#
# def animate(i):
# x = [0, pendulum1_x[i], pendulum2_x[i]]
# y = [0, pendulum1_y[i], pendulum2_y[i]]
#
# line.set_data(x,y)
# #time_text.set_text(time_template % (i*h)) #Uncomment this to display the time in the animated plot
# return line, time_text
#
# =============================================================================
# =============================================================================
# The results
# =============================================================================
#Initial conditions ([initial angle, initial angular speed])
a1_arr = np.array([np.pi/2,0])
a2_arr = np.array([np.pi/2,1])
t = 0 # starting time
h = 0.001 # step size for the RK4 method
steps_no = 100000 # number of steps of the RK4 method
time_arr = np.array([t])
func_array1 = np.array([a1_arr])
func_array2 = np.array([a2_arr])
for i in range(steps_no):
temp =a1_arr
(t,a1_arr) = rk4(deriv_a1,a1_arr,a2_arr,t,h)
t -=h
(t,a2_arr) = rk4(deriv_a2,a2_arr,temp,t,h)
time_arr = np.append(time_arr, t)
func_array1 = np.vstack((func_array1,np.array([a1_arr])))
func_array2 = np.vstack((func_array2,np.array([a2_arr])))
# You can plot the pendulum's position or angular speed/acceleration as a function of time
[pendulum1_theta, pendulum1_angular_speed] = func_array1.transpose()
[pendulum2_theta, pendulum2_angular_speed] = func_array2.transpose()
pendulum1_x = r1*np.sin(pendulum1_theta)
pendulum1_y = - r1*np.cos(pendulum1_theta)
pendulum2_x = r2*np.sin(pendulum2_theta) + pendulum1_x
pendulum2_y = pendulum1_y - r2*np.cos(pendulum2_theta)
# Here I used the matplotlib template of the double pendulum animation to animate the plot
# =============================================================================
# fig = plt.figure()
# ax = fig.add_subplot(111, autoscale_on=False, xlim=(-3.9, 3.9), ylim=(-2, 2))
# ax.set_xlabel('$x-Axis$',fontsize=12)
# ax.set_ylabel('$y-Axis$',fontsize=12)
# ax.set_title('Double pendulum simulation (RK4 method)',fontsize=14)
# ax.grid()
#
# line, = ax.plot([], [], 'o-',lw=3,color='mediumvioletred',markersize=15)
# time_template = 'time = %0.1fs'
# time_text = ax.text(0.05, 0.9, '', transform=ax.transAxes)
#
# ani = animation.FuncAnimation(fig, animate, np.arange(1, len(pendulum1_y)),
# interval=0, blit=True, init_func=init)
#
# #ax.scatter(pendulum2_x, pendulum2_y,s=5, color='black',alpha=0.5)
# #ani.save('double_pendulum_200.avi', fps=20, dpi =8)
# plt.show()
# =============================================================================
# =============================================================================
# #Save each frame separately
# =============================================================================
scatter_x = []
scatter_y = []
counter = 0
save_every_n_frames = 25
for j in range(int(len(pendulum1_y)/save_every_n_frames)):
i = j*save_every_n_frames
fig = plt.figure()
ax = fig.add_subplot(111, autoscale_on=False, xlim=(-4,4), ylim=(-2.1, 2.1))
x = [0, pendulum1_x[i]]
y = [0, pendulum1_y[i]]
ax.plot(x,y,lw=3,color='mediumvioletred')
x1 = [pendulum1_x[i], pendulum2_x[i]]
y1 = [pendulum1_y[i], pendulum2_y[i]]
scatter_x.append(pendulum2_x[i])
scatter_y.append(pendulum2_y[i])
ax.plot(x1,y1,'o-',lw=3,color='mediumvioletred',markersize=15)
ax.scatter(scatter_x,scatter_y,lw=0.0005,color='black')
ax.set_xlabel('$x-Axis$',fontsize=12)
ax.set_ylabel('$y-Axis$',fontsize=12)
ax.set_title('Double pendulum simulation (RK4 method)',fontsize=14)
ax.grid()
fig.savefig(str(j)+'.png',dpi=600)
plt.show()
"""
# =============================================================================
# Please check the answers!!!
# =============================================================================
References:
#Implementing the RK4 method in Python
https://youtu.be/mqoqAovXxWA
by Prof. Niels Walet
#The formulas for the angular acceleration
https://www.myphysicslab.com/pendulum/double-pendulum-en.html
#Animating the double pendulum (N.B. the implementation used here is different)
https://matplotlib.org/3.2.1/gallery/animation/double_pendulum_sgskip.html
"""
|
python
|
from __future__ import annotations
from datetime import date
from typing import (
Literal,
Optional,
Sequence,
)
from pydantic.fields import Field
from pydantic.types import StrictBool
from ..api import (
BodyParams,
EndpointData,
Methods,
WrApiQueryParams,
)
from ..types_.endpoint import BaseEndpoint
from ..types_.enums import BillingType
from ..types_.inputs import DateRange, TimelogOptionalFields
from ..types_.scalar import (
ContactId,
FolderId,
TaskId,
TimelogCategoryId,
TimelogId,
)
class _BaseTimelogs(BaseEndpoint):
created_date: Optional[DateRange]
updated_date: Optional[DateRange]
tracked_date: Optional[DateRange]
me: Optional[StrictBool]
descendants: Optional[StrictBool]
sub_tasks: Optional[StrictBool]
plain_text: Optional[StrictBool]
timelog_categories: Optional[Sequence[TimelogCategoryId]]
billing_types: Optional[Sequence[BillingType]]
fields_: Optional[Sequence[Literal[TimelogOptionalFields.BILLING_TYPE]]] = Field(None, alias="fields")
@property
def endpoint_data(self) -> EndpointData:
return EndpointData(
method="GET",
url=self._url,
query_params=self._query_params,
)
@property
def _url(self) -> str:
raise NotImplementedError()
@property
def _query_params(self) -> WrApiQueryParams:
params = WrApiQueryParams()
if self.created_date:
params["createdDate"] = self._convert_input(self.created_date)
if self.updated_date:
params["updatedDate"] = self._convert_input(self.updated_date)
if self.tracked_date:
params[" trackedDate"] = self._convert_input(self.tracked_date)
if self.me is not None:
params["me"] = self._convert_bool(self.me)
if self.descendants is not None:
params["descendants"] = self._convert_bool(self.descendants)
if self.sub_tasks is not None:
params["subTasks"] = self._convert_bool(self.sub_tasks)
if self.plain_text is not None:
params["plainText"] = self._convert_bool(self.plain_text)
if self.timelog_categories is not None:
params["timelogCategories"] = self._convert_seq(self.timelog_categories)
if self.billing_types is not None:
params["billingTypes"] = self._convert_seq(self.billing_types)
if self.fields_:
params["fields"] = self._convert_seq(self.fields_)
return params
class Timelogs(_BaseTimelogs):
@property
def _url(self) -> str:
return "/timelogs"
class ContactTimelogs(_BaseTimelogs):
contact_id: ContactId
@property
def _url(self) -> str:
return f"/contacts/{self.contact_id}/timelogs"
class FolderTimelogs(_BaseTimelogs):
folder_id: FolderId
@property
def _url(self) -> str:
return f"/folders/{self.folder_id}/timelogs"
class TaskTimelogs(_BaseTimelogs):
task_id: TaskId
@property
def _url(self) -> str:
return f"/tasks/{self.task_id}/timelogs"
class TimelogCategoryTimelogs(_BaseTimelogs):
timelog_category_id: TimelogCategoryId
@property
def _url(self) -> str:
return f"/timelog_categories/{self.timelog_category_id}/timelogs"
class TimelogsById(BaseEndpoint):
timelog_ids: Sequence[TimelogId] = Field(..., max_length=100)
plain_text: Optional[StrictBool]
fields_: Optional[Sequence[Literal[TimelogOptionalFields.BILLING_TYPE]]] = Field(None, alias="fields")
@property
def endpoint_data(self) -> EndpointData:
return EndpointData(
method="GET",
url=f"/timelogs/{','.join(self.timelog_ids)}",
query_params=self._query_params,
)
@property
def _query_params(self) -> WrApiQueryParams:
params = WrApiQueryParams()
if self.plain_text is not None:
params["plainText"] = self._convert_bool(self.plain_text)
if self.fields_:
params["fields"] = self._convert_seq(self.fields_)
return params
class _CreateOrModifyTimelog(BaseEndpoint):
plain_text: Optional[StrictBool]
category_id: Optional[TimelogCategoryId]
fields_: Optional[Sequence[Literal[TimelogOptionalFields.BILLING_TYPE]]] = Field(None, alias="fields")
@property
def endpoint_data(self) -> EndpointData:
return EndpointData(
method=self._method,
url=self._url,
body_params=self._body_params,
)
@property
def _url(self) -> str:
raise NotImplementedError()
@property
def _method(self) -> Methods:
raise NotImplementedError()
@property
def _body_params(self) -> BodyParams:
params = {}
if self.plain_text is not None:
params["plainText"] = self._convert_bool(self.plain_text)
if self.category_id is not None:
params["categoryId"] = self.category_id
if self.fields_:
params["fields"] = self._convert_seq(self.fields_)
return params
class CreateTimelog(_CreateOrModifyTimelog):
task_id: TaskId
comment: str
hours: int
tracked_date: date
@property
def _url(self) -> str:
return f"/tasks/{self.task_id}/timelogs"
@property
def _method(self) -> Methods:
return "POST"
@property
def _body_params(self) -> BodyParams:
params = super()._body_params
return {
**params,
**{"comment": self.comment, "hours": str(self.hours), "trackedDate": self.tracked_date.isoformat()},
}
class ModifyTimelog(_CreateOrModifyTimelog):
timelog_id: TimelogId
comment: Optional[str]
hours: Optional[int]
tracked_date: Optional[date]
@property
def _url(self) -> str:
return f"/timelogs/{self.timelog_id}"
@property
def _method(self) -> Methods:
return "PUT"
@property
def _body_params(self) -> BodyParams:
params = super()._body_params
if self.comment is not None:
params["comment"] = self.comment
if self.hours is not None:
params["hours"] = str(self.hours)
if self.tracked_date:
params["trackedDate"] = self.tracked_date.isoformat()
return params
class DeleteTimelog(BaseEndpoint):
timelog_id: TimelogId
@property
def endpoint_data(self) -> EndpointData:
return EndpointData(
method="DELETE",
url=f"/timelogs/{self.timelog_id}",
)
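# Illustrative usage sketch (assumes pydantic-style keyword construction; the task id and
# values below are made up, not from this module):
# ep = CreateTimelog(task_id="IEAAAAAQ", comment="code review", hours=2, tracked_date=date(2021, 6, 1))
# ep.endpoint_data  # -> EndpointData(method="POST", url="/tasks/IEAAAAAQ/timelogs", body_params={...})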
|
python
|
import datetime as _datetime
import os
import random
import string
import inflect
import six
from . import mock_random
inflectify = inflect.engine()
def _slugify(string):
"""
This is not as good as a proper slugification function, but the input space is limited
>>> _slugify("beets")
'beets'
>>> _slugify("Toaster Strudel")
'toaster-strudel'
Here's why: It handles very little. It doesn't handle esoteric whitespace or symbols:
>>> _slugify("Hat\\nBasket- of justice and some @#*(! symbols")
'hat-basket--of-justice-and-some-@#*(!-symbols'
"""
return string.replace(" ", "-").replace("\n", "-").replace(".", "").replace(",", "").lower()
people = [
"I",
"You",
"Nobody",
"The government",
"Everybody",
"The illuminati",
"God himself",
"The President of the United States",
"The world",
"The United Nations",
"The Oakland Raiders",
"Your dad",
"Your mom",
"The band 'Queen'",
"Customs & Immigration"
]
titles = [
'captain',
'lieutenant',
'leftenant',
'colonel',
'general',
'major',
'sir',
'sensei',
'lord',
'duke',
'president',
'master',
'mister',
'miss',
'lady',
'queen',
'king',
'doctor',
'monsieur',
'madame',
'senor',
'senorita',
'lord commander',
'commodore',
'emperor',
'super-emperor',
'madam',
'dame',
'professor',
'father',
'brother',
'sister',
'reverend',
]
streets = [
'street',
'boulevard',
'drive',
'block',
'place',
'boardwalk',
]
countries = [
'testonia',
'testasia',
'arztotzka',
'mordor',
'xanth',
'stankonia',
'strongbadia',
'westeros',
'qarth',
'gallifrey',
'tatooine',
'cybertron',
'aiur',
'lordaeron',
'yemen',
]
adjectives = [
'heroic',
'magnificent',
'mighty',
'amazing',
'wonderful',
'fantastic',
'incredible',
'spectacular',
'tremendous',
'throbbing',
'enormous',
'terrific',
'wondrous',
'spectacular',
'big',
'tiny',
'small',
'mighty',
'musky',
'sky',
'transparent',
'opaque',
'light',
'dark',
'sassy',
'scary',
'extraneous',
'huge',
'aqua',
'aqua',
'marine',
'azure',
'beige',
'black',
'almond',
'blue',
'brown',
'chartreuse',
'coral',
'corn',
'flower',
'crimson',
'cyan',
'navy',
'golden',
'rod',
'gray',
'grey',
'green',
'khaki',
'magenta',
'olive',
'salmon',
'slate',
'turquoise',
'violet',
'pink',
'brick',
'white',
'golden',
'honeydew',
'indigo',
'ivory',
'lavender',
'lemon',
'chiffon',
'purple',
'orchid',
'linen',
'rose',
'orange',
'pale',
'sandy',
'sea',
'shell',
'silver',
'tan',
'teal',
'thistle',
'violet',
'plaid',
'polka',
'dot',
'paisley',
'iron',
'bronze',
'stone',
'birch',
'cedar',
'cherry',
'sandal',
'pine',
'fir',
'yew',
'hem',
'lock',
'spruce',
'chest',
'box',
'butter',
'nut',
'camphor',
'elm',
'oak',
'huckle',
'berry',
'wood'
'maple',
'poplar',
'teak',
'beech',
'nutmeg',
'willow',
'cinnamon',
'spice',
'basil',
'cardamom',
'clove',
'garlic',
'juniper',
'rum',
'lime',
'capable',
'heavy',
'fast',
'slow',
'charming',
'noticeable',
'sly',
'slippery',
'sluggish',
'casual',
'cautious',
'cement',
'evil',
'banana',
'good',
'neutral',
'apple',
'pear',
'winter',
'spring',
'fall',
'autumn',
'summer',
'garbage',
'imposing',
'correct',
'iced',
'handed',
'salty',
'coffee',
'cheese',
'floppy',
'popular',
'misty',
'soulful',
'boaty',
'gassy',
'spectacular',
'sleepy',
'laudable',
'comfortable',
'soft',
'dicey',
'memorable',
'patterned',
'greasy',
'elongated',
'long',
'collapsible',
'mysterious',
'expandible',
'delicious',
'edible',
'scattered',
'impenetrable',
'sexy',
'curvaceous',
'avoidable',
'tractable',
'fussy',
'touchable',
'touchy',
'scandalous',
'murky',
'sloshing',
'damp',
'chubby',
]
containers = [
'bucket',
'bale',
'cluster',
'armload',
'group',
'container',
'box',
'bunch',
'bag',
'tub',
'tote',
'wad',
]
directions = [
"west",
"east",
"north",
"south",
"central",
]
city_suffixes = [
"ford",
"berg",
"shire",
"town",
"hall",
" city",
"sound",
"ton",
]
tlds = [
'.xyz',
'.blue',
'.org',
'.com',
'.net',
'.link',
'.click',
'.wedding',
'.sexy',
'.red',
'.black',
'.pics'
]
nouns = [
'onion',
'chimp',
'blister',
'poop',
'britches',
'mystery',
'boat'
'bench',
'secret',
'mouse',
'house',
'butt',
'hunter',
'fisher',
'bean',
'harvest',
'mixer',
'hand',
'finger',
'nose',
'eye',
'belly',
'jean',
'plan',
'disk',
'horse',
'staple',
'face',
'arm',
'cheek',
'monkey',
'shin',
'button',
'byte',
'cabinet',
'canyon',
'dance',
'crayon',
'sausage',
'meat',
'wad',
'napkin',
'device',
'cape',
'chair',
'person',
'burger',
'ham',
'place',
'beef',
'kitten',
'puppy',
'book',
'clamp',
'cloud',
'code',
'coast',
'coin',
'concern',
'space',
'key',
'bucket',
'object',
'heart',
'stapler',
'mug',
'bottle',
'cable',
'note',
'lamp',
'shelf',
'blanket',
'dong',
'board',
'issue',
'job',
'knife',
'thing',
'phone',
'sweater',
'pant',
'boot',
'sock',
'socks',
'hat',
'ring',
'dong',
'wang',
'wrap',
'holder',
'pen',
'pencil',
'bag',
'potato',
'sword',
'shield',
'spear',
'staff',
'shaft',
'slab',
'grub',
'song',
'axe',
'boat',
'armour',
'lamp',
'club',
'cage',
'hole',
'ass',
'chump',
'jerk',
'foot',
'spud',
]
verbs = [
'jump',
'twirl',
'spin',
'smell',
'slap',
'smack',
'poke',
'prod',
'drop',
'punch',
'grab',
'throw',
'slide',
'dunk',
'braise',
'scatter',
'slide',
'dice',
'hurl',
'buy',
'toast',
'align',
'sell',
'move',
'shoop',
'trade',
'steal',
'flip',
'blast',
'clean',
'hide',
'pinch',
'grasp',
'palm',
'examine',
'taste',
'ingest',
'swallow',
'snort',
'juggle',
'lift',
'eat',
'quaff',
'chug',
'fear',
'assemble',
]
firstnames = [
'testy',
'carl',
'agatha',
'agnes',
'carol',
'harry',
'maya',
'judy',
'mike',
'albert',
'cornelius',
'tim',
'mary',
'peter',
'kiko',
'wilhelm',
'kimmy',
'steve',
'jennifer',
'frank',
'pierre',
'george',
'aya',
'thiago',
'rodrigo',
'aasif',
'mohammed',
'daniel',
'liam',
'jack',
'agustin',
'santiago',
'noah',
'sofia',
'olivia',
'madison',
'chloe',
'camilla',
'carla',
'gary',
'hiroto',
'rasmus',
'charlie',
'miguel',
'alexander',
'youssef',
'emma',
'sara',
'amelia',
'tiffany',
'arnold',
'ronald',
'hogan',
'doug',
'pete',
'jim',
'james',
'mandy',
'andy',
'cole',
'francis',
'david',
'margaret',
'tracy',
'jonathan',
'daniel',
'heather',
'travis',
'courteney',
'yang',
'vivian',
'ryan',
'phil',
'shana',
'allen',
'karen',
'henry',
'graham',
'jesse',
'shirley',
'rafa',
'dylan',
'javier',
'ashley',
'drew',
'tomas',
'taylor',
'matt',
'shigeru',
'shayla',
'stephanie',
'oliver',
'ron',
'jason',
'seth',
'ronald',
'miloslav',
'walter',
]
def slugify_argument(func):
"""
Wraps a function that returns a string, adding the 'slugify' argument.
>>> slugified_fn = slugify_argument(lambda *args, **kwargs: "YOU ARE A NICE LADY")
>>> slugified_fn()
'YOU ARE A NICE LADY'
>>> slugified_fn(slugify=True)
'you-are-a-nice-lady'
"""
@six.wraps(func)
def wrapped(*args, **kwargs):
if "slugify" in kwargs and kwargs['slugify']:
return _slugify(func(*args, **kwargs))
else:
return func(*args, **kwargs)
return wrapped
def capitalize_argument(func):
"""
Wraps a function that returns a string, adding the 'capitalize' argument.
>>> capsified_fn = capitalize_argument(lambda *args, **kwargs: "what in the beeswax is this?")
>>> capsified_fn()
'what in the beeswax is this?'
>>> capsified_fn(capitalize=True)
'What In The Beeswax Is This?'
"""
@six.wraps(func)
def wrapped(*args, **kwargs):
if "capitalize" in kwargs and kwargs['capitalize']:
return func(*args, **kwargs).title()
else:
return func(*args, **kwargs)
return wrapped
def datetime(past=True, random=random):
"""
Returns a random datetime from the past... or the future!
>>> mock_random.seed(0)
>>> datetime(random=mock_random).isoformat()
'1950-02-03T03:04:05'
>>> datetime(random=mock_random, past=False).isoformat()
'2023-08-09T09:00:01'
"""
def year():
if past:
return random.choice(range(1950,2005))
else:
return _datetime.datetime.now().year + random.choice(range(1, 50))
def month():
return random.choice(range(1,12))
def day():
return random.choice(range(1,31))
def hour():
return random.choice(range(0,23))
def minute():
return random.choice(range(0,59))
def second():
return random.choice(range(0,59))
try:
return _datetime.datetime(year=year(),
month=month(),
day=day(),
hour=hour(),
minute=minute(),
second=second())
except ValueError:
return datetime(past=past, random=random)
@capitalize_argument
def letter(random=random, *args, **kwargs):
"""
Return a letter!
>>> mock_random.seed(0)
>>> letter(random=mock_random)
'a'
>>> letter(random=mock_random)
'b'
>>> letter(random=mock_random, capitalize=True)
'C'
"""
return random.choice(string.ascii_lowercase)
def number(random=random, *args, **kwargs):
"""
Return a number!
>>> number(random=mock_random)
0
"""
return random.randint(0,9)
@slugify_argument
@capitalize_argument
def title(random=random, *args, **kwargs):
"""
Return a title!
>>> mock_random.seed(0)
>>> title(random=mock_random)
'captain'
>>> title(random=mock_random, capitalize=True)
'Lieutenant'
>>> title(random=mock_random, slugify=True)
'leftenant'
"""
return random.choice(titles)
@slugify_argument
@capitalize_argument
def adjective(random=random, *args, **kwargs):
"""
Return an adjective!
>>> mock_random.seed(0)
>>> adjective(random=mock_random)
'heroic'
>>> adjective(random=mock_random, capitalize=True)
'Magnificent'
>>> adjective(random=mock_random, slugify=True)
'mighty'
"""
return random.choice(adjectives)
@slugify_argument
@capitalize_argument
def noun(random=random, *args, **kwargs):
"""
Return a noun!
>>> mock_random.seed(0)
>>> noun(random=mock_random)
'onion'
>>> noun(random=mock_random, capitalize=True)
'Chimp'
>>> noun(random=mock_random, slugify=True)
'blister'
"""
return random.choice(nouns)
@slugify_argument
@capitalize_argument
def a_noun(random=random, *args, **kwargs):
"""
Return a noun, but with an 'a' in front of it. Or an 'an', depending!
>>> mock_random.seed(0)
>>> a_noun(random=mock_random)
'an onion'
>>> a_noun(random=mock_random, capitalize=True)
'A Chimp'
>>> a_noun(random=mock_random, slugify=True)
'a-blister'
"""
return inflectify.a(noun(random=random))
@slugify_argument
@capitalize_argument
def plural(random=random, *args, **kwargs):
"""
Return a plural noun.
>>> mock_random.seed(0)
>>> plural(random=mock_random)
'onions'
>>> plural(random=mock_random, capitalize=True)
'Chimps'
>>> plural(random=mock_random, slugify=True)
'blisters'
"""
return inflectify.plural(random.choice(nouns))
@slugify_argument
@capitalize_argument
def verb(random=random, *args, **kwargs):
"""
Return a verb!
>>> mock_random.seed(0)
>>> verb(random=mock_random)
'jump'
>>> verb(random=mock_random, capitalize=True)
'Twirl'
>>> verb(random=mock_random, slugify=True)
'spin'
"""
return random.choice(verbs)
@slugify_argument
@capitalize_argument
def firstname(random=random, *args, **kwargs):
"""
Return a first name!
>>> mock_random.seed(0)
>>> firstname(random=mock_random)
'testy'
>>> firstname(random=mock_random, capitalize=True)
'Carl'
>>> firstname(random=mock_random, slugify=True)
'agatha'
"""
return random.choice(firstnames)
@slugify_argument
@capitalize_argument
def lastname(random=random, *args, **kwargs):
"""
Return a first name!
>>> mock_random.seed(0)
>>> lastname(random=mock_random)
'chimp'
>>> mock_random.seed(1)
>>> lastname(random=mock_random, capitalize=True)
'Wonderful'
>>> mock_random.seed(2)
>>> lastname(random=mock_random, slugify=True)
'poopbritches'
>>> [lastname(random=mock_random) for x in range(0,10)]
['wonderful', 'chimp', 'onionmighty', 'magnificentslap', 'smellmouse', 'secretbale', 'boatbenchtwirl', 'spectacularmice', 'incrediblebritches', 'poopbritches']
"""
types = [
"{noun}",
"{adjective}",
"{noun}{second_noun}",
"{adjective}{noun}",
"{adjective}{plural}",
"{noun}{verb}",
"{noun}{container}",
"{verb}{noun}",
"{adjective}{verb}",
"{noun}{adjective}",
"{noun}{firstname}",
"{noun}{title}",
"{adjective}{title}",
"{adjective}-{noun}",
"{adjective}-{plural}"
]
return random.choice(types).format(noun=noun(random=random),
second_noun=noun(random=random),
adjective=adjective(random=random),
plural=plural(random=random),
container=container(random=random),
verb=verb(random=random),
firstname=firstname(random=random),
title=title(random=random))
@slugify_argument
@capitalize_argument
def container(random=random, *args, **kwargs):
"""
Return a container!
>>> mock_random.seed(0)
>>> container(random=mock_random)
'bucket'
>>> container(random=mock_random, capitalize=True)
'Bale'
>>> container(random=mock_random, slugify=True)
'cluster'
"""
return random.choice(containers)
@slugify_argument
@capitalize_argument
def numberwang(random=random, *args, **kwargs):
"""
Return a number that is spelled out.
>>> numberwang(random=mock_random)
'two'
>>> numberwang(random=mock_random, capitalize=True)
'Two'
>>> numberwang(random=mock_random, slugify=True)
'two'
"""
n = random.randint(2, 150)
return inflectify.number_to_words(n)
@slugify_argument
@capitalize_argument
def direction(random=random, *args, **kwargs):
"""
Return a direction!
>>> mock_random.seed(0)
>>> direction(random=mock_random)
'west'
>>> direction(random=mock_random, capitalize=True)
'East'
>>> direction(random=mock_random, slugify=True)
'north'
"""
return random.choice(directions)
@slugify_argument
@capitalize_argument
def city_suffix(random=random, *args, **kwargs):
"""
Return a city suffix, like 'berg' or 'hall'.
>>> mock_random.seed(0)
>>> city_suffix(random=mock_random)
'ford'
>>> city_suffix(random=mock_random, capitalize=True)
'Berg'
>>> city_suffix(random=mock_random, slugify=True)
'shire'
"""
return random.choice(city_suffixes)
@slugify_argument
@capitalize_argument
def tld(random=random, *args, **kwargs):
"""
Return a direction!
>>> mock_random.seed(0)
>>> tld(random=mock_random)
'.xyz'
>>> tld(random=mock_random, capitalize=True)
'.Blue'
>>> tld(random=mock_random, slugify=True)
'org'
"""
return random.choice(tlds)
@slugify_argument
@capitalize_argument
def thing(random=random, *args, **kwargs):
"""
Return a ... thing.
>>> mock_random.seed(0)
>>> thing(random=mock_random)
'two secrets'
>>> mock_random.seed(1)
>>> thing(random=mock_random, capitalize=True)
'Mighty Poop'
>>> mock_random.seed(2)
>>> thing(random=mock_random, slugify=True)
'poop'
>>> mock_random.seed(4)
>>> thing(random=mock_random, slugify=True)
'two-chimps'
"""
def noun_or_adjective_noun():
if random.choice([True, False]):
return noun(random=random)
else:
return adjective(random=random) + " " + noun(random=random)
def plural_or_adjective_plural():
if random.choice([True, False]):
return plural(random=random)
else:
return adjective(random=random) + " " + plural(random=random)
def container_of_nouns():
return container(random=random) + " of " + plural_or_adjective_plural()
def number_of_plurals():
return numberwang(random=random) + " " + plural_or_adjective_plural()
if "an" in kwargs and kwargs['an']:
return random.choice([
inflectify.a(noun_or_adjective_noun()),
inflectify.a(container_of_nouns()),
number_of_plurals(),
])
else:
return random.choice([
noun_or_adjective_noun(),
container_of_nouns(),
number_of_plurals(),
])
@slugify_argument
def a_thing(random=random, *args, **kwargs):
"""
Return a ... thing.
>>> mock_random.seed(0)
>>> a_thing(random=mock_random)
'two secrets'
>>> mock_random.seed(1)
>>> a_thing(random=mock_random, capitalize=True)
'A Mighty Poop'
>>> mock_random.seed(2)
>>> a_thing(random=mock_random, slugify=True)
'a-poop'
>>> mock_random.seed(4)
>>> a_thing(random=mock_random, slugify=True)
'two-chimps'
"""
return thing(random=random, an=True, *args, **kwargs)
@slugify_argument
@capitalize_argument
def things(random=random, *args, **kwargs):
"""
Return a set of things.
>>> mock_random.seed(0)
>>> things(random=mock_random)
'two secrets, two secrets, and two secrets'
>>> mock_random.seed(1)
>>> things(random=mock_random, capitalize=True)
'A Mighty Poop, A Mighty Poop, And A Mighty Poop'
"""
return inflectify.join([a_thing(random=random), a_thing(random=random), a_thing(random=random)])
@slugify_argument
@capitalize_argument
def name(random=random, *args, **kwargs):
"""
Return someone's name
>>> mock_random.seed(0)
>>> name(random=mock_random)
'carl poopbritches'
>>> mock_random.seed(7)
>>> name(random=mock_random, capitalize=True)
'Duke Testy Wonderful'
"""
if random.choice([True, True, True, False]):
return firstname(random=random) + " " + lastname(random=random)
elif random.choice([True, False]):
return title(random=random) + " " + firstname(random=random) + " " + lastname(random=random)
else:
return title(random=random) + " " + lastname(random=random)
@slugify_argument
@capitalize_argument
def domain(random=random, *args, **kwargs):
"""
Return a domain
>>> mock_random.seed(0)
>>> domain(random=mock_random)
'onion.net'
>>> domain(random=mock_random)
'bag-of-heroic-chimps.sexy'
"""
words = random.choice([
noun(random=random),
thing(random=random),
adjective(random=random)+noun(random=random),
])
return _slugify(words)+tld(random=random)
def email(random=random, *args, **kwargs):
"""
Return an e-mail address
>>> mock_random.seed(0)
>>> email(random=mock_random)
'[email protected]'
>>> email(random=mock_random)
'[email protected]'
>>> email(random=mock_random, name="charles")
'[email protected]'
"""
if 'name' in kwargs and kwargs['name']:
words = kwargs['name']
else:
words = random.choice([
noun(random=random),
name(random=random),
name(random=random)+"+spam",
])
return _slugify(words)+"@"+domain(random=random)
def phone_number(random=random, *args, **kwargs):
"""
Return a phone number
>>> mock_random.seed(0)
>>> phone_number(random=mock_random)
'555-0000'
>>> phone_number(random=mock_random)
'1-604-555-0000'
>>> phone_number(random=mock_random)
'864-70-555-0000'
"""
return random.choice([
'555-{number}{other_number}{number}{other_number}',
'1-604-555-{number}{other_number}{number}{other_number}',
'864-70-555-{number}{other_number}{number}{other_number}',
'867-5309'
]).format(number=number(random=random),
other_number=number(random=random))
@slugify_argument
@capitalize_argument
def sentence(random=random, *args, **kwargs):
"""
Return a whole sentence
>>> mock_random.seed(0)
>>> sentence(random=mock_random)
"Agatha Incrediblebritches can't wait to smell two chimps in Boatbencheston."
>>> mock_random.seed(2)
>>> sentence(random=mock_random, slugify=True)
'blistersecret-studios-is-the-best-company-in-liveronion'
"""
if 'name' in kwargs and kwargs['name']:
nm = kwargs['name']
elif random.choice([True, False, False]):
nm = name(capitalize=True, random=random)
else:
nm = random.choice(people)
def type_one():
return "{name} will {verb} {thing}.".format(name=nm,
verb=verb(random=random),
thing=random.choice([a_thing(random=random),
things(random=random)]))
def type_two():
return "{city} is in {country}.".format(city=city(capitalize=True, random=random),
country=country(capitalize=True, random=random))
def type_three():
return "{name} can't wait to {verb} {thing} in {city}.".format(name=nm,
verb=verb(random=random),
thing=a_thing(random=random),
city=city(capitalize=True, random=random))
def type_four():
return "{name} will head to {company} to buy {thing}.".format(name=nm,
company=company(capitalize=True, random=random),
thing=a_thing(random=random))
def type_five():
return "{company} is the best company in {city}.".format(city=city(capitalize=True, random=random),
company=company(capitalize=True, random=random))
def type_six():
return "To get to {country}, you need to go to {city}, then drive {direction}.".format(
country=country(capitalize=True, random=random),
city=city(capitalize=True, random=random),
direction=direction(random=random))
def type_seven():
return "{name} needs {thing}, badly.".format(name=nm, thing=a_thing(random=random))
def type_eight():
return "{verb} {noun}!".format(verb=verb(capitalize=True, random=random), noun=noun(random=random))
return random.choice([type_one,
type_two,
type_three,
type_four,
type_five,
type_six,
type_seven,
type_eight])()
@slugify_argument
@capitalize_argument
def paragraph(random=random, length=10, *args, **kwargs):
"""
Produces a paragraph of text.
>>> mock_random.seed(0)
>>> paragraph(random=mock_random, length=2)
"Agatha Incrediblebritches can't wait to smell two chimps in Boatbencheston. Wonderfulsecretsound is in Gallifrey."
>>> mock_random.seed(2)
>>> paragraph(random=mock_random, length=2, slugify=True)
'blistersecret-studios-is-the-best-company-in-liveronion-wonderfulsecretsound-is-in-gallifrey'
"""
return " ".join([sentence(random=random) for x in range(0, length)])
def markdown(random=random, length=10, *args, **kwargs):
"""
Produces a bunch of markdown text.
>>> mock_random.seed(0)
>>> markdown(random=mock_random, length=2)
'Nobody will **head** _to_ Mystery Studies Department **to** _buy_ a mighty poop.\\nNobody will **head** _to_ Mystery Studies Department **to** _buy_ a mighty poop.'
"""
def title_sentence():
return "\n" + "#"*random.randint(1,5) + " " + sentence(capitalize=True, random=random)
def embellish(word):
return random.choice([word, word, word, "**"+word+"**", "_"+word+"_"])
def randomly_markdownify(string):
return " ".join([embellish(word) for word in string.split(" ")])
sentences = []
for i in range(0, length):
sentences.append(random.choice([
title_sentence(),
sentence(random=random),
sentence(random=random),
randomly_markdownify(sentence(random=random))
]))
return "\n".join(sentences)
@slugify_argument
@capitalize_argument
def gender(random=random, *args, **kwargs):
return "Awesome"
@slugify_argument
@capitalize_argument
def company(random=random, *args, **kwargs):
"""
Produce a company name
>>> mock_random.seed(0)
>>> company(random=mock_random)
'faculty of applied chimp'
>>> mock_random.seed(1)
>>> company(random=mock_random)
'blistersecret studios'
>>> mock_random.seed(2)
>>> company(random=mock_random)
'pooppooppoop studios'
>>> mock_random.seed(3)
>>> company(random=mock_random)
'britchesshop'
>>> mock_random.seed(4)
>>> company(random=mock_random, capitalize=True)
'Mystery Studies Department'
>>> mock_random.seed(5)
>>> company(random=mock_random, slugify=True)
'the-law-offices-of-magnificentslap-boatbench-and-smellmouse'
"""
return random.choice([
"faculty of applied {noun}",
"{noun}{second_noun} studios",
"{noun}{noun}{noun} studios",
"{noun}shop",
"{noun} studies department",
"the law offices of {lastname}, {noun}, and {other_lastname}",
"{country} ministry of {plural}",
"{city} municipal {noun} department",
"{city} plumbing",
"department of {noun} studies",
"{noun} management systems",
"{plural} r us",
"inter{verb}",
"the {noun} warehouse",
"integrated {noun} and {second_noun}",
"the {noun} and {second_noun} pub",
"e-cyber{verb}",
"{adjective}soft",
"{domain} Inc.",
"{thing} incorporated",
"{noun}co",
]).format(noun=noun(random=random),
plural=plural(random=random),
country=country(random=random),
city=city(random=random),
adjective=adjective(random=random),
lastname=lastname(random=random),
other_lastname=lastname(random=random),
domain=domain(random=random),
second_noun=noun(random=random),
verb=verb(random=random),
thing=thing(random=random))
@slugify_argument
@capitalize_argument
def country(random=random, *args, **kwargs):
"""
Produce a country name
>>> mock_random.seed(0)
>>> country(random=mock_random)
'testasia'
>>> country(random=mock_random, capitalize=True)
'West Xanth'
>>> country(random=mock_random, slugify=True)
'westeros'
"""
return random.choice([
"{country}",
"{direction} {country}"
]).format(country=random.choice(countries),
direction=direction(random=random))
@slugify_argument
@capitalize_argument
def city(random=random, *args, **kwargs):
"""
Produce a city name
>>> mock_random.seed(0)
>>> city(random=mock_random)
'east mysteryhall'
>>> city(random=mock_random, capitalize=True)
'Birmingchimp'
>>> city(random=mock_random, slugify=True)
'wonderfulsecretsound'
"""
return random.choice([
"{direction} {noun}{city_suffix}",
"{noun}{city_suffix}",
"{adjective}{noun}{city_suffix}",
"{plural}{city_suffix}",
"{adjective}{city_suffix}",
"liver{noun}",
"birming{noun}",
"{noun}{city_suffix} {direction}"
]).format(direction=direction(random=random),
adjective=adjective(random=random),
plural=plural(random=random),
city_suffix=city_suffix(random=random),
noun=noun(random=random))
@slugify_argument
@capitalize_argument
def postal_code(random=random, *args, **kwargs):
"""
Produce something that vaguely resembles a postal code
>>> mock_random.seed(0)
>>> postal_code(random=mock_random)
'b0b 0c0'
>>> postal_code(random=mock_random, capitalize=True)
'E0E 0F0'
>>> postal_code(random=mock_random, slugify=True)
'h0h-0i0'
"""
return random.choice([
"{letter}{number}{letter} {other_number}{other_letter}{other_number}",
"{number}{other_number}{number}{number}{other_number}",
"{number}{letter}{number}{other_number}{other_letter}"
]).format(
number=number(random=random),
other_number=number(random=random),
letter=letter(random=random),
other_letter=letter(random=random)
)
@slugify_argument
@capitalize_argument
def street(random=random, *args, **kwargs):
"""
Produce something that sounds like a street name
>>> mock_random.seed(0)
>>> street(random=mock_random)
'chimp place'
>>> street(random=mock_random, capitalize=True)
'Boatbench Block'
>>> mock_random.seed(3)
>>> street(random=mock_random, slugify=True)
'central-britches-boulevard'
"""
return random.choice([
"{noun} {street_type}",
"{adjective}{verb} {street_type}",
"{direction} {adjective}{verb} {street_type}",
"{direction} {noun} {street_type}",
"{direction} {lastname} {street_type}",
]).format(noun=noun(random=random),
lastname=lastname(random=random),
direction=direction(random=random),
adjective=adjective(random=random),
verb=verb(random=random),
street_type=random.choice(streets))
@slugify_argument
@capitalize_argument
def address(random=random, *args, **kwargs):
"""
A street name plus a number!
>>> mock_random.seed(0)
>>> address(random=mock_random)
'0000 amazingslap boardwalk'
>>> address(random=mock_random, capitalize=True)
'0000 South Throbbingjump Boulevard'
>>> address(random=mock_random, slugify=True)
'two-central-britches-boulevard'
"""
return random.choice([
"{number}{other_number}{number}{other_number} {street}",
"{number}{other_number} {street}",
"{numberwang} {street}",
"apt {numberwang}, {number}{other_number}{other_number} {street}",
"apt {number}{other_number}{number}, {numberwang} {street}",
"po box {number}{other_number}{number}{other_number}",
]).format(number=number(random=random),
other_number=number(random=random),
numberwang=numberwang(random=random),
street=street(random=random))
def image(random=random, width=800, height=600, https=False, *args, **kwargs):
"""
Generate the address of a placeholder image.
>>> mock_random.seed(0)
>>> image(random=mock_random)
'http://dummyimage.com/800x600/292929/e3e3e3&text=mighty poop'
>>> image(random=mock_random, width=60, height=60)
'http://placekitten.com/60/60'
>>> image(random=mock_random, width=1920, height=1080)
'http://dummyimage.com/1920x1080/292929/e3e3e3&text=To get to Westeros, you need to go to Britchestown, then drive west.'
>>> image(random=mock_random, https=True, width=1920, height=1080)
'https://dummyimage.com/1920x1080/292929/e3e3e3&text=East Mysteryhall is in Westeros.'
"""
target_fn = noun
if width+height > 300:
target_fn = thing
if width+height > 2000:
target_fn = sentence
s = ""
if https:
s = "s"
if random.choice([True, False]):
return "http{s}://dummyimage.com/{width}x{height}/292929/e3e3e3&text={text}".format(
s=s,
width=width,
height=height,
text=target_fn(random=random))
else:
return "http{s}://placekitten.com/{width}/{height}".format(s=s, width=width, height=height)
|
python
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.internet import reactor
from twisted.web import proxy, server
site = server.Site(proxy.ReverseProxyResource('www.yahoo.com', 80, ''))
reactor.listenTCP(8080, site)
reactor.run()
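# Usage note (illustrative): run this script and point a browser at http://localhost:8080/
# to see the content of www.yahoo.com (port 80) served through the reverse proxy.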
|
python
|
from torch.optim.lr_scheduler import LambdaLR
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
""" Create a schedule with a learning rate that decreases linearly after
linearly increasing during a warmup period.
"""
def lr_lambda(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
return max(
0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
)
return LambdaLR(optimizer, lr_lambda, last_epoch)
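# Illustrative usage sketch (the optimizer, model and step counts are assumed, not taken from this file):
# optimizer = torch.optim.AdamW(model.parameters(), lr=3e-5)
# scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=100, num_training_steps=1000)
# for step in range(1000):
#     loss = train_step(...)          # hypothetical training step
#     loss.backward()
#     optimizer.step()
#     scheduler.step()
#     optimizer.zero_grad()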
def jaccard(str1, str2):
a = set(str1.lower().split())
b = set(str2.lower().split())
c = a.intersection(b)
return float(len(c)) / (len(a) + len(b) - len(c))
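# Worked example (computed by hand): jaccard("the cat sat", "the cat") == 2 / 3,
# since the word sets share 2 tokens out of 3 distinct tokens overall.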
class AverageMeter:
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
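# Illustrative usage: meter = AverageMeter(); meter.update(0.5, n=4); meter.update(1.0, n=1)
# leaves meter.avg == (0.5 * 4 + 1.0 * 1) / 5 == 0.6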
def calculate_jaccard_score(
original_tweet,
target_string,
sentiment_val,
idx_start,
idx_end,
offsets,
verbose=False):
if idx_end < idx_start:
idx_end = idx_start
filtered_output = ""
for ix in range(idx_start, idx_end + 1):
filtered_output += original_tweet[offsets[ix][0]: offsets[ix][1]]
if (ix+1) < len(offsets) and offsets[ix][1] < offsets[ix+1][0]:
filtered_output += " "
if sentiment_val == "neutral" or len(original_tweet.split()) < 2:
filtered_output = original_tweet
jac1 = jaccard(target_string.strip(), filtered_output.strip())
st1 = filtered_output
if idx_end < idx_start:
idx_start = idx_end
filtered_output = ""
for ix in range(idx_start, idx_end + 1):
filtered_output += original_tweet[offsets[ix][0]: offsets[ix][1]]
if (ix+1) < len(offsets) and offsets[ix][1] < offsets[ix+1][0]:
filtered_output += " "
if sentiment_val == "neutral" or len(original_tweet.split()) < 2:
filtered_output = original_tweet
jac2 = jaccard(target_string.strip(), filtered_output.strip())
st2 = filtered_output
if jac1 > jac2:
jac = jac1
filtered_output = st1
else:
jac = jac2
filtered_output = st2
return jac, filtered_output
|
python
|
# Copyright (c) 2019-2021, Jonas Eschle, Jim Pivarski, Eduardo Rodrigues, and Henry Schreiner.
#
# Distributed under the 3-clause BSD license, see accompanying file LICENSE
# or https://github.com/scikit-hep/vector for details.
import pytest
import vector
ak = pytest.importorskip("awkward")
numba = pytest.importorskip("numba")
pytest.importorskip("vector._backends.numba_object")
@pytest.mark.numba
def test():
@numba.njit
def extract(x):
return x[2][0]
array = vector.Array([[{"x": 1, "y": 2}], [], [{"x": 3, "y": 4}, {"x": 5, "y": 6}]])
out = extract(array)
assert isinstance(out, vector._backends.object_.VectorObject2D)
assert out.x == pytest.approx(3)
assert out.y == pytest.approx(4)
array = vector.Array(
[[{"x": 1, "y": 2, "z": 3, "E": 4}], [], [{"x": 5, "y": 6, "z": 7, "E": 15}]]
)
out = extract(array)
assert isinstance(out, vector._backends.object_.MomentumObject4D)
assert out.x == pytest.approx(5)
assert out.y == pytest.approx(6)
assert out.z == pytest.approx(7)
assert out.t == pytest.approx(15)
|
python
|
import socket #for sockets
import sys #for exit
# create dgram udp socket
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
except socket.error:
print('Failed to create socket')
sys.exit()
HOST = '' # Symbolic name meaning all available interfaces
PORT = 6000 # Arbitrary non-privileged port
s.bind((HOST, PORT))
#while(1) :
#msg = input('Enter message to send : ')
try :
#Set the whole string
msg = b'\xc0\xa8\x01\x0fHDLMIRACLE\xaa\xaa\x0f\x01\x17\x00\x95\x001\x01J\x01d\x00\x03\xd7\xd1'
s.sendto(msg, (HOST, PORT))
print(msg)
# receive data from client (data, addr)
#d = s.recvfrom(1024)
#reply = d[0]
#addr = d[1]
#print('Server reply : ' + reply)
except socket.error as e:
print('Error Code : ' + str(e.errno) + ' Message ' + str(e.strerror))
sys.exit()
|
python
|
# Copyright (c) 2018 Sony Pictures Imageworks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from socket import gethostname
import Utils
from Manifest import QtCore, QtWidgets, opencue, os
class LocalBookingWidget(QtWidgets.QWidget):
"""
A widget for creating opencue RenderPartitions, otherwise known
as local core booking.
"""
hosts_changed = QtCore.Signal()
def __init__(self, target, parent=None):
QtWidgets.QWidget.__init__(self, parent)
# Can either be a opencue job, layer, or frame.
self.__target = target
self.__parent = parent
self.jobName = self.getTargetJobName()
QtWidgets.QVBoxLayout(self)
layout = QtWidgets.QGridLayout()
self.__select_host = QtWidgets.QComboBox(self)
self.__lba_group = QtWidgets.QGroupBox("Settings", self)
try:
owner = opencue.api.getOwner(os.environ["USER"])
for host in owner.getHosts():
if host.data.lockState != opencue.api.host_pb2.OPEN:
self.__select_host.addItem(host.data.name)
except Exception as e:
pass
self.__deed_button = None
self.__msg_widget = None
if self.__select_host.count() == 0:
self.__deed_button = QtWidgets.QPushButton("Deed This Machine", self)
msg = "You have not deeded any hosts or they are not NIMBY locked."
self.__msg_widget = QtWidgets.QLabel(msg, self)
self.layout().addWidget(self.__msg_widget)
self.layout().addWidget(self.__deed_button)
self.__deed_button.pressed.connect(self.deedLocalhost)
self.__lba_group.setDisabled(True)
self.__text_target = QtWidgets.QLabel(self.__target.data.name, self)
self.__num_threads = QtWidgets.QSpinBox(self)
self.__num_threads.setValue(1);
self.__num_cores = QtWidgets.QLineEdit(self)
self.__num_cores.setText("1");
self.__num_cores.setReadOnly(True)
self.__num_frames = QtWidgets.QSpinBox(self)
self.__num_frames.setValue(1)
self.__frame_warn = QtWidgets.QLabel(self)
self.__num_mem = QtWidgets.QSlider(self)
self.__num_mem.setValue(4);
self.__num_mem.setOrientation(QtCore.Qt.Horizontal)
self.__num_mem.setTickPosition(QtWidgets.QSlider.TicksBelow)
self.__num_mem.setTickInterval(1)
self.__text_num_mem = QtWidgets.QSpinBox(self)
self.__text_num_mem.setValue(4)
self.__text_num_mem.setSuffix("GB")
#
# Next layout is if the deed is in use.
#
layout2 = QtWidgets.QGridLayout()
self.__run_group = QtWidgets.QGroupBox("Deed Currently in Use", self)
self.__run_cores = QtWidgets.QSpinBox(self)
self.__run_mem = QtWidgets.QSlider(self)
self.__run_mem.setValue(4)
self.__run_mem.setOrientation(QtCore.Qt.Horizontal)
self.__run_mem.setTickPosition(QtWidgets.QSlider.TicksBelow)
self.__run_mem.setTickInterval(1)
self.__text_run_mem = QtWidgets.QSpinBox(self)
self.__text_run_mem.setValue(4)
self.__text_run_mem.setSuffix("GB")
self.__btn_clear = QtWidgets.QPushButton("Clear", self)
#
# Setup the signals.
#
self.__btn_clear.pressed.connect(self.clearCurrentHost)
self.__select_host.activated.connect(self.__host_changed)
self.__num_mem.valueChanged.connect(self.__text_num_mem.setValue)
self.__text_num_mem.valueChanged.connect(self.__num_mem.setValue)
self.__num_threads.valueChanged.connect(self.__calculateCores)
self.__num_frames.valueChanged.connect(self.__calculateCores)
self.__run_mem.valueChanged.connect(self.__text_run_mem.setValue)
self.__text_run_mem.valueChanged.connect(self.__run_mem.setValue)
self.layout().addWidget(QtWidgets.QLabel("Target Host:"))
self.layout().addWidget(self.__select_host)
layout.addWidget(QtWidgets.QLabel("Target:"), 1, 0)
layout.addWidget(self.__text_target, 1, 1, 1, 3)
layout.addWidget(QtWidgets.QLabel("Parallel Frames:"), 2, 0)
layout.addWidget(self.__num_frames, 2, 1)
layout.addWidget(QtWidgets.QLabel("Threads: "), 2, 2)
layout.addWidget(self.__num_threads, 2, 3)
layout.addWidget(QtWidgets.QLabel("Cores: "), 3, 0)
layout.addWidget(self.__num_cores, 3, 1)
layout.addWidget(self.__frame_warn, 3, 2, 1, 2)
layout.addWidget(QtWidgets.QLabel("Memory (GB): "), 4, 0)
layout.addWidget(self.__num_mem, 4, 1, 1, 2)
layout.addWidget(self.__text_num_mem, 4, 3)
#
# Layout 2
#
layout2.addWidget(QtWidgets.QLabel("Running Cores:"), 1, 0)
layout2.addWidget(self.__run_cores, 1, 1)
layout2.addWidget(QtWidgets.QLabel("Memory (GB): "), 3, 0)
layout2.addWidget(self.__run_mem, 3, 1, 1, 2)
layout2.addWidget(self.__text_run_mem, 3, 3)
layout2.addWidget(self.__btn_clear, 4, 0)
#
# Set up overall layouts
#
self.__run_group.setLayout(layout2)
self.__lba_group.setLayout(layout)
self.__stack = QtWidgets.QStackedLayout()
self.__stack.addWidget(self.__lba_group)
self.__stack.addWidget(self.__run_group)
self.layout().addLayout(self.__stack)
## Set initial values.
self.__host_changed(self.__select_host.currentText())
self.resize(400, 400)
def getTargetJobName(self):
if Utils.isJob(self.__target):
return self.__target.data.name
elif Utils.isLayer(self.__target):
return self.__target.name
elif Utils.isFrame(self.__target):
return self.__parent.getJob().data.name
else:
return ''
def hostAvailable(self):
return self.__select_host.count() > 0
def __host_changed(self, hostname):
hostname = str(hostname)
if not hostname:
return
host = opencue.api.findHost(str(hostname))
try:
rp = [r for r in host.getRenderPartitions() if r.job == self.jobName]
if rp:
rp = rp[0]
self.__stack.setCurrentIndex(1)
self.__btn_clear.setText("Clear")
self.__btn_clear.setDisabled(False)
self.__run_cores.setRange(1, int(host.data.idleCores) + rp.maxCores / 100)
self.__run_cores.setValue(rp.maxCores / 100)
self.__run_mem.setRange(1, int(host.data.totalMemory / 1024 / 1024))
self.__run_mem.setValue(int(rp.maxMemory / 1024 / 1024))
else:
self.__stack.setCurrentIndex(0)
self.__num_frames.setRange(1, host.data.idleCores)
self.__num_threads.setRange(1, host.data.idleCores)
self.__num_mem.setRange(1, int(host.data.totalMemory / 1024 / 1024))
self.__num_threads.setRange(1, host.data.idleCores)
except Exception as e:
print("Failed to get RenderPartition information, %s" % e)
def deedLocalhost(self):
show_name = os.environ.get("SHOW", "pipe")
try:
_show = opencue.api.findShow(show_name)
except Exception as e:
msg = QtWidgets.QMessageBox(self)
msg.setText("Error %s, please setshot and rerun cuetopia" % e)
msg.exec_()
return
user = os.environ["USER"]
try:
owner = opencue.api.getOwner(user)
except opencue.EntityNotFoundException as e:
# Owner does not exist
owner = _show.createOwner(user)
hostname = gethostname()
try:
host = opencue.api.findHost(hostname.rsplit(".",2)[0])
owner.takeOwnership(host.data.name)
self.__select_host.addItem(host.data.name)
self.__lba_group.setDisabled(False)
if self.__deed_button:
self.__deed_button.setVisible(False)
if self.__msg_widget:
self.__msg_widget.setVisible(False)
self.__deed_button = None
self.__msg_widget = None
self.hosts_changed.emit()
except Exception as e:
msg = QtWidgets.QMessageBox(self)
msg.setText("Unable to determine your machine's hostname. " +
"It is not setup properly for local booking")
msg.exec_()
def __calculateCores(self, ignore):
frames = self.__num_frames.value()
threads = self.__num_threads.value()
self.__num_cores.setText(str(frames * threads))
if self.__hasError():
self.__frame_warn.setText("Invalid thread ratio")
else:
self.__frame_warn.setText("")
def __hasError(self):
cores = int(self.__num_cores.text())
frames = self.__num_frames.value()
threads = self.__num_threads.value()
if frames * threads > self.__num_frames.maximum():
return True
elif frames == 0:
return True
elif cores % threads > 0:
return True
elif threads > cores:
return True
return False
def clearCurrentHost(self):
hostname = str(self.__select_host.currentText())
if not hostname:
return
try:
self.__btn_clear.setText("Clearing....")
self.__btn_clear.setDisabled(True)
host = opencue.api.findHost(str(hostname))
rp = [r for r in host.getRenderPartitions() if r.job == self.jobName]
if rp:
rp = rp[0]
rp.delete()
## Wait for hosts to clear out, then switch
## back to the booking widget
for i in range(0, 10):
try:
rp = [r for r in host.getRenderPartitions() if r.job == self.jobName][0]
time.sleep(1)
except:
break
self.__host_changed(hostname)
except Exception as e:
print("Error clearing host: %s" % e)
def bookCurrentHost(self):
if self.__hasError():
return
host = opencue.api.findHost(str(self.__select_host.currentText()))
rp = [r for r in host.getRenderPartitions() if r.job == self.jobName]
if rp:
# A render partition already exists on this hosts and user is modifying
rp[0].setMaxResources(int(self.__run_cores.value() * 100),
int(self.__run_mem.value()) * 1024 * 1024,
0)
else:
self.__target.addRenderPartition(str(self.__select_host.currentText()),
int(self.__num_threads.value()),
int(self.__num_cores.text()),
int(self.__num_mem.value() * 1048576),
0)
class LocalBookingDialog(QtWidgets.QDialog):
"""
A dialog to wrap a LocalBookingWidget. Provides action buttons.
"""
def __init__(self, target, parent=None):
QtWidgets.QDialog.__init__(self, parent)
QtWidgets.QVBoxLayout(self)
btn_layout = QtWidgets.QHBoxLayout()
self.setWindowTitle("Assign Local Cores")
self.__booking = LocalBookingWidget(target, parent)
self.__btn_ok = QtWidgets.QPushButton("Ok")
self.__btn_cancel = QtWidgets.QPushButton("Cancel")
self.__updateOkButton()
btn_layout.addStretch()
btn_layout.addWidget(self.__btn_ok)
btn_layout.addWidget(self.__btn_cancel)
self.layout().addWidget(self.__booking)
self.layout().addLayout(btn_layout)
self.__booking.hosts_changed.connect(self.__updateOkButton)
self.__btn_ok.pressed.connect(self.doLocalBooking)
self.__btn_cancel.pressed.connect(self.close)
def __updateOkButton(self):
self.__btn_ok.setDisabled(not self.__booking.hostAvailable())
def doLocalBooking(self):
try:
self.__booking.bookCurrentHost()
self.close()
except Exception as e:
msg = QtWidgets.QMessageBox(self)
msg.setText("Failed to book local cores. \
There were no pending frames that met your criteria. Be sure to double check \
if you're allocating enough memory and that your job has waiting frames.")
msg.setDetailedText(str(e))
msg.exec_()
|
python
|
#!/usr/bin/env python3
#
# Copyright (c) Bo Peng and the University of Texas MD Anderson Cancer Center
# Distributed under the terms of the 3-clause BSD License.
import time
import unittest
from ipykernel.tests.utils import execute, wait_for_idle
from sos_notebook.test_utils import flush_channels, sos_kernel, NotebookTest
from selenium.webdriver.common.keys import Keys
class TestFrontEnd(NotebookTest):
def test_toggle_console(self, notebook):
time.sleep(2)
assert notebook.is_console_panel_open()
notebook.toggle_console_panel()
time.sleep(2)
assert not notebook.is_console_panel_open()
notebook.toggle_console_panel()
time.sleep(2)
assert notebook.is_console_panel_open()
def test_run_in_console(self, notebook):
idx = notebook.call("print(1)", kernel="SoS")
notebook.execute_cell(idx, in_console=True)
# the latest history cell
assert "1" == notebook.get_cell_output(-1, in_console=True)
# if the cell is non-SoS, the console should also change kernel
idx = notebook.call("cat(123)", kernel="R")
notebook.execute_cell(idx, in_console=True)
# the latest history cell
assert "123" == notebook.get_cell_output(-1, in_console=True)
idx = notebook.call("print(12345)", kernel="SoS")
notebook.execute_cell(idx, in_console=True)
# the latest history cell
assert "12345" == notebook.get_cell_output(-1, in_console=True)
def test_run_directly_in_console(self, notebook):
notebook.edit_prompt_cell('print("haha")', kernel='SoS', execute=True)
assert "haha" == notebook.get_cell_output(-1, in_console=True)
notebook.edit_prompt_cell('cat("haha2")', kernel="R", execute=True)
assert "haha2" == notebook.get_cell_output(-1, in_console=True)
def test_history_in_console(self, notebook):
notebook.edit_prompt_cell("a = 1", execute=True)
assert "" == notebook.get_prompt_content()
notebook.edit_prompt_cell("b <- 2", kernel="R", execute=True)
assert "" == notebook.get_prompt_content()
notebook.prompt_cell.send_keys(Keys.UP)
assert "b <- 2" == notebook.get_prompt_content()
notebook.prompt_cell.send_keys(Keys.UP)
assert "a = 1" == notebook.get_prompt_content()
# FIXME: down key does not work, perhaps because the cell is not focused and
# the first step would be jumping to the end of the line
notebook.prompt_cell.send_keys(Keys.DOWN)
notebook.prompt_cell.send_keys(Keys.DOWN)
# assert 'b <- 2' == notebook.get_prompt_content()
def test_clear_history(self, notebook):
notebook.edit_prompt_cell("a = 1", execute=True)
notebook.edit_prompt_cell("b <- 2", kernel="R", execute=True)
# use "clear" to clear all panel cells
notebook.edit_prompt_cell("clear", kernel="SoS", execute=False)
# we cannot wait for the completion of the cell because the cells
# will be cleared
notebook.prompt_cell.send_keys(Keys.CONTROL, Keys.ENTER)
assert not notebook.panel_cells
def test_switch_kernel(self, notebook):
kernels = notebook.get_kernel_list()
assert "SoS" in kernels
assert "R" in kernels
backgroundColor = {
"SoS": [0, 0, 0],
"R": [220, 220, 218],
"python3": [255, 217, 26],
}
# test change to R kernel by click
notebook.select_kernel(index=0, kernel_name="R", by_click=True)
# check background color for R kernel
assert backgroundColor["R"], notebook.get_input_backgroundColor(0)
# the cell keeps its color after evaluation
notebook.edit_cell(
index=0,
content="""\
%preview -n rn
rn <- rnorm(5)
""",
render=True,
)
output = notebook.get_cell_output(0)
assert "rn" in output and "num" in output
assert backgroundColor["R"], notebook.get_output_backgroundColor(0)
# test $get and shift to SoS kernel
idx = notebook.call(
"""\
%get rn --from R
len(rn)
""",
kernel="SoS",
)
assert backgroundColor["SoS"], notebook.get_input_backgroundColor(idx)
assert "5" in notebook.get_cell_output(idx)
# switch to python3 kernel
idx = notebook.call(
"""\
%use Python3
""",
kernel="SoS",
)
assert backgroundColor["python3"] == notebook.get_input_backgroundColor(
idx)
notebook.append_cell("")
assert backgroundColor["python3"] == notebook.get_input_backgroundColor(
idx)
# def testInterrupt(self, notebook):
# # switch to python3 kernel
# from textwrap import dedent
# from selenium.webdriver.common.by import By
# from selenium.webdriver import ActionChains
# import time
# index = len(notebook.cells)
# notebook.add_cell(
# index=index - 1, cell_type="code", content=dedent(
# """\
# import time
# while True:
# time.sleep(1)
# """,
# ))
# notebook.select_kernel(index=index, kernel_name='SoS', by_click=True)
# notebook._focus_cell(index)
# notebook.current_cell.send_keys(Keys.CONTROL, Keys.ENTER)
# time.sleep(2)
# top_menu = notebook.browser.find_element_by_id("kernel_menu")
# ActionChains(notebook.browser).move_to_element(top_menu).click().perform()
# int_menu = notebook.browser.find_element_by_id("int_kernel").find_elements_by_tag_name('a')[0]
# ActionChains(notebook.browser).move_to_element(int_menu).click().perform()
# notebook._wait_for_done(index, expect_error=True)
def get_completions(kc, text):
flush_channels()
kc.complete(text, len(text))
reply = kc.get_shell_msg(timeout=2)
return reply["content"]
def inspect(kc, name, pos=0):
flush_channels()
kc.inspect(name, pos)
reply = kc.get_shell_msg(timeout=2)
return reply["content"]
def is_complete(kc, code):
flush_channels()
kc.is_complete(code)
reply = kc.get_shell_msg(timeout=2)
return reply["content"]
class TestKernelInteraction(unittest.TestCase):
def testInspector(self):
with sos_kernel() as kc:
# match magics
self.assertTrue("%get " in get_completions(kc, "%g")["matches"])
self.assertTrue("%get " in get_completions(kc, "%")["matches"])
self.assertTrue("%with " in get_completions(kc, "%w")["matches"])
# path complete
self.assertGreater(len(get_completions(kc, "!ls ")["matches"]), 0)
self.assertEqual(
len(get_completions(kc, "!ls SOMETHING")["matches"]), 0)
#
wait_for_idle(kc)
# variable complete
execute(kc=kc, code="alpha=5")
wait_for_idle(kc)
execute(kc=kc, code="%use Python3")
wait_for_idle(kc)
self.assertTrue("alpha" in get_completions(kc, "al")["matches"])
self.assertTrue("all(" in get_completions(kc, "al")["matches"])
# for no match
self.assertEqual(
len(get_completions(kc, "alphabetatheta")["matches"]), 0)
# get with all variables in
self.assertTrue("alpha" in get_completions(kc, "%get ")["matches"])
self.assertTrue(
"alpha" in get_completions(kc, "%get al")["matches"])
# with use and restart has kernel name
self.assertTrue(
"Python3" in get_completions(kc, "%with ")["matches"])
self.assertTrue(
"Python3" in get_completions(kc, "%use ")["matches"])
self.assertTrue(
"Python3" in get_completions(kc, "%shutdown ")["matches"])
self.assertTrue(
"Python3" in get_completions(kc, "%shutdown ")["matches"])
self.assertTrue(
"Python3" in get_completions(kc, "%use Py")["matches"])
#
self.assertEqual(
len(get_completions(kc, "%use SOME")["matches"]), 0)
#
wait_for_idle(kc)
execute(kc=kc, code="%use SoS")
wait_for_idle(kc)
def testCompleter(self):
with sos_kernel() as kc:
# match magics
ins_print = inspect(kc, "print")["data"]["text/plain"]
self.assertTrue("print" in ins_print,
"Returned: {}".format(ins_print))
wait_for_idle(kc)
#
# keywords
ins_depends = inspect(kc, "depends:")["data"]["text/plain"]
self.assertTrue("dependent targets" in ins_depends,
"Returned: {}".format(ins_depends))
wait_for_idle(kc)
#
execute(kc=kc, code="alpha=5")
wait_for_idle(kc)
execute(kc=kc, code="%use Python3")
wait_for_idle(kc)
# action
ins_run = inspect(kc, "run:")["data"]["text/plain"]
self.assertTrue("sos.actions" in ins_run,
"Returned: {}".format(ins_run))
wait_for_idle(kc)
#
ins_alpha = inspect(kc, "alpha")["data"]["text/plain"]
self.assertTrue("5" in ins_alpha, "Returned: {}".format(ins_alpha))
wait_for_idle(kc)
for magic in ("get", "run", "sosrun"):
ins_magic = inspect(kc, "%" + magic, 2)["data"]["text/plain"]
self.assertTrue("usage: %" + magic in ins_magic,
"Returned: {}".format(ins_magic))
wait_for_idle(kc)
execute(kc=kc, code="%use SoS")
wait_for_idle(kc)
def testIsComplete(self):
with sos_kernel() as kc:
# match magics
status = is_complete(kc, "prin")
self.assertEqual(status["status"], "complete")
#
status = is_complete(kc, "a=1")
self.assertEqual(status["status"], "complete")
#
status = is_complete(kc, "")
self.assertEqual(status["status"], "complete")
# the status seems to be version dependent on ipython
#status = is_complete(kc, "input:\n a=1,")
#self.assertEqual(status["status"], "complete")
#
#status = is_complete(kc, "parameter: a=1,")
#self.assertEqual(status["status"], "complete")
#
status = is_complete(kc, "%dict -r")
self.assertEqual(status["status"], "complete")
wait_for_idle(kc)
if __name__ == "__main__":
unittest.main()
|
python
|
"""Base method for all global interpretations. Is
a subclass of base ModelInterpreter"""
from ..model_interpreter import ModelInterpreter
class BaseGlobalInterpretation(ModelInterpreter):
"""Base class for global model interpretations"""
pass
|
python
|
import json
import unittest
from pyshared.server.ref import CallCommand
from pyshared.server.ref import DelCommand
from pyshared.server.ref import ListCommand
from pyshared.server.ref import LocalSharedResourcesManager
from pyshared.server.ref import SetCommand
from pyshared.server.ref import default_command_mapper
from pyshared.server.rx import ReactiveSharedResourcesServer
from rx import Observable
class DefaultTest(unittest.TestCase):
shared_resource = None
def setUp(self):
self.shared_resource = LocalSharedResourcesManager({
'number': 10
})
def test_call(self):
call_command = CallCommand(
resource_name='number',
method='__add__',
args=[7]
)
self.assertEqual(17, call_command.exec(self.shared_resource))
class ReactiveTest(unittest.TestCase):
reactive_server = None
def setUp(self):
self.reactive_server = ReactiveSharedResourcesServer(LocalSharedResourcesManager({
'number': 10
}))
def test_call(self):
call_command = CallCommand(
resource_name='number',
method='__sub__',
args=[5]
)
result = []
self.reactive_server(call_command).subscribe(result.append)
self.assertEqual(result, [5])
def test_call_with_result(self):
call_command = CallCommand(
resource_name='number',
method='__add__',
args=[5],
result='result'
)
result = []
self.reactive_server(call_command).subscribe(result.append)
self.assertEqual(result, [{'result': 15}])
def test_call_any_result(self):
call_command = CallCommand(
resource_name='number',
method='__add__',
args=[5],
result=True
)
result = []
self.reactive_server(call_command).subscribe(result.append)
self.assertEqual(15, list(result[0].values())[0])
def test_list(self):
list_command = ListCommand()
result = []
self.reactive_server(list_command).subscribe(result.append)
self.assertEqual(result, [['number']])
def test_set(self):
set_command = SetCommand(
resource_name='a',
value=10
)
result = []
self.reactive_server(set_command).subscribe(result.append)
self.assertEqual(result, [{'a': 10}])
def test_del(self):
del_command = DelCommand(resource_name='number')
result = []
self.reactive_server(del_command).subscribe(result.append)
self.assertEqual(result, ['number'])
def test_mapper(self):
result = []
Observable.from_([
{'cmd': 'call', 'resource_name': 'number', 'method': '__sub__', 'args': [1]},
{'cmd': 'call', 'resource_name': 'number', 'method': '__add__', 'args': [5]},
]).map(default_command_mapper) \
.flat_map(self.reactive_server) \
.subscribe(result.append)
self.assertEqual(result, [9, 15])
def test_json_mapper(self):
result = []
Observable.from_([
'{"cmd": "call", "resource_name": "number", "method": "__sub__", "args": [1]}',
'{"cmd": "call", "resource_name": "number", "method": "__add__", "args": [5]}',
]).map(json.loads) \
.map(default_command_mapper) \
.flat_map(self.reactive_server) \
.subscribe(result.append)
self.assertEqual(result, [9, 15])
|
python
|
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
class RightBoard:
def __init__(self, driver):
self.driver = driver
self.elements = RightBoardElements(self.driver)
def click(self, elem):
self.driver.execute_script(
"arguments[0].click();",
elem
)
@staticmethod
def __input_into(elem, value):
current_input_text = elem.get_attribute("value")
elem.send_keys(len(current_input_text) * Keys.BACKSPACE)
elem.send_keys(value)
# OPERATIONS
def open_orders(self):
self.click(self.elements.open_orders_tab)
def completed_orders(self):
self.click(self.elements.completed_orders_tab)
def buy_tab(self):
self.click(self.elements.buy_tab)
def sell_tab(self):
self.click(self.elements.sell_tab)
def input_buy_at_price(self, new_value):
self.__input_into(self.elements.at_price_input, new_value)
def input_amount_to_buy(self, new_value):
self.__input_into(self.elements.amount_input, new_value)
def input_total_money_to_spend(self, new_value):
self.__input_into(self.elements.total_input, new_value)
def buy_sell_button(self):
self.click(self.elements.buy_sell_button)
class RightBoardElements:
CONTAINER_PATH = "./div/div[3]/div/div[3]"
# UPPER SECTION
OPEN_ORDERS_PATH = CONTAINER_PATH + "/div[3]/div/div[1]"
COMPLETED_ORDERS_PATH = CONTAINER_PATH + "/div[3]/div/div[2]"
TOGGLE_CONTEXT_CURRENCY_PATH = CONTAINER_PATH + "/div[3]/div[2]/div/div/div[1]/div/div/input"
CANCEL_ALL_ORDERS_PATH = CONTAINER_PATH + "/div[3]/div[2]/div/div/div[1]/button"
ORDER_LIST_PATH = CONTAINER_PATH + "/div[3]/div[2]/div/div/div[3]/div/div"
# LOWER SECTION
BUY_TAB_PATH = CONTAINER_PATH + "/div[4]/div/div[1]/div[1]"
SELL_TAB_PATH = CONTAINER_PATH + "/div[4]/div/div[1]/div[2]"
AT_PRICE_PATH = CONTAINER_PATH + "/div[4]/div/div[2]/div/form/div[2]/div/div/input"
AMOUNT_PATH = CONTAINER_PATH + "/div[4]/div/div[2]/div/form/div[3]/div/div/input"
TOTAL_BASE_CURRENCY_PATH = CONTAINER_PATH + "/div[4]/div/div[2]/div/form/div[4]/div/div/input"
BUY_OR_SELL_BUTTON_PATH = CONTAINER_PATH + "/div[4]/div/div[2]/div/form/button"
def __init__(self, driver):
self.driver = driver
self.root = self.driver.find_element(By.ID, "root")
self.container = self.root.find_element(By.XPATH, self.CONTAINER_PATH)
# UPPER SECTION
self.open_orders_tab = self.root.find_element(By.XPATH, self.OPEN_ORDERS_PATH)
self.completed_orders_tab = self.root.find_element(By.XPATH, self.COMPLETED_ORDERS_PATH)
# self.toggle_context_only_orders = self.root.find_element(By.XPATH, self.TOGGLE_CONTEXT_CURRENCY_PATH)
# self.cancel_all_orders = self.root.find_element(By.XPATH, self.CANCEL_ALL_ORDERS_PATH)
# self.order_list_view = self.root.find_element(By.XPATH, self.ORDER_LIST_PATH)
# LOWER SECTION
self.buy_tab = self.root.find_element(By.XPATH, self.BUY_TAB_PATH)
self.sell_tab = self.root.find_element(By.XPATH, self.SELL_TAB_PATH)
@property
def at_price_input(self):
return self.root.find_element(By.XPATH, self.AT_PRICE_PATH)
@property
def amount_input(self):
return self.root.find_element(By.XPATH, self.AMOUNT_PATH)
@property
def total_input(self):
return self.root.find_element(By.XPATH, self.TOTAL_BASE_CURRENCY_PATH)
@property
def buy_sell_button(self):
return self.root.find_element(By.XPATH, self.BUY_OR_SELL_BUTTON_PATH)
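# A minimal usage sketch (not part of the original module): assumes a running Selenium
# WebDriver pointed at a page whose DOM matches the XPaths above; values are illustrative.
#
#   from selenium import webdriver
#   driver = webdriver.Chrome()
#   driver.get("http://localhost:3000")
#   board = RightBoard(driver)
#   board.buy_tab()
#   board.input_buy_at_price("0.015")
#   board.input_amount_to_buy("100")
#   board.buy_sell_button()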
|
python
|
import threading
import time
import signal
import sys
from callbacks_event_listener import EventListener
from wpwithin_python import WPWithin,\
PricePerUnit,\
Price,\
Service,\
CommonPSPKeys,\
WorldpayPSPKeys,\
WP_PSP_NAME
client = WPWithin("127.0.0.1",
9090,
True,
start_callback_server=True,
callback_port=9092,
event_listener=EventListener())
client.setup("Python3 Device", "Sample Python3 producer device")
psp_config = {
CommonPSPKeys.psp_name: WP_PSP_NAME,
CommonPSPKeys.hte_public_key: "T_C_6a38539b-89d0-4db9-bec3-d825779c1809",
CommonPSPKeys.hte_private_key: "T_S_6b0f27d5-3787-4304-a596-01160c49a55d",
WorldpayPSPKeys.wp_api_endpoint: "https://api.worldpay.com/v1",
WorldpayPSPKeys.wp_merchant_client_key: "T_C_6a38539b-89d0-4db9-bec3-d825779c1809",
WorldpayPSPKeys.wp_merchant_service_key: "T_S_6b0f27d5-3787-4304-a596-01160c49a55d"
}
client.init_producer(psp_config)
price_per_unit = PricePerUnit(amount=650, currency_code="GBP")
rw_price = Price(price_id=1,
description="Car Wash",
price_per_unit=price_per_unit,
unit_id=2,
unit_description="Single wash")
service = Service(service_id=1,
name="RoboWash",
description="Car washed by robot",
prices={1: rw_price})
client.add_service(service)
print("Start service broadcast for 20 seconds")
client.start_service_broadcast(20000)
def signal_handler(signal_number, stack_frame):
print("shutting down...")
client.shutdown()
signal.signal(signal.SIGINT, signal_handler)
while True:
time.sleep(1)  # idle until Ctrl+C invokes the shutdown handler
|
python
|
from django.urls import path, include
urlpatterns = [
path('', include('accounts.urls.accounts')),
path('', include('accounts.urls.employers')),
path('', include('accounts.urls.professionals')),
]
|
python
|
from django.shortcuts import render
from django.http import HttpResponse
from notice.models import Notice, Qna
from main.models import search_word
from django.views.generic import ListView
from django.db.models import Q
from django.utils import timezone
import datetime
def main(request):
# Main_Notice = Notice.objects.order_by('-id')[0:4]  # 4 most recent posts for the main page
Hot_QNA = Qna.objects.filter(create_at__gte=timezone.now()-datetime.timedelta(days=7)).order_by('-hits')[0:4]
# top 4 QnA posts by view count (from the last 7 days)
Rank = search_word.objects.order_by('-hits')[0:10]  # top 10 search terms by search count
time1 = timezone.now()
time7 = timezone.now()-datetime.timedelta(days=7)
return render(request, 'main/main.html', {
# 'MainNotice':Main_Notice,
'Rank':Rank,
'HotQNA':Hot_QNA,
'time1' : time1,
'time7' : time7,
})
class SearchView(ListView):  # posts
model = search_word
template_name = 'main/Search_result.html'
def get_queryset(self):
keyword = self.request.GET.get('q', '')
if keyword:
if search_word.objects.filter(keyword=keyword):
a = search_word.objects.get(keyword=keyword)
a.hits += 1
a.save()
else:
b = search_word(keyword=keyword)
b.save()
return keyword
|
python
|
import json
from django import template
register = template.Library()
@register.filter
def here(page, request):
return request.path.startswith(page.get_absolute_url())
@register.simple_tag
def node_module(path):
return '/node_modules/{}'.format(path)
@register.assignment_tag(takes_context=True)
def navigation_json(context, pages, section=None):
"""
Renders a navigation list for the given pages.
The pages should all be a subclass of PageBase, and possess a get_absolute_url() method.
You can also specify an alias for the navigation, at which point it will be set in the
context rather than rendered.
"""
request = context["request"]
# Compile the entries.
def page_entry(page):
# Do nothing if the page is to be hidden from not logged in users
if page.hide_from_anonymous and not request.user.is_authenticated():
return
# Do nothing if the page is set to offline
if not page.is_online:
return
url = page.get_absolute_url()
return {
"url": url,
"title": str(page),
"here": request.path.startswith(url),
"children": [page_entry(x) for x in page.navigation if
page is not request.pages.homepage]
}
# All the applicable nav items
entries = [entry for entry in (page_entry(x) for x in pages) if entry is not None]
# Add the section.
if section:
section_entry = page_entry(section)
entries = [section_entry] + list(entries)
return json.dumps(entries)
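# A minimal usage sketch (hypothetical template; the "navigation" library name is an
# assumption, load whatever name this tag module is registered under in your project):
#
#   {% load navigation %}
#   {% navigation_json request.pages.current.navigation as nav_json %}
#   <script>var navigation = {{ nav_json|safe }};</script>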
|
python
|
from django.db import models
from django.contrib.auth.models import User
from pyuploadcare.dj.models import ImageField
# Create your models here.
class Neighborhood(models.Model):
name = models.CharField(max_length=100)
location = models.CharField(max_length=100)
admin = models.ForeignKey("Profile", on_delete=models.CASCADE, related_name='hood')
health_department = models.TextField(null=True, blank=True)
police_department = models.TextField(null=True, blank=True)
description = models.TextField()
logo = models.ImageField(upload_to = 'images/', default='')
def __str__(self):
return self.name
def create_neighborhood(self):
self.save()
def delete_neighborhood(self):
self.delete()
@classmethod
def find_neighborhood(cls, neighborhood_id):
return cls.objects.filter(id=neighborhood_id)
class Meta:
ordering =['-pk']
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='profile')
name = models.CharField(max_length=50, blank=True)
bio = models.TextField(max_length=300, blank=True, default='No bio')
profile_pic = models.ImageField(upload_to='images/', default='default.png')
location = models.CharField(max_length=100, blank=True, null=True)
neighborhood = models.ForeignKey(Neighborhood, on_delete=models.SET_NULL, null=True, related_name='members', blank=True)
contact = models.CharField(max_length=20, blank=True)
def __str__(self):
return self.user.username
def save_user_profile(self):
self.save()
@classmethod
def get_hood_members(cls,hood):
members=cls.objects.filter(hood__icontains=hood)
return members
class Business(models.Model):
name = models.CharField(max_length=300)
user = models.ForeignKey(Profile, on_delete=models.CASCADE, related_name='owner')
neighborhood = models.ForeignKey(Neighborhood, on_delete=models.CASCADE, related_name='business')
email = models.EmailField(max_length=100)
description = models.TextField(max_length=1000)
def __str__(self):
return self.name
def create_business(self):
self.save()
def delete_business(self):
self.delete()
@classmethod
def find_business(cls,business_id):
found=cls.objects.get(id=business_id)
return found
class Post(models.Model):
title = models.CharField(max_length=100, null=True)
post = models.TextField()
posted_on = models.DateTimeField(auto_now_add=True)
user = models.ForeignKey(Profile, on_delete=models.CASCADE,related_name='post_owner')
hood = models.ForeignKey(Neighborhood, on_delete=models.CASCADE,related_name='hood_post')
def __str__(self):
return self.title
def create_post(self):
self.save()
def delete_post(self):
self.delete()
class Meta:
ordering =['-pk']
|
python
|
##
# Copyright © 2020, The Gust Framework Authors. All rights reserved.
#
# The Gust/Elide framework and tools, and all associated source or object computer code, except where otherwise noted,
# are licensed under the Zero Prosperity license, which is enclosed in this repository, in the file LICENSE.txt. Use of
# this code in object or source form requires and implies consent and agreement to that license in principle and
# practice. Source or object code not listing this header, or unless specified otherwise, remain the property of
# Elide LLC and its suppliers, if any. The intellectual and technical concepts contained herein are proprietary to
# Elide LLC and its suppliers and may be covered by U.S. and Foreign Patents, or patents in process, and are protected
# by trade secret and copyright law. Dissemination of this information, or reproduction of this material, in any form,
# is strictly forbidden except in adherence with assigned license requirements.
##
load(
"@io_bazel_rules_closure//closure/private/rules:soy_library.bzl",
_soy_library = "soy_library",
)
load(
"@io_bazel_rules_closure//closure:defs.bzl",
_closure_js_template_library = "closure_js_template_library",
_closure_py_template_library = "closure_py_template_library",
_closure_java_template_library = "closure_java_template_library",
_closure_messages = "closure_messages",
)
load(
"//defs/toolchain:schema.bzl",
"JAVAPROTO_POSTFIX_",
"CLOSUREPROTO_POSTFIX_",
)
load(
"//defs:config.bzl",
_JS_TEMPLATES = "JS_TEMPLATES",
_JAVA_TEMPLATES = "JAVA_TEMPLATES",
_PYTHON_TEMPLATES = "PYTHON_TEMPLATES",
)
INJECTED_SSR_SOY_DEPS = [
"@gust//gust/page:page_soy",
]
INJECTED_SSR_PROTO_DEPS = [
"@gust//gust/page:page_proto",
]
def _template_library(name,
srcs,
soy_deps = [],
js_deps = [],
py_deps = [],
java_deps = [],
proto_deps = [],
style_deps = [],
js = _JS_TEMPLATES,
java = _JAVA_TEMPLATES,
python = _PYTHON_TEMPLATES,
java_package = None,
precompile = True):
""" Declare a universal, cross-platform template library, making use of the built-in
Soy integration. """
_soy_library(
name = name,
srcs = srcs,
deps = soy_deps,
proto_deps = proto_deps,
)
if js:
_closure_js_template_library(
name = "%s-js" % name,
srcs = srcs,
deps = js_deps + style_deps,
proto_deps = proto_deps,
)
if python:
_closure_py_template_library(
name = "%s-py" % name,
srcs = srcs,
deps = soy_deps + style_deps,
proto_deps = proto_deps,
)
if java:
_closure_java_template_library(
name = "%s-java" % name,
srcs = srcs,
deps = soy_deps,
java_deps = (
[("%s-%s" % (p, JAVAPROTO_POSTFIX_)) for p in proto_deps] +
[
"@safe_html_types//:java",
"@safe_html_types//:java-proto",
] +
[("%s-java_jcompiled" % p) for p in soy_deps]),
proto_deps = proto_deps,
precompile = precompile,
java_package = java_package,
)
def _ssr_library(name,
srcs,
soy_deps = [],
js_deps = [],
py_deps = [],
java_deps = [],
proto_deps = [],
style_deps = [],
java = _JAVA_TEMPLATES,
python = _PYTHON_TEMPLATES,
java_package = None,
precompile = True,
**kwargs):
""" Declare a template for use exclusively during SSR (Server-Side Rendering). This
also injects additional SSR-related dependencies automatically. """
_template_library(
name = name,
srcs = srcs,
soy_deps = (soy_deps or []) + INJECTED_SSR_SOY_DEPS,
js_deps = js_deps,
py_deps = py_deps,
java_deps = java_deps,
proto_deps = (proto_deps or []) + INJECTED_SSR_PROTO_DEPS,
style_deps = style_deps,
java_package = java_package,
js = False,
java = java,
python = python,
precompile = precompile,
)
def _template_messages(name,
deps,
targetLocale,
sourceLocale = "en",
**kwargs):
""" Generate an XLIFF messages file for the provided set of templates. """
_closure_messages(
name = name,
deps = deps,
targetLocale = targetLocale,
sourceLocale = sourceLocale,
**kwargs
)
ssr_library = _ssr_library
template_library = _template_library
template_messages = _template_messages
|
python
|
#!/usr/bin/env python3
import sys
from os import path
sys.path.insert(0, path.join(path.dirname(__file__)))
from importers.monzo_debit import Importer as monzo_debit_importer
from beancount.ingest import extract
account_id = "acc_yourMonzoAccountId"
account = "Assets:Monzo:Something"
CONFIG = [
monzo_debit_importer(account_id, account),
]
extract.HEADER = ';; -*- mode: org; mode: beancount; coding: utf-8; -*-\n'
|
python
|
import os
import sys
import tensorflow as tf
from absl import app, logging
from absl.flags import argparse_flags
import _jsonnet
def parse_args(args, parser=None):
# Parse command line arguments and return the parsed namespace
parser = parser if parser else argparse_flags.ArgumentParser()
parser.add_argument("input", type=str)  # path of the jsonnet file to evaluate
return parser.parse_args(args[1:])
def local_parse_args(args):
parser = argparse_flags.ArgumentParser()
parse_args(args, parser)
return parser.parse_args(args[1:])
# Returns content if worked, None if file not found, or throws an exception
def try_path(dir, rel):
if not rel:
raise RuntimeError("Got invalid filename (empty string).")
if rel[0] == "/":
full_path = rel
else:
full_path = dir + rel
if full_path[-1] == "/":
raise RuntimeError("Attempted to import a directory")
if not os.path.isfile(full_path):
return full_path, None
with open(full_path) as f:
return full_path, f.read()
def import_callback(dir, rel):
full_path, content = try_path(dir, rel)
if content:
return full_path, content
raise RuntimeError("File not found")
def main(args):
try:
_jsonnet.evaluate_file(
args.input, ext_vars={"MODEL_PATH": "Bob"}, import_callback=import_callback,
)
except RuntimeError as e:
logging.error(e)
sys.exit(-1)
if __name__ == "__main__":
tf.compat.v1.disable_v2_behavior()
app.run(main, flags_parser=parse_args)
|
python
|
import logging
from typing import Any, List, Optional
from homeassistant.components.select import SelectEntity
from gehomesdk import ErdCodeType
from ...devices import ApplianceApi
from .ge_erd_entity import GeErdEntity
from .options_converter import OptionsConverter
_LOGGER = logging.getLogger(__name__)
class GeErdSelect(GeErdEntity, SelectEntity):
"""ERD-based selector entity"""
device_class = "select"
def __init__(self, api: ApplianceApi, erd_code: ErdCodeType, converter: OptionsConverter, erd_override: str = None, icon_override: str = None, device_class_override: str = None):
super().__init__(api, erd_code, erd_override=erd_override, icon_override=icon_override, device_class_override=device_class_override)
self._converter = converter
@property
def current_option(self):
return self._converter.to_option_string(self.appliance.get_erd_value(self.erd_code))
@property
def options(self) -> List[str]:
"Return a list of options"
return self._converter.options
async def async_select_option(self, option: str) -> None:
"""Change the selected option."""
_LOGGER.debug(f"Setting select from {self.current_option} to {option}")
if option != self.current_option:
await self.appliance.async_set_erd_value(self.erd_code, self._converter.from_option_string(option))
|
python
|
import webbrowser
def open_page(url: str, new: int = 0, autoraise: bool = True):
webbrowser.open(url, new=new, autoraise=autoraise)
actions = {'open webpage': open_page}
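if __name__ == "__main__":
    # A minimal usage sketch; the URL is illustrative and new=2 requests a new browser tab.
    actions["open webpage"]("https://example.com", new=2)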
|
python
|
def util(node, visited, recstack, graph):
visited[node]=True
recstack[node]=True
for i in graph[node]:
if visited[i]==False:
if util(i, visited, recstack, graph):
return True
elif recstack[i]==True:
return True
recstack[node]=False
return False
def isCyclic(n, graph):
visited=[False]*(n)
recstack=[False]*(n)
for i in range(n):
if util(i, visited, recstack, graph):
return 1
return 0
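if __name__ == "__main__":
    # A small self-check (hypothetical adjacency lists over nodes 0..2, not part of the original).
    cyclic_graph = {0: [1], 1: [2], 2: [0]}
    acyclic_graph = {0: [1], 1: [2], 2: []}
    print(isCyclic(3, cyclic_graph))   # 1 -> the graph contains a cycle
    print(isCyclic(3, acyclic_graph))  # 0 -> no cycle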
|
python
|
#!/usr/bin/env python
"""
@package mi.dataset.parser.test
@file marine-integrations/mi/dataset/parser/test/test_adcpt_m_log9.py
@author Tapana Gupta
@brief Test code for adcpt_m_log9 data parser
Files used for testing:
ADCPT_M_LOG9_simple.txt
File contains 25 valid data records
ADCPT_M_LOG9_large.txt
File contains 615 valid data records
ADCPT_M_LOG9_bad.txt
File contains 4 invalid data records
"""
import unittest
import os
from nose.plugins.attrib import attr
from mi.core.log import get_logger; log = get_logger()
from mi.dataset.test.test_parser import ParserUnitTestCase
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.parser.adcpt_m_log9 import AdcptMLog9Parser
from mi.dataset.test.test_parser import BASE_RESOURCE_PATH
RESOURCE_PATH = os.path.join(BASE_RESOURCE_PATH, 'adcpt_m', 'resource')
MODULE_NAME = 'mi.dataset.parser.adcpt_m_log9'
SIMPLE_LOG_FILE = "ADCPT_M_LOG9_simple.txt"
LARGE_LOG_FILE = "ADCPT_M_LOG9_large.txt"
# Define number of expected records/exceptions for various tests
NUM_REC_LARGE_LOG_FILE = 615
NUM_REC_SIMPLE_LOG_FILE = 25
YAML_FILE = "ADCPT_M_LOG9_simple.yml"
LARGE_YAML_FILE = "ADCPT_M_LOG9_large.yml"
INVALID_DATA_FILE_1 = 'ADCPT_M_LOG9_bad.txt'
NUM_INVALID_EXCEPTIONS = 9
@attr('UNIT', group='mi')
class AdcptMLog9ParserUnitTestCase(ParserUnitTestCase):
"""
adcpt_m_log9 Parser unit test suite
"""
def setUp(self):
ParserUnitTestCase.setUp(self)
self.rec_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: MODULE_NAME,
DataSetDriverConfigKeys.PARTICLE_CLASS: None
}
def open_file(self, filename):
file = open(os.path.join(RESOURCE_PATH, filename), mode='r')
return file
def open_file_write(self, filename):
file = open(os.path.join(RESOURCE_PATH, filename), mode='w')
return file
def create_rec_parser(self, file_handle):
"""
This function creates a Adcpt_m_log9 parser for recovered data.
"""
parser = AdcptMLog9Parser(self.rec_config,
file_handle,
self.exception_callback)
return parser
def test_verify_record(self):
"""
Simple test to verify that records are successfully read and parsed from a data file
"""
log.debug('===== START SIMPLE TEST =====')
in_file = self.open_file(SIMPLE_LOG_FILE)
parser = self.create_rec_parser(in_file)
# In a single read, get all particles in this file.
number_expected_results = NUM_REC_SIMPLE_LOG_FILE
result = parser.get_records(number_expected_results)
self.assertEqual(len(result), number_expected_results)
in_file.close()
self.assertListEqual(self.exception_callback_value, [])
log.debug('===== END SIMPLE TEST =====')
def test_invalid_data(self):
"""
Read data from a file containing invalid data.
Verify that no particles are created and the correct number of exceptions are detected.
"""
log.debug('===== START TEST INVALID SENSOR DATA =====')
in_file = self.open_file(INVALID_DATA_FILE_1)
parser = self.create_rec_parser(in_file)
# Try to get records and verify that none are returned.
result = parser.get_records(1)
self.assertEqual(result, [])
self.assertEqual(len(self.exception_callback_value), NUM_INVALID_EXCEPTIONS)
in_file.close()
log.debug('===== END TEST INVALID SENSOR DATA =====')
def test_verify_record_against_yaml(self):
"""
Read data from a file and pull out data particles
one at a time. Verify that the results are those we expected.
"""
log.debug('===== START YAML TEST =====')
in_file = self.open_file(LARGE_LOG_FILE)
parser = self.create_rec_parser(in_file)
# In a single read, get all particles in this file.
number_expected_results = NUM_REC_LARGE_LOG_FILE
result = parser.get_records(number_expected_results)
self.assert_particles(result, LARGE_YAML_FILE, RESOURCE_PATH)
in_file.close()
self.assertListEqual(self.exception_callback_value, [])
log.debug('===== END YAML TEST =====')
def create_yml_file(self):
"""
Create a yml file corresponding to an actual recovered dataset. This is not an actual test - it allows
us to create what we need for integration testing, i.e. a yml file.
"""
in_file = self.open_file(LARGE_LOG_FILE)
parser = self.create_rec_parser(in_file)
log.debug("Getting records...")
# In a single read, get all particles in this file.
result = parser.get_records(NUM_REC_LARGE_LOG_FILE)
log.debug("Done.")
self.particle_to_yml(result, LARGE_YAML_FILE)
def particle_to_yml(self, particles, filename):
"""
This is added as a testing helper, not actually as part of the parser tests. Since the same particles
will be used for the driver test it is helpful to write them to .yml in the same form they need in the
results.yml fids here.
"""
# open write append, if you want to start from scratch manually delete this fid
fid = self.open_file_write(filename)
fid.write('header:\n')
fid.write(" particle_object: 'MULTIPLE'\n")
fid.write(" particle_type: 'MULTIPLE'\n")
fid.write('data:\n')
for i in range(0, len(particles)):
particle_dict = particles[i].generate_dict()
fid.write(' - _index: %d\n' % (i+1))
fid.write(' particle_object: %s\n' % particles[i].__class__.__name__)
fid.write(' particle_type: %s\n' % particle_dict.get('stream_name'))
fid.write(' internal_timestamp: %f\n' % particle_dict.get('internal_timestamp'))
for val in particle_dict.get('values'):
if isinstance(val.get('value'), float):
fid.write(' %s: %16.3f\n' % (val.get('value_id'), val.get('value')))
elif isinstance(val.get('value'), str):
fid.write(" %s: '%s'\n" % (val.get('value_id'), val.get('value')))
else:
fid.write(' %s: %s\n' % (val.get('value_id'), val.get('value')))
fid.close()
|
python
|
# Inspired by ABingo: www.bingocardcreator.com/abingo
HANDY_Z_SCORE_CHEATSHEET = (
(1, float('-Inf')),
(0.10, 1.29),
(0.05, 1.65),
(0.025, 1.96),
(0.01, 2.33),
(0.001, 3.08))[::-1]
PERCENTAGES = {0.10: '90%', 0.05: '95%', 0.01: '99%', 0.001: '99.9%'}
DESCRIPTION_IN_WORDS = {0.10: 'fairly confident', 0.05: 'confident',
0.01: 'very confident', 0.001: 'extremely confident'}
def calculate_variance(n, p):
"""
Calculate the sample variance for a binomial distribution
"""
return p * (1 - p) / n
def zscore(alternatives):
"""
Calculate the z-score
"""
if len(alternatives) != 2:
raise ValueError("Cant compute more than two alternatives")
n0 = alternatives[0].participants
n1 = alternatives[1].participants
if n0 == 0 or n1 == 0:
raise ValueError("No participants for at least one of the experiments")
hits0 = alternatives[0].hits
hits1 = alternatives[1].hits
cr0 = hits0 / n0 # cr: conversion rate
cr1 = hits1 / n1
numerator = cr0 - cr1
variance0 = calculate_variance(n0, cr0)
variance1 = calculate_variance(n1, cr1)
return numerator / ((variance0 + variance1) ** 0.5)
def best_p(zscore):
"""
Find the p-value using a table
"""
for p, z in HANDY_Z_SCORE_CHEATSHEET:
if zscore > z:
break
return (p, z)
def test(data):
pass
def describe(alternatives, p, best, worst):
index_best = 0
index_worst = 1
words = ""
n0 = alternatives[0].participants
n1 = alternatives[1].participants
if n0 < 10 or n1 < 10:
words += "Take these results with a grain of salt since your " + \
"samples are so small: "
words += "The best alternative you have is: %s, which had " % \
alternatives[best].content
words += "%d conversions from %d participants " \
% (alternatives[best].hits, alternatives[best].participants)
words += "(%f). The other alternative was %s, " \
% (alternatives[best].hits / alternatives[best].participants,
alternatives[worst].content)
words += "which had %d conversions from %d participants " \
% (alternatives[worst].hits, alternatives[worst].participants)
words += "(%f). " % (alternatives[best].hits /
alternatives[best].participants)
if p == 1:
words += "However, this difference is not statistically significant."
else:
words += "This difference is %f likely to be " % p
words += " statistically significant, which means you can be "
words += "%s that it is the result of your alternatives actually " \
% "foo"
words += " mattering, rather than "
words += "being due to random chance. However, this statistical test"
words += " can't measure how likely the currently "
words += "observed magnitude of the difference is to be accurate or"
words + " not. It only says \"better\", not \"better "
words += "by so much\"."
return words
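if __name__ == "__main__":
    # A minimal usage sketch with hypothetical data (floats avoid integer division on Python 2).
    from collections import namedtuple
    Alternative = namedtuple("Alternative", ["content", "participants", "hits"])
    control = Alternative("control", 1000.0, 120.0)
    variant = Alternative("variant", 1000.0, 150.0)
    z = zscore([control, variant])
    p, _ = best_p(abs(z))
    print("z-score: %.3f, significant at p < %s" % (z, p))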
|
python
|
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from past.utils import old_div
import rlpy
import numpy as np
from hyperopt import hp
param_space = {
'kernel_resolution':
hp.loguniform("kernel_resolution", np.log(5), np.log(50)),
'discover_threshold':
hp.loguniform(
"discover_threshold",
np.log(1e4),
np.log(1e8)),
'lambda_': hp.uniform("lambda_", 0., 1.),
'boyan_N0': hp.loguniform("boyan_N0", np.log(1e1), np.log(1e5)),
'initial_learn_rate': hp.loguniform("initial_learn_rate", np.log(5e-2), np.log(1))}
def make_experiment(
exp_id=1, path="./Results/Temp/{domain}/{agent}/{representation}/",
discover_threshold=88044.,
boyan_N0=64502,
lambda_=0.43982644088,
initial_learn_rate=0.920244401,
kernel_resolution=11.6543336229):
opt = {}
opt["exp_id"] = exp_id
opt["path"] = path
opt["max_steps"] = 150000
opt["num_policy_checks"] = 30
opt["checks_per_policy"] = 1
active_threshold = 0.01
max_base_feat_sim = 0.5
sparsify = 1
domain = rlpy.Domains.BicycleRiding()
opt["domain"] = domain
kernel_width = old_div((domain.statespace_limits[:, 1] - domain.statespace_limits[:, 0]), kernel_resolution)
representation = rlpy.Representations.KernelizediFDD(domain, sparsify=sparsify,
kernel=rlpy.Representations.linf_triangle_kernel,
kernel_args=[kernel_width],
active_threshold=active_threshold,
discover_threshold=discover_threshold,
normalization=True,
max_active_base_feat=10,
max_base_feat_sim=max_base_feat_sim)
policy = rlpy.Policies.eGreedy(representation, epsilon=0.1)
# agent = SARSA(representation,policy,domain,initial_learn_rate=initial_learn_rate,
# lambda_=.0, learn_rate_decay_mode="boyan", boyan_N0=boyan_N0)
opt["agent"] = rlpy.Agents.Q_Learning(policy, representation, discount_factor=domain.discount_factor,
lambda_=lambda_, initial_learn_rate=initial_learn_rate,
learn_rate_decay_mode="boyan", boyan_N0=boyan_N0)
experiment = rlpy.Experiments.Experiment(**opt)
return experiment
if __name__ == '__main__':
from rlpy.Tools.run import run_profiled
# run_profiled(make_experiment)
experiment = make_experiment(1)
experiment.run(visualize_learning=True,
visualize_performance=True)
experiment.plot()
# experiment.save()
|
python
|
import os
from jobControl import jobControl
from pyspark.sql import SparkSession
from pyspark.sql import functions as f
from pyspark.sql.types import IntegerType, StringType
from utils import arg_utils, dataframe_utils
job_args = arg_utils.get_job_args()
job_name = os.path.basename(__file__).split(".")[0]
num_partitions = 6
jobExec = jobControl.Job(job_name, job_args)
jobExec.target_schema = (
jobExec.target_schema if jobExec.target_schema else jobExec.database_edw
)
def main():
table_columns = dataframe_utils.return_hive_table_columns(
spark, jobExec.target_schema, jobExec.target_table
)
df_page_view = (
spark.table(f"{jobExec.database_web_analytics}.page_view")
.filter(f.col("reference_date") == int(jobExec.reference_date))
.filter(f.col("app_id") == f.lit("GymPass"))
)
df_stg_person_type = spark.table(f"{jobExec.database_work}.stg_person_type")
df_dim_products = (
df_page_view.withColumnRenamed("event_id", "page_view_id")
.join(df_stg_person_type, "person_id", "left")
.withColumn(
"created_at_converted",
f.when(
f.col("os_timezone").isNotNull(),
f.expr("from_utc_timestamp(derived_tstamp, os_timezone)"),
)
.when(
f.col("geo_timezone").isNotNull(),
f.expr("from_utc_timestamp(derived_tstamp, os_timezone)"),
)
.otherwise(f.col("derived_tstamp")),
)
.withColumn(
"date",
(f.date_format("created_at_converted", "yyyyMMdd")).cast(IntegerType()),
)
.withColumn(
"hour",
(f.date_format("created_at_converted", "H")).cast(IntegerType()),
)
.withColumn(
"minute",
(f.date_format("created_at_converted", "m")).cast(IntegerType()),
)
.withColumn(
"person_type",
f.when(f.upper(f.col("useragent")).like("%BOT%"), f.lit("BOT"))
.when(
df_stg_person_type["person_type"].isNotNull(),
df_stg_person_type["person_type"],
)
.otherwise(f.lit("REGULAR USER")),
)
.withColumn("utc_date", f.col("reference_date"))
.select(
f.col("page_view_id"),
"date",
"hour",
"minute",
f.col("geo_country").alias("page_view_country_name"),
"viewer_id",
"person_id",
"company_id",
"person_type",
f.col("mkt_source").alias("utm_source"),
f.col("mkt_medium").alias("utm_medium"),
f.col("mkt_campaign").alias("utm_campaign"),
f.col("mkt_term").alias("utm_term"),
f.col("mkt_content").alias("utm_content"),
"latitude",
"longitude",
"utc_date",
"reference_date",
)
)
df_dim_products = dataframe_utils.createPartitionColumns(
df_dim_products, jobExec.reference_date
)
df_dim_products = jobExec.select_dataframe_columns(
spark, df_dim_products, table_columns
)
df_dim_products = df_dim_products.repartition(num_partitions, "page_view_id")
df_dim_products.write.insertInto(
f"{jobExec.target_schema}.{jobExec.target_table}", overwrite=True
)
jobExec.totalLines = (
(spark.table(f"{jobExec.target_schema}.{jobExec.target_table}"))
.filter(f.col("reference_date") == jobExec.reference_date)
.count()
)
if jobExec.totalLines > 0:
table_location = dataframe_utils.returnHiveTableLocation(
spark,
jobExec.target_schema,
jobExec.target_table,
True,
jobExec.reference_date,
)
delete_statement = f"DELETE FROM {jobExec.database_public}.{jobExec.target_table} WHERE utc_date = {jobExec.reference_date}"
jobExec.redshift.executeStatement(delete_statement, "delete")
jobExec.redshift.LoadS3toRedshift(
table_location, jobExec.database_public, jobExec.target_table
)
else:
jobExec.logger.warning("Target table is empty")
if __name__ == "__main__":
spark = SparkSession.builder.appName(job_name).enableHiveSupport().getOrCreate()
jobExec.execJob(
main,
spark,
add_hive_path=True,
delete_excessive_files=True,
infer_partitions=True,
)
|
python
|
from fontbakery.checkrunner import Section
from fontbakery.fonts_spec import spec_factory
def check_filter(item_type, item_id, item):
# Filter out external tool checks for testing purposes.
if item_type == "check" and item_id in (
"com.google.fonts/check/035", # ftxvalidator
"com.google.fonts/check/036", # ots-sanitize
"com.google.fonts/check/037", # Font Validator
"com.google.fonts/check/038", # Fontforge
"com.google.fonts/check/039", # Fontforge
):
return False
return True
def test_external_specification():
"""Test the creation of external specifications."""
specification = spec_factory(default_section=Section("Dalton Maag OpenType"))
specification.auto_register(
globals(),
spec_imports=["fontbakery.specifications.opentype"],
filter_func=check_filter)
# Probe some tests
expected_tests = ["com.google.fonts/check/002", "com.google.fonts/check/171"]
specification.test_expected_checks(expected_tests)
# Probe tests we don't want
assert "com.google.fonts/check/035" not in specification._check_registry.keys()
assert len(specification.sections) > 1
def test_spec_imports():
"""
When a names array in spec_imports contained sub module names, the import
would fail.
https://github.com/googlefonts/fontbakery/issues/1886
"""
def _test(spec_imports, expected_tests, expected_conditions=tuple()):
specification = spec_factory(default_section=Section("Testing"))
specification.auto_register({}, spec_imports=spec_imports)
specification.test_expected_checks(expected_tests)
if expected_conditions:
registered_conditions = specification.conditions.keys()
for name in expected_conditions:
assert name in registered_conditions, ('"{}" is expected to be '
'registered as a condition.'.format(name))
# this is in docs/writing specifications
spec_imports = [
['fontbakery.specifications', ['cmap', 'head']]
]
# Probe some tests
expected_tests = [
"com.google.fonts/check/076", # in cmap
"com.google.fonts/check/043" # in head
]
_test(spec_imports, expected_tests)
# the example from issue #1886
spec_imports = (
(
"fontbakery.specifications",
(
"general",
"cmap",
"head",
"os2",
"post",
"name",
"hhea",
"dsig",
"hmtx",
"gpos",
"gdef",
"kern",
"glyf",
"fvar",
"shared_conditions",
),
),
)
# Probe some tests
expected_tests = [
"com.google.fonts/check/076", # in cmap
"com.google.fonts/check/043" # in head
]
_test(spec_imports, expected_tests)
# make sure the suggested workaround still works:
# https://github.com/googlefonts/fontbakery/issues/1886#issuecomment-392535435
spec_imports = (
"fontbakery.specifications.general",
"fontbakery.specifications.cmap",
"fontbakery.specifications.head",
"fontbakery.specifications.os2",
"fontbakery.specifications.post",
"fontbakery.specifications.name",
"fontbakery.specifications.hhea",
"fontbakery.specifications.dsig",
"fontbakery.specifications.hmtx",
"fontbakery.specifications.gpos",
"fontbakery.specifications.gdef",
"fontbakery.specifications.kern",
"fontbakery.specifications.glyf",
"fontbakery.specifications.fvar",
"fontbakery.specifications.shared_conditions"
)
# Probe some tests
expected_tests = [
"com.google.fonts/check/076", # in cmap
"com.google.fonts/check/043" # in head
]
_test(spec_imports, expected_tests)
# cherry pick attributes from a module (instead of getting submodules)
# also from this is in docs/writing specifications
# Import just certain attributes from modules.
# Also, using absolute import module names:
spec_imports = [
# like we do in fontbakery.specifications.fvar
('fontbakery.specifications.shared_conditions', ('is_variable_font',
'regular_wght_coord', 'regular_wdth_coord', 'regular_slnt_coord',
'regular_ital_coord', 'regular_opsz_coord', 'bold_wght_coord')),
# just as an example: import a check and a dependency/condition of
# that check from the googlefonts specific spec:
('fontbakery.specifications.googlefonts', (
# "License URL matches License text on name table?"
'com_google_fonts_check_030',
# This condition is a dependency of the check above:
'familyname',
))
]
# Probe some tests
expected_tests = [
"com.google.fonts/check/030" # in googlefonts
]
expected_conditions = ('is_variable_font', 'regular_wght_coord',
'regular_wdth_coord', 'regular_slnt_coord', 'regular_ital_coord',
'regular_opsz_coord', 'bold_wght_coord', 'familyname')
_test(spec_imports, expected_tests, expected_conditions)
def test_opentype_checks_load():
spec_imports = ("fontbakery.specifications.opentype", )
specification = spec_factory(default_section=Section("OpenType Testing"))
specification.auto_register({}, spec_imports=spec_imports)
specification.test_dependencies()
def test_googlefonts_checks_load():
spec_imports = ("fontbakery.specifications.googlefonts", )
specification = spec_factory(default_section=Section("Google Fonts Testing"))
specification.auto_register({}, spec_imports=spec_imports)
specification.test_dependencies()
def test_in_and_exclude_checks():
spec_imports = ("fontbakery.specifications.opentype", )
specification = spec_factory(default_section=Section("OpenType Testing"))
specification.auto_register({}, spec_imports=spec_imports)
specification.test_dependencies()
explicit_checks = ["06", "07"] # "06" or "07" in check ID
exclude_checks = ["065", "079"] # "065" or "079" in check ID
iterargs = {"font": 1}
check_names = {
c[1].id for c in specification.execution_order(
iterargs,
explicit_checks=explicit_checks,
exclude_checks=exclude_checks)
}
check_names_expected = set()
for section in specification.sections:
for check in section.checks:
if any(i in check.id for i in explicit_checks) and not any(
x in check.id for x in exclude_checks):
check_names_expected.add(check.id)
assert check_names == check_names_expected
def test_in_and_exclude_checks_default():
spec_imports = ("fontbakery.specifications.opentype",)
specification = spec_factory(default_section=Section("OpenType Testing"))
specification.auto_register({}, spec_imports=spec_imports)
specification.test_dependencies()
explicit_checks = None # "All checks aboard"
exclude_checks = None # "No checks left behind"
iterargs = {"font": 1}
check_names = {
c[1].id for c in specification.execution_order(
iterargs,
explicit_checks=explicit_checks,
exclude_checks=exclude_checks)
}
check_names_expected = set()
for section in specification.sections:
for check in section.checks:
check_names_expected.add(check.id)
assert check_names == check_names_expected
|
python
|
# Generated by Django 3.2.5 on 2021-07-18 12:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('src', '0006_auto_20210718_1014'),
]
operations = [
migrations.AddField(
model_name='job',
name='delivery_address',
field=models.CharField(blank=True, max_length=255),
),
migrations.AddField(
model_name='job',
name='delivery_latitude',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='job',
name='delivery_longitude',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='job',
name='delivery_name',
field=models.CharField(blank=True, max_length=255),
),
migrations.AddField(
model_name='job',
name='delivery_phone',
field=models.CharField(blank=True, max_length=50),
),
]
|
python
|
"""fix Contact's name constraint
Revision ID: 41414dd03c5e
Revises: 508756c1b8b3
Create Date: 2021-11-26 20:42:31.599524
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '41414dd03c5e'
down_revision = '508756c1b8b3'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('contacts_name_key', 'contacts', type_='unique')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_unique_constraint('contacts_name_key', 'contacts', ['name'])
# ### end Alembic commands ###
|
python
|
#!/usr/bin/env python
import sys
import logging
logger = logging.getLogger('utility_to_osm.ssr2.git_diff')
import utility_to_osm.file_util as file_util
from osmapis_stedsnr import OSMstedsnr
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
# diff is called by git with 7 parameters:
# path old-file old-hex old-mode new-file new-hex new-mode
new_file, old_file = sys.argv[1], sys.argv[2]
logger.info('Reading %s', old_file)
content = file_util.read_file(old_file)
old_osm = OSMstedsnr.from_xml(content)
logger.info('Reading %s', new_file)
content = file_util.read_file(new_file)
new_osm = OSMstedsnr.from_xml(content)
print('\n=== Missing stedsnr ===\n')
old_stedsnr = sorted(old_osm.stedsnr.keys())
new_stedsnr = sorted(new_osm.stedsnr.keys())
for key in old_stedsnr:
if key not in new_stedsnr:
print('Diff, %s missing in new' % key)
print(old_osm.stedsnr[key][0])
for key in new_stedsnr:
if key not in old_stedsnr:
print('Diff, %s missing in old' % key)
print(new_osm.stedsnr[key][0])
print('\n=== Tagging differences ===\n')
stedsnr = set(old_stedsnr).intersection(new_stedsnr)
for key in stedsnr:
old = old_osm.stedsnr[key][0]
new = new_osm.stedsnr[key][0]
limit_distance = 1e-5 # FIXME: reasonable?
old_lat, old_lon = float(old.attribs['lat']), float(old.attribs['lon'])
new_lat, new_lon = float(new.attribs['lat']), float(new.attribs['lon'])
if abs(old_lat - new_lat) > limit_distance or abs(old_lon - new_lon) > limit_distance:
print('Diff in position %s old [%s, %s] != new [%s, %s]' % (key, old_lat, old_lon, new_lat, new_lon))
for tag_key in old.tags:
if tag_key not in new.tags:
print('Diff %s, %s missing in new:' % (key, tag_key))
print(' old[%s] = %s\n' % (tag_key, old.tags[tag_key]))
for tag_key in new.tags:
if tag_key not in old.tags:
print('Diff %s, %s missing in old:' % (key, tag_key))
print(' new[%s] = %s\n' % (tag_key, new.tags[tag_key]))
common_tags = set(old.tags.keys()).intersection(new.tags.keys())
for tag_key in common_tags:
if tag_key in ('ssr:date', ):
continue # don't care
o, n = old.tags[tag_key], new.tags[tag_key]
if o != n:
print('Diff %s:\n old[%s] = %s\n new[%s] = %s\n' % (key, tag_key, o, tag_key, n))
|
python
|
# THIS FILE IS GENERATED FROM SIGPROFILEMATRIXGENERATOR SETUP.PY
short_version = '1.1.0'
version = '1.1.0'
|
python
|
from gym_minigrid.minigrid import *
from gym_minigrid.register import register
class WarehouseSortEnv(MiniGridEnv):
"""
Environment with a door and key, sparse reward
"""
def __init__(self, size=8):
super().__init__(
grid_size=size,
max_steps=10*size*size
)
def _gen_grid(self, width, height):
# Create an empty grid
self.grid = Grid(width, height)
# Generate the surrounding walls
self.grid.wall_rect(0, 0, width, height)
# Place a goal in the bottom-right corner
self.put_obj(Goal(), width - 2, height - 2)
# Create a vertical splitting wall
# splitIdx = self._rand_int(2, width-2)
# self.grid.vert_wall(splitIdx, 0)
# Place the agent at a random position and orientation
# on the left side of the splitting wall
self.place_agent(size=(width, height))
# Place a door in the wall
# doorIdx = self._rand_int(1, width-2)
# self.put_obj(Door('yellow', is_locked=True), splitIdx, doorIdx)
# Place a yellow key on the left side
self.package = Package('yellow')
self.place_obj(self.package)
# self.put_obj(
# obj=self.package,
# i=1,
# j=1,
# )
self.mission = "use the key to open the door and then get to the goal"
def step(self, action):
self.step_count += 1
reward = 0
done = False
# Get the position in front of the agent
fwd_pos = self.front_pos
# print(fwd_pos)
# Get the contents of the cell in front of the agent
fwd_cell = self.grid.get(*fwd_pos)
# Rotate left
if action == self.actions.left:
self.agent_dir -= 1
if self.agent_dir < 0:
self.agent_dir += 4
reward = -0.06
# Rotate right
elif action == self.actions.right:
self.agent_dir = (self.agent_dir + 1) % 4
reward = -0.06
# Move forward
elif action == self.actions.forward:
if fwd_cell == None or fwd_cell.can_overlap():
self.agent_pos = fwd_pos
if fwd_cell != None and fwd_cell.type == 'goal' and self.carrying:
done = True
reward = self._reward()
# if fwd_cell != None and fwd_cell.type == 'goal':
# done = True
# reward = self._reward()
if fwd_cell != None and fwd_cell.type == 'lava':
done = True
# Pick up an object
elif action == self.actions.pickup:
if fwd_cell and fwd_cell.can_pickup():
if self.carrying is None:
self.carrying = fwd_cell
self.carrying.cur_pos = np.array([-1, -1])
self.grid.set(*fwd_pos, None)
# Drop an object
elif action == self.actions.drop:
if not fwd_cell and self.carrying:
self.grid.set(*fwd_pos, self.carrying)
self.carrying.cur_pos = fwd_pos
self.carrying = None
# Toggle/activate an object
elif action == self.actions.toggle:
if fwd_cell:
fwd_cell.toggle(self, fwd_pos)
# Done action (not used by default)
elif action == self.actions.done:
pass
else:
assert False, "unknown action"
if self.step_count >= self.max_steps:
done = True
# Pickup the Package
if np.all(np.array(self.package.cur_pos) == self.agent_pos):
package_cell = self.grid.get(*self.agent_pos)
if self.carrying is None:
self.carrying = package_cell
self.carrying.cur_pos = np.array([-1, -1])
self.grid.set(*self.agent_pos, None)
obs = self.gen_obs()
return obs, reward, done, {}
class WarehouseSortEnv7x7(WarehouseSortEnv):
def __init__(self):
super().__init__(size=10)
register(
id='MiniGrid-WarehouseSort-7x7-v0',
entry_point='gym_minigrid.envs:WarehouseSortEnv7x7'
)
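# A minimal usage sketch (not part of the original module): assumes gym and this
# gym_minigrid fork, including the custom Package object used above, are installed.
#
#   import gym
#   env = gym.make('MiniGrid-WarehouseSort-7x7-v0')
#   obs = env.reset()
#   obs, reward, done, info = env.step(env.actions.forward)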
|
python
|
"""Plot road network
"""
import os
import cartopy.crs as ccrs
import geopandas
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from atra.utils import load_config, get_axes, plot_basemap, scale_bar, plot_basemap_labels, save_fig
def main(config):
"""Read shapes, plot map
"""
data_path = config['paths']['data']
# data
output_file = os.path.join(config['paths']['figures'], 'network-road-map.png')
road_edge_file_national = os.path.join(data_path, 'network', 'road_edges_national.shp')
road_edge_file_provincial = os.path.join(data_path, 'network', 'road_edges_provincial.shp')
# basemap
proj_lat_lon = ccrs.PlateCarree()
ax = get_axes()
plot_basemap(ax, data_path)
scale_bar(ax, location=(0.8, 0.05))
plot_basemap_labels(ax, data_path, include_regions=False)
colors = {
'National': '#ba0f03',
'Provincial': '#e0881f'
}
# edges
edges_provincial = geopandas.read_file(road_edge_file_provincial)
ax.add_geometries(
list(edges_provincial.geometry),
crs=proj_lat_lon,
linewidth=1.25,
edgecolor=colors['Provincial'],
facecolor='none',
zorder=4
)
edges_national = geopandas.read_file(road_edge_file_national)
ax.add_geometries(
list(edges_national.geometry),
crs=proj_lat_lon,
linewidth=1.25,
edgecolor=colors['National'],
facecolor='none',
zorder=5
)
# legend
legend_handles = [
mpatches.Patch(color=color, label=label)
for label, color in colors.items()
]
plt.legend(handles=legend_handles, loc='lower left')
# save
save_fig(output_file)
if __name__ == '__main__':
CONFIG = load_config()
main(CONFIG)
|
python
|
import torch
import torch.nn as nn
from utils.util import count_parameters
class Embedding(nn.Module):
"""A conditional RNN decoder with attention."""
def __init__(self, input_size, emb_size, dropout=0.0, norm=False):
super(Embedding, self).__init__()
self.embedding = nn.Embedding(input_size, emb_size)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(emb_size)
self.norm = norm
def forward(self, x):
x = self.dropout(self.embedding(x))
if self.norm:
x = self.layer_norm(x)
return x
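if __name__ == "__main__":
    # A minimal smoke test; vocabulary size, batch shape and hyperparameters are illustrative.
    emb = Embedding(input_size=100, emb_size=16, dropout=0.1, norm=True)
    tokens = torch.randint(0, 100, (2, 5))  # (batch, sequence) of token ids
    print(emb(tokens).shape)  # expected: torch.Size([2, 5, 16])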
|
python
|
class FileReader(object):
def read(self, file):
with open(file) as f:
return f.read()
def read_lines(self, file):
lines = []
with open(file) as f:
for line in f:
lines.append(line)
return lines
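if __name__ == "__main__":
    # A minimal self-contained check: write a throwaway file, then read it back.
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
        tmp.write("first line\nsecond line\n")
    reader = FileReader()
    print(reader.read(tmp.name))
    print(reader.read_lines(tmp.name))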
|
python
|
from locust import HttpUser, task
from locust import User
import tensorflow as tf
from locust.contrib.fasthttp import FastHttpUser
def read_image(file_name, resize=True):
img = tf.io.read_file(filename=file_name)
img = tf.io.decode_image(img)
if resize:
img = tf.image.resize(img, [224, 224])
return img
# class QuickstartUser(HttpUser):
# # wait_time = between(1, 2.5)
# @task
# def tf_serving_test(self):
# data_path = '/home/Tiexin-RS/dataset/tr3_cropped/data/1.png'
# ori_data = read_image(data_path, False)
# ori_data = tf.cast(tf.reshape(ori_data, (-1, 1024, 1024, 3)), tf.float32)
# # ori_data = tf.random.uniform((1, 1024, 1024, 3),
# # minval=0,
# # maxval=255,
# # dtype=tf.float32)
# data = ori_data.numpy()
# payload = {"inputs": {'input_1': data.tolist()}}
# self.client.post("v1/models/deeplab_52_unfreeze:predict", json=payload)
class QuickstartUser(FastHttpUser):
# wait_time = between(1, 2.5)
def on_start(self):
data_path = '/home/Tiexin-RS/dataset/tr3_cropped/data/1.png'
ori_data = read_image(data_path, False)
ori_data = tf.cast(tf.reshape(ori_data, (-1, 1024, 1024, 3)),
tf.float32)
data = ori_data.numpy()
self.payload = {"inputs": {'input_1': data.tolist()}}
@task
def tf_serving_test(self):
self.client.request(method='POST',
path="v1/models/deeplab_52_unfreeze:predict",
json=self.payload)
|
python
|
# coding=utf-8
import unittest
import urllib2
import zipfile
import random
from tempfile import NamedTemporaryFile
from StringIO import StringIO
from . import EPUB
try:
import lxml.etree as ET
except ImportError:
import xml.etree.ElementTree as ET
class EpubTests(unittest.TestCase):
def setUp(self):
# get a small epub test file as a file-like object
self.epub2file = NamedTemporaryFile(delete=False)
test_file_content = urllib2.urlopen('http://www.hxa.name/articles/content/EpubGuide-hxa7241.epub')
self.epub2file.write(test_file_content.read())
self.epub2file.seek(0)
# get an epub with no guide element
self.epub2file2 = NamedTemporaryFile(delete=False)
test_file_content2 = urllib2.urlopen('http://www.gutenberg.org/ebooks/2701.epub.noimages')
self.epub2file2.write(test_file_content2.read())
self.epub2file2.seek(0)
def test_instantiation(self):
epub=EPUB(self.epub2file)
members = len(epub.namelist())
self.assertNotEqual(epub.filename, None)
self.assertEqual(len(epub.opf),4)
self.assertEqual(len(epub.opf[0]),11) #metadata items
self.assertEqual(len(epub.opf[1]),11) #manifest items
self.assertEqual(len(epub.opf[2]),8) #spine items
self.assertEqual(len(epub.opf[3]),3) #guide items
# test writing
new_epub=StringIO()
#epub.writetodisk("test_instantiation")
epub.writetodisk(new_epub)
epub=EPUB(new_epub)
self.assertEqual(len(epub.opf),4)
self.assertEqual(members,len(epub.namelist()))
self.assertTrue(zipfile.is_zipfile(new_epub))
def test_addpart(self):
epub=EPUB(self.epub2file,mode='a')
members = len(epub.namelist())
self.assertNotEqual(epub.filename, None)
part = StringIO('<?xml version="1.0" encoding="utf-8" standalone="yes"?>')
epub.addpart(part, "testpart.xhtml", "application/xhtml+xml", 2)
self.assertEqual(len(epub.opf[2]),9) #spine items
# test writing
new_epub=StringIO()
epub.writetodisk(new_epub)
epub=EPUB(new_epub)
self.assertEqual(len(epub.opf[2]),9)
self.assertEqual(members+1,len(epub.namelist()))
#test delete
epub._delete("testpart.xhtml")
new_epub=StringIO()
epub.writetodisk(new_epub)
new_zip = zipfile.ZipFile(new_epub)
self.assertEqual(members,len(new_zip.namelist()))
self.assertTrue(zipfile.is_zipfile(new_epub))
def test_addpart_noguide(self):
epub2=EPUB(self.epub2file2,mode='a')
self.assertEqual(len(epub2.opf),3)
self.assertEqual(epub2.info['guide'],None)
num_spine_items = len(epub2.opf[2])
uxml = u'<?xml version="1.0" encoding="utf-8" standalone="yes"?><test>VojtěchVojtíšek</test>'
part = StringIO(unicode(uxml))
epub2.addpart(part, "testpart.xhtml", "application/xhtml+xml", 2)
self.assertEqual(len(epub2.opf[2]), num_spine_items +1) #spine items
new_epub=StringIO()
epub2.writetodisk(new_epub)
epub2=EPUB(new_epub)
def test_addmetadata(self):
epub=EPUB(self.epub2file,mode='a')
members = len(epub.namelist())
epub.addmetadata('test', 'GOOD')
self.assertIn('<dc:test>GOOD<',ET.tostring(epub.opf, encoding="UTF-8"))
self.assertTrue(epub.opf.find('.//{http://purl.org/dc/elements/1.1/}test') is not None)
self.assertEqual(epub.info['metadata']['test'], 'GOOD')
# test writing
new_epub=StringIO()
epub.writetodisk(new_epub)
epub=EPUB(new_epub)
self.assertEqual(epub.info['metadata']['test'], 'GOOD')
new_zip = zipfile.ZipFile(new_epub)
self.assertEqual(members,len(new_zip.namelist()))
self.assertTrue(zipfile.is_zipfile(new_epub))
def test_new_epub(self):
f = '%012x.epub' % random.randrange(16**12) # random name
epub = EPUB(f, mode='w')
epub.addmetadata('test', 'GOOD')
uxml = u'<?xml version="1.0" encoding="utf-8" standalone="yes"?><test>VojtěchVojtíšek</test>'
part = StringIO(unicode(uxml))
epub.addpart(part, "testpart.xhtml", "application/xhtml+xml", 2)
epub.close()
epub = EPUB(f, mode='r')
        self.assertEqual(len(epub.opf), 4) # opf length
self.assertEqual(len(epub.opf[0]), 6) # metadata
self.assertEqual(len(epub.opf[1]), 2) # manifest
self.assertEqual(len(epub.opf[2]), 1) # spine
self.assertEqual(len(epub.opf[3]), 0) # guide
|
python
|
"""Collection of Object."""
import sqlite3
class Connection(sqlite3.Connection):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.execute('pragma foreign_keys=1')
class CustomCommand:
"""Object for custom command."""
__slots__ = (
"id",
"type",
"name",
"invokedName",
"brief",
"description",
"help",
"category",
"content",
"aliases",
"url",
"uses",
)
def __init__(self, id, name, category, **kwargs):
self.id = id
# NOTE: Can be 'text' or 'imported'
# - text: using text and not imported from pastebin/gist
# - imported: imported from pastebin/gist
self.type = kwargs.pop("type", "text")
# Will always return None unless type == 'imported'
self.url = kwargs.pop("url", None)
self.name = name
        # In case it's invoked using one of its aliases
self.invokedName = kwargs.pop("invokedName", name)
# TODO: Add "brief"
self.brief = None
self.description = kwargs.pop("description", None)
self.help = self.description
self.content = kwargs.pop("content", "NULL")
self.category = category
self.aliases = kwargs.pop("aliases", [])
self.uses = kwargs.pop("uses", -1)
def __str__(self):
return self.name
|
python
|
import networkx as nx
import numpy as np
import sys
from scipy.io import mmread
from scipy.sparse import coo_matrix
np.set_printoptions(threshold=sys.maxsize)
if len(sys.argv) != 2:
print("Usage: python3 ./hits.py <file.mtx>")
exit()
graph_coo = mmread(sys.argv[1])
print("Loading COO matrix")
print(graph_coo.nnz, " edges")
graph_nx = nx.DiGraph(graph_coo)
print("Creating NetworkX Graph")
print("NetworkX is Directed: ", nx.is_directed(graph_nx))
print("NetworkX Graph has ", graph_nx.number_of_edges(), " edges")
max_iter = 10000
tol = 1e-6
hubs_nx, auths_nx = nx.hits(graph_nx, max_iter, tol, normalized=True)
# Numpy implementation
hrank = np.zeros((graph_coo.shape[0], 1))
arank = np.zeros((graph_coo.shape[0], 1))
hrank += 1/graph_coo.shape[0]
arank += 1/graph_coo.shape[0]
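# Power-iteration HITS: push hub scores along edges to authorities and
# authority scores back to hubs, rescale by the maximum, and stop once the
# hub vector changes by less than tol in L1 norm.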
for _ in range(0, max_iter):
hlast = hrank
alast = arank
hrank = np.zeros((graph_coo.shape[0], 1))
arank = np.zeros((graph_coo.shape[0], 1))
for edge in range(0, graph_coo.nnz):
src = int(graph_coo.row[edge])
dest = int(graph_coo.col[edge])
arank[dest] += hlast[src]
hrank[src] += alast[dest]
# Normalize
hrank = hrank / np.max(hrank)
arank = arank / np.max(arank)
err = np.sum(np.absolute(hrank-hlast))
if err < tol:
break
hrank = hrank / np.linalg.norm(hrank, ord=1)
arank = arank / np.linalg.norm(arank, ord=1)
hubs_np = {}
auths_np = {}
for i in range(0, graph_coo.shape[0]):
hubs_np[i] = hrank[i]
auths_np[i] = arank[i]
print("Hubs: ")
for key, val in sorted(hubs_nx.items(), key=lambda x: x[1], reverse=True):
    print(key, val, hubs_np[key])
print("Authorities: ")
for key, val in sorted(auths_nx.items(), key=lambda x: x[1], reverse=True):
    print(key, val, auths_np[key])
|
python
|
import pickle
from typing import Any, Union
from datetime import datetime
class DataStorage:
_DataStorageObj = None
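    # Singleton: __new__ always returns the same shared instance.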
def __new__(cls, *args, **kwargs):
if cls._DataStorageObj is None:
cls._DataStorageObj = super().__new__(cls)
return cls._DataStorageObj
def __init__(self):
self.path = r"word_dump"
self._catalogue = []
self._data: dict[str, Any] = {}
self._setup()
def _setup(self):
self._data = self._data_storage_handler()
if self._data['creation'] != datetime.today().strftime('%Y-%m-%d'):
self._data.clear()
self._data['creation'] = datetime.today().strftime('%Y-%m-%d')
self._data_storage_handler(self._data)
def _data_storage_handler(self, obj_to_store=None) -> Union[dict[str, Any], None]:
if obj_to_store is not None:
with open(self.path, "wb") as handle:
pickle.dump(obj_to_store, handle,
protocol=pickle.HIGHEST_PROTOCOL)
else:
try:
with open(self.path, 'rb') as handle:
return pickle.load(handle)
except FileNotFoundError:
create_file = {"creation": datetime.today().strftime('%Y-%m-%d')}
self._data_storage_handler(create_file)
return create_file
def store_object(self, code: int, words):
self._data[code] = words
self._data_storage_handler(self._data)
def load_object(self, code: int):
        # Yield nothing for unknown codes instead of failing on 'yield from None'.
        yield from self._data.get(code, [])
def remove(self, *names):
for name in names:
del self._data[name]
def __getitem__(self, key):
return self._data[key]
def saved(self, item):
return item in self._data
def __setitem__(self, key, value):
self._data[key] = value
def __contains__(self, item):
return item in self._data
|
python
|
class discord:
Colour = None
class datetime:
datetime = None
|
python
|
import math
import os
def activator(data, train_x, sigma): #data = [p, q] #train_x = [3, 5]
distance = 0
for i in range(len(data)): #0 -> 1
        distance += math.pow(data[i] - train_x[i], 2) # compute the D() function (squared distance)
    return math.exp(- distance / (math.pow(sigma, 2))) # finally return the W() kernel value
def grnn(data, train_x, train_y, sigma): #data = [p, q] #train_x = [[3, 5], [3, 11], ...]
result = []
    out_dim = len(train_y[0]) # check the dimensionality of train_y
for dim in range(out_dim):
factor, divide = 0, 0
for i in range(len(train_x)): #0 -> 13
            cache = activator(data, train_x[i], sigma) # cache holds the W() kernel value
            factor += train_y[i][dim] * cache # accumulate train_y * W(), the numerator of the formula
            divide += cache # accumulate W(), the denominator of the formula
        result.append(factor / divide) # final predicted value [list]
# print("grnn finish !")
    return result # return the predictions [list], result = [y*]
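# Illustrative usage sketch (assumption, not part of the original script;
# the values below are made up): predict one 1-D target for the query point
# [4, 8] from two training samples with kernel width sigma = 1.0.
# train_x_demo = [[3, 5], [3, 11]]
# train_y_demo = [[1.0], [2.0]]
# print(grnn([4, 8], train_x_demo, train_y_demo, 1.0))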
# def activator_n(data, train_x, sigma): #data = [p, q] #train_x = [3, 5]
# distance = 0
# mu = 15.0
# for i in range(len(data)): #0 -> 1
# distance += math.pow(data[i] - train_x[i], 2) # compute the D() function (squared distance)
# # e = math.exp(-distance/(2 * (sigma ** 2)))
# e = math.exp(-((math.sqrt(distance) - mu) ** 2)/(2 * (sigma ** 2)))
# return e * (1 / (sigma * math.sqrt(2 * math.pi))) # finally return the W() kernel value
# def grnn(data, train_x, train_y, sigma): #data = [p, q] #train_x = [[3, 5], [3, 11], ...]
# result = []
# out_dim = len(train_y[0]) # check the dimensionality of train_y
# for dim in range(out_dim):
# factor, divide = 0, 0
# for i in range(len(train_x)): #0 -> 13
# # cache = activator(data, train_x[i], sigma) # cache holds the W() kernel value
# cache_n = activator_n(data, train_x[i], sigma)
# # print("W(x) = ", cache, " f(x) = ", cache_n)
# # os.system("pause")
# factor += train_y[i][dim] * cache_n # accumulate train_y * W(), the numerator of the formula
# divide += cache_n # accumulate W(), the denominator of the formula
# result.append(factor / divide) # final predicted value [list]
# # print("grnn finish !")
# return result # return the predictions [list], result = [y*]
|
python
|
# pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring,line-too-long
from unittest import mock
import os
import pytest
from eze.plugins.tools.checkmarx_kics import KicsTool
from eze.utils.io import create_tempfile_path
from tests.plugins.tools.tool_helper import ToolMetaTestBase
class TestKicsTool(ToolMetaTestBase):
ToolMetaClass = KicsTool
SNAPSHOT_PREFIX = "kics"
def test_creation__no_config(self):
# Given
input_config = {}
expected_config = {
"SOURCE": ".",
"CONFIG_FILE": None,
"REPORT_FILE": create_tempfile_path("tmp-kics-report.json"),
"REPORT_PATH": os.path.dirname(create_tempfile_path("tmp-kics-report.json")),
"REPORT_FILENAME": "tmp-kics-report.json",
"INCLUDE_FULL_REASON": True,
#
"ADDITIONAL_ARGUMENTS": "",
"IGNORED_FILES": None,
"EXCLUDE": [],
"IGNORED_VULNERABILITIES": None,
"IGNORE_BELOW_SEVERITY": None,
"DEFAULT_SEVERITY": None,
}
# When
testee = KicsTool(input_config)
# Then
assert testee.config == expected_config
def test_creation__with_config(self):
# Given
input_config = {
"SOURCE": "eze",
"ADDITIONAL_ARGUMENTS": "--something foo",
"REPORT_FILE": "C:/Users/User1/temp-kics-file.json",
"CONFIG_FILE": None,
"INCLUDE_FULL_REASON": True,
}
expected_config = {
"SOURCE": "eze",
"REPORT_PATH": "C:/Users/User1",
"REPORT_FILENAME": "temp-kics-file.json",
"REPORT_FILE": "C:/Users/User1/temp-kics-file.json",
"CONFIG_FILE": None,
"INCLUDE_FULL_REASON": True,
#
"ADDITIONAL_ARGUMENTS": "--something foo",
"IGNORED_FILES": None,
"EXCLUDE": [],
"IGNORED_VULNERABILITIES": None,
"IGNORE_BELOW_SEVERITY": None,
"DEFAULT_SEVERITY": None,
}
# When
testee = KicsTool(input_config)
# Then
assert testee.config == expected_config
@mock.patch("eze.plugins.tools.checkmarx_kics.extract_cmd_version", mock.MagicMock(return_value="""1.4.4"""))
def test_check_installed__success(self):
# When
expected_output = "1.4.4"
output = KicsTool.check_installed()
# Then
assert output == expected_output
def test_parse_report__snapshot(self, snapshot):
# Test container fixture and snapshot
self.assert_parse_report_snapshot_test(snapshot)
@mock.patch("eze.utils.cli.async_subprocess_run")
@mock.patch("eze.utils.cli.is_windows_os", mock.MagicMock(return_value=True))
@pytest.mark.asyncio
async def test_run_scan__cli_command__multi_value_flag_exclude_and_no_folder_path_given(
self, mock_async_subprocess_run
):
# Given
input_config = {
"SOURCE": "eze",
"REPORT_FILE": "tmp-kics-report.json",
"EXCLUDE": [
"PATH-TO-EXCLUDED-FOLDER/.*",
"PATH-TO-NESTED-FOLDER/SOME_NESTING/.*",
"PATH-TO-EXCLUDED-FILE.js",
],
}
expected_cmd = 'kics scan -s -p eze --output-path . --output-name tmp-kics-report.json -e= "PATH-TO-EXCLUDED-FOLDER/.*" "PATH-TO-NESTED-FOLDER/SOME_NESTING/.*" PATH-TO-EXCLUDED-FILE.js'
# Test run calls correct program
await self.assert_run_scan_command(input_config, expected_cmd, mock_async_subprocess_run)
@mock.patch("eze.utils.cli.async_subprocess_run")
@pytest.mark.asyncio
async def test_run_scan__cli_command_with_multi_sources_and_report(self, mock_async_subprocess_run):
# Given
input_config = {
"SOURCE": "Dockerfile,azure-pipelines.yml",
"REPORT_FILE": "C:/Users/User1/tmp-kics-report.json",
}
expected_cmd = "kics scan -s -p Dockerfile,azure-pipelines.yml --output-path C:/Users/User1 --output-name tmp-kics-report.json"
# Test run calls correct program
await self.assert_run_scan_command(input_config, expected_cmd, mock_async_subprocess_run)
|
python
|
"""Simple templating engine. See `TemplateEngine` class."""
import os
import re
import inspect
__all__ = ['TemplateEngine', 'TemplateSyntaxError', 'annotate_block']
class TemplateEngine:
"""Simple templating engine.
WARNING: do NOT use this engine with templates from untrusted sources.
Expressions in the template file are passed to `eval()`, and can therefore
call any Python function.
This engine supports:
- inline replacement through `$<expr>$`;
- block replacement with template-controlled indentation through
`\n$<indent><name>\n`;
- blocks can be defined both programmatically and from within the template
through `\n$block <name>\n` and `\n$endblock\n`;
- conditional blocks through `\n$if <expr>\n`, `\n$else\n`, and
`\n$endif\n`.
Note that `<indent>` is one space less than the actual indent to make the
block name line up with where the block should be. If no indent is
specified, no indent is added, so it's not currently possible to output
blocks indented by a single space. A dollar sign can be inserted into the
output by writing `$$`. To indent the template itself for readability with
lots of nesting, any number of spaces followed by a | sign will be removed
before any other processing (when a line in the output needs to start with
a |, just add a second | in front).
Additionally, some pretty-printing is supported through @ characters:
- Inline @ signs are replaced with spaces or newlines based on line length
in a way that preserves indentation. An additional four-space indent is
added when auto-wrapping.
- Double inline @ signs escape this; they are changed into a single @
sign.
- A single @ sign at the start of a line, usually followed by a space,
indicates that the line is a comment. The appropriate comment character
sequence will be prefixed when the comment is inserted. The content is
interpreted as markdown text; heuristics are used to try to rewrap the
text to the appropriate line width. @ signs on these lines are NOT
interpreted as spacing (they are literal), since this would have no
effect anyway.
- A double @ sign at the start of a line is replaced by the appropriate
comment sequence, but otherwise the line is treated the same way as
normal code. That is, wrapping points have to be specified explicitly
using @ symbols, and @@ is escaped to @.
- Three @ signs at the start of a line are replaced with a single @ sign
in the output. The line is treated as regular code.
The formatting step can be disabled, allowing the output of the template
engine to be used as a block within a subsequent engine.
Unlike the C preprocessor, line numbers are NOT preserved. The focus is on
generating well-formatted, readable code.
To use it, first define inline replacements, conditions, or add to blocks.
Then `apply_*` the engine on files or strings."""
def __init__(self):
super().__init__()
self._variables = {}
self._blocks = {}
def __setitem__(self, key, value):
"""Defines a variable within the expression engine."""
self._variables[str(key)] = value
def __getitem__(self, key):
"""Returns the current value of a variable within the expression
engine."""
return self._variables[str(key)]
def __delitem__(self, key):
"""Undefines a variable within the expression engine."""
del self._variables[str(key)]
def __iter__(self):
"""Iterates over the variables defined within the expression engine."""
return iter(self._variables)
def passthrough(self, *names):
"""Pass expansion of the given variable names on to the next template
by assigning them to `$<name>$`."""
for name in names:
self[name] = '$%s$' % name
def _get_scope(self):
"""Returns the dictionary of variables that should be available for
eval()-based directives."""
variables = self._variables.copy()
variables['defined'] = lambda x: bool(self._blocks.get(x, []))
variables['re'] = re
return variables
def append_block(self, key, code, *args):
"""Add a block of code to the given key.
`code` must be a string or a list of strings, the latter case being
        equivalent to passing `'\n'.join(code)`. Regardless of the number of
terminating newlines, the spacing between consecutive blocks is always
a single empty line."""
# Preprocess the arguments to allow for different calling conventions.
if isinstance(code, list):
code = '\n'.join(code)
if args:
code += '\n' + '\n'.join(args)
# Blocks can contain directives and are internally stored as directive
# lists. So split the code into directives now.
directives = self._split_directives(code)
# Save the block.
key = str(key)
if key not in self._blocks:
self._blocks[key] = []
self._blocks[key].append(directives)
def reset_block(self, key):
"""Removes all code blocks associated with the given key."""
key = str(key)
if key in self._blocks:
del self._blocks[key]
def apply_file_to_file(self, template_filename, output_filename, *args, **kwargs):
"""Applies this template engine to the given template file, writing the
result to the given output file. Extra arguments are passed to
`apply_str_to_str()` and are documented there."""
output = self.apply_file_to_str(template_filename, *args, **kwargs)
with open(output_filename, 'w') as output_file:
output_file.write(output)
def apply_str_to_file(self, template, output_filename, *args, **kwargs):
"""Applies this template engine to the given template string, writing the
result to the given output file. Extra arguments are passed to
`apply_str_to_str()` and are documented there."""
output = self.apply_str_to_str(template, *args, **kwargs)
with open(output_filename, 'w') as output_file:
output_file.write(output)
def apply_file_to_str(self, template_filename, *args, **kwargs):
"""Applies this template engine to the given template file, returning
the result as a string. Extra arguments are passed to
`apply_str_to_str()` and are documented there."""
with open(template_filename, 'r') as template_file:
template = annotate_block(
template_file.read(),
template_filename,
kwargs.get('comment', '#').strip())
try:
return self.apply_str_to_str(template, *args, **kwargs)
except TemplateSyntaxError as exc:
exc.set_filename(template_filename)
raise
def apply_str_to_str(self, template, comment='# ', wrap=80,
postprocess=True, annotate=False):
"""Applies this template engine to the given template string, returning
the result as a string. The `comment` keyword argument specifies the
character sequence that leads comment lines; it defaults to '# ' for
Python files. The `wrap` keyword argument specifies the desired number
of characters per line when wrapping; it defaults to 80. The
`postprocess` keyword argument can be set to `False` to disable
post-processing altogether; use this when the output of this templating
step will be used within a later templating step."""
# If the template is specified as a list of strings, join them first.
if isinstance(template, list):
template = '\n'.join(template)
# Remove any template indentation, which is separated from output
# indentation through pipe symbols.
template = re.sub(r'\n *\|', '\n', template)
# Split the template file into a list of alternating literals and
# directives.
directives = self._split_directives(template)
# Handle $ directives.
markers = self._process_directives(directives)
output = self._process_markers(markers)
# Process @ directives to clean up the output.
if postprocess:
output = self._process_wrapping(output, comment, wrap, annotate)
return output
@staticmethod
def _split_directives(template):
"""Splits a template string into directives. The resulting list contains an
odd amount of items, where every even-indexed item is a literal string and
every odd-indexed item is a two-tuple of a line number and a directive.
Inline directives include the surrounding dollar signs. Non-inline
directives include the dollar prefix and newline suffix, while the newline
before the directive is considered part of the preceding literal."""
# Split the directive using regular expressions. A newline is prefixed and
# suffixed to ensure that the newlines matched by block directives at the
# start and end of the input are always there. The prefixed newline is
# stripped immediately; the final newline is stripped when we finish
# parsing when the template engine ensures that all files end in a single
# newline.
directives = re.split(r'(\$[^$\n]*\$|(?<=\n)\$[^\n]+\n)', '\n' + template + '\n')
directives[0] = directives[0][1:]
# Insert line number information.
line_number = 1
directive_line_number = 1
directive_source = None
for idx, item in enumerate(directives):
if directive_source is None:
directive_line_number = line_number
line_number += item.count('\n')
if idx % 2 == 1:
directive = item
directives[idx] = ((directive_source, directive_line_number), directive)
else:
source = re.findall(r'@![v\^]->[^\n]+\n', item)
if not source:
continue
source = source[-1]
if source.startswith('@!^->'):
directive_source = None
elif source.startswith('@!v->source='):
directive_source, directive_line_number = source[12:].rsplit(':', maxsplit=1)
directive_line_number = int(directive_line_number)
else:
assert False
return directives
def _process_directives(self, directives, block_recursion_limit=100): #pylint: disable=R0912,R0914,R0915
"""Process a directive list as returned by `_split_directives()` into a
list of literals and markers. Literals and markers are distinguished by
type: literals are strings, markers are N-tuples. The first entry of a
marker tuple is a string that identifies what it represents.
Currently the only marker is 'indent'. It's a two-tuple; the second
entry is an integer representing an indentation delta (number of
spaces). This indentation needs to be applied to subsequent literals."""
# Make a copy of the directive list so we can consume it one entry at a
# time without affecting the argument.
directive_stack = list(directives)
# Conditional code block stack. For code to be handled, all entries in
# this list must be True (or there must be zero entries). Each $if
# directive appends its condition to the list, $else directives invert
# the last one, and $endif directives remove from the list.
condition_stack = []
# Line number of the outermost $if statement, used for line number info
# when we're missing an $endif.
outer_if_line_nr = None
# Block definition buffer.
block_buffer = None
block_key = None
# Number of recursive $block definitions.
block_level = 0
# Block definitions.
block_definitions = {}
# Number of recursive block insertions.
block_recursion = 0
# Line number of the outermost $block statement, used for line number
# info when we're missing an $endblock.
outer_block_line_nr = None
# Output buffer.
output_buffer = []
# Iterate over all the directives and literals.
while directive_stack:
directive_or_literal = directive_stack.pop(0)
# Handle literals first.
if isinstance(directive_or_literal, str):
literal = directive_or_literal
# If we're in the middle of a block definition, save the
# literal to the block buffer.
if block_buffer is not None:
block_buffer.append(literal)
continue
# Delete literals that have been conditioned away.
if not all(condition_stack):
continue
# Output the literal.
output_buffer.append(literal)
continue
# Unpack the directive.
directive_tuple = directive_or_literal
line_nr, directive = directive_tuple
# Handle markers inserted into the stack by this function.
if line_nr is None:
marker = directive
if marker[0] == 'end_block':
block_recursion -= 1
else:
output_buffer.append(marker)
continue
# Parse/simplify the directive syntax.
if directive.endswith('$'):
indent = 0
directive = directive[1:-1]
argument = None
else:
matches = re.match(r'\$( *)([^ ]*)(?: (.*))?$', directive)
indent = len(matches.group(1))
if indent:
indent += 1
directive = '$' + matches.group(2).rstrip()
argument = matches.group(3)
# Handle $block directive.
if directive == '$block':
if not argument:
raise TemplateSyntaxError(
line_nr, '$block without key')
block_level += 1
if block_level == 1:
block_buffer = []
block_key = argument
outer_block_line_nr = line_nr
continue
# Don't continue here; save nested $block directives to the
# buffer!
# Handle $endblock directive.
if directive == '$endblock':
if argument:
raise TemplateSyntaxError(
line_nr, 'unexpected argument for $endblock')
if block_level == 0:
raise TemplateSyntaxError(
line_nr, '$endblock without $block')
block_level -= 1
if block_level == 0:
if block_key not in block_definitions:
block_definitions[block_key] = []
block_definitions[block_key].append(block_buffer)
block_key = None
block_buffer = None
continue
# Don't continue here; save nested $endblock directives to the
# buffer!
# If we're in the middle of a block definition, don't process
# directives yet.
if block_buffer is not None:
block_buffer.append(directive_tuple)
continue
# Handle $if directive.
if directive == '$if':
if not argument:
raise TemplateSyntaxError(
line_nr, '$if without expression')
if not condition_stack:
outer_if_line_nr = line_nr
if not all(condition_stack):
# Don't try to evaluate the condition if we're already
# conditioned away.
condition = False
else:
try:
condition = bool(eval(argument, self._get_scope())) #pylint: disable=W0123
except (NameError, ValueError, TypeError, SyntaxError) as exc:
raise TemplateSyntaxError(
line_nr, 'error in $if expression: {}'.format(exc))
condition_stack.append(condition)
continue
# Handle $else directive.
if directive == '$else':
if argument:
raise TemplateSyntaxError(
line_nr, 'unexpected argument for $else')
if not condition_stack:
raise TemplateSyntaxError(
line_nr, '$else without $if')
condition_stack[-1] = not condition_stack[-1]
continue
# Handle $endif directive.
if directive == '$endif':
if argument:
raise TemplateSyntaxError(
line_nr, 'unexpected argument for $endif')
if not condition_stack:
raise TemplateSyntaxError(
line_nr, '$endif without $if')
del condition_stack[-1]
continue
# Don't process directives further if we're inside a false conditional
# block.
if not all(condition_stack):
continue
# Handle dollar escape sequences.
if directive == '':
output_buffer.append('$')
continue
# Handle inline directives.
if not directive.startswith('$'):
try:
result = str(eval(directive, self._get_scope())) #pylint: disable=W0123
except (NameError, ValueError, TypeError, SyntaxError) as exc:
raise TemplateSyntaxError(
line_nr, 'error in inline expression: {}'.format(exc))
output_buffer.append(result)
continue
# Handle block insertions.
if directive.startswith('$') and not argument:
block_recursion += 1
if block_recursion > block_recursion_limit:
raise TemplateSyntaxError(
line_nr, 'block recursion limit reached ({})'.format(block_recursion_limit))
key = directive[1:]
# Get the blocks associated with the given key, if any.
blocks = self._blocks.get(key, [])
blocks.extend(block_definitions.get(key, []))
# Flatten the directive lists.
directives = [(None, ('indent', indent))]
for block_directives in blocks:
directives.extend(block_directives)
directives.append('\n\n')
directives.append((None, ('indent', -indent)))
directives.append((None, ('end_block',)))
# Insert the directives at the start of our directive stack.
directive_stack[0:0] = directives
continue
# Unknown directive.
raise TemplateSyntaxError(
line_nr, 'unknown directive: {}'.format(directive))
# Raise errors when we have unterminated blocks.
if condition_stack:
raise TemplateSyntaxError(
outer_if_line_nr, '$if without $endif')
if block_buffer is not None:
raise TemplateSyntaxError(
outer_block_line_nr, '$block without $endblock')
return output_buffer
@staticmethod
def _process_markers(markers):
"""Processes a list of literals and markers as returned by
`_process_directives()` into a single string representing the source
code."""
# Join all consecutive literals together, then split them into lines.
# That allows us to prefix indentation properly.
marker_buffer = [[]]
for marker_or_literal in markers:
if isinstance(marker_or_literal, tuple):
marker_buffer[-1] = ''.join(marker_buffer[-1]).split('\n')
marker_buffer.append(marker_or_literal)
marker_buffer.append([])
else:
marker_buffer[-1].append(marker_or_literal)
marker_buffer[-1] = ''.join(marker_buffer[-1]).split('\n')
# Current number of spaces to indent by.
indent = 0
# Buffer to output processed literals to.
output_buffer = []
# State variables used to collapse empty lines and annotations.
empty_line = False
source_annotation = None
for marker_or_literals in marker_buffer:
# Handle markers.
if isinstance(marker_or_literals, tuple):
marker = marker_or_literals
if marker[0] == 'indent':
indent += marker[1]
continue
                raise AssertionError('unknown marker: {}'.format(marker))
# Handle blocks of literals. We process indentation markers and
# collapse multiple newlines and source markers into one to
# (hopefully) improve readability.
for literal in marker_or_literals:
literal = literal.rstrip()
if not literal:
empty_line = True
elif literal.startswith('@!'):
source_annotation = literal
else:
if output_buffer and empty_line:
output_buffer.append('')
if source_annotation is not None:
output_buffer.append(source_annotation)
output_buffer.append(' ' * indent + literal)
empty_line = False
source_annotation = None
# Make sure we output the source termination marker at the end, if any.
        if source_annotation and source_annotation.startswith('@!^->'):
output_buffer.append(source_annotation)
return '\n'.join(output_buffer)
    def _process_wrapping(self, text, comment, wrap, annotate): #pylint: disable=R0912
"""Post-processes code by handling comment and wrapping markers."""
output_lines = []
# Since multiple subsequent lines of commented text should be
# interpreted as a single paragraph before they're wrapped, we need to
# postpone this wrapping until we encounter a line that doesn't belong
# to the current paragraph. `paragraph_buffer` maintains a list of
# words within the current paragraph, while `paragraph_buffer_indent`
# contains the indentation characters of the first line of the
# paragraph, where indentation characters means any set of spaces,
# dashes, and asterisks. For subsequent lines to belong to the same
# paragraph, they must have the same indentation, except using only
# spaces. Those rules make markdown-styled lists parse correctly.
paragraph_buffer = None
paragraph_buffer_leading = None
paragraph_buffer_hanging = None
# List of source annotations that have not been written yet.
annotations = []
for line in text.split('\n'):
# Strip trailing spaces.
line = line.rstrip()
# Add indentation in the input block to the output indent.
match = re.match(r'( *)(.*)$', line)
indent = match.group(1)
line = match.group(2)
# Detect the type of input line (normal code, text comment, or code
# comment).
line_is_text = False
if line.startswith('@@@'):
# Escape sequence for @ at start of line in code. Just strip
# the first at to turn it into an inline escape.
line = line[1:]
elif line.startswith('@@'):
# Code comment.
indent += comment
# Strip the '@@' sequence.
line = line[2:]
elif line.startswith('@!'):
# Source annotation.
if annotate:
annotations.append(comment.strip() + line[2:])
continue
elif line.startswith('@'):
# Text comment.
indent += comment
line_is_text = True
# Strip the '@' or '@ ' sequence.
if line.startswith('@ '):
line = line[2:]
else:
line = line[1:]
# If this is a comment line, figure out its indentation to
# determine whether it's a continuation of the previous comment
# paragraph, if any. If it is, or it starts a new block, buffer it
# until we get a line that isn't a continuation of it.
if line_is_text:
# Output source annotations before processing the comment.
output_lines.extend(annotations)
annotations = []
match = re.match(r'([-* ]*)(.*)$', line)
comment_indent = match.group(1)
line = match.group(2)
if paragraph_buffer is not None:
if line and indent + comment_indent == paragraph_buffer_hanging:
# Continuation of that paragraph.
paragraph_buffer.extend(line.split())
continue
# Not a continuation of the buffered paragraph. Output the
# current buffer so we can start a new one.
output_lines.extend(self._wrap(
paragraph_buffer_leading,
paragraph_buffer_hanging,
paragraph_buffer,
wrap))
paragraph_buffer = None
if line:
# Start a new paragraph.
paragraph_buffer = line.split()
paragraph_buffer_leading = indent + comment_indent
paragraph_buffer_hanging = indent + ' '*len(comment_indent)
else:
# Output empty lines immediately to maintain them. They'd
# be lost if we'd stick them in the paragraph buffer.
output_lines.append((indent + comment_indent).rstrip())
continue
# The current line is not commented text, so we need to write and
# invalidate the current paragraph buffer, if any, before we can
# continue.
if paragraph_buffer is not None:
output_lines.extend(self._wrap(
paragraph_buffer_leading,
paragraph_buffer_hanging,
paragraph_buffer,
wrap))
paragraph_buffer = None
# Output annotations after dumping the comment paragraph buffer,
# but before outputting the statement.
output_lines.extend(annotations)
annotations = []
# Split the text into tokens split by single at signs. Also
# handle escaping, which admittedly is a little awkward right now
# with the double replacing.
line = line.replace('@@', '@_')
line = re.split(r'\@(?!_)', line)
line = (token.replace('@_', '@') for token in line)
# Wrap the text.
output_lines.extend(self._wrap(
indent,
indent + ' ',
line,
wrap))
# If we were still buffering a paragraph of commented text, output it
# now.
if paragraph_buffer is not None:
output_lines.extend(self._wrap(
paragraph_buffer_leading,
paragraph_buffer_hanging,
paragraph_buffer,
wrap))
# Join the lines together and ensure that the file ends in a single
# newline.
return '\n'.join(output_lines).rstrip() + '\n'
@staticmethod
def _wrap(leading_indent, hanging_indent, tokens, wrap):
"""Wraps tokenized text.
`tokens` is a list of non-breakable strings representing the line or
paragraph that is to be wrapped. The first line is prefixed with
`leading_indent`, subsequent lines are prefixed with `hanging_indent`.
`wrap` specifies the maximum desired number of characters on a single
line."""
line = leading_indent
first = True
for token in tokens:
# The first token gets some special treatment here.
if first:
line += token
first = False
continue
if len(line) + len(token) + 1 > wrap:
# Too long, need to wrap: yield the previous line and start a
# new one.
yield line.rstrip()
line = hanging_indent + token
else:
# No overflow, add to current line.
line += ' ' + token
# If we saw at least one token, yield the final line.
if not first:
yield line.rstrip()
class TemplateSyntaxError(Exception):
"""Template syntax error class. Contains line number and source file
information."""
def __init__(self, source, message, filename=None):
super().__init__(message)
if isinstance(source, int):
self._filename = filename
self._line_nr = source
else:
self._filename, self._line_nr = source
self._message = message
def set_filename(self, filename):
"""Sets the filename associated with this syntax error."""
if self._filename is None:
self._filename = filename
def __str__(self):
filename = self._filename
if filename is None:
filename = '<unknown>'
return 'on {} line {}: {}'.format(filename, self._line_nr, self._message)
def annotate_block(template, fname=None, comment='#'):
"""Annotates template source file + line number to every non-directive line
of the given template. If `fname` is `None`, the filename and line number
offset is taken from the caller of this function."""
comment = comment.strip()
if fname is None:
previous_frame = inspect.currentframe().f_back
(fname, offset, _, _, _) = inspect.getframeinfo(previous_frame)
# inspect returns the last line of a statement. We assume that blocks
# are defined as a """ multiline string, so we need to subtract the
# number of newlines in the block.
offset -= template.count('\n')
else:
offset = 1
template = template.split('\n')
annotated = []
for line_no, line in enumerate(template):
annotated.append('@!v->source=%s:%s' % (fname, line_no + offset))
annotated.append(line)
annotated.append('@!^->end')
return '\n'.join(annotated)
def preload_template(fname, comment='#'):
"""Preloads a template from a file relative to the calling Python file."""
comment = comment.strip()
if not os.path.isabs(fname):
previous_frame = inspect.currentframe().f_back
caller_fname, _, _, _, _ = inspect.getframeinfo(previous_frame)
fname = os.path.dirname(caller_fname) + os.sep + fname
with open(fname, 'r') as fil:
template = fil.read()
return annotate_block(template, fname, comment)
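# Illustrative usage sketch (assumption, not part of the original module;
# the variable name, block name, and template string below are made up):
# define an inline variable and a code block, then apply a template that
# inserts the block and substitutes the variable.
if __name__ == '__main__':
    engine = TemplateEngine()
    engine['name'] = 'world'
    engine.append_block('GREETING', [
        '@ Prints a greeting.',
        "print('Hello, $name$!')",
    ])
    print(engine.apply_str_to_str('$GREETING\n'))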
|
python
|
from rdkit import Chem
from rdkit.ML.Descriptors import MoleculeDescriptors
from rdkit.Chem import Descriptors
from padelpy import from_smiles
import re
import time
nms=[x[0] for x in Descriptors._descList]
print('\n')
calc = MoleculeDescriptors.MolecularDescriptorCalculator(nms)
f=open('/scratch/woon/b3lyp_2017/datasmile2.txt')
for i, line in enumerate(f):
mydes=[]
mylist=[]
line=line.split(',')
number=str(line[0])
print(number)
line=line[1]
m = Chem.MolFromSmiles(line)
try:
time.sleep(1)
des= from_smiles(line,fingerprints=True,timeout=180)
des=str(des).split(',')
for ii in range(len(des)):
b=des[ii].split(',')
b=des[ii].strip('[').strip(']')
b=re.sub('[^.,a-zA-Z0-9 \n\.]', '', b)
b=b.replace('[',' ')
b=b.replace(']',' ')
b=b.strip()
b=b.split(' ')
mylist.append(b[0])
try:
b=b[1]
except:
b=''
if bool(b)==True:
mydes.append(float(b))
if bool(b)==False:
mydes.append('NA')
# print(len(mylist))
a=calc.CalcDescriptors(m)
a=str(a)
a=a.replace('(', '')
a=a.replace(')', '')
line=str(line)
towrite=str(number+','+line.strip(' ').strip('\n')+','+a+','+str(mydes))
with open('/scratch/woon/b3lyp_2017/book4.csv', 'a') as mydata:
mydata.write(towrite+'\n')
except:
time.sleep(3)
|
python
|
from torch import randn
from torch.nn import Linear
from backpack import extend
def data_linear(device="cpu"):
N, D1, D2 = 100, 64, 256
X = randn(N, D1, requires_grad=True, device=device)
linear = extend(Linear(D1, D2).to(device=device))
out = linear(X)
vin = randn(N, D2, device=device)
vout = randn(N, D1, device=device)
return {
"X": X,
"module": linear,
"output": out,
"vout_ag": vout,
"vout_bp": vout.unsqueeze(2),
"vin_ag": vin,
"vin_bp": vin.unsqueeze(2),
}
|
python
|
# -*- coding: utf-8 -*-
import logging
from pathlib import Path
import yaml
logger = logging.getLogger(__name__)
def recursive_update(original_dict: dict, new_dict: dict) -> dict:
"""Recursively update original_dict with new_dict"""
for new_key, new_value in new_dict.items():
if isinstance(new_value, dict):
original_dict[new_key] = recursive_update(
original_dict.get(new_key, {}), new_value
)
else:
original_dict[new_key] = new_value
return original_dict
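# Example (illustrative): recursive_update({"a": {"x": 1}}, {"a": {"y": 2}, "b": 3})
# returns {"a": {"x": 1, "y": 2}, "b": 3}; nested dicts are merged instead of replaced.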
class Password:
def __init__(self, password: str) -> None:
self.password = password or ""
def __repr__(self) -> str:
return "*" * len(self.password)
def get(self) -> str:
return self.password
def __bool__(self):
return bool(self.password)
# Configurations are loaded from the package defaults and then optionally overridden by a local config.yaml file
config_files = [
Path(__file__).parent / "resources" / "default_config.yaml",
Path("config.yaml"),
]
config = {}
for config_file in config_files:
if config_file.exists():
new_config = yaml.safe_load(config_file.read_text())
if isinstance(new_config, dict):
config = recursive_update(config, new_config)
config["backup-dir"] = Path(config["backup-dir"]).absolute()
config["password"] = Password(config.get("password"))
|
python
|
# Generated by Django 3.2.6 on 2021-11-29 00:15
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('kube', '0003_auto_20210917_0032'),
]
operations = [
migrations.RemoveField(
model_name='kubecluster',
name='type',
),
]
|
python
|
import json
from flask import make_response
from marshmallow import fields, Schema, post_load, EXCLUDE
from flask_apispec.utils import Ref
from flask_apispec.views import MethodResource
from flask_apispec import doc, use_kwargs, marshal_with
# All the following schemas are set with unknown = EXCLUDE
# because part of a multiple schema input.
# This way none of them will raise errors for unknown fields handled by others
class NameSchema(Schema):
name = fields.Str()
class Meta:
unknown = EXCLUDE
class NameGenreSchema(Schema):
name = fields.Str()
genre = fields.Str()
class Meta:
unknown = EXCLUDE
class GenreSchema(Schema):
genre = fields.Str()
class Meta:
unknown = EXCLUDE
class InstrumentSchema(Schema):
instrument = fields.Str()
class Meta:
unknown = EXCLUDE
class TestFunctionViews:
def test_use_kwargs(self, app, client):
@app.route('/')
@use_kwargs({'name': fields.Str()}, location='querystring')
def view(**kwargs):
return kwargs
res = client.get('/', {'name': 'freddie'})
assert res.json == {'name': 'freddie'}
def test_use_kwargs_nolocation(self, app, client):
@app.route('/')
@use_kwargs({'name': fields.Str()})
def view(**kwargs):
return kwargs
res = client.get('/', {'name': 'freddie'})
# default location is 'json', i.e. no kwargs will be received here
assert res.json == {}
def test_view_returning_tuple(self, app, client):
@app.route('/all')
@use_kwargs({'name': fields.Str()}, location='querystring')
def all(**kwargs):
return kwargs, 202, {'x-msg': 'test'}
@app.route('/headers')
@use_kwargs({'name': fields.Str()}, location='querystring')
def view_headers(**kwargs):
return kwargs, {'x-msg': 'test'}
@app.route('/code')
@use_kwargs({'name': fields.Str()}, location='querystring')
def view_code(**kwargs):
return kwargs, 202
res_all = client.get('/all', {'name': 'freddie'})
assert res_all.json == {'name': 'freddie'}
assert res_all.status_code == 202
assert res_all.headers.get('x-msg') == 'test'
res_headers = client.get('/headers', {'name': 'freddie'})
assert res_headers.json == {'name': 'freddie'}
assert res_headers.status_code == 200
assert res_headers.headers.get('x-msg') == 'test'
res_code = client.get('/code', {'name': 'freddie'})
assert res_code.json == {'name': 'freddie'}
assert res_code.status_code == 202
assert 'x-msg' not in res_code.headers
def test_use_kwargs_schema(self, app, client):
class ArgSchema(Schema):
name = fields.Str()
@app.route('/')
@use_kwargs(ArgSchema, location='querystring')
def view(**kwargs):
return kwargs
res = client.get('/', {'name': 'freddie'})
assert res.json == {'name': 'freddie'}
def test_use_kwargs_schema_with_post_load(self, app, client):
class User:
def __init__(self, name):
self.name = name
def update(self, name):
self.name = name
class ArgSchema(Schema):
name = fields.Str()
@post_load
def make_object(self, data, **kwargs):
return User(**data)
@app.route('/', methods=('POST', ))
@use_kwargs(ArgSchema(), location='json_or_form')
def view(user):
assert isinstance(user, User)
return {'name': user.name}
data = {'name': 'freddie'}
res = client.post('/', data)
assert res.json == data
def test_use_kwargs_schema_many(self, app, client):
class ArgSchema(Schema):
name = fields.Str()
@app.route('/', methods=('POST',))
@use_kwargs(ArgSchema(many=True), location='json')
def view(*args):
return list(args)
data = [{'name': 'freddie'}, {'name': 'john'}]
res = client.post('/', json.dumps(data), content_type='application/json')
assert res.json == data
def test_use_kwargs_multiple(self, app, client):
@app.route('/')
@use_kwargs(NameSchema, location='querystring')
@use_kwargs(InstrumentSchema, location='querystring')
def view(**kwargs):
return kwargs
res = client.get('/', {'name': 'freddie', 'instrument': 'vocals'})
assert res.json == {'name': 'freddie', 'instrument': 'vocals'}
def test_use_kwargs_callable_as_schema(self, app, client):
def schema_factory(request):
assert request.method == 'GET'
assert request.path == '/'
class ArgSchema(Schema):
name = fields.Str()
return ArgSchema
@app.route('/')
@use_kwargs(schema_factory, location='querystring')
def view(**kwargs):
return kwargs
res = client.get('/', {'name': 'freddie'})
assert res.json == {'name': 'freddie'}
def test_marshal_with_default(self, app, client, models, schemas):
@app.route('/')
@marshal_with(schemas.BandSchema)
def view():
return models.Band('queen', 'rock')
res = client.get('/')
assert res.json == {'name': 'queen', 'genre': 'rock'}
def test_marshal_with_codes(self, app, client, models, schemas):
@app.route('/')
@marshal_with(schemas.BandSchema)
@marshal_with(schemas.BandSchema(only=('name', )), code=201)
def view():
return models.Band('queen', 'rock'), 201
res = client.get('/')
assert res.json == {'name': 'queen'}
def test_integration(self, app, client, models, schemas):
@app.route('/')
@use_kwargs(
{'name': fields.Str(), 'genre': fields.Str()},
location='querystring'
)
@marshal_with(schemas.BandSchema)
def view(**kwargs):
return models.Band(**kwargs)
res = client.get('/', {'name': 'queen', 'genre': 'rock'})
assert res.json == {'name': 'queen', 'genre': 'rock'}
class TestClassViews:
def test_inheritance_unidirectional(self, app, client):
@doc(tags=['base'])
class BaseResource(MethodResource):
@doc(description='parent')
def get(self, **kwargs):
pass
@doc(tags=['child'])
class ChildResource(BaseResource):
@doc(description='child')
def get(self, **kwargs):
return kwargs
assert not any(MethodResource.__apispec__.values())
assert BaseResource.__apispec__['docs'][0].options[0]['tags'] == ['base']
assert ChildResource.__apispec__['docs'][0].options[0]['tags'] == ['child']
assert BaseResource.get.__apispec__['docs'][0].options[0]['description'] == 'parent'
assert ChildResource.get.__apispec__['docs'][0].options[0]['description'] == 'child'
def test_inheritance_only_http_methods(self, app):
@use_kwargs({'genre': fields.Str()})
class ConcreteResource(MethodResource):
def _helper(self, **kwargs):
return kwargs
with app.test_request_context():
resource = ConcreteResource()
assert resource._helper() == {}
def test_kwargs_inheritance(self, app, client):
class BaseResource(MethodResource):
@use_kwargs(NameSchema, location='querystring')
def get(self, **kwargs):
pass
class ConcreteResource(BaseResource):
@use_kwargs(GenreSchema, location='querystring')
def get(self, **kwargs):
return kwargs
app.add_url_rule('/', view_func=ConcreteResource.as_view('concrete'))
res = client.get('/', {'name': 'queen', 'genre': 'rock'})
assert res.json == {'name': 'queen', 'genre': 'rock'}
def test_kwargs_inheritance_ref(self, app, client, schemas):
class BaseResource(MethodResource):
@use_kwargs(NameSchema, location='querystring')
def get(self, **kwargs):
pass
class ConcreteResource(BaseResource):
kwargs = GenreSchema
@use_kwargs(Ref('kwargs'), location='querystring')
@marshal_with(schemas.BandSchema)
def get(self, **kwargs):
return kwargs
app.add_url_rule('/', view_func=ConcreteResource.as_view('concrete'))
res = client.get('/', {'name': 'queen', 'genre': 'rock'})
assert res.json == {'name': 'queen', 'genre': 'rock'}
def test_kwargs_inheritance_false(self, app, client, models, schemas):
class BaseResource(MethodResource):
@use_kwargs(NameGenreSchema, location='querystring')
def get(self):
pass
class ConcreteResource(BaseResource):
@use_kwargs(NameSchema, inherit=False, location='querystring')
def get(self, **kwargs):
return kwargs
app.add_url_rule('/', view_func=ConcreteResource.as_view('concrete'))
res = client.get('/', {'name': 'queen', 'genre': 'rock'})
assert res.json == {'name': 'queen'}
def test_kwargs_apply_false(self, app, client):
class ConcreteResource(MethodResource):
@use_kwargs(GenreSchema, apply=False)
def get(self, **kwargs):
return kwargs
app.add_url_rule('/', view_func=ConcreteResource.as_view('concrete'))
res = client.get('/', {'name': 'queen', 'genre': 'rock'})
assert res.json == {}
def test_schemas_class(self, app, client, models, schemas):
@marshal_with(schemas.BandSchema)
class ConcreteResource(MethodResource):
@marshal_with(schemas.BandSchema(only=('genre', )), code=201)
def get(self, **kwargs):
return models.Band('slowdive', 'shoegaze'), 201
app.add_url_rule('/', view_func=ConcreteResource.as_view('concrete'))
res = client.get('/')
assert res.json == {'genre': 'shoegaze'}
def test_schemas_class_inheritance(self, app, client, models, schemas):
@marshal_with(schemas.BandSchema(only=('genre', )))
class BaseResource(MethodResource):
def get(self):
pass
class ConcreteResource(BaseResource):
def get(self, **kwargs):
return models.Band('slowdive', 'shoegaze'), 201
app.add_url_rule('/', view_func=ConcreteResource.as_view('concrete'))
res = client.get('/')
assert res.json == {'genre': 'shoegaze'}
def test_schemas_inheritance(self, app, client, models, schemas):
class BaseResource(MethodResource):
@marshal_with(schemas.BandSchema)
def get(self):
pass
class ConcreteResource(BaseResource):
@marshal_with(schemas.BandSchema(only=('genre', )), code=201)
def get(self, **kwargs):
return models.Band('slowdive', 'shoegaze'), 201
app.add_url_rule('/', view_func=ConcreteResource.as_view('concrete'))
res = client.get('/')
assert res.json == {'genre': 'shoegaze'}
def test_schemas_inheritance_refs(self, app, client, models, schemas):
class BaseResource(MethodResource):
schema = None
@marshal_with(Ref('schema'))
def get(self):
pass
class ConcreteResource(BaseResource):
schema = schemas.BandSchema
def get(self, **kwargs):
return models.Band('slowdive', 'shoegaze')
app.add_url_rule('/', view_func=ConcreteResource.as_view('concrete'))
res = client.get('/')
assert res.json == {'name': 'slowdive', 'genre': 'shoegaze'}
def test_schemas_inheritance_false(self, app, client, models, schemas):
class BaseResource(MethodResource):
@marshal_with(schemas.BandSchema, code=201)
def get(self):
pass
class ConcreteResource(BaseResource):
@marshal_with(schemas.BandSchema(only=('genre', )), inherit=False)
def get(self, **kwargs):
return models.Band('slowdive', 'shoegaze'), 201
app.add_url_rule('/', view_func=ConcreteResource.as_view('concrete'))
res = client.get('/')
assert res.json == {'genre': 'shoegaze'}
def test_schemas_apply_false(self, app, client, models, schemas):
class ConcreteResource(MethodResource):
@marshal_with(schemas.BandSchema, apply=False)
def get(self, **kwargs):
return {'genre': 'spacerock'}
app.add_url_rule('/', view_func=ConcreteResource.as_view('concrete'))
res = client.get('/')
assert res.json == {'genre': 'spacerock'}
def test_schemas_none(self, app, client, models, schemas):
class ConcreteResource(MethodResource):
@marshal_with(None, code=204)
def delete(self, **kwargs):
response = make_response('', 204)
response.headers = {}
return response
app.add_url_rule('/<id>/', view_func=ConcreteResource.as_view('concrete'))
res = client.delete('/5/')
assert res.body == b''
|
python
|
from abc import ABC, abstractmethod
import itertools
import numpy as np
import matplotlib.pyplot as plt
import tqdm
from . import _heatmap
from . import preprocessing
class Regressor(ABC):
'''
Mix-in class for Regression models.
'''
@abstractmethod
def get_output(self):
'''
Returns the output activations of the model.
Returns
-------
numpy.array
The output activations.
'''
pass
@abstractmethod
def feed(self, input_data):
'''
Accepts input array and feeds it to the model.
Parameters
----------
input_data : numpy.array
The input to feed the model.
Raises
------
ValueError
If the input data has invalid dimensions/shape.
Note
----
This function only feeds the input data, to get the output after calling this
function use :py:func:`get_output` or :py:func:`get_output_onehot`
'''
pass
@property
@abstractmethod
def _out_size(self):
'''
Returns number of nodes/neurons in the output layer.
'''
pass
def r2score(self, testing_data, testing_targets):
'''
Return R-squared or coefficient of determination value.
Parameters
----------
testing_data : numpy.array
numpy array containing testing data.
testing_targets : numpy.array
numpy array containing testing targets, corresponding to the testing data.
Returns
-------
r2score : float
            The R-squared (coefficient of determination) of the model on the testing data.
Raises
------
ValueError
If :code:`testing_data` or :code:`testing_tagets` has invalid dimensions/shape.
'''
self.feed(testing_data)
output = self.get_output()
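        # R^2 = 1 - (residual sum of squares) / (total sum of squares)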
error = ((output-testing_targets)**2).sum()
var = ((testing_targets-testing_targets.mean(axis=0)) ** 2).sum()
return 1-error/var
|
python
|
from __future__ import absolute_import, print_function
import pytest
from steam_friends import app
from steam_friends.views import api, auth, main
def test_app(flask_app):
assert flask_app.debug is False # todo: should this be True?
assert flask_app.secret_key
assert flask_app.testing is True
assert api.blueprint == flask_app.blueprints['api']
assert auth.blueprint == flask_app.blueprints['auth']
assert main.blueprint == flask_app.blueprints['steam_friends']
def test_app_more_config(monkeypatch):
key = "TEST_ENV_VAR"
envvar = "SF_{}".format(key)
value = "False"
monkeypatch.setenv(envvar, value)
flask_app = app.create_app(app_env="test")
assert flask_app.config.get(key) is False
def test_debug_app(monkeypatch):
monkeypatch.setenv("SF_DEBUG", "True")
flask_app = app.create_app(app_env="test")
assert flask_app.debug is True
def test_bad_app(monkeypatch):
monkeypatch.setenv("SF_ENV", "")
with pytest.raises(SystemExit):
app.create_app()
with pytest.raises(SystemExit):
app.create_app(app_env="")
|
python
|
# from classify.data.loaders.snli import SNLIDataLoader
# __all__ = ["SNLIDataLoader"]
|
python
|
import numpy as np
class InvertedPendulum:
def __init__(self, length, mass, gravity=9.81):
self.length = length
self.mass = mass
self.gravity = gravity
# matrices of the linearized system
self.A = np.array([[0, 1, 0, 0],
[gravity/length, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, 0, 0]])
self.B = np.array([0, 1/length, 0, 1])
def calc_force(self, X, x_acc):
angle = X[0]
s = np.sin(angle)
c = np.cos(angle)
acc_normal = self.gravity * c - x_acc * s
# force exerted on the EE by the pendulum
f = np.array([self.mass * acc_normal * s, -self.mass * acc_normal * c])
return f
def step(self, X, u, dt):
''' State X = [angle, dangle, x, dx], input u = ddx '''
angle = X[0]
s = np.sin(angle)
c = np.cos(angle)
acc_tangential = self.gravity * s + u * c
angle_acc = acc_tangential / self.length
dX = np.array([X[1], angle_acc, X[3], u])
X = X + dt * dX
return X
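# Illustrative usage sketch (assumption, not part of the original module;
# the values below are made up): integrate the pendulum for one second from
# a small initial angle with zero cart acceleration.
# pendulum = InvertedPendulum(length=1.0, mass=0.5)
# X = np.array([0.05, 0.0, 0.0, 0.0])
# for _ in range(100):
#     X = pendulum.step(X, u=0.0, dt=0.01)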
|
python
|
class Base:
@property
def id(self):
return self._id
def __repr__(self):
return '({} {})'.format(self.__class__.__name__, self.id)
def __unicode__(self):
return u'({} {})'.format(self.__class__.__name__, self.id)
def __eq__(self, other):
return self.id == other.id
def __ne__(self, other):
return self.id != other.id
|
python
|
# from DETR main.py with modifications.
import argparse
import datetime
import json
import random
import time
from pathlib import Path
import math
import sys
from PIL import Image
import requests
import matplotlib.pyplot as plt
import numpy as np
from torch.utils.data import DataLoader, DistributedSampler
import torch
from torch import nn
from torchvision.models import resnet50
import torchvision.transforms as T
from skimage import io
from models.transformer import TransformerModel
from models.tramap import TraMapModel
from models.backbone import BackboneModel
from custom_criterion import MSLELoss
from dataset import MapQueryDataset
def get_args_parser():
parser = argparse.ArgumentParser('TransforMap', add_help=False)
parser.add_argument('--lr', default=1e-4, type=float)
parser.add_argument('--lr_backbone', default=1e-4, type=float)
parser.add_argument('--batch_size', default=2, type=int)
parser.add_argument('--weight_decay', default=1e-4, type=float)
parser.add_argument('--epochs', default=300, type=int)
parser.add_argument('--lr_drop', default=200, type=int)
parser.add_argument('--clip_max_norm', default=0.1, type=float,
help='gradient clipping max norm')
# Map backbone
parser.add_argument('--backbone', default='resnet50', type=str,
help="Name of the convolutional backbone to use")
# * Transformer
parser.add_argument('--enc_layers', default=6, type=int,
help="Number of encoding layers in the transformer")
parser.add_argument('--dec_layers', default=6, type=int,
help="Number of decoding layers in the transformer")
parser.add_argument('--dim_feedforward', default=2048, type=int,
help="Intermediate size of the feedforward layers in the transformer blocks")
parser.add_argument('--hidden_dim', default=256, type=int,
help="Size of the embeddings (dimension of the transformer)")
parser.add_argument('--dropout', default=0.1, type=float,
help="Dropout applied in the transformer")
parser.add_argument('--nheads', default=8, type=int,
help="Number of attention heads inside the transformer's attentions")
# dataset parameters
parser.add_argument('--dataset_path', type=str)
parser.add_argument('--output_dir', default='',
help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=42, type=int)
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', action='store_true')
parser.add_argument('--num_workers', default=2, type=int)
return parser
def main(args):
device = torch.device(args.device)
# Seed
seed = args.seed
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
# Build the models
backbone_model = BackboneModel(hidden_dim=args.hidden_dim, arch=args.backbone)
transformer_model = TransformerModel(
d_model=args.hidden_dim,
n_head=args.nheads,
num_encoder_layers=args.enc_layers,
num_decoder_layers=args.dec_layers,
dim_feedforward=args.dim_feedforward,
dropout=args.dropout,
activation="relu",
normalize_before=False
)
model = TraMapModel(backbone_model, transformer_model)
print("DEVICE:", device)
backbone_model.to(device)
transformer_model.to(device)
model.to(device)
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('number of params:', n_parameters)
param_dicts = [
{"params": [p for n, p in model.named_parameters() if "backbone" not in n and p.requires_grad]},
{
"params": [p for n, p in model.named_parameters() if "backbone" in n and p.requires_grad],
"lr": args.lr_backbone,
},
]
optimizer = torch.optim.AdamW(param_dicts, lr=args.lr,
weight_decay=args.weight_decay)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)
# Data loader
transforms = T.Compose([
T.ToTensor(),
T.Normalize(mean=[0.1888, 0.2168, 0.2469], std=[0.3322, 0.2871, 0.2899])
])
dataset_train = MapQueryDataset(transforms=transforms, split='train')
sampler_train = torch.utils.data.RandomSampler(dataset_train)
batch_sampler_train = torch.utils.data.BatchSampler(sampler_train, args.batch_size, drop_last=False)
data_loader_train = DataLoader(dataset_train, batch_sampler=batch_sampler_train,
num_workers=args.num_workers)
output_dir = Path(args.output_dir)
if args.resume:
checkpoint = torch.load(args.resume, map_location='cpu')
model.load_state_dict(checkpoint['model'])
if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer'])
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
args.start_epoch = checkpoint['epoch'] + 1
if args.eval:
test_stats = None
# Criterion / Loss function
# criterion = MSLELoss()
# criterion = nn.MSELoss()
# criterion = nn.L1Loss()
criterion = nn.SmoothL1Loss()
criterion.to(device)
# Logger thing
MB = 1024.0 * 1024.0
print_every = 10
target = data_loader_train
print("Start Training")
start_time = time.time()
for epoch in range(args.start_epoch, args.epochs):
model.train()
criterion.train()
print("EPOCH:", epoch)
i = 0
## Training process ##
# Move to GPU or CPU
for sample, query, duration in data_returner(data_loader_train):
query = query.to(device)
sample = sample.to(device)
## Target duration
duration = duration.to(device)
duration = duration.float()
outputs = model(sample, query)
outputs = outputs.flatten()
# RMSE if criterion set to MSE
# loss = torch.sqrt(criterion(outputs, duration) + 1e-8)
# Else
loss = criterion(outputs, duration)
loss_value = loss.item()
if not math.isfinite(loss_value):
                print("Loss is {}, stopping the training process".format(loss_value))
sys.exit(1)
optimizer.zero_grad()
loss.backward()
if args.clip_max_norm > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip_max_norm)
optimizer.step()
if i % print_every == 0:
# print("Output: {} Target: {}".format(outputs.tolist()[0], duration.tolist()[0]))
if torch.cuda.is_available():
print("Iter: {} Memory: {:d}MB Loss: {}".format(i, math.trunc(torch.cuda.max_memory_allocated() / MB), loss_value))
# print(outputs[0].item(), duration[0].item())
else:
print("Iter: {} Loss:{}".format(i, loss_value))
i += 1
lr_scheduler.step()
## Saving or Not saving, there is no in between
if args.output_dir:
checkpoint_paths = [output_dir / 'checkpoint.pth']
if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % 100 == 0:
checkpoint_paths.append(output_dir / f'checkpoint{epoch:04}.pth')
for checkpoint_path in checkpoint_paths:
torch.save({
'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
'lr_scheduler': lr_scheduler.state_dict(),
'epoch': epoch,
'args': args,
}, checkpoint_path)
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
def data_returner(iterable, print_freq=10):
    for obj in iterable:
yield obj
if __name__ == '__main__':
    parser = argparse.ArgumentParser('TransforMap training and evaluation script', parents=[get_args_parser()])
args = parser.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args)
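# Hedged example invocation (paths and values are placeholders, not taken from
# the original project):
#   python main.py --dataset_path ./data/maps --output_dir ./checkpoints \
#       --batch_size 8 --epochs 100 --device cuda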
|
python
|
# Generated by Django 2.1.2 on 2018-12-05 14:28
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("barriers", "0020_auto_20181025_1545")]
operations = [
migrations.RemoveField(model_name="barriercontributor", name="barrier"),
migrations.RemoveField(model_name="barriercontributor", name="contributor"),
migrations.RemoveField(model_name="barriercontributor", name="created_by"),
migrations.RemoveField(model_name="barriercontributor", name="modified_by"),
migrations.RemoveField(
model_name="historicalbarriercontributor", name="barrier"
),
migrations.RemoveField(
model_name="historicalbarriercontributor", name="contributor"
),
migrations.RemoveField(
model_name="historicalbarriercontributor", name="created_by"
),
migrations.RemoveField(
model_name="historicalbarriercontributor", name="history_user"
),
migrations.RemoveField(
model_name="historicalbarriercontributor", name="modified_by"
),
migrations.RemoveField(
model_name="barrierinstance", name="commercial_sensitivities"
),
migrations.RemoveField(model_name="barrierinstance", name="fta_infringement"),
migrations.RemoveField(
model_name="barrierinstance", name="has_legal_infringement"
),
migrations.RemoveField(
model_name="barrierinstance", name="infringement_summary"
),
migrations.RemoveField(model_name="barrierinstance", name="other_infringement"),
migrations.RemoveField(
model_name="barrierinstance", name="political_sensitivities"
),
migrations.RemoveField(model_name="barrierinstance", name="wto_infringement"),
migrations.RemoveField(
model_name="historicalbarrierinstance", name="commercial_sensitivities"
),
migrations.RemoveField(
model_name="historicalbarrierinstance", name="fta_infringement"
),
migrations.RemoveField(
model_name="historicalbarrierinstance", name="has_legal_infringement"
),
migrations.RemoveField(
model_name="historicalbarrierinstance", name="infringement_summary"
),
migrations.RemoveField(
model_name="historicalbarrierinstance", name="other_infringement"
),
migrations.RemoveField(
model_name="historicalbarrierinstance", name="political_sensitivities"
),
migrations.RemoveField(
model_name="historicalbarrierinstance", name="wto_infringement"
),
migrations.DeleteModel(name="BarrierContributor"),
migrations.DeleteModel(name="HistoricalBarrierContributor"),
]
|
python
|
#special thanks to this solution from:
#https://stackoverflow.com/questions/40237952/get-scrapy-crawler-output-results-in-script-file-function
#https://stackoverflow.com/questions/41495052/scrapy-reactor-not-restartable
from scrapy import signals
from scrapy.signalmanager import dispatcher
from twisted.internet import reactor
from scrapy.crawler import CrawlerRunner
from multiprocessing import Process, Queue
from EveroutSpider import EveroutSpider
from TravelPDXSpider import TravelPortlandSpider
import random
def spider_runner_results():
    # List of available spiders, one of which is chosen at random
    spiders = [EveroutSpider, TravelPortlandSpider]
    Spider = random.choice(spiders)
results = []
def crawler_results(signal, sender, item, response, spider):
results.append(item)
dispatcher.connect(crawler_results, signal=signals.item_scraped)
runner = CrawlerRunner()
d = runner.crawl(Spider)
d.addBoth(lambda _: reactor.stop())
reactor.run() # the script will block here until the crawling is finished
return results
if __name__ == '__main__':
print(spider_runner_results())
|
python
|
# -*- coding: utf-8 -*-
"""Helper module to work with files."""
import fnmatch
import logging
import os
import re
from stat import S_IRGRP, S_IROTH, S_IRUSR, S_IWGRP, S_IWOTH, S_IWUSR
# pylint: disable=redefined-builtin
from ._exceptions import FileNotFoundError
MAXLEN = 120
ILLEGAL = r'<>:"/\|?*'
LOGGER = logging.getLogger(__name__)
MODE666 = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH
# pylint: disable=useless-object-inheritance
class File(object):
"""Easy manipulation of files in the SAME directory."""
def __init__(self, fp):
"""Set and validate the basic properties."""
if not self.isfile(fp):
raise FileNotFoundError(fp)
self.path = os.path.dirname(fp) or os.getcwd()
self.basename = os.path.basename(fp)
self.name, self.ext = os.path.splitext(self.basename)
self.writable = os.access(fp, os.W_OK)
def siblings(self):
"""Collect files and directories in the same directory."""
return [f for f in os.listdir(self.path) if f != self.basename]
@staticmethod
def isfile(path):
"""Check if a given path is a file."""
return os.path.isfile(path)
@staticmethod
def exists(path):
"""Check if a given path is a file or a directory."""
return os.path.exists(path)
@staticmethod
def mkwinsafe(name, space=' '):
"""Delete most common characters not allowed in Windows filenames."""
        space = space if space not in ILLEGAL else ' '
        name = ''.join(c for c in name
                       if c not in ILLEGAL).replace(' ', space).strip()
name = re.sub(r'\s\s+', ' ', name) if space == ' ' else name
return name[:MAXLEN]
@staticmethod
def validate(basename):
"""Check for a proper basename."""
if basename != os.path.basename(basename):
LOGGER.critical('This (%s) is not a basename!', basename)
return False
name, ext = os.path.splitext(basename)
if not name:
            LOGGER.critical('Not a valid name (length 0)!')
return False
if not ext:
            LOGGER.critical('Not a valid extension (length 0)!')
return False
return True
def baserename(self, new_basename):
"""Rename the file to a 'safe' basename."""
if not self.validate(new_basename):
return False
name, ext = os.path.splitext(new_basename)
name = self.mkwinsafe(name)
new_basename = name + ext
if new_basename == self.basename:
return True
        if new_basename not in self.siblings():
            try:
                # rename via full paths so it works regardless of the current directory
                os.rename(os.path.join(self.path, self.basename),
                          os.path.join(self.path, new_basename))
            except OSError as err:
                LOGGER.critical('%s', err)
                return False
            self.basename = new_basename
            self.name = name
            self.ext = ext
        else:
            LOGGER.info('The file (%s) already exists in the directory!',
                        new_basename)
        return True
@staticmethod
def uxchmod(fp, mode=MODE666):
"""Change the mode of the file (default is 0666)."""
return os.chmod(fp, mode)
def cwdfiles(pattern='*'):
"""List the files in current directory that match a given pattern."""
return fnmatch.filter(os.listdir('.'), pattern)
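# Hedged usage sketch: the filename and pattern below are illustrative only.
if __name__ == '__main__':
    print(File.mkwinsafe('report: draft?.txt'))  # -> 'report draft.txt'
    print(cwdfiles('*.py'))                      # Python files in the current directory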
|
python
|
from numpy import dtype
db_spec = True
try:
import sqlalchemy.types as sqlt
except ImportError:
db_spec = False
return_keys = [
'id',
'created_at',
'number',
'total_price',
'subtotal_price',
'total_weight',
'total_tax',
'total_discounts',
'total_line_items_price',
'name',
'total_price_usd',
'order_number',
'processing_method',
'source_name',
'fulfillment_status',
'payment_gateway_names',
'customer',
'line_items',
'refunds',
'email',
'discount_applications',
'discount_codes',
'updated_at',
'shipping_lines'
]
keys_list = [
'id',
'created_at',
'number',
'total_price',
'subtotal_price',
'total_weight',
'total_tax',
'total_discounts',
'total_line_items_price',
'name',
'total_price_usd',
'order_number',
'processing_method',
'source_name',
'fulfillment_status',
'payment_gateway_names',
'email',
'updated_at'
]
def DBSpec():
if db_spec is True:
order_types = {
'id': sqlt.BigInteger,
'order_date': sqlt.DateTime,
'number': sqlt.BigInteger,
'total_price': sqlt.Float,
'subtotal_price': sqlt.Float,
'total_weight': sqlt.Float,
'total_tax': sqlt.Float,
'total_discounts': sqlt.Float,
'total_line_items_price': sqlt.Float,
'name': sqlt.String,
'total_price_usd': sqlt.Float,
'order_number': sqlt.BigInteger,
'processing_method': sqlt.String,
'source_name': sqlt.String,
'fulfillment_status': sqlt.String,
'payment_gateway_names': sqlt.String,
'email': sqlt.String,
'updated_at': sqlt.DateTime
}
ref_types = {
'id': sqlt.BigInteger,
'refund_date': sqlt.DateTime,
'order_id': sqlt.BigInteger
}
refli_types = {
'id': sqlt.BigInteger,
'refund_id': sqlt.BigInteger,
'order_id': sqlt.BigInteger,
'line_item_id': sqlt.BigInteger,
'quantity': sqlt.Integer,
'variant_id': sqlt.BigInteger,
'subtotal': sqlt.Float,
'total_tax': sqlt.Float
}
adj_types = {
'id': sqlt.BigInteger,
'refund_id': sqlt.BigInteger,
'order_id': sqlt.BigInteger,
'amount': sqlt.Float,
'tax_amount': sqlt.Float,
'kind': sqlt.String,
'reason': sqlt.String
}
item_types = {
'id': sqlt.BigInteger,
'order_id': sqlt.BigInteger,
'order_date': sqlt.DateTime,
'variant_id': sqlt.BigInteger,
'quantity': sqlt.Integer,
'price': sqlt.Float,
'name': sqlt.String,
'product_id': sqlt.BigInteger,
'sku': sqlt.String,
'title': sqlt.String,
'total_discount': sqlt.Float,
'variant_title': sqlt.String
}
trans_types = {
'id': sqlt.BigInteger,
'source_order_id': sqlt.BigInteger,
'type': sqlt.String,
'fee': sqlt.Float,
'amount': sqlt.Float
}
cust_types = {
'order_id': sqlt.BigInteger,
'order_date': sqlt.DateTime,
'customer_id': sqlt.BigInteger,
'orders_count': sqlt.Integer,
'email': sqlt.String,
'created_at': sqlt.DateTime,
'total_spent': sqlt.Float
}
discapp_types = {
'order_id': sqlt.BigInteger,
'order_date': sqlt.DateTime,
'type': sqlt.String,
'title': sqlt.String,
'description': sqlt.String,
'value': sqlt.NUMERIC,
'value_type': sqlt.String,
'allocation_method': sqlt.String,
'target_selection': sqlt.String,
'target_type': sqlt.String
}
disccodes_types = {
'order_id': sqlt.BigInteger,
'order_date': sqlt.DateTime,
'code': sqlt.String,
'amount': sqlt.DECIMAL,
'type': sqlt.String,
}
shipline_types = {
'id': sqlt.String,
'carrier_identifier': sqlt.String,
'code': sqlt.String,
'delivery_category': sqlt.String,
'ship_discount_price': sqlt.Float,
'phone': sqlt.String,
'ship_price': sqlt.Float,
'requested_fulfillment_id': sqlt.String,
'source': sqlt.String,
'title': sqlt.String,
'order_id': sqlt.BigInteger,
'order_date': sqlt.DateTime,
}
else:
order_types = {}
ref_types = {}
refli_types = {}
adj_types = {}
item_types = {}
trans_types = {}
cust_types = {}
discapp_types = {}
disccodes_types = {}
shipline_types = {}
return {
'Refunds': ref_types,
'Orders': order_types,
'LineItems': item_types,
'RefundLineItem': refli_types,
'Adjustments': adj_types,
'Transactions': trans_types,
'Customers': cust_types,
'DiscountApps': discapp_types,
'DiscountCodes': disccodes_types,
'ShipLines': shipline_types
}
order_dtypes = {
'number': dtype('int64'),
'total_price': dtype('float64'),
'subtotal_price': dtype('float64'),
'total_weight': dtype('float64'),
'total_tax': dtype('float64'),
'total_discounts': dtype('float64'),
'total_line_items_price': dtype('float64'),
'name': dtype('O'),
'total_price_usd': dtype('float64'),
'order_number': dtype('int64'),
'processing_method': dtype('O'),
'source_name': dtype('O'),
'fulfillment_status': dtype('O'),
'email': dtype('O')
}
ref_keys = [
'created_at',
'id',
'order_id'
]
ref_dtypes = {
'id': dtype('int64'),
'order_id': dtype('int64')
}
refli_keys = [
'id',
'refund_id',
'order_id',
'line_item_id',
'quantity',
'variant_id',
'subtotal',
'total_tax'
]
refli_dtypes = {
'id': dtype('int64'),
'refund_id': dtype('int64'),
'order_id': dtype('int64'),
'line_item_id': dtype('int64'),
'quantity': dtype('int64'),
'variant_id': dtype('int64'),
'subtotal': dtype('float64'),
'total_tax': dtype('float64')
}
adj_keys = [
'id',
'refund_id',
'order_id',
'amount',
'tax_amount',
'kind',
'reason'
]
adj_dtypes = {
'id': dtype('int64'),
'refund_id': dtype('int64'),
'order_id': dtype('int64'),
'amount': dtype('float64'),
'tax_amount': dtype('float64'),
'kind': dtype('O'),
'reason': dtype('O')
}
item_keys = [
'id',
'order_id',
'variant_id',
'quantity',
'price',
'order_date',
'name',
'product_id',
'sku',
'title',
'total_discount',
'variant_title',
]
item_dtypes = {
'id': dtype('int64'),
'order_id': dtype('int64'),
'variant_id': dtype('int64'),
'quantity': dtype('float64'),
'price': dtype('float64'),
'name': dtype('O'),
'product_id': dtype('int64'),
'sku': dtype('O'),
'title': dtype('O'),
'total_discount': dtype('float64'),
'variant_title': dtype('O')
}
trans_keys = [
'id',
'source_order_id',
'type',
'fee',
'amount',
'processed_at'
]
trans_dtypes = {
'id': dtype('int64'),
'source_order_id': dtype('int64'),
'type': dtype('O'),
'fee': dtype('float64'),
'amount': dtype('float64')
}
cust_dtypes = {
'order_id': dtype('int64'),
'customer_id': dtype('int64'),
'orders_count': dtype('int64'),
'email': dtype('O'),
'total_spent': dtype('float64')
}
cust_keys = [
'id',
'order_date',
'customer_id',
'orders_count',
'email',
'created_at',
'total_spent'
]
cust_cols = [
'id',
'created_at',
'customer_id',
'customer_orders_count',
'customer_email',
'customer_created_at',
'customer_total_spent'
]
cust_map = {
'id': 'order_id',
'created_at': 'order_date',
'customer_id': 'customer_id',
'customer_orders_count': 'orders_count',
'customer_email': 'email',
'customer_created_at': 'created_at',
'customer_total_spent': 'total_spent'
}
discapp_keys = [
'order_id',
'order_date',
'type',
'code',
'title',
'description',
'value',
'value_type',
'allocation_method',
'target_selection',
'target_type'
]
discapp_dtypes = {
'order_id': dtype('int64'),
'type': dtype('O'),
'title': dtype('O'),
'value': dtype('float64'),
'value_type': dtype('O'),
'allocation_method': dtype('O'),
'target_selection': dtype('O'),
'target_type': dtype('O'),
'code': dtype('O')
}
discapp_map = {
'orders_id': 'order_id',
'orders_created_at': 'order_date'
}
disccode_keys = [
'order_id',
'created_at',
'code',
'amount',
'type'
]
disccode_dtypes = {
'order_id': 'int64',
'code': 'string',
'type': 'string',
'amount': 'float64'
}
disccode_map = {
'orders_id': 'order_id',
'orders_created_at': 'order_date'
}
shipline_keys = [
'id',
'carrier_identifier',
'code',
'delivery_category',
'discounted_price',
'phone',
'price',
'requested_fulfillment_id',
'source',
'title',
'orders.id',
'orders.created_at'
]
shipline_dtypes = {
'id': 'string',
'carrier_identifier': 'string',
'code': 'string',
'delivery_category': 'string',
'ship_discount_price': 'float64',
'phone': 'string',
'ship_price': 'float64',
'requested_fulfillment_id': 'string',
'source': 'string',
'title': 'string',
'order_id': 'int64',
}
shipline_map = {
'orders.id': 'order_id',
'orders.created_at': 'order_date',
'price': 'ship_price',
'discounted_price': 'ship_discount_price'
}
proc_dict = {
'Orders': 'orders_update',
'Refunds': 'refunds_update',
'LineItems': 'lineitems_update',
'RefundLineItem': 'reflineitem_update',
'Adjustments': 'adjustments_update',
'Customers': 'cust_update',
'DiscountApps': 'discapp_update',
'DiscountCodes': 'disccode_update',
'ShipLines': 'shipline_update'
}
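# Hedged usage sketch: DBSpec() falls back to empty column specs when SQLAlchemy
# is not installed, so callers can always iterate over the same table names.
if __name__ == '__main__':
    specs = DBSpec()
    print(sorted(specs))                               # table names
    print('Orders columns:', sorted(specs['Orders']))  # empty without sqlalchemy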
|
python
|
from setuptools import find_packages, setup
setup(
name='serverlessworkflow_sdk',
packages=find_packages(include=['serverlessworkflow_sdk']),
version='0.1.0',
description='Serverless Workflow Specification - Python SDK',
author='Serverless Workflow Contributors',
license='http://www.apache.org/licenses/LICENSE-2.0.txt',
install_requires=[],
setup_requires=['pytest-runner'],
tests_require=['pytest'],
test_suite='tests',
)
|
python
|
#!/usr/bin/env python3
# coding: utf-8
# PSMN: $Id: 02.py 1.3 $
# SPDX-License-Identifier: CECILL-B OR BSD-2-Clause
""" https://github.com/OpenClassrooms-Student-Center/demarrez_votre_projet_avec_python/
Bonus 1, json
"""
import json
import random
def read_values_from_json(fichier, key):
""" create an new empty list
open a json file
load all data
add each item in the list
return completed list
"""
values = []
with open(fichier) as f:
data = json.load(f)
for entry in data:
values.append(entry[key])
return values
def message(character, quote):
    """ Return a quote attributed to a character. """
n_character = character.capitalize()
n_quote = quote.capitalize()
return "{} a dit : {}".format(n_character, n_quote)
def get_random_item_in(my_list):
    """ Return a random item from the list. """
rand_numb = random.randint(0, len(my_list) - 1)
item = my_list[rand_numb] # get a quote from a list
return item # return the item
def get_random_quote():
    """ Return a random quote. """
all_values = read_values_from_json('quotes.json', 'quote')
return get_random_item_in(all_values)
def get_random_character():
    """ Return a random character. """
all_values = read_values_from_json('characters.json', 'character')
return get_random_item_in(all_values)
if __name__ == '__main__':
""" ask user, print quote or quit """
user_answer = input('<Enter> pour afficher une autre citation ou Q pour quitter.')
while user_answer != "Q":
print(message(get_random_character(), get_random_quote()))
user_answer = input('<Enter> pour afficher une autre citation ou Q pour quitter.')
|
python
|
# encoding: utf8
from pygubu import BuilderObject, register_custom_property, register_widget
from pygubu.widgets.pathchooserinput import PathChooserInput
class PathChooserInputBuilder(BuilderObject):
class_ = PathChooserInput
OPTIONS_CUSTOM = ('type', 'path', 'image', 'textvariable', 'state',
'initialdir', 'mustexist', 'title',)
properties = OPTIONS_CUSTOM
virtual_events = ('<<PathChooserPathChanged>>',)
def _code_set_property(self, targetid, pname, value, code_bag):
if pname == 'type':
code_bag[pname] = "'{0}'".format(value)
elif pname in ('initialdir', 'mustexist', 'title'):
code_bag[pname] = "'{0}'".format(value)
elif pname == 'textvariable':
code_bag[pname] = self._code_set_tkvariable_property(pname, value)
else:
super(PathChooserInputBuilder, self)._code_set_property(
targetid, pname, value, code_bag)
_builder_id = 'pygubu.builder.widgets.pathchooserinput'
register_widget(_builder_id, PathChooserInputBuilder,
'PathChooserInput', ('ttk', 'Pygubu Widgets'))
_help = 'Dialog type'
register_custom_property(_builder_id, 'type', 'choice',
values=(PathChooserInput.FILE, PathChooserInput.DIR),
state='readonly',
default_value=PathChooserInput.FILE,
help=_help)
_help = 'Initial path value.'
register_custom_property(_builder_id, 'path', 'entry', help=_help)
_help = 'Image for the button.'
register_custom_property(_builder_id, 'image', 'imageentry', help=_help)
_help = 'Tk variable associated to the path property.'
register_custom_property(_builder_id, 'textvariable', 'tkvarentry',
help=_help)
_help = 'Path entry state.'
register_custom_property(_builder_id, 'state', 'choice',
values=('', 'normal', 'disabled', 'readonly'),
state='readonly',
help=_help)
_help = 'Dialog option. Determines if path must exist for directory dialog.'
register_custom_property(_builder_id, 'mustexist', 'choice',
values=('', 'false', 'true'),
state='readonly',
help=_help)
_help = 'Dialog option. Sets initial directory.'
register_custom_property(_builder_id, 'initialdir', 'entry', help=_help)
_help = 'Dialog option. Sets dialog title.'
register_custom_property(_builder_id, 'title', 'entry', help=_help)
|
python
|
"""
Anserini: A toolkit for reproducible information retrieval research built on Lucene
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os
import re
import argparse
import logging
import json
logging.basicConfig()
class XFoldValidate(object):
"""
Perform X-Fold cross validation for various
parameters and report the average effectiveness
for each fold. fold_mapping is an optional argument.
It can be a dictionary {qid:fold_id} that maps
each qid to its corresponding fold.
"""
def __init__(self,output_root,collection,
fold=5,fold_mapping=None):
self.logger = logging.getLogger('x_fold_cv.XFlodValidate')
self.output_root = output_root
self.eval_files_root = 'eval_files'
self.collection = collection
self.fold = fold
self.fold_mapping = fold_mapping
def _get_param_average(self):
# For each parameter set, get its
# average performances in each fold,
# metric, reranking model, and base
# ranking model
avg_performances = {}
eval_root_dir = os.path.join(self.output_root, self.collection,self.eval_files_root)
# do x-fold cv for the collection
for metric in os.listdir(eval_root_dir):
eval_dir = os.path.join(eval_root_dir,metric)
if os.path.isfile(eval_dir):
continue
# if it is a directory containing effectiveness
# for a metric, do x-fold cv for the metric
for fn in os.listdir(eval_dir):
model, param = fn.split('_', 1)
if model not in avg_performances:
avg_performances[model] = {}
param_avg_performances = self._get_param_avg_performances(os.path.join(eval_dir,fn))
for metric in param_avg_performances:
if metric not in avg_performances[model]:
avg_performances[model][metric] = {}
for fold_id in param_avg_performances[metric]:
if fold_id not in avg_performances[model][metric]:
avg_performances[model][metric][fold_id] = {}
avg_performances[model][metric][fold_id][param] = param_avg_performances[metric][fold_id]
return avg_performances
def _compute_fold_id(self,qid):
# compute fold id
if self.fold_mapping:
# use the fold mapping passed to it
return self.fold_mapping[qid]
else:
# compute the fold id based on qid
return int(qid) % self.fold
def tune(self,verbose):
# Tune parameter with x-fold. Use x-1 fold
# for training and 1 fold for testing. Do
# it for each fold and report average
avg_performances = self._get_param_average()
res = {}
for model in avg_performances:
res[model] = {}
for metric in avg_performances[model]:
if verbose:
print('model: {}, metric: {}'.format(model, metric))
metric_fold_performances = []
for test_idx in range(self.fold):
test_fold_performances = avg_performances[model][metric][test_idx]
training_data = {}
for train_idx in range(self.fold):
if train_idx == test_idx:
continue
fold_performance = avg_performances[model][metric][train_idx]
for param in fold_performance:
if param not in training_data:
training_data[param] = .0
training_data[param] += fold_performance[param]
# sort in descending order based on performance first, then use filenames(x[0]) to break ties
sorted_training_performance = sorted(training_data.items(),
key=lambda x:(x[1], x[0]),
reverse=True)
best_param = sorted_training_performance[0][0]
if verbose:
print('\tFold: {}'.format(test_idx))
print('\t\tBest param: {}'.format(best_param))
print('\t\ttest performance: {0:.4f}'.format(test_fold_performances[best_param]))
metric_fold_performances.append(test_fold_performances[best_param])
res[model][metric] = round(sum(metric_fold_performances) / len(metric_fold_performances), 4)
return res
def _get_param_avg_performances(self,file_path):
# Given a file, return its average effectiveness
# for each metric in each fold
param_performance_list = {}
for fold_id in range(self.fold):
param_performance_list[fold_id] = {}
with open(file_path) as f:
for line in f:
line = line.strip()
if line:
row = line.split()
metric = row[0]
if metric not in param_performance_list[0]:
for fold_id in param_performance_list:
param_performance_list[fold_id][metric] = []
qid = row[1]
try:
value = float(row[2])
                    except (ValueError, IndexError):
self.logger.error( 'Cannot parse %s' %(row[2]) )
continue
else:
if qid != 'all':
# compute fold id base on qid
fold_id = self._compute_fold_id(qid)
param_performance_list[fold_id][metric].append(value)
param_avg_performances = {}
for metric in param_performance_list[0].keys():
param_avg_performances[metric] = {}
for fold_id in param_performance_list:
param_avg_performances[metric][fold_id] = round(sum(param_performance_list[fold_id][metric])/len(param_performance_list[fold_id][metric]), 4)
return param_avg_performances
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--output_root', default='fine_tuning_results', help='output directory of all results')
parser.add_argument('--fold', '-f', default=2, type=int, help='number of fold')
parser.add_argument('--verbose', '-v', action='store_true', help='output in verbose mode')
parser.add_argument('--collection', required=True, help='the collection key in yaml')
parser.add_argument('--fold_dir', help='directory of drr fold files')
args=parser.parse_args()
fold_mapping = {}
if args.fold_dir:
from run_batch import load_drr_fold_mapping
fold_mapping = load_drr_fold_mapping(args.fold_dir)
print(json.dumps(XFoldValidate(args.output_root, args.collection, args.fold, fold_mapping).tune(args.verbose), sort_keys=True, indent=2))
if __name__ == '__main__':
main()
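# Hedged programmatic usage (collection name and qids are placeholders, not taken
# from the original script):
#   xfold = XFoldValidate('fine_tuning_results', 'robust04', fold=2,
#                         fold_mapping={'301': 0, '302': 1})
#   print(xfold.tune(verbose=False))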
|
python
|
from picamera import PiCamera
from time import sleep
def record_video(sec):
    # picamera cannot write MP4 containers directly, so record a raw H.264 stream
    pi_cam = PiCamera()
    pi_cam.start_preview()
    pi_cam.start_recording('./video.h264')
    sleep(sec)
    pi_cam.stop_recording()
    pi_cam.stop_preview()
    pi_cam.close()
record_video(5)
|
python
|
import logging
from hearthstone.enums import CardType, Zone, GameTag
from hslog import LogParser, packets
from hslog.export import EntityTreeExporter
from entity.game_entity import GameEntity
from entity.hero_entity import HeroEntity
from entity.spell_entity import SpellEntity
# import entity.cards as ecards
logger = logging.getLogger()
class LogUtil:
def __init__(self, log_path):
self.log_path = log_path
self.parser = LogParser()
self.game = None
        # populated by parse_game(); can be used directly afterwards
self.game_entity = None
def read_log(self):
with open(self.log_path, encoding='utf-8') as f:
self.parser.read(f)
self.parser.flush()
        # the most recent game
packet_tree = self.parser.games[-1]
exporter = EntityTreeExporter(packet_tree, player_manager=self.parser.player_manager)
ee = exporter.export()
self.game = ee.game
def parse_game(self) -> GameEntity:
self.read_log()
for e in self.game.entities:
            # game state entity
if e.type == CardType.GAME:
# print(e, e.tags, end='\n\n\n')
# player = e.players
# for p in player:
# print(p.tags, end='\n\n')
self.game_entity = GameEntity(e)
pass
elif e.type == CardType.MINION:
minion = HeroEntity(e)
# print(e, e.tags, end='\n\n\n')
self.game_entity.add_hero(minion)
pass
            # mercenary ability information
elif e.type == CardType.LETTUCE_ABILITY:
# print(e, e.tags, end='\n\n\n')
owner = e.tags.get(GameTag.LETTUCE_ABILITY_OWNER)
# print(e.card_id)
if owner in self.game_entity.hero_entities.keys():
# hcid = self.game_entity.hero_entities[owner].card_id[:-3]
# cid = e.card_id[:-3]
# cname = 'ecards.' + hcid + '.' + cid + '.' + cid + '(e)'
# print(cname)
# try:
# spell_entity = eval(cname)
# except Exception as ex:
# logger.warning(ex)
spell_entity = SpellEntity(e)
# spell_entity = SpellEntity(e)
self.game_entity.hero_entities[owner].add_spell(spell_entity)
pass
            # combat spell records
elif e.type == CardType.SPELL:
# print(e, e.tags, end='\n\n\n')
pass
# for h in self.game_entity.my_hero:
# if h.card_id[:-3] not in HEROS.keys():
# continue
# hd = HEROS[h.card_id[:-3]]
# for i, s in enumerate(h.spell):
# if i > 2:
# break
# s.read_from_config(hd[3][i])
return self.game_entity
pass
if __name__ == '__main__':
path = "C:/var/Hearthstone/Logs/Power.log"
hs_log = LogUtil(path)
game_entity = hs_log.parse_game()
for i in game_entity.my_hero:
print(i)
for i in game_entity.enemy_hero:
print(i)
pass
|
python
|
''' Train all cort models
Usage:
train_all.py [--num_processes=<n>] --type=<t> <consolidated_conll_dir> <out_dir>
'''
import os
from cort.core.corpora import Corpus
import codecs
import random
import subprocess
from cort_driver import train
from joblib import Parallel, delayed
import sys
import itertools
from docopt import docopt
def main(type_, inp_dir, out_dir, num_processes):
assert type_ in ('pair', 'latent', 'tree'), "Invalid type: %s" %type_
os.makedirs(out_dir, exist_ok=True)
results = Parallel(n_jobs=num_processes, backend="threading", verbose=10)(train_jobs(type_, inp_dir, out_dir))
assert all(results)
def train_jobs(system, inp_dir, out_dir):
manipulations = sorted(os.listdir(inp_dir))
for manipulation in manipulations:
yield delayed(train_single_system)(system, inp_dir, out_dir, manipulation)
def train_single_system(system, inp_dir, out_dir, manipulation):
conll_path = os.path.join(inp_dir, manipulation, 'train.m_gold_conll')
out_model_path = os.path.join(out_dir, 'model-%s-%s.obj' %(system, manipulation))
print('Training %s on %s ...' %(system, conll_path), file=sys.stderr)
if train(system, os.path.abspath(conll_path), os.path.abspath(out_model_path)) == 0:
print('Model written to %s' %out_model_path, file=sys.stderr)
return out_model_path
if __name__ == '__main__':
args = docopt(__doc__)
main(args['--type'], args['<consolidated_conll_dir>'], args['<out_dir>'],
int(args.get('--num_processes') or 1))
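# Hedged example invocation (directory names are placeholders):
#   python train_all.py --num_processes=4 --type=latent consolidated_conll/ models/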
|
python
|
""" Yoga style module """
from enum import Enum
from typing import List
class YogaStyle(Enum):
""" Yoga style enum """
undefined = 0
hatha = 1
yin = 2
chair = 3
def get_all_yoga_styles() -> List[YogaStyle]:
""" Returns a list of all yoga styles in the enum """
return [YogaStyle.hatha, YogaStyle.yin, YogaStyle.chair]
def str_to_yoga_style(name: str) -> YogaStyle:
""" Converts a string to yoga style enum """
if name == YogaStyle.chair.name:
return YogaStyle.chair
if name == YogaStyle.hatha.name:
return YogaStyle.hatha
if name == YogaStyle.yin.name:
return YogaStyle.yin
return YogaStyle.undefined
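# Hedged usage sketch: unknown style names fall back to YogaStyle.undefined.
if __name__ == '__main__':
    print(get_all_yoga_styles())
    print(str_to_yoga_style('yin'))    # -> YogaStyle.yin
    print(str_to_yoga_style('power'))  # -> YogaStyle.undefined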
|
python
|
#!/usr/bin/python
import os
import sys
import argparse
from collections import defaultdict
import re
import fileUtils
def isBeginning(line):
m = re.match(r'^[A-Za-z]+.*', line)
return True if m else False
def isEnd(line):
return True if line.startswith('#end') else False
def getItem(item):
m = re.match(r'[\"\[]*([-\.0-9e]*)[\]\"\,]*', item)
if m:
return m.group(1)
else:
raise ValueError('pattern failed here {}'.format(item))
def readline(line):
tmpList = line.split(' ')
tmpList = list(filter(lambda x: x!='', tmpList))
rtnList = []
for item in tmpList:
item = item.strip()
if item.startswith('"[') or item.endswith(']","'):
item = getItem(item)
if not item:
continue
try:
rtnList.append(fileUtils.str2float(item))
except ValueError:
raise ValueError('item is: {} and line is: {}'.format(item, line))
return rtnList
def loadData(fpath):
vecDict = defaultdict(list)
vecList = []
with open(fpath, 'r') as f:
for line in f:
if line == '"\n':
continue
            if isBeginning(line):
tmp = line.split(',')
title = tmp[0]
title = title.replace(' ', '_')
title = title.replace('?', '')
title = title.replace('.', '')
title = title.lower()
tmpList = readline(tmp[1])
vecList.extend(tmpList)
elif isEnd(line):
vecDict[title] = vecList
vecList = []
title = ''
else:
tmpList = readline(line)
vecList.extend(tmpList)
return vecDict
def main(opts):
import pdb
pdb.set_trace()
dataList = loadData(opts.file)
print(dataList)
def parseOpts(argv):
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', help='file to word2vec file')
opts = parser.parse_args()
return opts
if __name__ == "__main__":
opts = parseOpts(sys.argv)
main(opts)
|
python
|
import speech_recognition as sr
import pyaudio #optional
# get audio from the microphone
while True:  # keep listening until the user says "exit" or the program is interrupted
    r = sr.Recognizer()
    output = ""  # default so the exit check below works even if recognition fails
    with sr.Microphone() as source:
        r.adjust_for_ambient_noise(source)
        print("Speak:")
        audio = r.listen(source)
    try:
        output = r.recognize_google(audio, language='en')  # change language to your desired language
        print("You said: " + output)
    except sr.UnknownValueError:
        print("Could not understand audio")
    except sr.RequestError as e:
        print("Could not request results. Please check your internet connection and try again; {0}".format(e))
    if output == 'exit':  # the loop terminates when the recognized text is 'exit'
        break
|
python
|
from django.conf.urls import url
from django.urls import path
from . import views
from . import dal_views
from .models import *
app_name = 'vocabs'
urlpatterns = [
url(
r'^altname-autocomplete/$', dal_views.AlternativeNameAC.as_view(
model=AlternativeName,),
name='altname-autocomplete',
),
url(
r'^place-autocomplete/$', dal_views.PlaceAC.as_view(
model=Place,),
name='place-autocomplete',
),
url(
r'^crash-place-autocomplete/$', dal_views.CrashPlaceAC.as_view(
model=Place,),
name='crash-place-autocomplete',
),
url(
r'^search-place-autocomplete/$', dal_views.PlaceAC.as_view(),
name='search-place-autocomplete',
),
url(
r'^search-region-autocomplete/$', dal_views.Region.as_view(),
name='search-region-autocomplete',
),
url(
r'^person-autocomplete/$', dal_views.PersonAC.as_view(
model=Place,),
name='person-autocomplete',
),
url(
r'^institution-autocomplete/$', dal_views.InstitutionAC.as_view(
model=Institution,),
name='institution-autocomplete',
),
url(
r'^bomberplanetype-autocomplete/$', dal_views.BomberPlaneTypeAC.as_view(
model=SkosConcept),
name='bomberplanetype-autocomplete',
),
url(
r'^bombersquadron-autocomplete/$', dal_views.BomberSquadronAC.as_view(
model=Institution,),
name='bombersquadron-autocomplete',
),
url(
r'^bomberreasonofcrash-autocomplete/$', dal_views.BomberReasonOfCrashAC.as_view(
model=SkosConcept),
name='bomberreasonofcrash-autocomplete',
),
url(
r'^personpartofbomber-autocomplete/$', dal_views.PersonPartOfBomberAC.as_view(
model=Bomber),
name='personpartofbomber-autocomplete',
),
url(
r'^personrank-autocomplete/$', dal_views.PersonRankAC.as_view(
model=SkosConcept),
name='personrank-autocomplete',
),
url(
r'^persondestinystated-autocomplete/$', dal_views.PersonDestinyStatedAC.as_view(
model=SkosConcept),
name='persondestinystated-autocomplete',
),
url(
r'^persondestinychecked-autocomplete/$', dal_views.PersonDestinyCheckedAC.as_view(
model=SkosConcept),
name='persondestinychecked-autocomplete',
),
url(
r'^personmia-autocomplete/$', dal_views.PersonMIAAC.as_view(
model=SkosConcept),
name='personmia-autocomplete',
),
url(
r'^onlineressourcerelatedpersons-autocomplete/$', dal_views.OnlineRessourceRelatedPersonsAC.as_view(
model=Person,),
name='onlineressourcerelatedpersons-autocomplete',
),
url(
r'^onlineressourcerelatedbombers-autocomplete/$', dal_views.OnlineRessourceRelatedBombersAC.as_view(
model=Bomber,),
name='onlineressourcerelatedbombers-autocomplete',
),
url(
r'^onlineressourcerelatedwarcrimecases-autocomplete/$', dal_views.OnlineRessourceRelatedWarCrimeCasesAC.as_view(
model=WarCrimeCase,),
name='onlineressourcerelatedwarcrimecases-autocomplete',
),
url(
r'^personwarcrimecaserelatedpersons-autocomplete/$', dal_views.PersonWarCrimeCaseRelatedPersonsAC.as_view(
model=Person,),
name='personwarcrimecaserelatedpersons-autocomplete',
),
url(
r'^personwarcrimecaserelatedcases-autocomplete/$', dal_views.PersonWarCrimeCaseRelatedCasesAC.as_view(
model=WarCrimeCase,),
name='personwarcrimecaserelatedcases-autocomplete',
),
url(
r'^personwarcrimecaserelationtype-autocomplete/$', dal_views.PersonWarCrimeCaseRelationTypeAC.as_view(
model=SkosConcept),
name='personwarcrimecaserelationtype-autocomplete',
),
url(
r'^warcrimecaserelatedpersons-autocomplete/$', dal_views.WarCrimeCaseRelatedPersonsAC.as_view(
model=Person,),
name='warcrimecaserelatedpersons-autocomplete',
),
url(
r'^warcrimecaserelatedcases-autocomplete/$', dal_views.WarCrimeCaseRelatedCasesAC.as_view(
model=WarCrimeCase,),
name='warcrimecaserelatedcases-autocomplete',
),
url(
r'^warcrimecaserelatedplaces-autocomplete/$', dal_views.WarCrimeCaseRelatedPlacesAC.as_view(
model=Place,),
name='warcrimecaserelatedplaces-autocomplete',
),
url(
r'^warcrimecasecrimetype-autocomplete/$', dal_views.WarCrimeCaseCrimeTypeAC.as_view(
model=SkosConcept),
name='warcrimecasecrimetype-autocomplete',
),
url(
r'^airstriketarget-autocomplete/$', dal_views.AirstrikeTargetAC.as_view(
model=Place),
name='airstriketarget-autocomplete',
),
url(
r'^airstrikeplanetype-autocomplete/$', dal_views.AirstrikePlaneTypeAC.as_view(
model=SkosConcept),
name='airstrikeplanetype-autocomplete',
),
url(
r'^airstrikeairforce-autocomplete/$', dal_views.AirstrikeAirforceAC.as_view(
model=Institution,),
name='airstrikeairforce-autocomplete',
),
path(
r'specific-place-ac/<str:lookup>', dal_views.PlaceConstraintAC.as_view(
model=Place),
name='specific-place-ac',
),
path(
r'specific-person-ac/<str:lookup>', dal_views.PersonConstraintAC.as_view(
model=Place),
name='specific-person-ac',
)
]
|
python
|
from dancerl.models.base import CreateCNN,CreateMLP
import torch.nn as nn
if __name__ == '__main__':
mlp=CreateMLP(model_config=[[4,32,nn.ReLU()],
[32,64,nn.ReLU()],
[64,3,nn.Identity()]])
print(mlp)
cnn=CreateCNN(model_config=[[4,32,3,2,1,nn.ReLU()],
[32,32,3,2,1,nn.ReLU()],
[32,32,3,2,1,nn.ReLU()],
[32,32,3,2,1,nn.ReLU()]])
print(cnn)
cnn1=CreateCNN(model_config=[[4,32,(3,2),(2,1),(1,2),nn.ReLU()],
[32,32,(3,2),(2,1),(1,2),nn.ReLU()],
[32,32,(3,2),(2,1),(1,2),nn.ReLU()],
[32,32,(3,2),(2,1),(1,2),nn.ReLU()]])
print(cnn1)
cnn2=CreateCNN(model_config=[[4,32,3,2,1,nn.ReLU()],
[32,32,3,2,1,nn.ReLU()],
[32,32,3,2,1,nn.ReLU()],
[32,32,3,2,1,nn.ReLU()]],
post_fcnet_config=[[32*6*6,512,nn.ReLU()],
[512,3,nn.Identity()]])
print(cnn2)
|