| max_stars_repo_path (string, 4–245 chars) | max_stars_repo_name (string, 7–115 chars) | max_stars_count (int64, 101–368k) | id (string, 2–8 chars) | content (string, 6–1.03M chars) |
|---|---|---|---|---|
| Python3/131.py | rakhi2001/ecom7 | 854 | 94215 |
<gh_stars>100-1000
__________________________________________________________________________________________________
sample 56 ms submission
# These imports are implicit in the LeetCode environment; they are added here so
# the snippet runs standalone.
from typing import List
import collections


class Solution:
    def partition(self, s: str) -> List[List[str]]:
        def make_results(index, palindromes, result, results):
            if index >= len(s):
                results += result[:],
            else:
                for palindrome in palindromes[index]:
                    make_results(index + len(palindrome), palindromes, result + [palindrome], results)

        n = len(s)
        is_palindrome = set()
        palindromes = collections.defaultdict(list)
        for i in range(0, len(s)):
            for j in range(i + 1):
                if s[i] == s[j] and ((i - j) <= 1 or (j + 1, i - 1) in is_palindrome):
                    is_palindrome.add((j, i))
                    substring = s[j:i + 1]
                    palindromes[j] += substring,
        results = []
        make_results(0, palindromes, [], results)
        return results
__________________________________________________________________________________________________
sample 13288 kb submission
class Solution:
    rst = []

    def partition(self, s: str) -> List[List[str]]:
        self.rst = []
        n = len(s)
        out = []
        dp = [[False for _ in range(n)] for _ in range(n)]
        for i in range(n):
            for j in range(0, i + 1):
                if s[i] == s[j] and (i - j <= 2 or dp[j+1][i-1]):
                    dp[j][i] = True
        self.helper(s, 0, dp, out)
        return self.rst

    def helper(self, s, start, dp, out):
        out = out[:]
        if start == len(s):
            self.rst.append(out)
            return
        for i in range(start, len(s)):
            if not dp[start][i]:
                continue
            out.append(s[start:i + 1])
            self.helper(s, i + 1, dp, out)
            out.pop()
__________________________________________________________________________________________________
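A quick check of either submission above, shown as a sketch (the entry point and expected output are inferred from the problem statement, not part of the original submissions):

# Illustrative usage; assumes the imports noted in the first submission.
if __name__ == "__main__":
    print(Solution().partition("aab"))  # expected: [['a', 'a', 'b'], ['aa', 'b']]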
| incomplete/rasterizer/rasterizer/shape.py | choosewhatulike/500lines | 26,185 | 94289 |
<gh_stars>1000+
from color import Color
from itertools import product
from geometry import Vector
import random


class SceneObject:
    def draw(self, image):
        raise NotImplementedError("Undefined method")


class Shape(SceneObject):
    def __init__(self, color=None):
        self.color = color if color is not None else Color()
        self.bound = None

    def contains(self, p):
        raise NotImplementedError("Undefined method")

    def signed_distance_bound(self, p):
        raise NotImplementedError("Undefined method")

    def draw(self, image, super_sampling=6):
        if not self.bound.overlaps(image.bounds()):
            return
        color = self.color
        r = float(image.resolution)
        jitter = [Vector((x + random.random()) / super_sampling / r,
                         (y + random.random()) / super_sampling / r)
                  for (x, y) in product(xrange(super_sampling), repeat=2)]
        lj = len(jitter)
        l_x = max(int(self.bound.low.x * r), 0)
        l_y = max(int(self.bound.low.y * r), 0)
        h_x = min(int(self.bound.high.x * r), r-1)
        h_y = min(int(self.bound.high.y * r), r-1)
        for y in xrange(l_y, int(h_y+1)):
            x = l_x
            while x <= h_x:
                corner = Vector(x / r, y / r)
                b = self.signed_distance_bound(corner)
                pixel_diameter = (2 ** 0.5) / r
                if b > pixel_diameter:
                    steps = int(r * (b - (pixel_diameter - 1.0/r)))
                    for x_ in xrange(x, min(x + steps, int(h_x+1))):
                        image.pixels[y][x_].draw(color)
                    x += steps
                elif b < -pixel_diameter:
                    steps = int(r * (-b - (pixel_diameter - 1.0/r)))
                    x += steps
                else:
                    coverage = 0
                    for j in jitter:
                        if self.contains(corner + j):
                            coverage += 1.0
                    image.pixels[y][x].draw(color.fainter(coverage / lj))
                    x += 1
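For orientation, here is a minimal sketch of a concrete Shape (not part of shape.py); it assumes geometry.Vector supports subtraction and exposes a length() method, and a real subclass would also have to set self.bound to the project's bounding-box type for draw() to work.

class Circle(Shape):
    """Illustrative sketch of a Shape subclass, not the repository's implementation."""
    def __init__(self, center, radius, color=None):
        Shape.__init__(self, color)
        self.center = center
        self.radius = radius

    def contains(self, p):
        return (p - self.center).length() <= self.radius

    def signed_distance_bound(self, p):
        # Positive inside the circle, negative outside, as Shape.draw expects.
        return self.radius - (p - self.center).length()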
| Chapter03/email_spam.py | karim7262/Python-Machine-Learning-By-Example | 106 | 94307 |
from sklearn.feature_extraction.text import CountVectorizer
from nltk.corpus import names
from nltk.stem import WordNetLemmatizer
import glob
import os
import numpy as np
file_path = 'enron1/ham/0007.1999-12-14.farmer.ham.txt'
with open(file_path, 'r') as infile:
ham_sample = infile.read()
print(ham_sample)
file_path = 'enron1/spam/0058.2003-12-21.GP.spam.txt'
with open(file_path, 'r') as infile:
spam_sample = infile.read()
print(spam_sample)
cv = CountVectorizer(stop_words="english", max_features=500)
emails, labels = [], []
file_path = 'enron1/spam/'
for filename in glob.glob(os.path.join(file_path, '*.txt')):
with open(filename, 'r', encoding = "ISO-8859-1") as infile:
emails.append(infile.read())
labels.append(1)
file_path = 'enron1/ham/'
for filename in glob.glob(os.path.join(file_path, '*.txt')):
with open(filename, 'r', encoding = "ISO-8859-1") as infile:
emails.append(infile.read())
labels.append(0)
def letters_only(astr):
return astr.isalpha()
all_names = set(names.words())
lemmatizer = WordNetLemmatizer()
def clean_text(docs):
cleaned_docs = []
for doc in docs:
cleaned_docs.append(' '.join([lemmatizer.lemmatize(word.lower())
for word in doc.split()
if letters_only(word)
and word not in all_names]))
return cleaned_docs
cleaned_emails = clean_text(emails)
term_docs = cv.fit_transform(cleaned_emails)
print(term_docs[0])
feature_mapping = cv.vocabulary_
feature_names = cv.get_feature_names()
def get_label_index(labels):
from collections import defaultdict
label_index = defaultdict(list)
for index, label in enumerate(labels):
label_index[label].append(index)
return label_index
def get_prior(label_index):
""" Compute prior based on training samples
Args:
label_index (grouped sample indices by class)
Returns:
dictionary, with class label as key, corresponding prior as the value
"""
prior = {label: len(index) for label, index in label_index.items()}
total_count = sum(prior.values())
for label in prior:
prior[label] /= float(total_count)
return prior
def get_likelihood(term_document_matrix, label_index, smoothing=0):
""" Compute likelihood based on training samples
Args:
term_document_matrix (sparse matrix)
label_index (grouped sample indices by class)
smoothing (integer, additive Laplace smoothing parameter)
Returns:
dictionary, with class as key, corresponding conditional probability P(feature|class) vector as value
"""
likelihood = {}
for label, index in label_index.items():
likelihood[label] = term_document_matrix[index, :].sum(axis=0) + smoothing
likelihood[label] = np.asarray(likelihood[label])[0]
total_count = likelihood[label].sum()
likelihood[label] = likelihood[label] / float(total_count)
return likelihood
feature_names[:5]
def get_posterior(term_document_matrix, prior, likelihood):
""" Compute posterior of testing samples, based on prior and likelihood
Args:
term_document_matrix (sparse matrix)
prior (dictionary, with class label as key, corresponding prior as the value)
likelihood (dictionary, with class label as key, corresponding conditional probability vector as value)
Returns:
dictionary, with class label as key, corresponding posterior as value
"""
num_docs = term_document_matrix.shape[0]
posteriors = []
for i in range(num_docs):
# posterior is proportional to prior * likelihood
# = exp(log(prior * likelihood))
# = exp(log(prior) + log(likelihood))
posterior = {key: np.log(prior_label) for key, prior_label in prior.items()}
for label, likelihood_label in likelihood.items():
term_document_vector = term_document_matrix.getrow(i)
counts = term_document_vector.data
indices = term_document_vector.indices
for count, index in zip(counts, indices):
posterior[label] += np.log(likelihood_label[index]) * count
# exp(-1000):exp(-999) will cause zero division error,
# however it equates to exp(0):exp(1)
min_log_posterior = min(posterior.values())
for label in posterior:
try:
posterior[label] = np.exp(posterior[label] - min_log_posterior)
except:
# if one's log value is excessively large, assign it infinity
posterior[label] = float('inf')
# normalize so that all sums up to 1
sum_posterior = sum(posterior.values())
for label in posterior:
if posterior[label] == float('inf'):
posterior[label] = 1.0
else:
posterior[label] /= sum_posterior
posteriors.append(posterior.copy())
return posteriors
label_index = get_label_index(labels)
prior = get_prior(label_index)
smoothing = 1
likelihood = get_likelihood(term_docs, label_index, smoothing)
emails_test = [
'''Subject: flat screens
hello ,
please call or contact regarding the other flat screens requested .
<NAME> - eb 3132 b
<NAME> - eb 3132 a
also the sun blocker that was taken away from eb 3131 a .
trisha should two monitors also michael .
thanks
<NAME>''',
'''Subject: having problems in bed ? we can help !
cialis allows men to enjoy a fully normal sex life without having to plan the sexual act .
if we let things terrify us , life will not be worth living .
brevity is the soul of lingerie .
suspicion always haunts the guilty mind .''',
]
cleaned_test = clean_text(emails_test)
term_docs_test = cv.transform(cleaned_test)
posterior = get_posterior(term_docs_test, prior, likelihood)
print(posterior)
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(cleaned_emails, labels, test_size=0.33, random_state=42)
len(X_train), len(Y_train)
len(X_test), len(Y_test)
term_docs_train = cv.fit_transform(X_train)
label_index = get_label_index(Y_train)
prior = get_prior(label_index)
likelihood = get_likelihood(term_docs_train, label_index, smoothing)
term_docs_test = cv.transform(X_test)
posterior = get_posterior(term_docs_test, prior, likelihood)
correct = 0.0
for pred, actual in zip(posterior, Y_test):
if actual == 1:
if pred[1] >= 0.5:
correct += 1
elif pred[0] > 0.5:
correct += 1
print('The accuracy on {0} testing samples is: {1:.1f}%'.format(len(Y_test), correct/len(Y_test)*100))
from sklearn.naive_bayes import MultinomialNB
clf = MultinomialNB(alpha=1.0, fit_prior=True)
clf.fit(term_docs_train, Y_train)
prediction_prob = clf.predict_proba(term_docs_test)
prediction_prob[0:10]
prediction = clf.predict(term_docs_test)
prediction[:10]
accuracy = clf.score(term_docs_test, Y_test)
print('The accuracy using MultinomialNB is: {0:.1f}%'.format(accuracy*100))
from sklearn.metrics import confusion_matrix
confusion_matrix(Y_test, prediction, labels=[0, 1])
from sklearn.metrics import precision_score, recall_score, f1_score
precision_score(Y_test, prediction, pos_label=1)
recall_score(Y_test, prediction, pos_label=1)
f1_score(Y_test, prediction, pos_label=1)
f1_score(Y_test, prediction, pos_label=0)
from sklearn.metrics import classification_report
report = classification_report(Y_test, prediction)
print(report)
pos_prob = prediction_prob[:, 1]
thresholds = np.arange(0.0, 1.2, 0.1)
true_pos, false_pos = [0]*len(thresholds), [0]*len(thresholds)
for pred, y in zip(pos_prob, Y_test):
for i, threshold in enumerate(thresholds):
if pred >= threshold:
if y == 1:
true_pos[i] += 1
else:
false_pos[i] += 1
else:
break
true_pos_rate = [tp / 516.0 for tp in true_pos]
false_pos_rate = [fp / 1191.0 for fp in false_pos]
import matplotlib.pyplot as plt
plt.figure()
lw = 2
plt.plot(false_pos_rate, true_pos_rate, color='darkorange',
lw=lw)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic')
plt.legend(loc="lower right")
plt.show()
from sklearn.metrics import roc_auc_score
roc_auc_score(Y_test, pos_prob)
from sklearn.model_selection import StratifiedKFold
k = 10
k_fold = StratifiedKFold(n_splits=k)
# convert to numpy array for more efficient slicing
cleaned_emails_np = np.array(cleaned_emails)
labels_np = np.array(labels)
max_features_option = [2000, 4000, 8000]
smoothing_factor_option = [0.5, 1.0, 1.5, 2.0]
fit_prior_option = [True, False]
auc_record = {}
for train_indices, test_indices in k_fold.split(cleaned_emails, labels):
X_train, X_test = cleaned_emails_np[train_indices], cleaned_emails_np[test_indices]
Y_train, Y_test = labels_np[train_indices], labels_np[test_indices]
for max_features in max_features_option:
if max_features not in auc_record:
auc_record[max_features] = {}
cv = CountVectorizer(stop_words="english", max_features=max_features)
term_docs_train = cv.fit_transform(X_train)
term_docs_test = cv.transform(X_test)
for smoothing_factor in smoothing_factor_option:
if smoothing_factor not in auc_record[max_features]:
auc_record[max_features][smoothing_factor] = {}
for fit_prior in fit_prior_option:
clf = MultinomialNB(alpha=smoothing_factor, fit_prior=fit_prior)
clf.fit(term_docs_train, Y_train)
prediction_prob = clf.predict_proba(term_docs_test)
pos_prob = prediction_prob[:, 1]
auc = roc_auc_score(Y_test, pos_prob)
auc_record[max_features][smoothing_factor][fit_prior] \
= auc + auc_record[max_features][smoothing_factor].get(fit_prior, 0.0)
print(auc_record)
print('max features smoothing fit prior auc')
for max_features, max_feature_record in auc_record.items():
for smoothing, smoothing_record in max_feature_record.items():
for fit_prior, auc in smoothing_record.items():
print(' {0} {1} {2} {3:.4f}'.format(max_features, smoothing, fit_prior, auc/k))
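A tiny worked example of the bookkeeping above, with made-up labels:

# Illustrative numbers only:
# labels = [1, 0, 0, 1, 1]
# get_label_index(labels) -> {1: [0, 3, 4], 0: [1, 2]}
# get_prior(...)          -> {1: 0.6, 0: 0.4}
# With smoothing=1, every feature count is incremented by 1 per class before
# normalising, so words unseen in a class still get a small nonzero likelihood.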
| MIDI_CLUE_BLE_Glove/code.py | gamblor21/Adafruit_Learning_System_Guides | 665 | 94354 |
<reponame>gamblor21/Adafruit_Learning_System_Guides
"""
CLUE BLE MIDI
Sends MIDI CC values based on accelerometer x & y and proximity sensor
Touch #0 switches Bank/Preset patches
Touch #1 picks among the three CC lines w A&B buttons adjusting CC numbers
Touch #2 starts/stops sending CC messages (still allows Program Change)
"""
import time
from adafruit_clue import clue
import adafruit_ble
from adafruit_ble.advertising.standard import ProvideServicesAdvertisement
import adafruit_ble_midi
import adafruit_midi
from adafruit_midi.control_change import ControlChange
from adafruit_midi.program_change import ProgramChange
# from adafruit_midi.note_on import NoteOn
# from adafruit_midi.pitch_bend import PitchBend
import simpleio
import displayio
import terminalio
from adafruit_display_text import label
from adafruit_display_shapes.rect import Rect
# --- Pick your midi out channel here ---
midi_channel = 1
# --- Pick your MIDI CC numbers here ---
cc_x_num = 71
cc_y_num = 72
cc_prox_num = 73
# --- Pick Bank & Preset pairs here ---
touch_patch = [ # first number is the Bank, second is the Preset
(4, 16), # minimoog: Leads > Original MINI
(5, 8), # Pads > Intergalactic Pass
(0, 13), # Bass > Kraft Bass
(6, 9), # Percussion > Space Hat
]
patch_count = len(touch_patch)
patch_index = (
patch_count - 1
) # start on the last one so first time it is pressed it goes to first
cc_x = 0
cc_y = 0
cc_prox = 0
# Use default HID descriptor
midi_service = adafruit_ble_midi.MIDIService()
advertisement = ProvideServicesAdvertisement(midi_service)
ble = adafruit_ble.BLERadio()
if ble.connected:
for c in ble.connections:
c.disconnect()
midi = adafruit_midi.MIDI(midi_out=midi_service, out_channel=midi_channel - 1)
print("advertising")
ble.name = "CLUE BLE MIDI"
ble.start_advertising(advertisement)
clue.display.brightness = 1.0
clue.pixel.brightness = 0.2
screen = displayio.Group()
ORANGE = 0xCE6136
GRAY = 0x080808
BLACK = 0x121212
BLUE = 0x668190
SILVER = 0xAAAAAA
BROWN = 0x805D40
# --- Setup screen ---
# BG
color_bitmap = displayio.Bitmap(240, 240, 1)
color_palette = displayio.Palette(1)
color_palette[0] = GRAY
bg_sprite = displayio.TileGrid(color_bitmap, x=0, y=0, pixel_shader=color_palette)
screen.append(bg_sprite)
column_a = 20
column_b = 168
# positions that are distributed relative to cc_x and cc_prox y positions
row_a = 80
row_c = 170
row_b = int(row_a + ((row_c - row_a) / 2))
line_row_a = int(row_a + ((row_b - row_a) / 2))
line_row_b = int(row_b + ((row_c - row_b) / 2))
picker_box_row = [row_a, row_b, row_c]
# trim
top_trim_box = Rect(0, 0, 240, 8, fill=BROWN, outline=None)
screen.append(top_trim_box)
bottom_trim_box = Rect(0, 232, 240, 8, fill=BROWN, outline=None)
screen.append(bottom_trim_box)
# title text
title_label = label.Label(terminalio.FONT, text="MIDI CLUE", scale=4, color=SILVER)
title_label.x = 14
title_label.y = 27
screen.append(title_label)
# title box
title_box = Rect(0, 54, 240, 8, fill=BROWN, outline=None)
screen.append(title_box)
# cc x num
cc_x_num_label = label.Label(
terminalio.FONT,
text=("CC {}".format(cc_x_num)),
scale=3,
color=ORANGE,
)
cc_x_num_label.x = column_a
cc_x_num_label.y = row_a
screen.append(cc_x_num_label)
# cc x value
cc_x_label = label.Label(terminalio.FONT, text=str(cc_x), scale=3, color=ORANGE)
cc_x_label.x = column_b
cc_x_label.y = row_a
screen.append(cc_x_label)
# picker box
picker_box = Rect(3, row_a, 6, 6, fill=ORANGE, outline=None)
screen.append(picker_box)
# mid line
mid_line_a = Rect(0, line_row_a, 240, 2, fill=SILVER, outline=None)
screen.append(mid_line_a)
# cc y num
cc_y_num_label = label.Label(
terminalio.FONT, text=("CC {}".format(cc_y_num)), scale=3, color=BLUE
)
cc_y_num_label.x = column_a
cc_y_num_label.y = row_b
screen.append(cc_y_num_label)
# cc y value text
cc_y_label = label.Label(terminalio.FONT, text=str(cc_y), scale=3, color=BLUE)
cc_y_label.x = column_b
cc_y_label.y = row_b
screen.append(cc_y_label)
# mid line
mid_line_b = Rect(0, line_row_b, 240, 2, fill=SILVER, outline=None)
screen.append(mid_line_b)
# cc prox num text
cc_prox_num_label = label.Label(
terminalio.FONT,
text=("CC {}".format(cc_prox_num)),
scale=3,
color=SILVER,
)
cc_prox_num_label.x = column_a
cc_prox_num_label.y = row_c
screen.append(cc_prox_num_label)
# cc prox value text
cc_prox_label = label.Label(terminalio.FONT, text=str(cc_prox), scale=3, color=SILVER)
cc_prox_label.x = column_b
cc_prox_label.y = row_c
screen.append(cc_prox_label)
# footer line
footer_line = Rect(0, 192, 240, 2, fill=SILVER, outline=None)
screen.append(footer_line)
# patch label
patch_label = label.Label(terminalio.FONT, text="Patch _", scale=2, color=BLUE)
patch_label.x = 4
patch_label.y = 216
screen.append(patch_label)
# footer label
footer_label = label.Label(terminalio.FONT, text="connect BLE", scale=2, color=ORANGE)
footer_label.x = 102
footer_label.y = 216
screen.append(footer_label)
# show the screen
clue.display.show(screen)
cc_num_pick_toggle = 0 # which cc to adjust w buttons
cc_send_toggle = True # to start and stop sending cc
debug = False # set debug mode True to test raw values, set False to run BLE MIDI
while True:
if debug:
accel_data = clue.acceleration # get accelerometer reading
accel_x = accel_data[0]
accel_y = accel_data[1]
prox_data = clue.proximity
print("x:{} y:{}".format(accel_x, accel_y,))
print("proximity: {}".format(clue.proximity))
time.sleep(0.2)
else:
print("Waiting for connection")
while not ble.connected:
pass
print("Connected")
footer_label.x = 80
footer_label.color = BLUE
footer_label.text = "BLE Connected"
time.sleep(2)
footer_label.x = 110
footer_label.color = SILVER
footer_label.text = "sending CC"
while ble.connected:
# Clue sensor readings to CC
accel_data = clue.acceleration # get accelerometer reading
accel_x = accel_data[0]
accel_y = accel_data[1]
prox_data = clue.proximity
# Remap analog readings to cc range
cc_x = int(simpleio.map_range(accel_x, -9, 9, 0, 127))
cc_y = int(simpleio.map_range(accel_y, 0, 9, 0, 127))
cc_prox = int(simpleio.map_range(prox_data, 0, 255, 0, 127))
# send all the midi messages in a list
if cc_send_toggle:
midi.send(
[
ControlChange(cc_x_num, cc_x),
ControlChange(cc_y_num, cc_y),
ControlChange(cc_prox_num, cc_prox),
]
)
cc_x_label.text = str(cc_x)
cc_y_label.text = str(cc_y)
cc_prox_label.text = str(cc_prox)
# If you want to send NoteOn or Pitch Bend, here are examples:
# midi.send(NoteOn(44, 120))  # G sharp 2nd octave (velocity value is illustrative)
# a_pitch_bend = PitchBend(random.randint(0, 16383))
# midi.send(a_pitch_bend)
if clue.button_a:
if cc_num_pick_toggle == 0:
cc_x_num = cc_x_num - 1
cc_x_num_label.text = "CC {}".format(cc_x_num)
time.sleep(0.05) # Debounce
elif cc_num_pick_toggle == 1:
cc_y_num = cc_y_num - 1
cc_y_num_label.text = "CC {}".format(cc_y_num)
time.sleep(0.05)
else:
cc_prox_num = cc_prox_num - 1
cc_prox_num_label.text = "CC {}".format(cc_prox_num)
time.sleep(0.05)
if clue.button_b:
if cc_num_pick_toggle == 0:
cc_x_num = cc_x_num + 1
cc_x_num_label.text = "CC {}".format(cc_x_num)
time.sleep(0.05)
elif cc_num_pick_toggle == 1:
cc_y_num = cc_y_num + 1
cc_y_num_label.text = "CC {}".format(cc_y_num)
time.sleep(0.05)
else:
cc_prox_num = cc_prox_num + 1
cc_prox_num_label.text = "CC {}".format(cc_prox_num)
time.sleep(0.05)
if clue.touch_0:
patch_index = (patch_index + 1) % patch_count
midi.send( # Bank select
[
ControlChange(0, 0), # MSB
ControlChange(32, touch_patch[patch_index][0]), # LSB
]
)
midi.send(ProgramChange(touch_patch[patch_index][1])) # Program Change
patch_label.text = "Patch {}".format(patch_index + 1)
time.sleep(0.2)
if clue.touch_1:
cc_num_pick_toggle = (cc_num_pick_toggle + 1) % 3
picker_box.y = picker_box_row[cc_num_pick_toggle]
time.sleep(0.1)
if clue.touch_2:
cc_send_toggle = not cc_send_toggle
if cc_send_toggle:
footer_label.x = 110
footer_label.color = SILVER
footer_label.text = "sending CC"
else:
footer_label.x = 114
footer_label.color = ORANGE
footer_label.text = "CC paused"
time.sleep(0.1)
print("Disconnected")
print()
ble.start_advertising(advertisement)
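For reference, the CC scaling done with simpleio.map_range above amounts to a clamped linear remap; the helper below is a stand-in for illustration, not the Adafruit implementation:

def map_range_sketch(x, in_min, in_max, out_min, out_max):
    # Clamp to the input range, then remap linearly (illustrative stand-in).
    x = min(max(x, in_min), in_max)
    return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min

# A level board (accel_x == 0) lands in the middle of the CC range:
# int(map_range_sketch(0, -9, 9, 0, 127)) == 63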
| axelrod/player.py | nandhinianandj/Axelrod | 596 | 94397 |
import copy
import inspect
import itertools
import types
import warnings
from typing import Any, Dict
import numpy as np
from axelrod import _module_random
from axelrod.action import Action
from axelrod.game import DefaultGame
from axelrod.history import History
from axelrod.random_ import RandomGenerator
C, D = Action.C, Action.D
class PostInitCaller(type):
"""Metaclass to be able to handle post __init__ tasks.
If there is a DerivedPlayer class of Player that overrides
_post_init, as follows:
class Player(object, metaclass=PostInitCaller):
def __new__(cls, *args, **kwargs):
print("Player.__new__")
obj = super().__new__(cls)
return obj
def __init__(self):
print("Player.__init__")
def _post_init(self):
print("Player._post_init")
def _post_transform(self):
print("Player._post_transform")
class DerivedPlayer(Player):
def __init__(self):
print("DerivedPlayer.__init__")
super().__init__()
def _post_init(self):
print("DerivedPlayer._post_init")
super()._post_init()
dp = DerivedPlayer()
Then the call order is:
* PostInitCaller.__call__
* Player.__new__
* DerivedPlayer.__init__
* Player.__init__
* DerivedPlayer._post_init
* Player._post_init
* Player._post_transform
See here to learn more: https://blog.ionelmc.ro/2015/02/09/understanding-python-metaclasses/
"""
def __call__(cls, *args, **kwargs):
# This calls cls.__new__ and cls.__init__
obj = type.__call__(cls, *args, **kwargs)
# Next we do any post init or post transform tasks, like recomputing
# classifiers
# Note that subclasses inherit the metaclass, and subclasses may override
# or extend __init__ so it's necessary to do these tasks after all the
# __init__'s have run in the case of a post-transform reclassification.
obj._post_init()
obj._post_transform()
return obj
class Player(object, metaclass=PostInitCaller):
"""A class for a player in the tournament.
This is an abstract base class, not intended to be used directly.
"""
name = "Player"
classifier = {} # type: Dict[str, Any]
_reclassifiers = []
def __new__(cls, *args, **kwargs):
"""Caches arguments for Player cloning."""
obj = super().__new__(cls)
obj.init_kwargs = cls.init_params(*args, **kwargs)
return obj
@classmethod
def init_params(cls, *args, **kwargs):
"""
Return a dictionary containing the init parameters of a strategy
(without 'self').
Use *args and **kwargs as value if specified
and complete the rest with the default values.
"""
sig = inspect.signature(cls.__init__)
# The 'self' parameter needs to be removed or the first *args will be
# assigned to it
self_param = sig.parameters.get("self")
new_params = list(sig.parameters.values())
new_params.remove(self_param)
sig = sig.replace(parameters=new_params)
boundargs = sig.bind_partial(*args, **kwargs)
boundargs.apply_defaults()
return boundargs.arguments
def __init__(self):
"""Initial class setup."""
self._history = History()
self.classifier = copy.deepcopy(self.classifier)
self.set_match_attributes()
def _post_init(self):
"""Post initialization tasks such as reclassifying the strategy."""
pass
def _post_transform(self):
"""Handles post transform tasks such as further reclassifying."""
# Reclassify strategy post __init__, if needed.
for (reclassifier, args, kwargs) in self._reclassifiers:
self.classifier = reclassifier(self.classifier, *args, **kwargs)
def __eq__(self, other):
"""
Test if two players are equal, ignoring random seed and RNG state.
"""
if self.__repr__() != other.__repr__():
return False
for attribute in set(
list(self.__dict__.keys()) + list(other.__dict__.keys())
):
value = getattr(self, attribute, None)
other_value = getattr(other, attribute, None)
if attribute in ["_random", "_seed"]:
# Don't compare the random generators.
continue
if isinstance(value, np.ndarray):
if not (np.array_equal(value, other_value)):
return False
elif isinstance(value, types.GeneratorType) or isinstance(
value, itertools.cycle
):
# Split the original generator so it is not touched
generator, original_value = itertools.tee(value)
other_generator, original_other_value = itertools.tee(
other_value
)
if isinstance(value, types.GeneratorType):
setattr(self, attribute, (ele for ele in original_value))
setattr(
other, attribute, (ele for ele in original_other_value)
)
else:
setattr(self, attribute, itertools.cycle(original_value))
setattr(
other, attribute, itertools.cycle(original_other_value)
)
for _ in range(200):
try:
if next(generator) != next(other_generator):
return False
except StopIteration:
break
# Code for a strange edge case where each strategy points at each
# other
elif value is other and other_value is self:
pass
else:
if value != other_value:
return False
return True
def receive_match_attributes(self):
# Overwrite this function if your strategy needs
# to make use of match_attributes such as
# the game matrix, the number of rounds or the noise
pass
def set_match_attributes(self, length=-1, game=None, noise=0):
if not game:
game = DefaultGame
self.match_attributes = {"length": length, "game": game, "noise": noise}
self.receive_match_attributes()
def set_seed(self, seed):
"""Set a random seed for the player's random number generator."""
if seed is None:
warnings.warn(
"Initializing player with seed from Axelrod module random number generator. "
"Results may not be seed reproducible."
)
self._seed = _module_random.random_seed_int()
else:
self._seed = seed
self._random = RandomGenerator(seed=self._seed)
def __repr__(self):
"""The string method for the strategy.
Appends the `__init__` parameters to the strategy's name."""
name = self.name
prefix = ": "
gen = (
value for value in self.init_kwargs.values() if value is not None
)
for value in gen:
try:
if issubclass(value, Player):
value = value.name
except TypeError:
pass
name = "".join([name, prefix, str(value)])
prefix = ", "
return name
def __getstate__(self):
"""Used for pickling. Override if Player contains unpickleable attributes."""
return self.__dict__
def strategy(self, opponent):
"""This is a placeholder strategy."""
raise NotImplementedError()
def clone(self):
"""Clones the player without history, reapplying configuration
parameters as necessary."""
# You may be tempted to re-implement using the `copy` module
# Note that this would require a deepcopy in some cases and there may
# be significant changes required throughout the library.
# Consider overriding in special cases only if necessary
cls = self.__class__
new_player = cls(**self.init_kwargs)
new_player.match_attributes = copy.copy(self.match_attributes)
return new_player
def reset(self):
"""Resets a player to its initial state
This method is called at the beginning of each match (between a pair
of players) to reset a player's state to its initial starting point.
It ensures that no 'memory' of previous matches is carried forward.
"""
# This also resets the history.
self.__init__(**self.init_kwargs)
def update_history(self, play, coplay):
self.history.append(play, coplay)
@property
def history(self):
return self._history
# Properties maintained for legacy API, can refactor to self.history.X
# in 5.0.0 to reduce function call overhead.
@property
def cooperations(self):
return self._history.cooperations
@property
def defections(self):
return self._history.defections
@property
def state_distribution(self):
return self._history.state_distribution
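A minimal sketch of how a concrete strategy builds on this base class (illustrative only; not part of axelrod/player.py):

class AlwaysCooperate(Player):
    """Sketch of a strategy that cooperates unconditionally."""
    name = "Always Cooperate (sketch)"
    classifier = {"memory_depth": 0, "stochastic": False}

    def strategy(self, opponent):
        return C  # C and D come from Action at the top of this module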
| models/necks/utils.py | AlexandreDh/ACAR-Net | 162 | 94437 |
<filename>models/necks/utils.py
import numpy as np
def bbox_jitter(bbox, num, delta):
    w, h = bbox[2] - bbox[0], bbox[3] - bbox[1]
    if num == 1:
        jitter = np.random.uniform(-delta, delta, 4)
        bboxes = [[max(bbox[0] + jitter[0] * w, 0.), min(bbox[1] + jitter[1] * h, 1.),
                   max(bbox[2] + jitter[2] * w, 0.), min(bbox[3] + jitter[3] * h, 1.)]]
        return bboxes
    bboxes = [bbox]
    jitter = np.random.uniform(-delta, delta, [num - 1, 4])
    for i in range(num - 1):
        bboxes.append([max(bbox[0] + jitter[i][0] * w, 0.), min(bbox[1] + jitter[i][1] * h, 1.),
                       max(bbox[2] + jitter[i][2] * w, 0.), min(bbox[3] + jitter[i][3] * h, 1.)])
    return bboxes


def get_bbox_after_aug(aug_info, bbox, aug_threshold=0.3):
    if aug_info is None:
        return bbox
    cbox = aug_info['crop_box']
    w = cbox[2] - cbox[0]
    h = cbox[3] - cbox[1]
    l = max(min(bbox[0], cbox[2]), cbox[0])
    r = max(min(bbox[2], cbox[2]), cbox[0])
    t = max(min(bbox[1], cbox[3]), cbox[1])
    b = max(min(bbox[3], cbox[3]), cbox[1])
    if (b - t) * (r - l) <= (bbox[3] - bbox[1]) * (bbox[2] - bbox[0]) * aug_threshold:
        return None
    ret = [(l - cbox[0]) / w, (t - cbox[1]) / h, (r - cbox[0]) / w, (b - cbox[1]) / h]
    if aug_info['flip']:
        ret = [1. - ret[2], ret[1], 1. - ret[0], ret[3]]
    pad_ratio = aug_info['pad_ratio']
    ret = [ret[0] / pad_ratio[0], ret[1] / pad_ratio[1], ret[2] / pad_ratio[0], ret[3] / pad_ratio[1]]
    return ret
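A worked example of get_bbox_after_aug with made-up values:

# Crop covering the right half of the frame, no flip, no padding:
# aug_info = {'crop_box': [0.5, 0.0, 1.0, 1.0], 'flip': False, 'pad_ratio': [1.0, 1.0]}
# get_bbox_after_aug(aug_info, [0.6, 0.2, 0.9, 0.8])
# -> [0.2, 0.2, 0.8, 0.8], i.e. the same box re-expressed in crop-relative coordinates.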
| scripts/data_analysis/active_areas.py | lixiny/ContactPose | 199 | 94460 |
<reponame>lixiny/ContactPose<filename>scripts/data_analysis/active_areas.py
# Copyright (c) Facebook, Inc. and its affiliates.
# Code by <NAME>
"""
Discovers 'active areas' i.e. areas on the object surface most frequently
touched by a certain part of the hand. See Figure 7 in the paper
https://arxiv.org/pdf/2007.09545.pdf.
"""
import init_paths
from utilities.import_open3d import * # need to import open3d before others
import json
from matplotlib import cm
import numpy as np
import os
from random import shuffle
from utilities.dataset import get_p_nums
import utilities.misc as mutils
osp = os.path
def discover_active_areas(finger_idx, part_idx, object_name, intent, p_nums=None,
color_thresh=0.4):
"""
finger_idx: 0->4 : thumb->little
part_idx: 0->3 : proximal to distal phalanges, 3 = finger tip
"""
p_nums = p_nums or get_p_nums(object_name, intent)
shuffle(p_nums)
data_dir = osp.join('data', 'contactpose_data')
# read object mesh
vertices = None
for p_num in p_nums:
filename = osp.join(data_dir, f'full{p_num}_{intent}', object_name,
f'{object_name}.ply')
if osp.isfile(filename):
mesh = o3dio.read_triangle_mesh(filename)
else:
print('{:s} does not exist'.format(filename))
continue
vertices = np.asarray(mesh.vertices)
break
if vertices is None:
print("no object model found")
return
line_ids = mutils.get_hand_line_ids()
n_lines_per_hand = len(line_ids)
n_parts_per_finger = 4
touched_by_part = np.zeros(len(vertices))
count = 0
for p_num in p_nums:
print(f'Processing full{p_num}_{intent} {object_name}')
# read contact from the mesh
filename = osp.join(data_dir, f'full{p_num}_{intent}', object_name,
f'{object_name}.ply')
if osp.isfile(filename):
mesh = o3dio.read_triangle_mesh(filename)
else:
print('{:s} does not exist'.format(filename))
continue
tex = np.asarray(mesh.vertex_colors)[:, 0]
tex = mutils.texture_proc(tex)
# read joints
filename = osp.join(data_dir, f'full{p_num}_{intent}', object_name,
'annotations.json')
try:
with open(filename, 'r') as f:
annotations = json.load(f)
except FileNotFoundError:
print('{:s} does not exist'.format(filename))
continue
ds = []
for hand_idx, hand in enumerate(annotations['hands']):
if hand['valid']:
joints = np.asarray(hand['joints'])
l0 = joints[line_ids[:, 0]]
l1 = joints[line_ids[:, 1]]
pl = mutils.closest_linesegment_point(l0, l1, vertices)
d = pl - vertices[:, np.newaxis, :]
d = np.linalg.norm(d, axis=2)
else:
d = np.inf * np.ones((len(vertices), n_lines_per_hand))
ds.append(d)
ds = np.hstack(ds)
hand_idxs, line_idxs = divmod(np.argmin(ds, axis=1), n_lines_per_hand)
finger_idxs, part_idxs = divmod(line_idxs, n_parts_per_finger)
this_touched_by_part = np.logical_and(
tex > color_thresh, np.logical_and(hand_idxs >= 0,
np.logical_and(finger_idxs == finger_idx, part_idxs == part_idx)))
touched_by_part += this_touched_by_part
count += 1
touched_by_part /= count
touched_by_part /= touched_by_part.max()
filename = osp.join('data',
f'{object_name}_{intent}_{finger_idx}_{part_idx}_active_areas.npy')
np.save(filename, touched_by_part)
print('{:s} saved'.format(filename))
def show_active_areas(finger_idx, part_idx, object_name, intent):
filename = osp.join('data', 'object_models', f'{object_name}.ply')
mesh = o3dio.read_triangle_mesh(filename)
mesh.compute_vertex_normals()
filename = osp.join('data',
f'{object_name}_{intent}_{finger_idx}_{part_idx}_active_areas.npy')
c = np.load(filename)
mesh.vertex_colors = o3du.Vector3dVector(cm.bwr(c)[:, :3])
o3dv.draw_geometries([mesh])
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--finger_idx', type=int, required=True,
help='0->4 : thumb->little', choices=(0, 1, 2, 3, 4))
parser.add_argument('--part_idx', type=int, required=True, choices=(0, 1, 2, 3),
help='0->3 : proximal to distal phalanges, 3 = finger tip')
parser.add_argument('--object_name', required=True)
parser.add_argument('--intent', required=True, choices=('use', 'handoff'))
parser.add_argument('--p_nums', default='1-50',
help='Participant numbers, comma or - separated. '
'Skipping means all participants')
parser.add_argument('--show', action='store_true')
args = parser.parse_args()
p_nums = args.p_nums
if '-' in p_nums:
first, last = p_nums.split('-')
p_nums = list(range(int(first), int(last)+1))
else:
p_nums = [int(p) for p in p_nums.split(',')]
if args.show:
show_active_areas(args.finger_idx, args.part_idx, args.object_name,
args.intent)
else:
discover_active_areas(args.finger_idx, args.part_idx, args.object_name,
args.intent, p_nums)
| sparse_merkle_tree/new_bintrie_hex.py | kevaundray/research | 1,351 | 94477 |
from ethereum.utils import sha3, encode_hex
class EphemDB():
def __init__(self, kv=None):
self.reads = 0
self.writes = 0
self.kv = kv or {}
def get(self, k):
self.reads += 1
return self.kv.get(k, None)
def put(self, k, v):
self.writes += 1
self.kv[k] = v
def delete(self, k):
del self.kv[k]
# Hashes of empty subtrees
zerohashes = [b'\x00' * 32]
for i in range(256):
zerohashes.insert(0, sha3(zerohashes[0] + zerohashes[0]))
# Create a new empty tree
def new_tree(db):
return zerohashes[0]
# Convert a binary key into an integer path value
def key_to_path(k):
return int.from_bytes(k, 'big')
tt256m1 = 2**256 - 1
# And convert back
def path_to_key(k):
return (k & tt256m1).to_bytes(32, 'big')
# Read a key from a given tree
def get(db, root, key):
v = root
path = key_to_path(key)
for i in range(0, 256, 4):
if v == zerohashes[i]:
return b'\x00' * 32
child = db.get(v)
if len(child) == 65:
if (path % 2**256) == key_to_path(child[1:33]):
return child[33:]
else:
return b'\x00' * 32
else:
index = (path >> 252) & 15
v = child[32*index: 32*index+32]
path <<= 4
return v
# Make a root hash of a (sub)tree with a single key/value pair
def make_single_key_hash(path, depth, value):
if depth == 256:
return value
elif (path >> 255) & 1:
return sha3(zerohashes[depth+1] + make_single_key_hash(path << 1, depth + 1, value))
else:
return sha3(make_single_key_hash(path << 1, depth + 1, value) + zerohashes[depth+1])
# Hash together 16 elements
def hash_16_els(vals):
assert len(vals) == 16
for _ in range(4):
vals = [sha3(vals[i] + vals[i+1]) for i in range(0, len(vals), 2)]
return vals[0]
# Make a root hash of a (sub)tree with two key/value pairs, and save intermediate nodes in the DB
def make_double_key_hash(db, path1, path2, depth, value1, value2):
if depth == 256:
raise Exception("Cannot fit two values into one slot!")
if ((path1 >> 252) & 15) == ((path2 >> 252) & 15):
children = [zerohashes[depth+4]] * 16
children[(path1 >> 252) & 15] = make_double_key_hash(db, path1 << 4, path2 << 4, depth + 4, value1, value2)
else:
Lkey = ((path1 >> 252) & 15)
L = make_single_key_hash(path1 << 4, depth + 4, value1)
Rkey = ((path2 >> 252) & 15)
R = make_single_key_hash(path2 << 4, depth + 4, value2)
db.put(L, b'\x01' + path_to_key(path1 << 4) + value1)
db.put(R, b'\x01' + path_to_key(path2 << 4) + value2)
children = [zerohashes[depth+4]] * 16
children[Lkey] = L
children[Rkey] = R
h = hash_16_els(children)
db.put(h, b''.join(children))
return h
# Update a tree with a given key/value pair
def update(db, root, key, value):
return _update(db, root, key_to_path(key), 0, value)
def _update(db, root, path, depth, value):
if depth == 256:
return value
# Update an empty subtree: make a single-key subtree
if root == zerohashes[depth]:
k = make_single_key_hash(path, depth, value)
db.put(k, b'\x01' + path_to_key(path) + value)
return k
child = db.get(root)
# Update a single-key subtree: make a double-key subtree
if len(child) == 65:
origpath, origvalue = key_to_path(child[1:33]), child[33:]
return make_double_key_hash(db, path, origpath, depth, value, origvalue)
# Update a multi-key subtree: recurse down
else:
assert len(child) == 512
index = (path >> 252) & 15
new_value = _update(db, child[index*32: index*32+32], path << 4, depth + 4, value)
new_children = [new_value if i == index else child[32*i:32*i+32] for i in range(16)]
h = hash_16_els(new_children)
db.put(h, b''.join(new_children))
return h
def multi_update(db, root, keys, values):
for k, v in zip(keys, values):
root = update(db, root, k, v)
return root
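A minimal usage sketch (assumes the ethereum.utils dependency imported at the top is installed):

# db = EphemDB()
# root = new_tree(db)
# key, value = b'\x12' * 32, b'\xab' * 32
# root = update(db, root, key, value)
# assert get(db, root, key) == value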
| rltime/policies/torch/distributions/distribution_layer.py | frederikschubert/rltime | 147 | 94503 |
import torch
class DistributionLayer(torch.nn.Module):
    """A distribution layer for action selection (e.g. for actor-critic)"""

    def __init__(self, action_space, input_size):
        """Initializes the distribution layer for the given action space and
        input_size (i.e. the output size of the model)
        """
        super().__init__()

    def forward(self, x):
        """Returns the relevant pytorch distribution output for input x,
        which can be used for action selection and distribution data
        """
        raise NotImplementedError
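As a concrete illustration (not part of the rltime codebase), a categorical head for a gym-style Discrete action space could look like this, assuming action_space exposes an integer n:

class CategoricalDistributionLayer(DistributionLayer):
    """Sketch of a concrete DistributionLayer for discrete actions."""
    def __init__(self, action_space, input_size):
        super().__init__(action_space, input_size)
        self.logits = torch.nn.Linear(input_size, action_space.n)

    def forward(self, x):
        # Categorical supports .sample(), .log_prob() and .entropy(), which is
        # what actor-critic action selection typically needs.
        return torch.distributions.Categorical(logits=self.logits(x))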
| notes_index.py | PrajvalRaval/PlainNotes | 300 | 94510 |
# -*- coding: utf-8 -*-
import sublime, sublime_plugin
import os, fnmatch, re
TAB_SIZE = 2
COL_WIDTH = 30
def settings():
return sublime.load_settings('Notes.sublime-settings')
def get_root():
project_settings = sublime.active_window().active_view().settings().get('PlainNotes')
if project_settings:
return os.path.normpath(os.path.expanduser(project_settings.get('root',settings().get("root"))))
else:
return os.path.normpath(os.path.expanduser(settings().get("root")))
def brain_dir():
brain_settings = settings().get("jotter_dir")
if brain_settings:
return brain_settings
else:
return ".brain"
class NotesBufferCommand(sublime_plugin.WindowCommand):
def run(self):
view = self.window.new_file()
view.set_scratch(True)
view.set_name(u"✎ Notes Index")
view.set_syntax_file('Packages/PlainNotes/Notes Index.hidden-tmLanguage')
view.settings().set('color_scheme', 'Packages/PlainNotes/Color Schemes/Notes-Index.hidden-tmTheme')
self.window.focus_view(view)
view.run_command('notes_buffer_refresh')
class NotesBufferRefreshCommand(sublime_plugin.TextCommand):
def run(self, edit):
v = self.view
v.set_read_only(False)
v.erase(edit, sublime.Region(0, self.view.size()))
root = get_root()
lines = self.list_files(root)
v.settings().set('notes_buffer_files', lines)
v.insert(edit, 0, u"\n".join([f[0] for f in lines]))
v.set_read_only(True)
def list_files(self, path):
lines = []
for root, dirs, files in os.walk(path, topdown=False):
level = root.replace(path, '').count(os.sep) - 1
indent = ' ' * TAB_SIZE * (level)
relpath = os.path.relpath(root, path)
if not relpath.startswith("."):
line_str = u'{0}▣ {1}'.format(indent, os.path.relpath(root, path))
lines.append((line_str, root))
if relpath.startswith(settings().get("archive_dir")):
line_str = u'{0}▣ {1}'.format(indent, 'Archive')
lines.append((line_str, root))
if not relpath.startswith(brain_dir()):
subindent = ' ' * TAB_SIZE * (level + 1)
for f in files:
for ext in settings().get("note_file_extensions"): # display only files with given extension
if fnmatch.fnmatch(f, "*." + ext):
line_str = u'{0}≡ {1}'.format(subindent, re.sub(r'\.note$', '', f))
line_path = os.path.normpath(os.path.join(root, f))
lines.append((line_str, line_path))
return lines
class NotesBufferOpenCommand(sublime_plugin.TextCommand):
def run(self, edit):
v = self.view
for sel in v.sel():
file_index = v.rowcol(sel.a)[0]
files = v.settings().get('notes_buffer_files')
file_path = files[file_index][1]
sublime.run_command("notes_open", {"file_path": file_path})
| tests/archive/__init__.py | ZabeMath/pywikibot | 326 | 94519 |
<gh_stars>100-1000
"""THIS DIRECTORY IS TO HOLD TESTS FOR ARCHIVED SCRIPTS."""
| tests/transformer/test_import.py | rahulbahal7/restricted-python | 236 | 94550 |
<gh_stars>100-1000
from RestrictedPython import compile_restricted_exec
import_errmsg = (
    'Line 1: "%s" is an invalid variable name because it starts with "_"')


def test_RestrictingNodeTransformer__visit_Import__1():
    """It allows importing a module."""
    result = compile_restricted_exec('import a')
    assert result.errors == ()
    assert result.code is not None

def test_RestrictingNodeTransformer__visit_Import__2():
    """It denies importing a module starting with `_`."""
    result = compile_restricted_exec('import _a')
    assert result.errors == (import_errmsg % '_a',)

def test_RestrictingNodeTransformer__visit_Import__3():
    """It denies importing a module starting with `_` as something."""
    result = compile_restricted_exec('import _a as m')
    assert result.errors == (import_errmsg % '_a',)

def test_RestrictingNodeTransformer__visit_Import__4():
    """It denies importing a module as something starting with `_`."""
    result = compile_restricted_exec('import a as _m')
    assert result.errors == (import_errmsg % '_m',)

def test_RestrictingNodeTransformer__visit_Import__5():
    """It allows importing from a module."""
    result = compile_restricted_exec('from a import m')
    assert result.errors == ()
    assert result.code is not None

def test_RestrictingNodeTransformer__visit_Import_6():
    """It allows importing from a module starting with `_`."""
    result = compile_restricted_exec('from _a import m')
    assert result.errors == ()
    assert result.code is not None

def test_RestrictingNodeTransformer__visit_Import__7():
    """It denies importing from a module as something starting with `_`."""
    result = compile_restricted_exec('from a import m as _n')
    assert result.errors == (import_errmsg % '_n',)

def test_RestrictingNodeTransformer__visit_Import__8():
    """It denies as-importing something starting with `_` from a module."""
    result = compile_restricted_exec('from a import _m as n')
    assert result.errors == (import_errmsg % '_m',)

def test_RestrictingNodeTransformer__visit_Import__9():
    """It denies relative from importing as something starting with `_`."""
    result = compile_restricted_exec('from .x import y as _leading_underscore')
    assert result.errors == (import_errmsg % '_leading_underscore',)

def test_RestrictingNodeTransformer__visit_Import_star__1():
    """Importing `*` is a SyntaxError in Python itself."""
    result = compile_restricted_exec('import *')
    assert result.errors == (
        "Line 1: SyntaxError: invalid syntax at statement: 'import *'",)
    assert result.code is None

def test_RestrictingNodeTransformer__visit_Import_star__2():
    """It denies importing `*` from a module."""
    result = compile_restricted_exec('from a import *')
    assert result.errors == ('Line 1: "*" imports are not allowed.',)
    assert result.code is None
| autobahn/wamp/gen/wamp/proto/PublisherFeatures.py | rapyuta-robotics/autobahn-python | 1,670 | 94563 |
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: proto
import flatbuffers
class PublisherFeatures(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsPublisherFeatures(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = PublisherFeatures()
x.Init(buf, n + offset)
return x
# PublisherFeatures
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# PublisherFeatures
def PublisherIdentification(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# PublisherFeatures
def PublisherExclusion(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# PublisherFeatures
def SubscriberBlackwhiteListing(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# PublisherFeatures
def AcknowledgeEventReceived(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# PublisherFeatures
def PayloadTransparency(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# PublisherFeatures
def PayloadEncryptionCryptobox(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
def PublisherFeaturesStart(builder): builder.StartObject(6)
def PublisherFeaturesAddPublisherIdentification(builder, publisherIdentification): builder.PrependBoolSlot(0, publisherIdentification, 0)
def PublisherFeaturesAddPublisherExclusion(builder, publisherExclusion): builder.PrependBoolSlot(1, publisherExclusion, 0)
def PublisherFeaturesAddSubscriberBlackwhiteListing(builder, subscriberBlackwhiteListing): builder.PrependBoolSlot(2, subscriberBlackwhiteListing, 0)
def PublisherFeaturesAddAcknowledgeEventReceived(builder, acknowledgeEventReceived): builder.PrependBoolSlot(3, acknowledgeEventReceived, 0)
def PublisherFeaturesAddPayloadTransparency(builder, payloadTransparency): builder.PrependBoolSlot(4, payloadTransparency, 0)
def PublisherFeaturesAddPayloadEncryptionCryptobox(builder, payloadEncryptionCryptobox): builder.PrependBoolSlot(5, payloadEncryptionCryptobox, 0)
def PublisherFeaturesEnd(builder): return builder.EndObject()
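A small round-trip sketch using the generated helpers above (assumes the flatbuffers Python package; not part of the generated file):

# import flatbuffers
# builder = flatbuffers.Builder(0)
# PublisherFeaturesStart(builder)
# PublisherFeaturesAddPayloadTransparency(builder, True)
# builder.Finish(PublisherFeaturesEnd(builder))
# features = PublisherFeatures.GetRootAsPublisherFeatures(builder.Output(), 0)
# assert features.PayloadTransparency()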
| samples/nta_enable_disable_cbqos_sources.py | john-westcott-iv/orionsdk-python | 177 | 94564 |
<reponame>john-westcott-iv/orionsdk-python
from __future__ import print_function
import re
import requests
import pprint
from orionsdk import SwisClient
def main():
    # Connect to SWIS
    server = 'localhost'
    username = 'admin'
    password = ''
    swis = SwisClient(server, username, password)

    # Disable/Enable CBQoS Sources
    node_caption = 'My testing router'
    query_results = swis.query('SELECT NodeID FROM Orion.Nodes WHERE Caption = @nodecaption_par', nodecaption_par=node_caption)
    node_id = query_results['results'][0]['NodeID']
    query_results = swis.query('SELECT Uri FROM Orion.Netflow.CBQoSSource WHERE NodeID = @nodeid_par', nodeid_par=node_id)
    enabled_flag = False  # Change this value to True if you want to enable sources
    props = {
        'Enabled': enabled_flag
    }
    for row in query_results['results']:
        swis.update(row['Uri'], **props)

    # Print results
    query_results = swis.query('SELECT CBQoSSourceID FROM Orion.Netflow.CBQoSSource WHERE NodeID = @nodeid_par and Enabled = @enabled_par',
                               nodeid_par=node_id, enabled_par=enabled_flag)
    print('Changed enabled status to {0} for {1} CBQoS sources for node with ID {2}'
          .format(enabled_flag, len(query_results['results']), node_id))


if __name__ == '__main__':
    main()
| Examples/AppKit/CocoaBindings/ControlledPreferences/FontNameToDisplayNameTransformer.py | Khan/pyobjc-framework-Cocoa | 132 | 94578 |
<gh_stars>100-1000
#
# FontNameToDisplayNameTransformer.py
# ControlledPreferences
#
# Converted by u.fiedler on 04.02.05.
# with great help from <NAME> - Thank you Bob!
#
# The original version was written in Objective-C by <NAME>
# at http://homepage.mac.com/mmalc/CocoaExamples/controllers.html
from Foundation import *
from AppKit import *
class FontNameToDisplayNameTransformer(NSValueTransformer):
    """
    Takes as input the fontName of a font as stored in user defaults,
    returns the displayed font name of the font to show to the user.
    """
    def transformedValueClass(cls):
        return NSString
    transformedValueClass = classmethod(transformedValueClass)

    def allowsReverseTransformation(cls):
        return False
    allowsReverseTransformation = classmethod(allowsReverseTransformation)

    def transformedValue_(self, aValue):
        font = NSFont.fontWithName_size_(aValue, 12)
        return font.displayName()
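A transformer like this is typically registered by name before the bindings reference it; an illustrative registration (not part of this file) would be:

# NSValueTransformer.setValueTransformer_forName_(
#     FontNameToDisplayNameTransformer.alloc().init(),
#     u"FontNameToDisplayNameTransformer")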
| analysis/tensorflow/fast_em.py | LinziJay/rappor | 797 | 94629 |
#!/usr/bin/python
"""
fast_em.py: Tensorflow implementation of expectation maximization for RAPPOR
association analysis.
TODO:
- Use TensorFlow ops for reading input (so that reading input can be
distributed)
- Reduce the number of ops (currently proportional to the number of reports).
May require new TensorFlow ops.
- Fix performance bug (v_split is probably being recomputed on every
iteration):
bin$ ./test.sh decode-assoc-cpp - 1.1 seconds (single-threaded C++)
bin$ ./test.sh decode-assoc-tensorflow - 226 seconds on GPU
"""
import sys
import numpy as np
import tensorflow as tf
def log(msg, *args):
if args:
msg = msg % args
print >>sys.stderr, msg
def ExpectTag(f, expected):
"""Read and consume a 4 byte tag from the given file."""
b = f.read(4)
if b != expected:
raise RuntimeError('Expected %r, got %r' % (expected, b))
def ReadListOfMatrices(f):
"""
Read a big list of conditional probability matrices from a binary file.
"""
ExpectTag(f, 'ne \0')
num_entries = np.fromfile(f, np.uint32, count=1)[0]
log('Number of entries: %d', num_entries)
ExpectTag(f, 'es \0')
entry_size = np.fromfile(f, np.uint32, count=1)[0]
log('Entry size: %d', entry_size)
ExpectTag(f, 'dat\0')
vec_length = num_entries * entry_size
v = np.fromfile(f, np.float64, count=vec_length)
log('Values read: %d', len(v))
log('v: %s', v[:10])
#print 'SUM', sum(v)
# NOTE: We're not reshaping because we're using one TensorFlow tensor object
# per matrix, since it makes the algorithm expressible with current
# TensorFlow ops.
#v = v.reshape((num_entries, entry_size))
return num_entries, entry_size, v
def WriteTag(f, tag):
if len(tag) != 3:
raise AssertionError("Tags should be 3 bytes. Got %r" % tag)
f.write(tag + '\0') # NUL terminated
def WriteResult(f, num_em_iters, pij):
WriteTag(f, 'emi')
emi = np.array([num_em_iters], np.uint32)
emi.tofile(f)
WriteTag(f, 'pij')
pij.tofile(f)
def DebugSum(num_entries, entry_size, v):
"""Sum the entries as a sanity check."""
cond_prob = tf.placeholder(tf.float64, shape=(num_entries * entry_size,))
debug_sum = tf.reduce_sum(cond_prob)
with tf.Session() as sess:
s = sess.run(debug_sum, feed_dict={cond_prob: v})
log('Debug sum: %f', s)
def BuildEmIter(num_entries, entry_size, v):
# Placeholder for the value from the previous iteration.
pij_in = tf.placeholder(tf.float64, shape=(entry_size,))
# split along dimension 0
# TODO:
# - make sure this doesn't get run for every EM iteration
# - investigate using tf.tile() instead? (this may cost more memory)
v_split = tf.split(0, num_entries, v)
z_numerator = [report * pij_in for report in v_split]
sum_z = [tf.reduce_sum(report) for report in z_numerator]
z = [z_numerator[i] / sum_z[i] for i in xrange(num_entries)]
# Concat per-report tensors and reshape. This is probably inefficient?
z_concat = tf.concat(0, z)
z_concat = tf.reshape(z_concat, [num_entries, entry_size])
# This whole expression represents an EM iteration. Bind the pij_in
# placeholder, and get a new estimation of Pij.
em_iter_expr = tf.reduce_sum(z_concat, 0) / num_entries
return pij_in, em_iter_expr
def RunEm(pij_in, entry_size, em_iter_expr, max_em_iters, epsilon=1e-6):
"""Run the iterative EM algorithm (using the TensorFlow API).
Args:
pij_in: placeholder bound to the previous iteration's Pij estimate
entry_size: total number of cells in each matrix
em_iter_expr: TensorFlow expression for one EM iteration (from BuildEmIter)
max_em_iters: maximum number of EM iterations
Returns:
pij: numpy.ndarray (e.g. vector of length 8)
"""
# Initial value is the uniform distribution
pij = np.ones(entry_size) / entry_size
i = 0 # visible outside loop
# Do EM iterations.
with tf.Session() as sess:
for i in xrange(max_em_iters):
print 'PIJ', pij
new_pij = sess.run(em_iter_expr, feed_dict={pij_in: pij})
dif = max(abs(new_pij - pij))
log('EM iteration %d, dif = %e', i, dif)
pij = new_pij
if dif < epsilon:
log('Early EM termination: %e < %e', dif, epsilon)
break
# If i = 9, then we did 10 iterations.
return i + 1, pij
def sep():
print '-' * 80
def main(argv):
input_path = argv[1]
output_path = argv[2]
max_em_iters = int(argv[3])
sep()
with open(input_path) as f:
num_entries, entry_size, cond_prob = ReadListOfMatrices(f)
sep()
DebugSum(num_entries, entry_size, cond_prob)
sep()
pij_in, em_iter_expr = BuildEmIter(num_entries, entry_size, cond_prob)
num_em_iters, pij = RunEm(pij_in, entry_size, em_iter_expr, max_em_iters)
sep()
log('Final Pij: %s', pij)
with open(output_path, 'wb') as f:
WriteResult(f, num_em_iters, pij)
log('Wrote %s', output_path)
if __name__ == '__main__':
try:
main(sys.argv)
except RuntimeError, e:
print >>sys.stderr, 'FATAL: %s' % e
sys.exit(1)
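For readers more comfortable with NumPy than the TensorFlow graph above, one EM iteration built by BuildEmIter corresponds roughly to the following sketch:

# v reshaped to (num_entries, entry_size), pij of shape (entry_size,):
# weighted = v * pij                                  # per-report, per-cell weights
# z = weighted / weighted.sum(axis=1, keepdims=True)  # normalise each report
# new_pij = z.sum(axis=0) / num_entries               # average over reports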
| tests/generic_relations/migrations/0001_initial.py | daleione/django-rest-framework | 17,395 | 94631 |
from django.db import migrations, models
class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
    ]

    operations = [
        migrations.CreateModel(
            name='Bookmark',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('url', models.URLField()),
            ],
        ),
        migrations.CreateModel(
            name='Note',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('tag', models.SlugField()),
                ('object_id', models.PositiveIntegerField()),
                ('content_type', models.ForeignKey(on_delete=models.CASCADE, to='contenttypes.ContentType')),
            ],
        ),
    ]
| docs/examples/viz_fiber_odf.py | iamansoni/fury | 149 | 94653 |
<gh_stars>100-1000
"""
====================================
Brain Fiber ODF Visualisation
====================================
This example demonstrates how to create a simple viewer for fiber
orientation distribution functions (ODF) using fury's odf_slicer.
"""
# First, we import some useful modules and methods.
import numpy as np
import nibabel as nib
from fury import actor, window, ui
from fury.data import read_viz_dmri, fetch_viz_dmri, fetch_viz_icons
from fury.utils import fix_winding_order
from dipy.reconst.shm import sh_to_sf_matrix
from dipy.data import get_sphere
###############################################################################
# Here, we fetch and load the fiber ODF volume to display. The ODF are
# expressed as spherical harmonics (SH) coefficients in a 3D grid.
fetch_viz_dmri()
fetch_viz_icons()
fodf_img = nib.load(read_viz_dmri('fodf.nii.gz'))
sh = fodf_img.get_fdata()
affine = fodf_img.affine
grid_shape = sh.shape[:-1]
###############################################################################
# We then define a low resolution sphere used to visualize SH coefficients
# as spherical functions (SF) as well as a matrix `B_low` to project SH
# onto the sphere.
sphere_low = get_sphere('repulsion100')
B_low = sh_to_sf_matrix(sphere_low, 8, return_inv=False)
###############################################################################
# Now, we create a slicer for each orientation to display a slice in
# the middle of the volume and we add them to a `scene`.
# Change these values to test various parameter combinations.
scale = 0.5
norm = False
colormap = None
radial_scale = True
opacity = 1.0
global_cm = False
# ODF slicer for axial slice
odf_actor_z = actor.odf_slicer(sh, affine=affine, sphere=sphere_low,
scale=scale, norm=norm,
radial_scale=radial_scale, opacity=opacity,
colormap=colormap, global_cm=global_cm,
B_matrix=B_low)
# ODF slicer for coronal slice
odf_actor_y = actor.odf_slicer(sh, affine=affine, sphere=sphere_low,
scale=scale, norm=norm,
radial_scale=radial_scale, opacity=opacity,
colormap=colormap, global_cm=global_cm,
B_matrix=B_low)
odf_actor_y.display_extent(0, grid_shape[0] - 1, grid_shape[1]//2,
grid_shape[1]//2, 0, grid_shape[2] - 1)
# ODF slicer for sagittal slice
odf_actor_x = actor.odf_slicer(sh, affine=affine, sphere=sphere_low,
scale=scale, norm=norm,
radial_scale=radial_scale, opacity=opacity,
colormap=colormap, global_cm=global_cm,
B_matrix=B_low)
odf_actor_x.display_extent(grid_shape[0]//2, grid_shape[0]//2, 0,
grid_shape[1] - 1, 0, grid_shape[2] - 1)
scene = window.Scene()
scene.add(odf_actor_z)
scene.add(odf_actor_y)
scene.add(odf_actor_x)
show_m = window.ShowManager(scene, reset_camera=True, size=(1200, 900))
show_m.initialize()
###############################################################################
# Now that we have a `ShowManager` containing our slicer, we can go on and
# configure our UI for changing the slices to visualize.
line_slider_z = ui.LineSlider2D(min_value=0,
max_value=grid_shape[2] - 1,
initial_value=grid_shape[2] / 2,
text_template="{value:.0f}",
length=140)
line_slider_y = ui.LineSlider2D(min_value=0,
max_value=grid_shape[1] - 1,
initial_value=grid_shape[1] / 2,
text_template="{value:.0f}",
length=140)
line_slider_x = ui.LineSlider2D(min_value=0,
max_value=grid_shape[0] - 1,
initial_value=grid_shape[0] / 2,
text_template="{value:.0f}",
length=140)
###############################################################################
# We also define a high resolution sphere to demonstrate the capability to
# dynamically change the sphere used for SH to SF projection.
sphere_high = get_sphere('symmetric362')
# We fix the order of the faces' three vertices to a clockwise winding. This
# ensures all faces have a normal going away from the center of the sphere.
sphere_high.faces = fix_winding_order(sphere_high.vertices,
sphere_high.faces, True)
B_high = sh_to_sf_matrix(sphere_high, 8, return_inv=False)
###############################################################################
# We add a combobox for choosing the sphere resolution during execution.
sphere_dict = {'Low resolution': (sphere_low, B_low),
'High resolution': (sphere_high, B_high)}
combobox = ui.ComboBox2D(items=list(sphere_dict))
scene.add(combobox)
###############################################################################
# Here we will write callbacks for the sliders and combo box and register them.
def change_slice_z(slider):
i = int(np.round(slider.value))
odf_actor_z.slice_along_axis(i)
def change_slice_y(slider):
i = int(np.round(slider.value))
odf_actor_y.slice_along_axis(i, 'yaxis')
def change_slice_x(slider):
i = int(np.round(slider.value))
odf_actor_x.slice_along_axis(i, 'xaxis')
def change_sphere(combobox):
sphere, B = sphere_dict[combobox.selected_text]
odf_actor_x.update_sphere(sphere.vertices, sphere.faces, B)
odf_actor_y.update_sphere(sphere.vertices, sphere.faces, B)
odf_actor_z.update_sphere(sphere.vertices, sphere.faces, B)
line_slider_z.on_change = change_slice_z
line_slider_y.on_change = change_slice_y
line_slider_x.on_change = change_slice_x
combobox.on_change = change_sphere
###############################################################################
# We then add labels for the sliders and position them inside a panel.
def build_label(text):
label = ui.TextBlock2D()
label.message = text
label.font_size = 18
label.font_family = 'Arial'
label.justification = 'left'
label.bold = False
label.italic = False
label.shadow = False
label.background_color = (0, 0, 0)
label.color = (1, 1, 1)
return label
line_slider_label_z = build_label(text="Z Slice")
line_slider_label_y = build_label(text="Y Slice")
line_slider_label_x = build_label(text="X Slice")
panel = ui.Panel2D(size=(300, 200),
color=(1, 1, 1),
opacity=0.1,
align="right")
panel.center = (1030, 120)
panel.add_element(line_slider_label_x, (0.1, 0.75))
panel.add_element(line_slider_x, (0.38, 0.75))
panel.add_element(line_slider_label_y, (0.1, 0.55))
panel.add_element(line_slider_y, (0.38, 0.55))
panel.add_element(line_slider_label_z, (0.1, 0.35))
panel.add_element(line_slider_z, (0.38, 0.35))
show_m.scene.add(panel)
###############################################################################
# Then, we can render all the widgets and everything else in the screen and
# start the interaction using ``show_m.start()``.
#
# However, if you change the window size, the panel will not update its
# position properly. The solution to this issue is to update the position of
# the panel using its ``re_align`` method every time the window size changes.
size = scene.GetSize()
def win_callback(obj, _event):
global size
if size != obj.GetSize():
size_old = size
size = obj.GetSize()
size_change = [size[0] - size_old[0], 0]
panel.re_align(size_change)
show_m.initialize()
###############################################################################
# Finally, please set the following variable to ``True`` to interact with the
# datasets in 3D.
interactive = False
if interactive:
show_m.add_window_callback(win_callback)
show_m.render()
show_m.start()
else:
window.record(scene, out_path='odf_slicer_3D.png', size=(1200, 900),
reset_camera=False)
del show_m
|
doc/samples/uptodate_callable.py
|
m4ta1l/doit
| 1,390 |
94721
|
def fake_get_value_from_db():
return 5
def check_outdated():
total = fake_get_value_from_db()
return total > 10
def task_put_more_stuff_in_db():
def put_stuff(): pass
return {'actions': [put_stuff],
'uptodate': [check_outdated],
}
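# How doit uses this: every callable listed in 'uptodate' acts as a predicate.
# When check_outdated() returns True the task is treated as up to date and
# put_stuff() is skipped; when it returns False (total <= 10 here) the task
# runs again on the next invocation, e.g.:
#
#   $ doit put_more_stuff_in_db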
|
stochastic/processes/__init__.py
|
zaczw/stochastic
| 268 |
94736
|
from stochastic.processes.continuous import *
from stochastic.processes.diffusion import *
from stochastic.processes.discrete import *
from stochastic.processes.noise import *
|
samcli/commands/delete/__init__.py
|
torresxb1/aws-sam-cli
| 2,959 |
94739
|
"""
`sam delete` command
"""
# Expose the cli object here
from .command import cli # noqa
|
scripts/compute_rmse.py
|
yarny/gbdt
| 335 |
94750
|
<filename>scripts/compute_rmse.py
#!/usr/bin/python
import math
import sys
def ComputeRmse(scores, responses):
    return math.sqrt(sum([(s-r)*(s-r) for s, r in zip(scores, responses)]) / len(scores))
def main():
    scores = [float(line) for line in open(sys.argv[1]).readlines() if not line.startswith('#')]
    responses = [float(line) for line in open(sys.argv[2]).readlines() if not line.startswith('#')]
    assert len(scores) == len(responses)
    print ComputeRmse(scores, responses)
if __name__ == '__main__':
main()
|
tools/calculate_weights.py
|
swafe/DeepSegmentor
| 150 |
94802
|
<gh_stars>100-1000
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
import os
import glob
import cv2
import numpy as np
import statistics
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', default='',
help='/path/to/segmentation')
args = parser.parse_args()
def get_weights(labels_dict):
total_pixels = 0
for lab in labels_dict:
total_pixels += labels_dict[lab]
for lab in labels_dict:
labels_dict[lab] /= float(total_pixels)
return labels_dict
def calculate_weights(im_path):
assert os.path.isdir(im_path)
img_list = glob.glob(os.path.join(im_path, '*.png'))
labels_dict = {}
for im_path in img_list:
im = cv2.imread(im_path, cv2.IMREAD_UNCHANGED)
labels, counts = np.unique(im, return_counts=True)
for lab, cnt in zip(labels, counts):
if lab not in labels_dict:
labels_dict[lab] = 0
labels_dict[lab] += cnt
return get_weights(labels_dict)
def reverse_weight(w):
"""
Median Frequency Balancing: alpha_c = median_freq/freq(c).
median_freq is the median of these frequencies
    freq(c) is the number of pixels of class c divided by the total number of pixels in images where c is present
"""
assert len(w) > 0, "Expected a non-empty weight dict."
values = [w[k] for k in w]
if len(w) == 1:
value = 1.0
elif len(w) == 2:
value = min(values)
else:
# Median Frequency Balancing
value = statistics.median(values)
for k in w:
w[k] = value/(w[k]+1e-10)
return w
if __name__ == '__main__':
weights = calculate_weights(args.data_path)
print(weights)
# {0: 0.9708725873161764, 255: 0.02912741268382353}
print(reverse_weight(weights))
# {0: 0.030001272114749396, 255: 0.9999999965668079}
|
mainapp/migrations/0033_auto_20180817_1413.py
|
sndp487/rescuekerala
| 657 |
94808
|
<gh_stars>100-1000
# Generated by Django 2.1 on 2018-08-17 08:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("mainapp", "0032_auto_20180817_0444"),
]
operations = [
migrations.CreateModel(
name="Announcements",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID",),),
("dateadded", models.DateTimeField(auto_now_add=True)),
("name", models.CharField(max_length=50)),
("link", models.CharField(max_length=100)),
(
"district",
models.CharField(
choices=[
("alp", "Alappuzha - ആലപ്പുഴ"),
("ekm", "Ernakulam - എറണാകുളം"),
("idk", "Idukki - ഇടുക്കി"),
("knr", "Kannur - കണ്ണൂർ"),
("ksr", "Kasaragod - കാസർഗോഡ്"),
("kol", "Kollam - കൊല്ലം"),
("ktm", "Kottayam - കോട്ടയം"),
("koz", "Kozhikode - കോഴിക്കോട്"),
("mpm", "Malappuram - മലപ്പുറം"),
("pkd", "Palakkad - പാലക്കാട്"),
("ptm", "Pathanamthitta - പത്തനംതിട്ട"),
("tvm", "Thiruvananthapuram - തിരുവനന്തപുരം"),
("tcr", "Thrissur - തൃശ്ശൂർ"),
("wnd", "Wayanad - വയനാട്"),
],
max_length=15,
verbose_name="Districts - ജില്ല",
),
),
(
"category",
models.IntegerField(
choices=[(0, "General"), (1, "Food"), (2, "Camps"), (3, "Weather"),], verbose_name="Type",
),
),
],
),
migrations.AlterModelOptions(name="rescuecamp", options={"verbose_name": "Relief Camp"},),
]
|
cloudconvert/environment_vars.py
|
claudep/cloudconvert-python
| 153 |
94814
|
"""Environment Variables to be used inside the CloudConvert-Python-REST-SDK"""
CLOUDCONVERT_API_KEY = "API_KEY"
"""Environment variable defining the Cloud Convert REST API default
credentials as Access Token."""
CLOUDCONVERT_SANDBOX = "true"
"""Environment variable defining if the sandbox API is used instead of the live API"""
|
leetcode/10.regular-expression-matching.py
|
geemaple/algorithm
| 177 |
94816
|
# f[i][j] = f[i - 1][j - 1] where s[i - 1] == p[j - 1] || p[j - 1] == '.' case p[j - 1] != '*'
# f[i][j] = f[i][j - 2] or f[i - 1][j] where s[i - 1] == p[j - 2] || p[j - 2] == '.' case p[j - 1] == '*'
class Solution(object):
def isMatch(self, s, p):
"""
:type s: str
:type p: str
:rtype: bool
"""
m = len(s)
n = len(p)
table = [[False for _ in range(n + 1)] for _ in range(m + 1)]
for i in range(m + 1):
for j in range(n + 1):
if i == 0 and j == 0:
table[i][j] = True
continue
if j == 0:
table[i][j] = False
continue
if p[j - 1] != '*':
if i - 1 >= 0 and (p[j - 1] == '.' or p[j - 1] == s[i- 1]):
table[i][j] = table[i - 1][j - 1]
else:
if j - 2 >= 0:
table[i][j] = table[i][j - 2]
if i - 1 >= 0 and (p[j - 2] == '.' or p[j - 2] == s[i - 1]):
table[i][j] = table[i][j] or table[i - 1][j]
return table[m][n]
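# Quick usage sketch of the recurrence above (illustrative checks only):
if __name__ == '__main__':
    solver = Solution()
    print(solver.isMatch("aa", "a"))                    # False
    print(solver.isMatch("aab", "c*a*b"))               # True
    print(solver.isMatch("mississippi", "mis*is*p*."))  # False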
|
server/bsdfs/models.py
|
paulu/opensurfaces
| 137 |
94823
|
import math
import json
from colormath.color_objects import RGBColor
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from common.models import UserBase, ResultBase
from common.utils import save_obj_attr_base64_image, get_content_tuple, \
get_opensurfaces_storage
from shapes.models import Shape, MaterialShape, MaterialShapeLabelBase
BSDF_VERSIONS = ("wd", ) # just Ward for now
STORAGE = get_opensurfaces_storage()
class EnvironmentMap(UserBase):
""" Environment map used with a BRDF """
name = models.CharField(max_length=128, unique=True)
# Tonemapping parameters for [Reinhard 2002, Equation 4]
# The log_average luminance is baked into the scale as a
# precomputation.
# scale: key / log_average luminance
# white: values higher than this will be set to pure white
tonemap_scale = models.FloatField()
tonemap_white = models.FloatField()
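# The two fields above are intended for [Reinhard 2002, Equation 4]; with the
# log-average luminance already folded into `tonemap_scale`, a minimal sketch
# of how a renderer might apply them to a relative luminance value
# (hypothetical helper, not part of this module's API) is:
#
#   def reinhard_tonemap(luminance, scale, white):
#       scaled = scale * luminance
#       return scaled * (1.0 + scaled / (white * white)) / (1.0 + scaled)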
class ShapeBsdfLabelBase(MaterialShapeLabelBase):
""" Base class of BSDF labels"""
# json-encoded dictionary of counts (where each count is the number of
# times a UI element was adjusted)
edit_dict = models.TextField(blank=True)
# sum of the values in edit_dict
edit_sum = models.IntegerField(default=0)
# number of nonzero values in edit_dict
edit_nnz = models.IntegerField(default=0)
# environment map used to light the blob
envmap = models.ForeignKey(EnvironmentMap, null=True, blank=True)
# screenshot
image_blob = models.ImageField(
upload_to='blobs', null=True, blank=True, max_length=255,
storage=STORAGE)
# option to give up
give_up = models.BooleanField(default=False)
give_up_msg = models.TextField(blank=True)
# reverse generic relationship for quality votes
qualities = generic.GenericRelation(
'ShapeBsdfQuality', content_type_field='content_type',
object_id_field='object_id')
# first voting stage: color matches?
color_correct = models.NullBooleanField()
# further from 0: more confident in assignment of color_correct
color_correct_score = models.FloatField(null=True, blank=True)
# second voting stage: gloss matches?
gloss_correct = models.NullBooleanField()
# further from 0: more confident in assignment of gloss_correct
gloss_correct_score = models.FloatField(null=True, blank=True)
# The method by which the reflectance widget was initialized
INIT_METHODS = (
('KM', 'k-means color, middle value gloss'),
('KR', 'k-means color, random gloss')
)
init_method_to_str = dict((k, v) for (k, v) in INIT_METHODS)
init_method = models.CharField(max_length=2, choices=INIT_METHODS)
# L*a*b* colorspace for matching blobs
color_L = models.FloatField(blank=True, null=True)
color_a = models.FloatField(blank=True, null=True)
color_b = models.FloatField(blank=True, null=True)
def better_than(self, other):
if self is other:
return False
elif not other:
return True
elif self.invalid != other.invalid:
return not self.invalid
elif bool(self.color_correct) != bool(other.color_correct):
return bool(self.color_correct)
elif bool(self.gloss_correct) != bool(other.gloss_correct):
return bool(self.gloss_correct)
else:
try:
return (self.color_correct_score + self.gloss_correct_score >
other.color_correct_score + other.gloss_correct_score)
except TypeError:
return True
def get_entry_dict(self):
return {'id': self.id, 'shape': self.shape.get_entry_dict()}
def mark_invalid(self, *args, **kwargs):
self.color_correct = False
self.gloss_correct = False
        super(ShapeBsdfLabelBase, self).mark_invalid(*args, **kwargs)
class Meta:
abstract = True
ordering = ['-edit_nnz', '-time_ms']
#@classmethod
#def mturk_needs_more(cls, instance):
#""" Return True if more of this object should be scheduled """
#correct_list = cls.objects \
#.filter(shape=instance.shape) \
#.values_list('color_correct', 'gloss_correct')
## only schedule more if all were graded and all were rejected
#return ((not correct_list) or
#all((c[0] is False or c[1] is False) for c in correct_list))
#@classmethod
#def mturk_badness(cls, mturk_assignment):
#""" Return fraction of bad responses for this assignment """
#labels = cls.objects.filter(mturk_assignment=mturk_assignment)
#if (not labels or any((l.color_correct_score is None and
#l.gloss_correct_score is None) for l in labels)):
#return None
#bad = sum(1 for l in labels if
#l.admin_score <= -2 or
#l.time_ms is None or
#l.edit_nnz <= 1 or
#(l.color_correct_score is not None and
#l.color_correct_score < -0.5 and l.time_ms < 60000) or
#(l.gloss_correct_score is not None and
#l.gloss_correct_score < -0.5 and l.time_ms < 60000))
#if bad > 0 or any(l.color_correct_score < 0 or l.gloss_correct_score < 0 for l in labels):
#return float(bad) / float(len(labels))
## reward good rectifications
#return sum(-1.0 for l in labels if
#l.color_correct_score > 0.5 and
#l.gloss_correct_score > 0.5 and
#l.time_ms > 10000)
class ShapeBsdfLabel_mf(ShapeBsdfLabelBase):
"""
Microfacet BSDF model
** CURRENTLY UNUSED **
"""
shape = models.ForeignKey(MaterialShape, related_name='bsdfs_mf')
BSDF_TYPES = (('P', 'plastic'), ('C', 'conductor'))
bsdf_type_to_str = {k: v for k, v in BSDF_TYPES}
str_to_bsdf_type = {v: k for k, v in BSDF_TYPES}
# plastic or conductor
bsdf_type = models.CharField(max_length=1, choices=BSDF_TYPES)
alpha_index = models.IntegerField() # integer index into roughness table
specular = models.FloatField() # specular weight
color_sRGB = models.CharField(max_length=6) # "RRGGBB" hex
def type_name(self):
return ShapeBsdfLabel_mf.bsdf_type_to_str[self.bsdf_type]
@staticmethod
def version():
return 'mf'
def __unicode__(self):
return '%s alpha_index=%s color=%s' % (
            self.bsdf_type, self.alpha_index, self.color_sRGB)
def get_thumb_template(self):
return 'bsdf_mf_shape_thumb.html'
@staticmethod
def mturk_submit(user, hit_contents, results, time_ms, time_active_ms, version,
mturk_assignment=None, **kwargs):
""" Add new instances from a mturk HIT after the user clicks [submit] """
if unicode(version) != u'1.0':
raise ValueError("Unknown version: '%s'" % version)
if not hit_contents:
return {}
raise NotImplementedError("TODO")
class ShapeBsdfLabel_wd(ShapeBsdfLabelBase):
"""
Ward BSDF model.
    Note: This is the "balanced" Ward-dual model with energy balance at all angles
from [<NAME>., and <NAME>. A new ward brdf model with bounded
albedo. In Computer Graphics Forum (2010), vol. 29, Wiley Online Library,
pp. 1391-1398.]. We use the implementation from Mitsuba available at
http://www.mitsuba-renderer.org.
"""
shape = models.ForeignKey(MaterialShape, related_name='bsdfs_wd')
# c in [0, 1]
contrast = models.FloatField()
# d in [0, 15] discretized alpha
doi = models.IntegerField()
# true if the 'rho_s only' was selected, false if traditional ward
metallic = models.BooleanField(default=False)
# color in "#RRGGBB" sRGB hex format
color = models.CharField(max_length=7)
@staticmethod
def version():
return 'wd'
def __unicode__(self):
return 'ward sRGB=%s' % (self.color)
def get_thumb_template(self):
return 'bsdf_wd_shape_thumb.html'
def c(self):
return self.contrast
def d(self):
return 1 - (0.001 + (15 - self.doi) * 0.2 / 15)
def d_edits(self):
return json.loads(self.edit_dict)['doi']
def c_edits(self):
return json.loads(self.edit_dict)['contrast']
def alpha(self):
return 1 - self.d()
def rho_s(self):
rho_s = self.rho()[1]
return '%0.3f, %0.3f, %0.3f' % rho_s
def rho_d(self):
rho_d = self.rho()[0]
return '%0.3f, %0.3f, %0.3f' % rho_d
def rho(self):
if not hasattr(self, '_rho'):
rgb = self.colormath_rgb()
v = self.v()
# approximate cielab_inverse_f.
# we have V instead of L, so the same inverse formula doesn't
# apply anyway.
finv = v ** 3
if self.metallic:
rho_s = finv
s = rho_s / (v * 255.0) if v > 0 else 0
self._rho = (
(0, 0, 0),
(s * rgb.rgb_r, s * rgb.rgb_g, s * rgb.rgb_b),
)
else:
rho_d = finv
t = self.contrast + (rho_d * 0.5) ** (1.0 / 3.0)
rho_s = t ** 3 - rho_d * 0.5
rho_t = rho_s + rho_d
if rho_t > 1:
rho_s /= rho_t
rho_d /= rho_t
s = rho_d / (v * 255.0) if v > 0 else 0
self._rho = (
(s * rgb.rgb_r, s * rgb.rgb_g, s * rgb.rgb_b),
(rho_s, rho_s, rho_s)
)
return self._rho
def v(self):
""" Return the V component of HSV, in the range [0, 1] """
rgb = self.colormath_rgb()
return max(rgb.rgb_r, rgb.rgb_b, rgb.rgb_g) / 255.0
def colormath_rgb(self):
if not hasattr(self, '_colormath_rgb'):
self._colormath_rgb = RGBColor()
self._colormath_rgb.set_from_rgb_hex(self.color)
return self._colormath_rgb
def colormath_lab(self):
if not hasattr(self, '_colormath_lab'):
self._colormath_lab = self.colormath_rgb().convert_to('lab')
return self._colormath_lab
def color_distance(self, bsdf):
return math.sqrt((self.color_L - bsdf.color_L) ** 2 +
(self.color_a - bsdf.color_a) ** 2 +
(self.color_b - bsdf.color_b) ** 2)
def gloss_distance(self, bsdf):
return math.sqrt((self.c() - bsdf.c()) ** 2 +
(1.78 * (self.d() - bsdf.d())) ** 2)
def save(self, *args, **kwargs):
if (self.color_L is None) or (self.color_a is None) or (self.color_b is None):
c = RGBColor()
c.set_from_rgb_hex(self.color)
c = c.convert_to('lab')
self.color_L = c.lab_l
self.color_a = c.lab_a
self.color_b = c.lab_b
super(ShapeBsdfLabel_wd, self).save(*args, **kwargs)
@staticmethod
def mturk_submit(user, hit_contents, results, time_ms, time_active_ms, version,
experiment, mturk_assignment=None, **kwargs):
""" Add new instances from a mturk HIT after the user clicks [submit] """
if unicode(version) != u'1.0':
raise ValueError("Unknown version: '%s'" % version)
if not hit_contents:
return {}
new_objects = {}
for shape in hit_contents:
d = results[unicode(shape.id)]
shape_time_ms = time_ms[unicode(shape.id)]
shape_time_active_ms = time_active_ms[unicode(shape.id)]
edit_dict = d[u'edit']
edit_sum = sum(int(edit_dict[k]) for k in edit_dict)
edit_nnz = sum(int(int(edit_dict[k]) > 0) for k in edit_dict)
init_method = 'KR'
envmap = EnvironmentMap.objects.get(
id=json.loads(experiment.variant)['envmap_id'])
doi = int(d[u'doi'])
contrast = float(d[u'contrast'])
metallic = (int(d[u'type']) == 1)
color = d['color']
give_up = d[u'give_up']
give_up_msg = d[u'give_up_msg']
bsdf, bsdf_created = shape.bsdfs_wd.get_or_create(
user=user,
mturk_assignment=mturk_assignment,
time_ms=shape_time_ms,
time_active_ms=shape_time_active_ms,
doi=doi,
contrast=contrast,
metallic=metallic,
color=color,
give_up=give_up,
give_up_msg=give_up_msg,
edit_dict=json.dumps(edit_dict),
edit_sum=edit_sum,
edit_nnz=edit_nnz,
envmap=envmap,
init_method=init_method,
)
if bsdf_created:
new_objects[get_content_tuple(shape)] = [bsdf]
if ((not bsdf.image_blob) and 'screenshot' in d and d['screenshot'].startswith('data:image/')):
save_obj_attr_base64_image(bsdf, 'image_blob', d['screenshot'])
return new_objects
class ShapeBsdfQuality(ResultBase):
""" Vote on whether or not a BSDF matches its shape. The foreign key to
the BSDF is generic since there are multiple BSDF models. """
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField(db_index=True)
bsdf = generic.GenericForeignKey('content_type', 'object_id')
color_correct = models.NullBooleanField()
gloss_correct = models.NullBooleanField()
canttell = models.NullBooleanField()
def __unicode__(self):
if self.canttell:
return "can't tell"
else:
if self.has_color():
if self.has_gloss():
return 'gloss: %s, color: %s' % (
self.gloss_correct, self.color_correct,
)
return 'color: %s' % self.color_correct
        elif self.has_gloss():
return 'gloss: %s' % self.gloss_correct
else:
return 'INVALID LABEL'
def get_thumb_template(self):
return 'bsdf_%s_shape_label_thumb.html' % (
self.content_type.model_class().version())
def has_color(self):
return self.color_correct is not None
def has_gloss(self):
return self.gloss_correct is not None
class Meta:
verbose_name = "BSDF quality vote"
verbose_name_plural = "BSDF quality votes"
#@classmethod
#def mturk_badness(cls, mturk_assignment):
#""" Return fraction of bad responses for this assignment """
#labels = cls.objects.filter(mturk_assignment=mturk_assignment)
#if not labels:
#return None
#if any((l.color_correct is not None and
#l.bsdf.color_correct_score is None) or
#(l.gloss_correct is not None and
#l.bsdf.gloss_correct_score is None)
#for l in labels):
#return None
#bad = sum(1 for l in labels if
#(l.color_correct is not None and
#l.color_correct != l.bsdf.color_correct and
#abs(l.bsdf.color_correct_score) > 0.5) or
#(l.gloss_correct is not None and
#l.gloss_correct != l.bsdf.gloss_correct and
#abs(l.bsdf.color_correct_score) > 0.5))
#return float(bad) / float(len(labels))
#@classmethod
#def mturk_badness_reason(cls, mturk_assignment):
#labels = cls.objects.filter(mturk_assignment=mturk_assignment)
#T = sum(1 for l in labels if
#(l.color_correct is True and l.bsdf.color_correct is False) or
#(l.gloss_correct is True and l.bsdf.gloss_correct is False))
#F = sum(1 for l in labels if
#(l.color_correct is False and l.bsdf.color_correct is True) or
#(l.gloss_correct is False and l.bsdf.gloss_correct is True))
#if T > F * 1.5:
#return 'T'
#elif F > T * 1.5:
#return 'F'
#return None
@staticmethod
def mturk_submit(user, hit_contents, results, time_ms, time_active_ms, version,
experiment, mturk_assignment=None, **kwargs):
""" Add new instances from a mturk HIT after the user clicks [submit] """
if unicode(version) != u'1.0':
raise ValueError("Unknown version: '%s'" % version)
if not hit_contents:
return {}
# best we can do is average
avg_time_ms = time_ms / len(hit_contents)
avg_time_active_ms = time_active_ms / len(hit_contents)
new_objects = {}
for bsdf in hit_contents:
selected = (str(results[unicode(bsdf.id)]['selected']).lower()
== 'true')
canttell = (str(results[unicode(bsdf.id)]['canttell']).lower()
== 'true')
color_correct = None
gloss_correct = None
if 'color' in experiment.slug:
color_correct = selected
elif 'gloss' in experiment.slug:
gloss_correct = selected
content_tuple = get_content_tuple(bsdf)
new_obj, created = ShapeBsdfQuality.objects.get_or_create(
content_type=ContentType.objects.get_for_id(content_tuple[0]),
object_id=content_tuple[1],
user=user,
mturk_assignment=mturk_assignment,
time_ms=avg_time_ms,
time_active_ms=avg_time_active_ms,
color_correct=color_correct,
gloss_correct=gloss_correct,
canttell=canttell
)
if created:
new_objects[content_tuple] = [new_obj]
return new_objects
|
example/paywall/apps.py
|
prog32/django-getpaid
| 220 |
94855
|
from django.apps import AppConfig
class Config(AppConfig):
name = "paywall"
verbose_name = "paywall simulator"
label = "paywall"
|
tests/modules/matrix_attention/cosine_matrix_attention_test.py
|
MSLars/allennlp
| 11,433 |
94860
|
import torch
from numpy.testing import assert_almost_equal
import numpy
from allennlp.common import Params
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.modules.matrix_attention import CosineMatrixAttention
from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention
class TestCosineMatrixAttention(AllenNlpTestCase):
def test_can_init_cosine(self):
legacy_attention = MatrixAttention.from_params(Params({"type": "cosine"}))
isinstance(legacy_attention, CosineMatrixAttention)
def test_cosine_similarity(self):
        # Example use case: a batch of size 2,
        # with a time dimension (e.g. sentences of length 2) where each word is a vector of length 3.
        # This input is compared against another input of the same shape.
output = CosineMatrixAttention()(
torch.FloatTensor([[[0, 0, 0], [4, 5, 6]], [[-7, -8, -9], [10, 11, 12]]]),
torch.FloatTensor([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]),
)
# For the first batch there is
# no correlation between the first words of the input matrix
# but perfect correlation for the second word
# For the second batch there is
# negative correlation for the first words
# correlation for the second word
assert_almost_equal(
output.numpy(), numpy.array([[[0, 0], [0.97, 1]], [[-1, -0.99], [0.99, 1]]]), decimal=2
)
|
forge/blade/systems/recipe.py
|
jarbus/neural-mmo
| 1,450 |
94872
|
from forge.blade import lib
class Recipe:
def __init__(self, *args, amtMade=1):
self.amtMade = amtMade
self.blueprint = lib.MultiSet()
for i in range(0, len(args), 2):
inp = args[i]
amt = args[i+1]
self.blueprint.add(inp, amt)
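# Usage sketch ('Wood' and 'Ore' are hypothetical item types, not defined in
# this module): positional arguments alternate between an input and the amount
# required, so Recipe(Wood, 2, Ore, 1, amtMade=3) builds a blueprint that
# consumes 2 Wood and 1 Ore and produces 3 of the result.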
|
server/www/packages/packages-windows/x86/ldap3/version.py
|
zhoulhb/teleport
| 640 |
94889
|
<filename>server/www/packages/packages-windows/x86/ldap3/version.py
# THIS FILE IS AUTO-GENERATED. PLEASE DO NOT MODIFY
# version file for ldap3
# generated on 2018-08-01 17:55:24.174707
# on system uname_result(system='Windows', node='ELITE10GC', release='10', version='10.0.17134', machine='AMD64', processor='Intel64 Family 6 Model 58 Stepping 9, GenuineIntel')
# with Python 3.7.0 - ('v3.7.0:1bf9cc5093', 'Jun 27 2018 04:59:51') - MSC v.1914 64 bit (AMD64)
#
__version__ = '2.5.1'
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__url__ = 'https://github.com/cannatag/ldap3'
__description__ = 'A strictly RFC 4510 conforming LDAP V3 pure Python client library'
__status__ = '5 - Production/Stable'
__license__ = 'LGPL v3'
|
Python/OOP/InitClass.py
|
piovezan/SOpt
| 148 |
94917
|
<filename>Python/OOP/InitClass.py
class A(object):
def __init__(self):
print("init")
def __call__(self):
print("call ")
a = A() # prints "init"
a() # prints "call"
#https://pt.stackoverflow.com/q/109813/101
|
transfer_model/losses/losses.py
|
ribeiro-hugo/smplx
| 776 |
94920
|
<filename>transfer_model/losses/losses.py
# -*- coding: utf-8 -*-
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2020 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: <NAME>, <EMAIL>
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import sys
import time
from typing import Callable, Iterator, Union, Optional, List
import os.path as osp
import yaml
from loguru import logger
import pickle
import numpy as np
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
from .utils import get_reduction_method
__all__ = [
'VertexEdgeLoss',
'build_loss',
]
def build_loss(type='l2', reduction='mean', **kwargs) -> nn.Module:
logger.debug(f'Building loss: {type}')
if type == 'l2':
return WeightedMSELoss(reduction=reduction, **kwargs)
elif type == 'vertex-edge':
return VertexEdgeLoss(reduction=reduction, **kwargs)
elif type == 'l1':
return nn.L1Loss()
else:
raise ValueError(f'Unknown loss type: {type}')
class WeightedMSELoss(nn.Module):
def __init__(self, reduction='mean', **kwargs):
super(WeightedMSELoss, self).__init__()
self.reduce_str = reduction
self.reduce = get_reduction_method(reduction)
def forward(self, input, target, weights=None):
diff = input - target
if weights is None:
return diff.pow(2).sum() / diff.shape[0]
else:
return (
weights.unsqueeze(dim=-1) * diff.pow(2)).sum() / diff.shape[0]
class VertexEdgeLoss(nn.Module):
def __init__(self, norm_type='l2',
gt_edges=None,
gt_edge_path='',
est_edges=None,
est_edge_path='',
robustifier=None,
edge_thresh=0.0, epsilon=1e-8,
reduction='sum',
**kwargs):
super(VertexEdgeLoss, self).__init__()
assert norm_type in ['l1', 'l2'], 'Norm type must be [l1, l2]'
self.norm_type = norm_type
self.epsilon = epsilon
self.reduction = reduction
assert self.reduction in ['sum', 'mean']
logger.info(f'Building edge loss with'
f' norm_type={norm_type},'
f' reduction={reduction},'
)
gt_edge_path = osp.expandvars(gt_edge_path)
est_edge_path = osp.expandvars(est_edge_path)
assert osp.exists(gt_edge_path) or gt_edges is not None, (
'gt_edges must not be None or gt_edge_path must exist'
)
assert osp.exists(est_edge_path) or est_edges is not None, (
'est_edges must not be None or est_edge_path must exist'
)
if osp.exists(gt_edge_path) and gt_edges is None:
gt_edges = np.load(gt_edge_path)
if osp.exists(est_edge_path) and est_edges is None:
est_edges = np.load(est_edge_path)
self.register_buffer(
'gt_connections', torch.tensor(gt_edges, dtype=torch.long))
self.register_buffer(
'est_connections', torch.tensor(est_edges, dtype=torch.long))
def extra_repr(self):
msg = [
f'Norm type: {self.norm_type}',
]
if self.has_connections:
msg.append(
f'GT Connections shape: {self.gt_connections.shape}'
)
msg.append(
f'Est Connections shape: {self.est_connections.shape}'
)
return '\n'.join(msg)
def compute_edges(self, points, connections):
edge_points = torch.index_select(
points, 1, connections.view(-1)).reshape(points.shape[0], -1, 2, 3)
return edge_points[:, :, 1] - edge_points[:, :, 0]
def forward(self, gt_vertices, est_vertices, weights=None):
gt_edges = self.compute_edges(
gt_vertices, connections=self.gt_connections)
est_edges = self.compute_edges(
est_vertices, connections=self.est_connections)
raw_edge_diff = (gt_edges - est_edges)
batch_size = gt_vertices.shape[0]
if self.norm_type == 'l2':
edge_diff = raw_edge_diff.pow(2)
elif self.norm_type == 'l1':
edge_diff = raw_edge_diff.abs()
else:
            raise NotImplementedError(
                f'Norm type not implemented: {self.norm_type}')
if self.reduction == 'sum':
return edge_diff.sum()
elif self.reduction == 'mean':
return edge_diff.sum() / batch_size
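# Minimal usage sketch for build_loss (the edge array below is made up for
# illustration; real callers pass edges that index into their meshes):
#
#   import numpy as np
#   import torch
#   edges = np.array([[0, 1], [1, 2]])
#   loss_fn = build_loss(type='vertex-edge', gt_edges=edges, est_edges=edges)
#   verts = torch.rand(1, 3, 3)      # (batch, num_vertices, 3)
#   value = loss_fn(verts, verts)    # identical meshes -> zero edge loss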
|
backend/projects/models.py
|
LucasSantosGuedes/App-Gestao
| 142 |
94932
|
<gh_stars>100-1000
from boards.models import Board
from django.contrib.contenttypes.fields import GenericRelation
from django.db import models
from django.utils import timezone
from users.models import User
class Project(models.Model):
owner = models.ForeignKey(
User, on_delete=models.CASCADE, related_name='owned_projects')
title = models.CharField(max_length=255, blank=False, null=False)
description = models.TextField(blank=True, null=False)
created_at = models.DateTimeField(default=timezone.now)
members = models.ManyToManyField(
User, through='ProjectMembership', through_fields=('project', 'member'))
boards = GenericRelation(
Board, object_id_field='owner_id', content_type_field='owner_model')
def __str__(self):
return self.title
class ProjectMembership(models.Model):
class Access(models.IntegerChoices):
MEMBER = 1 # Can view and create and move only own items
ADMIN = 2 # Can remove members and modify project settings.
project = models.ForeignKey(
Project, on_delete=models.CASCADE)
member = models.ForeignKey(
User, on_delete=models.CASCADE)
access_level = models.IntegerField(choices=Access.choices, default=1)
created_at = models.DateTimeField(default=timezone.now)
def __str__(self):
return f'{self.member.full_name} , {self.project.title}'
class Meta:
unique_together = ('project', 'member')
|
Python/ch5-3.py
|
andjor/deep-learning-with-csharp-and-cntk
| 120 |
94978
|
import os
import sys
try:
base_directory = os.path.split(sys.executable)[0]
os.environ['PATH'] += ';' + base_directory
import cntk
os.environ['KERAS_BACKEND'] = 'cntk'
except ImportError:
print('CNTK not installed')
import keras
import keras.utils
import keras.datasets
import keras.models
import keras.layers
import keras.applications
import keras.preprocessing.image
import numpy as np
import matplotlib.pyplot as plt
import os
import numpy as np
base_dir = 'C:/Users/anastasios/Desktop/cats_and_dogs'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
test_dir = os.path.join(base_dir, 'test')
def extract_features(directory, sample_count):
conv_base = keras.applications.VGG16(weights='imagenet', include_top=False, input_shape=(150, 150, 3))
conv_base.summary()
datagen = keras.preprocessing.image.ImageDataGenerator(rescale=1. / 255)
batch_size = 20
features = np.zeros(shape=(sample_count, 4, 4, 512))
labels = np.zeros(shape=(sample_count))
generator = datagen.flow_from_directory(
directory,
target_size=(150, 150),
batch_size=batch_size,
class_mode='binary')
i = 0
for inputs_batch, labels_batch in generator:
features_batch = conv_base.predict(inputs_batch)
features[i * batch_size : (i + 1) * batch_size] = features_batch
labels[i * batch_size : (i + 1) * batch_size] = labels_batch
i += 1
if i % 4 == 0:
print('{0}, processed {1} images'.format(directory, i*batch_size))
if i * batch_size >= sample_count:
# Note that since generators yield data indefinitely in a loop,
# we must `break` after every image has been seen once.
break
return features, labels
def save_npy_files(features, labels, prefix):
np.save(prefix+'_features.npy', features)
np.save(prefix+'_labels', labels)
def load_npy_files(prefix):
result = (np.load(prefix+'_features.npy'), np.load(prefix+'_labels.npy'))
print('Loaded {0}_features.npy, {0}_labels.npy'.format(prefix))
return result
def plot_history(history):
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
def train_with_extracted_features():
if os.path.isfile('test_features.npy'):
train_features, train_labels = load_npy_files('train')
validation_features, validation_labels = load_npy_files('validation')
test_features, test_labels = load_npy_files('test')
else:
train_features, train_labels = extract_features(train_dir, 2000)
validation_features, validation_labels = extract_features(validation_dir, 1000)
test_features, test_labels = extract_features(test_dir, 1000)
train_features = np.reshape(train_features, (2000, 4 * 4 * 512))
validation_features = np.reshape(validation_features, (1000, 4 * 4 * 512))
test_features = np.reshape(test_features, (1000, 4 * 4 * 512))
save_npy_files(train_features, train_labels, 'train')
save_npy_files(validation_features, validation_labels, 'validation')
save_npy_files(test_features, test_labels, 'test')
model = keras.models.Sequential()
model.add(keras.layers.Dense(256, activation='relu', input_dim=4 * 4 * 512))
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Dense(1, activation='sigmoid'))
model.compile(optimizer=keras.optimizers.RMSprop(lr=2e-5), loss='binary_crossentropy', metrics=['acc'])
history = model.fit(train_features, train_labels, epochs=5, batch_size=20, validation_data=(validation_features, validation_labels))
plot_history(history)
def train_with_augmentation(use_finetuning):
conv_base = keras.applications.VGG16(weights='imagenet', include_top=False, input_shape=(150, 150, 3))
model = keras.models.Sequential()
model.add(conv_base)
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(256, activation='relu'))
model.add(keras.layers.Dense(1, activation='sigmoid'))
model.summary()
print('This is the number of trainable weights before freezing the conv base:', len(model.trainable_weights))
if use_finetuning:
set_trainable = False
for layer in conv_base.layers:
if layer.name == 'block5_conv1':
set_trainable = True
if set_trainable:
layer.trainable = True
else:
layer.trainable = False
else:
conv_base.trainable = False
print('This is the number of trainable weights after freezing the conv base:', len(model.trainable_weights))
model.summary()
train_datagen = keras.preprocessing.image.ImageDataGenerator(
rescale=1. / 255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
# Note that the validation data should not be augmented!
test_datagen = keras.preprocessing.image.ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
# This is the target directory
train_dir,
# All images will be resized to 150x150
target_size=(150, 150),
batch_size=20,
# Since we use binary_crossentropy loss, we need binary labels
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
validation_dir,
target_size=(150, 150),
batch_size=20,
class_mode='binary')
model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.RMSprop(lr=2e-5), metrics=['acc'])
history = model.fit_generator(
train_generator,
steps_per_epoch=100,
epochs=30,
validation_data=validation_generator,
validation_steps=50,
verbose=2)
plot_history(history)
if __name__ == '__main__':
train_with_extracted_features()
train_with_augmentation(use_finetuning=True)
|
function/python/brightics/function/transform/test/sample_test.py
|
parkjh80/studio
| 202 |
94988
|
"""
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import pandas as pd
import numpy as np
from brightics.function.transform import random_sampling
from brightics.common.datasets import load_iris
import HtmlTestRunner
import os
class TestRandomSampling(unittest.TestCase):
def test_default(self):
df_iris = load_iris()
df_res = random_sampling(table=df_iris, num_or_frac='num', num=1, frac=50, replace=False, seed=12345)['table']
self.assertListEqual([5.6, 2.5, 3.9, 1.1, 'versicolor'], df_res.loc[69].tolist(), 'incorrect sample')
def test_frac_replace(self):
df_iris = load_iris()
df_res = random_sampling(table=df_iris, num_or_frac='frac', num=10, frac=50, replace=True, seed=12345)['table'].reset_index(drop=True)
self.assertListEqual([4.7, 3.2, 1.6, 0.2, 'setosa'], df_res.loc[0].tolist(), 'incorrect sample[0]')
self.assertListEqual([7.2, 3.0, 5.8, 1.6, 'virginica'], df_res.loc[1].tolist(), 'incorrect sample[1]')
self.assertListEqual([6.2, 2.8, 4.8, 1.8, 'virginica'], df_res.loc[2].tolist(), 'incorrect sample[2]')
self.assertListEqual([5.8, 2.7, 5.1, 1.9, 'virginica'], df_res.loc[3].tolist(), 'incorrect sample[3]')
self.assertListEqual([4.9, 3.1, 1.5, 0.1, 'setosa'], df_res.loc[4].tolist(), 'incorrect sample[4]')
if __name__ == '__main__':
filepath = os.path.dirname(os.path.abspath(__file__))
    reportFolder = filepath + "/../../../../../../../reports"
    unittest.main(testRunner=HtmlTestRunner.HTMLTestRunner(combine_reports=True, output=reportFolder))
|
fs/expose/wsgi/serve_home.py
|
jwilk-forks/pyfilesystem
| 314 |
94993
|
from wsgiref.simple_server import make_server
from fs.osfs import OSFS
from wsgi import serve_fs
osfs = OSFS('~/')
application = serve_fs(osfs)
httpd = make_server('', 8000, application)
print "Serving on http://127.0.0.1:8000"
httpd.serve_forever()
|
source/oleTypes.py
|
marlon-sousa/nvda
| 1,592 |
95008
|
# typelib <unable to determine filename>
_lcid = 0 # change this if required
from ctypes import *
WSTRING = c_wchar_p
from comtypes import IUnknown
LONG_PTR = c_int
from comtypes import GUID
from comtypes import IUnknown
from ctypes import HRESULT
from comtypes import helpstring
from comtypes import COMMETHOD
from comtypes import dispid
from comtypes.persist import IPersist
from comtypes import wireHWND
from comtypes import GUID
from comtypes import _COAUTHIDENTITY
UINT_PTR = c_ulong
from comtypes import _COSERVERINFO
from comtypes import _COAUTHINFO
from comtypes import tagBIND_OPTS2
from objidl import IBindCtx, IMoniker
class tagDVTARGETDEVICE(Structure):
pass
tagDVTARGETDEVICE._fields_ = [
('tdSize', c_ulong),
('tdDriverNameOffset', c_ushort),
('tdDeviceNameOffset', c_ushort),
('tdPortNameOffset', c_ushort),
('tdExtDevmodeOffset', c_ushort),
('tdData', POINTER(c_ubyte)),
]
assert sizeof(tagDVTARGETDEVICE) == 16, sizeof(tagDVTARGETDEVICE)
assert alignment(tagDVTARGETDEVICE) == 4, alignment(tagDVTARGETDEVICE)
class _BYTE_BLOB(Structure):
pass
_BYTE_BLOB._fields_ = [
('clSize', c_ulong),
('abData', POINTER(c_ubyte)),
]
assert sizeof(_BYTE_BLOB) == 8, sizeof(_BYTE_BLOB)
assert alignment(_BYTE_BLOB) == 4, alignment(_BYTE_BLOB)
class tagRECT(Structure):
pass
tagRECT._fields_ = [
('left', c_int),
('top', c_int),
('right', c_int),
('bottom', c_int),
]
assert sizeof(tagRECT) == 16, sizeof(tagRECT)
assert alignment(tagRECT) == 4, alignment(tagRECT)
class __MIDL_IWinTypes_0003(Union):
pass
class _FLAGGED_BYTE_BLOB(Structure):
pass
__MIDL_IWinTypes_0003._fields_ = [
('hInproc', c_int),
('hRemote', POINTER(_FLAGGED_BYTE_BLOB)),
('hInproc64', c_longlong),
]
assert sizeof(__MIDL_IWinTypes_0003) == 8, sizeof(__MIDL_IWinTypes_0003)
assert alignment(__MIDL_IWinTypes_0003) == 8, alignment(__MIDL_IWinTypes_0003)
class _userSTGMEDIUM(Structure):
pass
class _STGMEDIUM_UNION(Structure):
pass
class __MIDL_IAdviseSink_0003(Union):
pass
class _userHMETAFILEPICT(Structure):
pass
class _userHENHMETAFILE(Structure):
pass
class _GDI_OBJECT(Structure):
pass
class _userHGLOBAL(Structure):
pass
__MIDL_IAdviseSink_0003._fields_ = [
('hMetaFilePict', POINTER(_userHMETAFILEPICT)),
('hHEnhMetaFile', POINTER(_userHENHMETAFILE)),
('hGdiHandle', POINTER(_GDI_OBJECT)),
('hGlobal', POINTER(_userHGLOBAL)),
('lpszFileName', WSTRING),
('pstm', POINTER(_BYTE_BLOB)),
('pstg', POINTER(_BYTE_BLOB)),
]
assert sizeof(__MIDL_IAdviseSink_0003) == 4, sizeof(__MIDL_IAdviseSink_0003)
assert alignment(__MIDL_IAdviseSink_0003) == 4, alignment(__MIDL_IAdviseSink_0003)
_STGMEDIUM_UNION._fields_ = [
('tymed', c_ulong),
('u', __MIDL_IAdviseSink_0003),
]
assert sizeof(_STGMEDIUM_UNION) == 8, sizeof(_STGMEDIUM_UNION)
assert alignment(_STGMEDIUM_UNION) == 4, alignment(_STGMEDIUM_UNION)
_userSTGMEDIUM._fields_ = [
('__MIDL__IAdviseSink0003', _STGMEDIUM_UNION),
('pUnkForRelease', POINTER(IUnknown)),
]
assert sizeof(_userSTGMEDIUM) == 12, sizeof(_userSTGMEDIUM)
assert alignment(_userSTGMEDIUM) == 4, alignment(_userSTGMEDIUM)
class tagLOGPALETTE(Structure):
pass
class tagPALETTEENTRY(Structure):
pass
tagLOGPALETTE._pack_ = 2
tagLOGPALETTE._fields_ = [
('palVersion', c_ushort),
('palNumEntries', c_ushort),
('palPalEntry', POINTER(tagPALETTEENTRY)),
]
assert sizeof(tagLOGPALETTE) == 8, sizeof(tagLOGPALETTE)
assert alignment(tagLOGPALETTE) == 2, alignment(tagLOGPALETTE)
wireASYNC_STGMEDIUM = POINTER(_userSTGMEDIUM)
class IEnumOLEVERB(IUnknown):
_case_insensitive_ = True
_iid_ = GUID('{00000104-0000-0000-C000-000000000046}')
_idlflags_ = []
class tagOLEVERB(Structure):
pass
IEnumOLEVERB._methods_ = [
COMMETHOD([], HRESULT, 'RemoteNext',
( ['in'], c_ulong, 'celt' ),
( ['out'], POINTER(tagOLEVERB), 'rgelt' ),
( ['out'], POINTER(c_ulong), 'pceltFetched' )),
COMMETHOD([], HRESULT, 'Skip',
( ['in'], c_ulong, 'celt' )),
COMMETHOD([], HRESULT, 'Reset'),
COMMETHOD([], HRESULT, 'Clone',
( ['out'], POINTER(POINTER(IEnumOLEVERB)), 'ppenum' )),
]
class IEnumUnknown(IUnknown):
_case_insensitive_ = True
_iid_ = GUID('{00000100-0000-0000-C000-000000000046}')
_idlflags_ = []
IEnumUnknown._methods_ = [
COMMETHOD([], HRESULT, 'RemoteNext',
( ['in'], c_ulong, 'celt' ),
( ['out'], POINTER(POINTER(IUnknown)), 'rgelt' ),
( ['out'], POINTER(c_ulong), 'pceltFetched' )),
COMMETHOD([], HRESULT, 'Skip',
( ['in'], c_ulong, 'celt' )),
COMMETHOD([], HRESULT, 'Reset'),
COMMETHOD([], HRESULT, 'Clone',
( ['out'], POINTER(POINTER(IEnumUnknown)), 'ppenum' )),
]
class _RemotableHandle(Structure):
pass
class __MIDL_IWinTypes_0009(Union):
pass
__MIDL_IWinTypes_0009._fields_ = [
('hInproc', c_int),
('hRemote', c_int),
]
assert sizeof(__MIDL_IWinTypes_0009) == 4, sizeof(__MIDL_IWinTypes_0009)
assert alignment(__MIDL_IWinTypes_0009) == 4, alignment(__MIDL_IWinTypes_0009)
_RemotableHandle._fields_ = [
('fContext', c_int),
('u', __MIDL_IWinTypes_0009),
]
assert sizeof(_RemotableHandle) == 8, sizeof(_RemotableHandle)
assert alignment(_RemotableHandle) == 4, alignment(_RemotableHandle)
class _userHMETAFILE(Structure):
pass
class __MIDL_IWinTypes_0004(Union):
pass
__MIDL_IWinTypes_0004._fields_ = [
('hInproc', c_int),
('hRemote', POINTER(_BYTE_BLOB)),
('hInproc64', c_longlong),
]
assert sizeof(__MIDL_IWinTypes_0004) == 8, sizeof(__MIDL_IWinTypes_0004)
assert alignment(__MIDL_IWinTypes_0004) == 8, alignment(__MIDL_IWinTypes_0004)
_userHMETAFILE._fields_ = [
('fContext', c_int),
('u', __MIDL_IWinTypes_0004),
]
assert sizeof(_userHMETAFILE) == 16, sizeof(_userHMETAFILE)
assert alignment(_userHMETAFILE) == 8, alignment(_userHMETAFILE)
wireSTGMEDIUM = POINTER(_userSTGMEDIUM)
class _userHPALETTE(Structure):
pass
class __MIDL_IWinTypes_0008(Union):
pass
__MIDL_IWinTypes_0008._fields_ = [
('hInproc', c_int),
('hRemote', POINTER(tagLOGPALETTE)),
('hInproc64', c_longlong),
]
assert sizeof(__MIDL_IWinTypes_0008) == 8, sizeof(__MIDL_IWinTypes_0008)
assert alignment(__MIDL_IWinTypes_0008) == 8, alignment(__MIDL_IWinTypes_0008)
_userHPALETTE._fields_ = [
('fContext', c_int),
('u', __MIDL_IWinTypes_0008),
]
assert sizeof(_userHPALETTE) == 16, sizeof(_userHPALETTE)
assert alignment(_userHPALETTE) == 8, alignment(_userHPALETTE)
class __MIDL_IWinTypes_0007(Union):
pass
class _userBITMAP(Structure):
pass
__MIDL_IWinTypes_0007._fields_ = [
('hInproc', c_int),
('hRemote', POINTER(_userBITMAP)),
('hInproc64', c_longlong),
]
assert sizeof(__MIDL_IWinTypes_0007) == 8, sizeof(__MIDL_IWinTypes_0007)
assert alignment(__MIDL_IWinTypes_0007) == 8, alignment(__MIDL_IWinTypes_0007)
class IParseDisplayName(IUnknown):
_case_insensitive_ = True
_iid_ = GUID('{0000011A-0000-0000-C000-000000000046}')
_idlflags_ = []
class IOleContainer(IParseDisplayName):
_case_insensitive_ = True
_iid_ = GUID('{0000011B-0000-0000-C000-000000000046}')
_idlflags_ = []
IParseDisplayName._methods_ = [
COMMETHOD([], HRESULT, 'ParseDisplayName',
( ['in'], POINTER(IBindCtx), 'pbc' ),
( ['in'], WSTRING, 'pszDisplayName' ),
( ['out'], POINTER(c_ulong), 'pchEaten' ),
( ['out'], POINTER(POINTER(IMoniker)), 'ppmkOut' )),
]
IOleContainer._methods_ = [
COMMETHOD([], HRESULT, 'EnumObjects',
( ['in'], c_ulong, 'grfFlags' ),
( ['out'], POINTER(POINTER(IEnumUnknown)), 'ppenum' )),
COMMETHOD([], HRESULT, 'LockContainer',
( ['in'], c_int, 'fLock' )),
]
class IOleObject(IUnknown):
_case_insensitive_ = True
_iid_ = GUID('{00000112-0000-0000-C000-000000000046}')
_idlflags_ = []
class IOleClientSite(IUnknown):
_case_insensitive_ = True
_iid_ = GUID('{00000118-0000-0000-C000-000000000046}')
_idlflags_ = []
class IDataObject(IUnknown):
_case_insensitive_ = True
_iid_ = GUID('{0000010E-0000-0000-C000-000000000046}')
_idlflags_ = []
class tagMSG(Structure):
pass
class tagSIZEL(Structure):
pass
class IAdviseSink(IUnknown):
_case_insensitive_ = True
_iid_ = GUID('{0000010F-0000-0000-C000-000000000046}')
_idlflags_ = []
class IEnumSTATDATA(IUnknown):
_case_insensitive_ = True
_iid_ = GUID('{00000105-0000-0000-C000-000000000046}')
_idlflags_ = []
IOleObject._methods_ = [
COMMETHOD([], HRESULT, 'SetClientSite',
( ['in'], POINTER(IOleClientSite), 'pClientSite' )),
COMMETHOD([], HRESULT, 'GetClientSite',
( ['out'], POINTER(POINTER(IOleClientSite)), 'ppClientSite' )),
COMMETHOD([], HRESULT, 'SetHostNames',
( ['in'], WSTRING, 'szContainerApp' ),
( ['in'], WSTRING, 'szContainerObj' )),
COMMETHOD([], HRESULT, 'Close',
( ['in'], c_ulong, 'dwSaveOption' )),
COMMETHOD([], HRESULT, 'SetMoniker',
( ['in'], c_ulong, 'dwWhichMoniker' ),
( ['in'], POINTER(IMoniker), 'pmk' )),
COMMETHOD([], HRESULT, 'GetMoniker',
( ['in'], c_ulong, 'dwAssign' ),
( ['in'], c_ulong, 'dwWhichMoniker' ),
( ['out'], POINTER(POINTER(IMoniker)), 'ppmk' )),
COMMETHOD([], HRESULT, 'InitFromData',
( ['in'], POINTER(IDataObject), 'pDataObject' ),
( ['in'], c_int, 'fCreation' ),
( ['in'], c_ulong, 'dwReserved' )),
COMMETHOD([], HRESULT, 'GetClipboardData',
( ['in'], c_ulong, 'dwReserved' ),
( ['out'], POINTER(POINTER(IDataObject)), 'ppDataObject' )),
COMMETHOD([], HRESULT, 'DoVerb',
( ['in'], c_int, 'iVerb' ),
( ['in'], POINTER(tagMSG), 'lpmsg' ),
( ['in'], POINTER(IOleClientSite), 'pActiveSite' ),
( ['in'], c_int, 'lindex' ),
( ['in'], wireHWND, 'hwndParent' ),
( ['in'], POINTER(tagRECT), 'lprcPosRect' )),
COMMETHOD([], HRESULT, 'EnumVerbs',
( ['out'], POINTER(POINTER(IEnumOLEVERB)), 'ppEnumOleVerb' )),
COMMETHOD([], HRESULT, 'Update'),
COMMETHOD([], HRESULT, 'IsUpToDate'),
COMMETHOD([], HRESULT, 'GetUserClassID',
( ['out'], POINTER(GUID), 'pClsid' )),
COMMETHOD([], HRESULT, 'GetUserType',
( ['in'], c_ulong, 'dwFormOfType' ),
( ['out'], POINTER(WSTRING), 'pszUserType' )),
COMMETHOD([], HRESULT, 'SetExtent',
( ['in'], c_ulong, 'dwDrawAspect' ),
( ['in'], POINTER(tagSIZEL), 'psizel' )),
COMMETHOD([], HRESULT, 'GetExtent',
( ['in'], c_ulong, 'dwDrawAspect' ),
( ['out'], POINTER(tagSIZEL), 'psizel' )),
COMMETHOD([], HRESULT, 'Advise',
( ['in'], POINTER(IAdviseSink), 'pAdvSink' ),
( ['out'], POINTER(c_ulong), 'pdwConnection' )),
COMMETHOD([], HRESULT, 'Unadvise',
( ['in'], c_ulong, 'dwConnection' )),
COMMETHOD([], HRESULT, 'EnumAdvise',
( ['out'], POINTER(POINTER(IEnumSTATDATA)), 'ppenumAdvise' )),
COMMETHOD([], HRESULT, 'GetMiscStatus',
( ['in'], c_ulong, 'dwAspect' ),
( ['out'], POINTER(c_ulong), 'pdwStatus' )),
COMMETHOD([], HRESULT, 'SetColorScheme',
( ['in'], POINTER(tagLOGPALETTE), 'pLogpal' )),
]
class __MIDL_IWinTypes_0005(Union):
pass
class _remoteMETAFILEPICT(Structure):
pass
__MIDL_IWinTypes_0005._fields_ = [
('hInproc', c_int),
('hRemote', POINTER(_remoteMETAFILEPICT)),
('hInproc64', c_longlong),
]
assert sizeof(__MIDL_IWinTypes_0005) == 8, sizeof(__MIDL_IWinTypes_0005)
assert alignment(__MIDL_IWinTypes_0005) == 8, alignment(__MIDL_IWinTypes_0005)
_userHMETAFILEPICT._fields_ = [
('fContext', c_int),
('u', __MIDL_IWinTypes_0005),
]
assert sizeof(_userHMETAFILEPICT) == 16, sizeof(_userHMETAFILEPICT)
assert alignment(_userHMETAFILEPICT) == 8, alignment(_userHMETAFILEPICT)
tagOLEVERB._fields_ = [
('lVerb', c_int),
('lpszVerbName', WSTRING),
('fuFlags', c_ulong),
('grfAttribs', c_ulong),
]
assert sizeof(tagOLEVERB) == 16, sizeof(tagOLEVERB)
assert alignment(tagOLEVERB) == 4, alignment(tagOLEVERB)
class _userCLIPFORMAT(Structure):
pass
class __MIDL_IWinTypes_0001(Union):
pass
__MIDL_IWinTypes_0001._fields_ = [
('dwValue', c_ulong),
('pwszName', WSTRING),
]
assert sizeof(__MIDL_IWinTypes_0001) == 4, sizeof(__MIDL_IWinTypes_0001)
assert alignment(__MIDL_IWinTypes_0001) == 4, alignment(__MIDL_IWinTypes_0001)
_userCLIPFORMAT._fields_ = [
('fContext', c_int),
('u', __MIDL_IWinTypes_0001),
]
assert sizeof(_userCLIPFORMAT) == 8, sizeof(_userCLIPFORMAT)
assert alignment(_userCLIPFORMAT) == 4, alignment(_userCLIPFORMAT)
wireCLIPFORMAT = POINTER(_userCLIPFORMAT)
class __MIDL_IWinTypes_0006(Union):
pass
__MIDL_IWinTypes_0006._fields_ = [
('hInproc', c_int),
('hRemote', POINTER(_BYTE_BLOB)),
('hInproc64', c_longlong),
]
assert sizeof(__MIDL_IWinTypes_0006) == 8, sizeof(__MIDL_IWinTypes_0006)
assert alignment(__MIDL_IWinTypes_0006) == 8, alignment(__MIDL_IWinTypes_0006)
_userHENHMETAFILE._fields_ = [
('fContext', c_int),
('u', __MIDL_IWinTypes_0006),
]
assert sizeof(_userHENHMETAFILE) == 16, sizeof(_userHENHMETAFILE)
assert alignment(_userHENHMETAFILE) == 8, alignment(_userHENHMETAFILE)
class tagFORMATETC(Structure):
pass
tagFORMATETC._fields_ = [
('cfFormat', wireCLIPFORMAT),
('ptd', POINTER(tagDVTARGETDEVICE)),
('dwAspect', c_ulong),
('lindex', c_int),
('tymed', c_ulong),
]
assert sizeof(tagFORMATETC) == 20, sizeof(tagFORMATETC)
assert alignment(tagFORMATETC) == 4, alignment(tagFORMATETC)
_userHGLOBAL._fields_ = [
('fContext', c_int),
('u', __MIDL_IWinTypes_0003),
]
assert sizeof(_userHGLOBAL) == 16, sizeof(_userHGLOBAL)
assert alignment(_userHGLOBAL) == 8, alignment(_userHGLOBAL)
IOleClientSite._methods_ = [
COMMETHOD([], HRESULT, 'SaveObject'),
COMMETHOD([], HRESULT, 'GetMoniker',
( ['in'], c_ulong, 'dwAssign' ),
( ['in'], c_ulong, 'dwWhichMoniker' ),
( ['out'], POINTER(POINTER(IMoniker)), 'ppmk' )),
COMMETHOD([], HRESULT, 'GetContainer',
( ['out'], POINTER(POINTER(IOleContainer)), 'ppContainer' )),
COMMETHOD([], HRESULT, 'ShowObject'),
COMMETHOD([], HRESULT, 'OnShowWindow',
( ['in'], c_int, 'fShow' )),
COMMETHOD([], HRESULT, 'RequestNewObjectLayout'),
]
class __MIDL_IAdviseSink_0002(Union):
pass
class _userHBITMAP(Structure):
pass
__MIDL_IAdviseSink_0002._fields_ = [
('hBitmap', POINTER(_userHBITMAP)),
('hPalette', POINTER(_userHPALETTE)),
('hGeneric', POINTER(_userHGLOBAL)),
]
assert sizeof(__MIDL_IAdviseSink_0002) == 4, sizeof(__MIDL_IAdviseSink_0002)
assert alignment(__MIDL_IAdviseSink_0002) == 4, alignment(__MIDL_IAdviseSink_0002)
_GDI_OBJECT._fields_ = [
('ObjectType', c_ulong),
('u', __MIDL_IAdviseSink_0002),
]
assert sizeof(_GDI_OBJECT) == 8, sizeof(_GDI_OBJECT)
assert alignment(_GDI_OBJECT) == 4, alignment(_GDI_OBJECT)
class _userFLAG_STGMEDIUM(Structure):
pass
wireFLAG_STGMEDIUM = POINTER(_userFLAG_STGMEDIUM)
class IEnumFORMATETC(IUnknown):
_case_insensitive_ = True
_iid_ = GUID('{00000103-0000-0000-C000-000000000046}')
_idlflags_ = []
IDataObject._methods_ = [
COMMETHOD([], HRESULT, 'GetData',
( ['in'], POINTER(tagFORMATETC), 'pformatetcIn' ),
( ['out'], POINTER(wireSTGMEDIUM), 'pmedium' )),
COMMETHOD([], HRESULT, 'RemoteGetData',
( ['in'], POINTER(tagFORMATETC), 'pformatetcIn' ),
( ['out'], POINTER(wireSTGMEDIUM), 'pRemoteMedium' )),
COMMETHOD([], HRESULT, 'RemoteGetDataHere',
( ['in'], POINTER(tagFORMATETC), 'pformatetc' ),
( ['in', 'out'], POINTER(wireSTGMEDIUM), 'pRemoteMedium' )),
COMMETHOD([], HRESULT, 'QueryGetData',
( ['in'], POINTER(tagFORMATETC), 'pformatetc' )),
COMMETHOD([], HRESULT, 'GetCanonicalFormatEtc',
( ['in'], POINTER(tagFORMATETC), 'pformatectIn' ),
( ['out'], POINTER(tagFORMATETC), 'pformatetcOut' )),
COMMETHOD([], HRESULT, 'RemoteSetData',
( ['in'], POINTER(tagFORMATETC), 'pformatetc' ),
( ['in'], POINTER(wireFLAG_STGMEDIUM), 'pmedium' ),
( ['in'], c_int, 'fRelease' )),
COMMETHOD([], HRESULT, 'EnumFormatEtc',
( ['in'], c_ulong, 'dwDirection' ),
( ['out'], POINTER(POINTER(IEnumFORMATETC)), 'ppenumFormatEtc' )),
COMMETHOD([], HRESULT, 'DAdvise',
( ['in'], POINTER(tagFORMATETC), 'pformatetc' ),
( ['in'], c_ulong, 'advf' ),
( ['in'], POINTER(IAdviseSink), 'pAdvSink' ),
( ['out'], POINTER(c_ulong), 'pdwConnection' )),
COMMETHOD([], HRESULT, 'DUnadvise',
( ['in'], c_ulong, 'dwConnection' )),
COMMETHOD([], HRESULT, 'EnumDAdvise',
( ['out'], POINTER(POINTER(IEnumSTATDATA)), 'ppenumAdvise' )),
]
class tagPOINT(Structure):
pass
tagPOINT._fields_ = [
('x', c_int),
('y', c_int),
]
assert sizeof(tagPOINT) == 8, sizeof(tagPOINT)
assert alignment(tagPOINT) == 4, alignment(tagPOINT)
IAdviseSink._methods_ = [
COMMETHOD([], HRESULT, 'RemoteOnDataChange',
( ['in'], POINTER(tagFORMATETC), 'pformatetc' ),
( ['in'], POINTER(wireASYNC_STGMEDIUM), 'pStgmed' )),
COMMETHOD([], HRESULT, 'RemoteOnViewChange',
( ['in'], c_ulong, 'dwAspect' ),
( ['in'], c_int, 'lindex' )),
COMMETHOD([], HRESULT, 'RemoteOnRename',
( ['in'], POINTER(IMoniker), 'pmk' )),
COMMETHOD([], HRESULT, 'RemoteOnSave'),
COMMETHOD([], HRESULT, 'RemoteOnClose'),
]
class __MIDL___MIDL_itf_oleTypes_0005_0001_0001(Structure):
pass
__MIDL___MIDL_itf_oleTypes_0005_0001_0001._fields_ = [
('Data1', c_ulong),
('Data2', c_ushort),
('Data3', c_ushort),
('Data4', c_ubyte * 8),
]
assert sizeof(__MIDL___MIDL_itf_oleTypes_0005_0001_0001) == 16, sizeof(__MIDL___MIDL_itf_oleTypes_0005_0001_0001)
assert alignment(__MIDL___MIDL_itf_oleTypes_0005_0001_0001) == 4, alignment(__MIDL___MIDL_itf_oleTypes_0005_0001_0001)
_userFLAG_STGMEDIUM._fields_ = [
('ContextFlags', c_int),
('fPassOwnership', c_int),
('Stgmed', _userSTGMEDIUM),
]
assert sizeof(_userFLAG_STGMEDIUM) == 20, sizeof(_userFLAG_STGMEDIUM)
assert alignment(_userFLAG_STGMEDIUM) == 4, alignment(_userFLAG_STGMEDIUM)
class tagSTATDATA(Structure):
pass
IEnumSTATDATA._methods_ = [
COMMETHOD([], HRESULT, 'RemoteNext',
( ['in'], c_ulong, 'celt' ),
( ['out'], POINTER(tagSTATDATA), 'rgelt' ),
( ['out'], POINTER(c_ulong), 'pceltFetched' )),
COMMETHOD([], HRESULT, 'Skip',
( ['in'], c_ulong, 'celt' )),
COMMETHOD([], HRESULT, 'Reset'),
COMMETHOD([], HRESULT, 'Clone',
( ['out'], POINTER(POINTER(IEnumSTATDATA)), 'ppenum' )),
]
tagSIZEL._fields_ = [
('cx', c_int),
('cy', c_int),
]
assert sizeof(tagSIZEL) == 8, sizeof(tagSIZEL)
assert alignment(tagSIZEL) == 4, alignment(tagSIZEL)
tagMSG._fields_ = [
('hwnd', wireHWND),
('message', c_uint),
('wParam', UINT_PTR),
('lParam', LONG_PTR),
('time', c_ulong),
('pt', tagPOINT),
]
assert sizeof(tagMSG) == 28, sizeof(tagMSG)
assert alignment(tagMSG) == 4, alignment(tagMSG)
IEnumFORMATETC._methods_ = [
COMMETHOD([], HRESULT, 'RemoteNext',
( ['in'], c_ulong, 'celt' ),
( ['out'], POINTER(tagFORMATETC), 'rgelt' ),
( ['out'], POINTER(c_ulong), 'pceltFetched' )),
COMMETHOD([], HRESULT, 'Skip',
( ['in'], c_ulong, 'celt' )),
COMMETHOD([], HRESULT, 'Reset'),
COMMETHOD([], HRESULT, 'Clone',
( ['out'], POINTER(POINTER(IEnumFORMATETC)), 'ppenum' )),
]
_FLAGGED_BYTE_BLOB._fields_ = [
('fFlags', c_ulong),
('clSize', c_ulong),
('abData', POINTER(c_ubyte)),
]
assert sizeof(_FLAGGED_BYTE_BLOB) == 12, sizeof(_FLAGGED_BYTE_BLOB)
assert alignment(_FLAGGED_BYTE_BLOB) == 4, alignment(_FLAGGED_BYTE_BLOB)
_userBITMAP._fields_ = [
('bmType', c_int),
('bmWidth', c_int),
('bmHeight', c_int),
('bmWidthBytes', c_int),
('bmPlanes', c_ushort),
('bmBitsPixel', c_ushort),
('cbSize', c_ulong),
('pBuffer', POINTER(c_ubyte)),
]
assert sizeof(_userBITMAP) == 28, sizeof(_userBITMAP)
assert alignment(_userBITMAP) == 4, alignment(_userBITMAP)
_remoteMETAFILEPICT._fields_ = [
('mm', c_int),
('xExt', c_int),
('yExt', c_int),
('hMF', POINTER(_userHMETAFILE)),
]
assert sizeof(_remoteMETAFILEPICT) == 16, sizeof(_remoteMETAFILEPICT)
assert alignment(_remoteMETAFILEPICT) == 4, alignment(_remoteMETAFILEPICT)
tagSTATDATA._fields_ = [
('formatetc', tagFORMATETC),
('advf', c_ulong),
('pAdvSink', POINTER(IAdviseSink)),
('dwConnection', c_ulong),
]
assert sizeof(tagSTATDATA) == 32, sizeof(tagSTATDATA)
assert alignment(tagSTATDATA) == 4, alignment(tagSTATDATA)
_userHBITMAP._fields_ = [
('fContext', c_int),
('u', __MIDL_IWinTypes_0007),
]
assert sizeof(_userHBITMAP) == 16, sizeof(_userHBITMAP)
assert alignment(_userHBITMAP) == 8, alignment(_userHBITMAP)
tagPALETTEENTRY._fields_ = [
('peRed', c_ubyte),
('peGreen', c_ubyte),
('peBlue', c_ubyte),
('peFlags', c_ubyte),
]
assert sizeof(tagPALETTEENTRY) == 4, sizeof(tagPALETTEENTRY)
assert alignment(tagPALETTEENTRY) == 1, alignment(tagPALETTEENTRY)
|
vnpy/gateway/comstar/comstar_gateway.py
|
funrunskypalace/vnpy
| 19,529 |
95069
|
<filename>vnpy/gateway/comstar/comstar_gateway.py
from datetime import datetime
from typing import Optional, Sequence, Dict, Union
from enum import Enum
import pytz
from vnpy.event import EventEngine
from vnpy.trader.gateway import BaseGateway
from vnpy.trader.constant import (
Exchange,
Product,
Offset,
OrderType,
Direction,
Status
)
from vnpy.trader.object import (
SubscribeRequest,
CancelRequest,
OrderRequest,
ContractData,
TickData,
OrderData,
TradeData,
LogData
)
from .comstar_api import TdApi
VN_ENUMS = {
"Exchange": Exchange,
"Product": Product,
"Offset": Offset,
"OrderType": OrderType,
"Direction": Direction,
"Status": Status
}
CHINA_TZ = pytz.timezone("Asia/Shanghai")
class ComstarGateway(BaseGateway):
"""
VN Trader Gateway for Comstar service.
"""
default_setting = {
"交易服务器": "",
"用户名": "",
"密码": "",
"Key": ""
}
exchanges = [Exchange.CFETS]
def __init__(self, event_engine: EventEngine):
"""Constructor"""
super().__init__(event_engine, "COMSTAR")
self.api = UserApi(self)
def connect(self, setting: dict):
""""""
td_address = setting["交易服务器"]
username = setting["用户名"]
password = setting["密码"]
key = setting["Key"]
self.api.connect(username, password, key, td_address)
def subscribe(self, req: SubscribeRequest):
""""""
# Symbol format: 180406_T0 or 180406_T1
symbol, settle_type, *_ = req.symbol.split("_") + [""]
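        # e.g. "180406_T0" -> symbol "180406", settle_type "T0"; a bare "180406" yields settle_type "" and is rejected below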
if settle_type not in {"T0", "T1"}:
self.write_log("请输入清算速度T0或T1")
return ""
data = vn_encode(req)
data["symbol"] = symbol
data["settle_type"] = settle_type
self.api.subscribe(data, self.gateway_name)
def send_order(self, req: OrderRequest):
""""""
        # Offset is not supported for the Comstar gateway
req.offset = Offset.NONE
if req.type not in {OrderType.LIMIT, OrderType.FAK}:
self.write_log("仅支持限价单和FAK单")
return ""
symbol, settle_type, *_ = req.symbol.split("_") + [""]
if settle_type not in {"T0", "T1"}:
self.write_log("请输入清算速度T0或T1")
return ""
data = vn_encode(req)
data["symbol"] = symbol
data["settle_type"] = settle_type
data["strategy_name"] = data.pop("reference")
order_id = self.api.send_order(data, self.gateway_name)
# convert to vt_orderid
return f"{self.gateway_name}.{order_id}"
def cancel_order(self, req: CancelRequest):
""""""
data = vn_encode(req)
symbol, settle_type, *_ = req.symbol.split("_") + [""]
data["symbol"] = symbol
data["settle_type"] = settle_type
self.api.cancel_order(data, self.gateway_name)
def query_account(self):
""""""
pass
def query_position(self):
""""""
pass
def query_all(self):
""""""
self.api.get_all_contracts()
self.api.get_all_orders()
self.api.get_all_trades()
def close(self):
""""""
self.api.close()
class UserApi(TdApi):
"""
Implements Comstar API.
"""
def __init__(self, gateway: ComstarGateway):
"""Constructor"""
super().__init__()
self.gateway = gateway
self.gateway_name = gateway.gateway_name
self.trades: Dict[str, TradeData] = {}
self.orders: Dict[str, OrderData] = {}
def on_tick(self, tick: dict):
""""""
data = parse_tick(tick)
self.gateway.on_tick(data)
def on_order(self, order: dict):
""""""
data = parse_order(order)
# Filter duplicated order data push after reconnect
last_order = self.orders.get(data.vt_orderid, None)
if (
last_order
and data.traded == last_order.traded
and data.status == last_order.status
):
return
self.orders[data.vt_orderid] = data
self.gateway.on_order(data)
def on_trade(self, trade: dict):
""""""
data = parse_trade(trade)
# Filter duplicated trade data push after reconnect
if data.vt_tradeid in self.trades:
return
self.trades[data.vt_tradeid] = data
self.gateway.on_trade(data)
def on_log(self, log: dict):
data = parse_log(log)
self.gateway.on_log(data)
def on_login(self, data: dict):
""""""
if data["status"]:
self.gateway.query_all()
self.gateway.write_log("服务器登录成功")
else:
self.gateway.write_log("服务器登录失败")
def on_disconnected(self, reason: str):
""""""
self.gateway.write_log(reason)
def on_all_contracts(self, contracts: Sequence[dict]):
""""""
for data in contracts:
for settle_type in ("T0", "T1"):
contract = parse_contract(data, settle_type)
contract.gateway_name = self.gateway_name
self.gateway.on_contract(contract)
self.gateway.write_log("合约信息查询成功")
def on_all_orders(self, orders: Sequence[dict]):
""""""
for data in orders:
order = parse_order(data)
order.gateway_name = self.gateway_name
self.gateway.on_order(order)
self.gateway.write_log("委托信息查询成功")
def on_all_trades(self, trades: Sequence[dict]):
""""""
for data in trades:
trade = parse_trade(data)
trade.gateway_name = self.gateway_name
self.gateway.on_trade(trade)
self.gateway.write_log("成交信息查询成功")
def on_auth(self, status: bool):
""""""
if status:
self.gateway.write_log("服务器授权验证成功")
else:
self.gateway.write_log("服务器授权验证失败")
def parse_tick(data: dict) -> TickData:
"""
Convert json received from API to TickData object.
XBond Depth Data Notice:
1. Bid/Ask1 are public best price.
2. Bid/Ask2-6 are private price data.
"""
tick = TickData(
symbol=f"{data['symbol']}_{data['settle_type']}",
exchange=enum_decode(data["exchange"]),
datetime=parse_datetime(data["datetime"]),
name=data["name"],
volume=float(data["volume"]),
last_price=float(data["last_price"]),
open_price=float(data["open_price"]),
high_price=float(data["high_price"]),
low_price=float(data["low_price"]),
pre_close=float(data["pre_close"]),
bid_price_1=float(data["bid_price_2"]),
bid_price_2=float(data["bid_price_3"]),
bid_price_3=float(data["bid_price_4"]),
bid_price_4=float(data["bid_price_5"]),
bid_price_5=float(data["bid_price_6"]),
ask_price_1=float(data["ask_price_2"]),
ask_price_2=float(data["ask_price_3"]),
ask_price_3=float(data["ask_price_4"]),
ask_price_4=float(data["ask_price_5"]),
ask_price_5=float(data["ask_price_6"]),
bid_volume_1=float(data["bid_volume_2"]),
bid_volume_2=float(data["bid_volume_3"]),
bid_volume_3=float(data["bid_volume_4"]),
bid_volume_4=float(data["bid_volume_5"]),
bid_volume_5=float(data["bid_volume_6"]),
ask_volume_1=float(data["ask_volume_2"]),
ask_volume_2=float(data["ask_volume_3"]),
ask_volume_3=float(data["ask_volume_4"]),
ask_volume_4=float(data["ask_volume_5"]),
ask_volume_5=float(data["ask_volume_6"]),
gateway_name=data["gateway_name"]
)
tick.public_bid_price = float(data["bid_price_1"])
tick.public_ask_price = float(data["ask_price_1"])
tick.public_bid_volume = float(data["bid_volume_1"])
tick.public_ask_volume = float(data["ask_volume_1"])
return tick
def parse_order(data: dict) -> OrderData:
"""
Convert json received from API to OrderData object.
"""
order = OrderData(
symbol=f"{data['symbol']}_{data['settle_type']}",
exchange=enum_decode(data["exchange"]),
orderid=data["orderid"],
type=enum_decode(data["type"]),
direction=enum_decode(data["direction"]),
offset=Offset.NONE,
price=float(data["price"]),
volume=float(data["volume"]),
traded=float(data["traded"]),
status=enum_decode(data["status"]),
datetime=generate_datetime(data["time"]),
gateway_name=data["gateway_name"]
)
return order
def parse_trade(data: dict) -> TradeData:
"""
Convert json received from API to TradeData object.
"""
trade = TradeData(
symbol=f"{data['symbol']}_{data['settle_type']}",
exchange=enum_decode(data["exchange"]),
orderid=data["orderid"],
tradeid=data["tradeid"],
direction=enum_decode(data["direction"]),
offset=Offset.NONE,
price=float(data["price"]),
volume=float(data["volume"]),
datetime=generate_datetime(data["time"]),
gateway_name=data["gateway_name"]
)
return trade
def parse_contract(data: dict, settle_type: str) -> ContractData:
"""
Convert json received from API to ContractData object.
"""
contract = ContractData(
symbol=f"{data['symbol']}_{settle_type}",
exchange=enum_decode(data["exchange"]),
name=data["name"],
product=enum_decode(data["product"]),
size=int(data["size"]),
pricetick=float(data["pricetick"]),
min_volume=float(data["min_volume"]),
gateway_name=data["gateway_name"]
)
return contract
def parse_log(data: dict) -> LogData:
"""
    Convert json received from API to LogData object.
"""
log = LogData(
msg=data["msg"],
level=data["level"],
gateway_name=data["gateway_name"]
)
log.time = parse_datetime(data["time"])
return log
def parse_datetime(s: str) -> datetime:
if "." in s:
dt = datetime.strptime(s, "%Y%m%d %H:%M:%S.%f")
elif len(s) > 0:
dt = datetime.strptime(s, "%Y%m%d %H:%M:%S")
else:
dt = datetime.now()
dt = CHINA_TZ.localize(dt)
return dt
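# Examples (hypothetical timestamps): "20210908 10:30:15.500000" matches the
# microsecond format, "20210908 10:30:15" the plain format, and an empty string
# falls back to the current local time; every result is localized to CHINA_TZ.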
def enum_decode(s: str) -> Optional[Enum]:
"""
Convert string into vn.py constant enum.
"""
if "." in s:
name, member = s.split(".")
return getattr(VN_ENUMS[name], member)
else:
return None
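# Example (hypothetical, assuming vn.py constants): enum_decode("Direction.LONG") returns
# Direction.LONG, while enum_decode("LONG") returns None because the string carries no
# "EnumName.MEMBER" separator.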
def vn_encode(obj: object) -> Union[str, dict]:
"""
Convert vn.py object into json format.
"""
if type(obj) in VN_ENUMS.values():
return str(obj)
else:
s = {}
for (k, v) in obj.__dict__.items():
if type(v) in VN_ENUMS.values():
s[k] = vn_encode(v)
else:
s[k] = str(v)
return s
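# Round-trip sketch (hypothetical request): vn_encode() turns a SubscribeRequest into a
# dict of strings such as {"symbol": "180406_T0", "exchange": "Exchange.CFETS"}, and
# enum_decode() can map the "Exchange.CFETS" string back to the Exchange.CFETS constant.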
def generate_datetime(time: str) -> datetime:
""""""
today = datetime.now().strftime("%Y%m%d")
timestamp = f"{today} {time}"
dt = parse_datetime(timestamp)
return dt
|
niftynet/layer/rand_flip.py
|
elias-1/NiftyNet
| 1,403 |
95131
|
<filename>niftynet/layer/rand_flip.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import warnings
import numpy as np
from niftynet.layer.base_layer import RandomisedLayer
warnings.simplefilter("ignore", UserWarning)
warnings.simplefilter("ignore", RuntimeWarning)
class RandomFlipLayer(RandomisedLayer):
"""
Add a random flipping layer as pre-processing.
"""
def __init__(self,
flip_axes,
flip_probability=0.5,
name='random_flip'):
"""
:param flip_axes: a list of indices over which to flip
:param flip_probability: the probability of performing the flip
(default = 0.5)
:param name:
"""
super(RandomFlipLayer, self).__init__(name=name)
self._flip_axes = flip_axes
self._flip_probability = flip_probability
self._rand_flip = None
def randomise(self, spatial_rank=3):
spatial_rank = int(np.floor(spatial_rank))
self._rand_flip = np.random.random(
size=spatial_rank) < self._flip_probability
def _apply_transformation(self, image):
        assert self._rand_flip is not None, "randomise() must be called before applying the flip"
for axis_number, do_flip in enumerate(self._rand_flip):
if axis_number in self._flip_axes and do_flip:
image = np.flip(image, axis=axis_number)
return image
def layer_op(self, inputs, interp_orders=None, *args, **kwargs):
if inputs is None:
return inputs
if isinstance(inputs, dict) and isinstance(interp_orders, dict):
for (field, image_data) in inputs.items():
assert (all([i < 0 for i in interp_orders[field]]) or
all([i >= 0 for i in interp_orders[field]])), \
'Cannot combine interpolatable and non-interpolatable data'
                if interp_orders[field][0] < 0:
continue
inputs[field] = self._apply_transformation(image_data)
else:
inputs = self._apply_transformation(inputs)
return inputs
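# Minimal usage sketch (hypothetical shapes, not part of the original module):
#   flip_layer = RandomFlipLayer(flip_axes=[0, 1], flip_probability=0.5)
#   flip_layer.randomise(spatial_rank=3)
#   flipped = flip_layer.layer_op(volume)  # volume: e.g. a (H, W, D) numpy array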
|
tests/toranj/test-704-multi-radio-scan.py
|
AdityaHPatwardhan/openthread
| 2,962 |
95140
|
<gh_stars>1000+
#!/usr/bin/env python3
#
# Copyright (c) 2020, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from wpan import verify
import wpan
# -----------------------------------------------------------------------------------------------------------------------
# Test description: Test active scan with nodes supporting different radios
#
test_name = __file__[:-3] if __file__.endswith('.py') else __file__
print('-' * 120)
print('Starting \'{}\''.format(test_name))
# -----------------------------------------------------------------------------------------------------------------------
# Creating `wpan.Nodes` instances
speedup = 1
wpan.Node.set_time_speedup_factor(speedup)
n1 = wpan.Node(wpan.NODE_15_4)
n2 = wpan.Node(wpan.NODE_TREL)
n3 = wpan.Node(wpan.NODE_15_4_TREL)
s1 = wpan.Node(wpan.NODE_15_4)
s2 = wpan.Node(wpan.NODE_TREL)
s3 = wpan.Node(wpan.NODE_15_4_TREL)
# -----------------------------------------------------------------------------------------------------------------------
# Init all nodes
wpan.Node.init_all_nodes()
# -----------------------------------------------------------------------------------------------------------------------
# Build network topology
n1.form("n1", channel='20')
n2.form("n2", channel='21')
n3.form("n3", channel='22')
# -----------------------------------------------------------------------------------------------------------------------
# Test implementation
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Scan by scanner nodes (no network)
# Scan by s1 (15.4 only), expect to see n1(15.4) and n3(15.4+trel)
result = wpan.parse_scan_result(s1.active_scan())
verify(n1.is_in_scan_result(result))
verify(not n2.is_in_scan_result(result))
verify(n3.is_in_scan_result(result))
# Scan by s2 (trel only), expect to see n2(trel) and n3(15.4+trel)
result = wpan.parse_scan_result(s2.active_scan())
verify(not n1.is_in_scan_result(result))
verify(n2.is_in_scan_result(result))
verify(n3.is_in_scan_result(result))
# Scan by s3 (trel+15.4), expect to see all nodes
result = wpan.parse_scan_result(s3.active_scan())
verify(n1.is_in_scan_result(result))
verify(n2.is_in_scan_result(result))
verify(n3.is_in_scan_result(result))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Scan by the nodes
# Scan by n1 (15.4 only), expect to see only n3(15.4+trel)
result = wpan.parse_scan_result(n1.active_scan())
verify(not n1.is_in_scan_result(result))
verify(not n2.is_in_scan_result(result))
verify(n3.is_in_scan_result(result))
# Scan by n2 (trel only), expect to see only n3(15.4+trel)
result = wpan.parse_scan_result(n2.active_scan())
verify(not n1.is_in_scan_result(result))
verify(not n2.is_in_scan_result(result))
verify(n3.is_in_scan_result(result))
# Scan by n3 (15.4+trel), expect to see n1(15.4) and n2(trel)
result = wpan.parse_scan_result(n3.active_scan())
verify(n1.is_in_scan_result(result))
verify(n2.is_in_scan_result(result))
verify(not n3.is_in_scan_result(result))
# -----------------------------------------------------------------------------------------------------------------------
# Test finished
wpan.Node.finalize_all_nodes()
print('\'{}\' passed.'.format(test_name))
|
test/test_registry.py
|
wangjunyan305/homura
| 102 |
95250
|
<reponame>wangjunyan305/homura<gh_stars>100-1000
import pytest
from homura.register import Registry
def test_registry():
MODEL_REGISTRY = Registry('model')
MODEL_REGISTRY2 = Registry('model')
assert MODEL_REGISTRY is MODEL_REGISTRY2
@MODEL_REGISTRY.register
def something():
return 1
@MODEL_REGISTRY.register
def anything():
return 2
assert MODEL_REGISTRY('something')() == 1
with pytest.raises(KeyError):
@MODEL_REGISTRY.register
def something():
pass
|
tests/io/inputs/test_token_parser.py
|
Ivoz/cleo
| 859 |
95309
|
import pytest
from cleo.io.inputs.token_parser import TokenParser
@pytest.mark.parametrize(
"string, tokens",
[
("", []),
("foo", ["foo"]),
(" foo bar ", ["foo", "bar"]),
('"quoted"', ["quoted"]),
("'quoted'", ["quoted"]),
("'a\rb\nc\td'", ["a\rb\nc\td"]),
("'a'\r'b'\n'c'\t'd'", ["a", "b", "c", "d"]),
("\"quoted 'twice'\"", ["quoted 'twice'"]),
("'quoted \"twice\"'", ['quoted "twice"']),
("\\'escaped\\'", ["'escaped'"]),
('\\"escaped\\"', ['"escaped"']),
("\\'escaped more\\'", ["'escaped", "more'"]),
('\\"escaped more\\"', ['"escaped', 'more"']),
("-a", ["-a"]),
("-azc", ["-azc"]),
("-awithavalue", ["-awithavalue"]),
('-a"foo bar"', ["-afoo bar"]),
('-a"foo bar""foo bar"', ["-afoo barfoo bar"]),
("-a'foo bar'", ["-afoo bar"]),
("-a'foo bar''foo bar'", ["-afoo barfoo bar"]),
("-a'foo bar'\"foo bar\"", ["-afoo barfoo bar"]),
("--long-option", ["--long-option"]),
("--long-option=foo", ["--long-option=foo"]),
('--long-option="foo bar"', ["--long-option=foo bar"]),
('--long-option="foo bar""another"', ["--long-option=foo baranother"]),
("--long-option='foo bar'", ["--long-option=foo bar"]),
("--long-option='foo bar''another'", ["--long-option=foo baranother"]),
("--long-option='foo bar'\"another\"", ["--long-option=foo baranother"]),
("foo -a -ffoo --long bar", ["foo", "-a", "-ffoo", "--long", "bar"]),
("\\' \\\"", ["'", '"']),
],
)
def test_create(string, tokens):
assert TokenParser().parse(string) == tokens
|
test/modules/losses/test_commitment.py
|
facebookresearch/multimodal
| 128 |
95312
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from test.test_utils import assert_expected
from torchmultimodal.modules.losses.vqvae import CommitmentLoss
class TestCommitment(unittest.TestCase):
"""
Test the Commitment Loss
"""
def setUp(self):
self.quantized = torch.Tensor([[-1, 0, 1], [2, 1, 0]])
self.encoded = torch.Tensor([[-2, -1, 0], [0, 2, -2]])
self.commitment = CommitmentLoss()
def test_loss_value(self):
loss = self.commitment(self.quantized, self.encoded)
actual = loss.item()
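        # With the default commitment cost (assumed 1.0), the loss reduces to the MSE of the
        # two tensors: differences [1, 1, 1, 2, -1, 2] -> squares [1, 1, 1, 4, 1, 4] -> mean 2.0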
expected = 2.0
assert_expected(actual, expected)
|
silver/views.py
|
DocTocToc/silver
| 222 |
95320
|
<gh_stars>100-1000
# Copyright (c) 2016 Presslabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import operator
import six.moves.urllib.request
import six.moves.urllib.parse
import six.moves.urllib.error
from six.moves import reduce
from furl import furl
from dal import autocomplete
from django.contrib.auth.decorators import login_required
from django.db.models.functions import Concat
from django.db.models import Q, F, Value
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.utils import timezone
from django.views.decorators.csrf import csrf_exempt
from silver.models.plans import Plan
from silver.models.billing_entities import Customer, Provider
from silver.models.payment_methods import PaymentMethod
from silver.models.transactions import Transaction
from silver.models.documents import Proforma, Invoice
from silver.models.transactions.codes import FAIL_CODES
from silver.payment_processors import get_instance
from silver.utils.decorators import get_transaction_from_token
@login_required
def proforma_pdf(request, proforma_id):
proforma = get_object_or_404(Proforma, id=proforma_id)
return HttpResponseRedirect(proforma.pdf.url)
@login_required
def invoice_pdf(request, invoice_id):
invoice = get_object_or_404(Invoice, id=invoice_id)
return HttpResponseRedirect(invoice.pdf.url)
@csrf_exempt
@get_transaction_from_token
def complete_payment_view(request, transaction, expired=None):
if transaction.state == transaction.States.Initial:
payment_processor = get_instance(transaction.payment_processor)
payment_processor.handle_transaction_response(transaction, request)
if 'return_url' in request.GET:
redirect_url = six.moves.urllib.parse.unquote(
furl(request.GET['return_url']).add(
{
'transaction_uuid': transaction.uuid
}
).url
)
return HttpResponseRedirect(redirect_url)
else:
return render(request, 'transactions/complete_payment.html',
{
'transaction': transaction,
'document': transaction.document,
'fail_data': FAIL_CODES.get(transaction.fail_code),
})
@csrf_exempt
@get_transaction_from_token
def pay_transaction_view(request, transaction, expired=None):
if expired:
return render(request, 'transactions/expired_payment.html',
{
'document': transaction.document,
})
if transaction.state != Transaction.States.Initial:
return render(request, 'transactions/complete_payment.html',
{
'transaction': transaction,
'document': transaction.document,
'fail_data': FAIL_CODES.get(transaction.fail_code)
})
payment_processor = transaction.payment_method.get_payment_processor()
view = payment_processor.get_view(transaction, request)
if not view or not transaction.can_be_consumed:
return render(request, 'transactions/expired_payment.html',
{
'document': transaction.document,
})
transaction.last_access = timezone.now()
transaction.save()
try:
return view(request)
except NotImplementedError:
raise Http404
class DocumentAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
if not (self.request.user.is_authenticated and self.request.user.is_staff):
raise Http404
queryset = self.model.objects.all()
if self.q:
q = self.q.rsplit('-')
if len(q) == 2:
query = (Q(series=q[0]) | Q(number=q[1]))
else:
query = (Q(series__istartswith=self.q) |
Q(number__istartswith=self.q) |
Q(customer__first_name__icontains=self.q) |
Q(customer__last_name__icontains=self.q) |
Q(customer__company__icontains=self.q))
queryset = queryset.filter(query)
return queryset
class InvoiceAutocomplete(DocumentAutocomplete):
def __init__(self, **kwargs):
self.model = Invoice
super(InvoiceAutocomplete, self).__init__(**kwargs)
class ProformaAutocomplete(DocumentAutocomplete):
def __init__(self, **kwargs):
self.model = Proforma
super(ProformaAutocomplete, self).__init__(**kwargs)
class PlanAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
if not (self.request.user.is_authenticated and self.request.user.is_staff):
raise Http404
queryset = Plan.objects.exclude(enabled=False)
if self.q:
queryset = queryset.annotate(
name_provider__name__company=Concat(
F("name"), Value(" "), F("provider__name"), Value(" "), F("provider__company")
)
)
terms = self.q.split()
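            # For a hypothetical search "basic acme", the reduce below folds the terms into
            # Q(...icontains="basic") & Q(...icontains="acme"), so every term must match
            # somewhere in the concatenated plan/provider name.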
query = reduce(
operator.and_,
(Q(name_provider__name__company__icontains=term) for term in terms)
)
queryset = queryset.filter(query)
return queryset
class CustomerAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
if not (self.request.user.is_authenticated and self.request.user.is_staff):
raise Http404
queryset = Customer.objects.all()
if self.q:
queryset = queryset.annotate(
first_last_company_name=Concat(
F("first_name"), Value(" "), F("last_name"), Value(" "), F("company")
)
)
terms = self.q.split()
query = reduce(
operator.and_,
(Q(first_last_company_name__icontains=term) for term in terms)
)
queryset = queryset.filter(query)
return queryset
class ProviderAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
if not (self.request.user.is_authenticated and self.request.user.is_staff):
raise Http404
queryset = Provider.objects.all()
if self.q:
queryset = queryset.annotate(
name_company=Concat(
F("name"), Value(" "), F("company")
)
)
terms = self.q.split()
query = reduce(
operator.and_,
(Q(name_company__icontains=term) for term in terms)
)
queryset = queryset.filter(query)
return queryset
class PaymentMethodAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
if not (self.request.user.is_authenticated and self.request.user.is_staff):
raise Http404
queryset = PaymentMethod.objects.exclude(canceled=True)
if self.q:
query = (Q(customer__first_name__istartswith=self.q) |
Q(customer__last_name__istartswith=self.q) |
Q(payment_processor__istartswith=self.q) |
Q(display_info__istartswith=self.q))
queryset = queryset.filter(query)
return queryset
|
src/shared/constants.py
|
CaliberAI/neutralizing-bias
| 169 |
95331
|
<filename>src/shared/constants.py
"""
just a lil global variable so everybody knows whether
we have a gpu or not
"""
import torch
CUDA = (torch.cuda.device_count() > 0)
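# Typical usage sketch (an assumption, not part of the original module):
#   device = torch.device("cuda" if CUDA else "cpu")
#   model.to(device)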
|
support/check_python_prereqs.py
|
kishorerv93/spinnaker-terraform
| 116 |
95365
|
#!/usr/bin/env python
import sys
import imp
import re
def main(argv):
cloud_provider = sys.argv[1]
reqd_module_names_and_versions = {}
reqd_module_names_and_versions['requests'] = '2.2.1'
reqd_module_names_and_versions['json'] = '2.0.9'
reqd_module_names_and_versions['docopt'] = '0.6.2'
if cloud_provider.lower() == 'aws':
reqd_module_names_and_versions['boto'] = '2.38.0'
#elif cloud_provider.lower() == 'gcp':
# reqd_module_names_and_versions['libcloud'] = '0.20.0'
ret_val = 0
for module_name in reqd_module_names_and_versions:
try:
__import__(module_name)
installed_version = str(__import__(module_name).__version__)
installed_version_formatted = int(re.sub(
"\.", '', str(__import__(module_name).__version__)))
reqd_version = int(re.sub(
"\.", '', reqd_module_names_and_versions[module_name]))
if installed_version_formatted < reqd_version:
print "ERROR: Module " + module_name + " is not of high enough version. You need: v" + reqd_module_names_and_versions[module_name] + ", you have: " + installed_version
ret_val = 1
except ImportError:
print "ERROR: Could not import required python module '" + module_name + "'. Please install it with pip."
ret_val = 1
sys.exit(ret_val)
if __name__ == "__main__":
main(sys.argv)
|
MicropolisCore/src/pyMicropolis/micropolisEngine/micropolisdrawingarea.py
|
mura/micropolis
| 775 |
95379
|
<reponame>mura/micropolis<gh_stars>100-1000
# micropolisdrawingarea.py
#
# Micropolis, Unix Version. This game was released for the Unix platform
# in or about 1990 and has been modified for inclusion in the One Laptop
# Per Child program. Copyright (C) 1989 - 2007 Electronic Arts Inc. If
# you need assistance with this program, you may contact:
# http://wiki.laptop.org/go/Micropolis or email <EMAIL>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details. You should have received a
# copy of the GNU General Public License along with this program. If
# not, see <http://www.gnu.org/licenses/>.
#
# ADDITIONAL TERMS per GNU GPL Section 7
#
# No trademark or publicity rights are granted. This license does NOT
# give you any right, title or interest in the trademark SimCity or any
# other Electronic Arts trademark. You may not distribute any
# modification of this program using the trademark SimCity or claim any
# affliation or association with Electronic Arts Inc. or its employees.
#
# Any propagation or conveyance of this program must include this
# copyright notice and these terms.
#
# If you convey this program (or any modifications of it) and assume
# contractual liability for the program to recipients of it, you agree
# to indemnify Electronic Arts for any liability that those contractual
# assumptions impose on Electronic Arts.
#
# You may not misrepresent the origins of this program; modified
# versions of the program must be marked as such and not identified as
# the original program.
#
# This disclaimer supplements the one included in the General Public
# License. TO THE FULLEST EXTENT PERMISSIBLE UNDER APPLICABLE LAW, THIS
# PROGRAM IS PROVIDED TO YOU "AS IS," WITH ALL FAULTS, WITHOUT WARRANTY
# OF ANY KIND, AND YOUR USE IS AT YOUR SOLE RISK. THE ENTIRE RISK OF
# SATISFACTORY QUALITY AND PERFORMANCE RESIDES WITH YOU. ELECTRONIC ARTS
# DISCLAIMS ANY AND ALL EXPRESS, IMPLIED OR STATUTORY WARRANTIES,
# INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY, SATISFACTORY QUALITY,
# FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT OF THIRD PARTY
# RIGHTS, AND WARRANTIES (IF ANY) ARISING FROM A COURSE OF DEALING,
# USAGE, OR TRADE PRACTICE. ELECTRONIC ARTS DOES NOT WARRANT AGAINST
# INTERFERENCE WITH YOUR ENJOYMENT OF THE PROGRAM; THAT THE PROGRAM WILL
# MEET YOUR REQUIREMENTS; THAT OPERATION OF THE PROGRAM WILL BE
# UNINTERRUPTED OR ERROR-FREE, OR THAT THE PROGRAM WILL BE COMPATIBLE
# WITH THIRD PARTY SOFTWARE OR THAT ANY ERRORS IN THE PROGRAM WILL BE
# CORRECTED. NO ORAL OR WRITTEN ADVICE PROVIDED BY ELECTRONIC ARTS OR
# ANY AUTHORIZED REPRESENTATIVE SHALL CREATE A WARRANTY. SOME
# JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF OR LIMITATIONS ON IMPLIED
# WARRANTIES OR THE LIMITATIONS ON THE APPLICABLE STATUTORY RIGHTS OF A
# CONSUMER, SO SOME OR ALL OF THE ABOVE EXCLUSIONS AND LIMITATIONS MAY
# NOT APPLY TO YOU.
########################################################################
# Micropolis Drawing Area
# <NAME>
########################################################################
# Import stuff
import sys
import os
import time
import gtk
import gobject
import cairo
import pango
import math
import thread
import random
import array
########################################################################
# Import our modules
import micropolisengine
import micropolispiemenus
from pyMicropolis.tileEngine import tileengine, tiledrawingarea
import micropolistool
########################################################################
# Globals
# @todo This should go through some kind of a resource manager.
Sprites = [
{
'id': 1,
'name': 'train',
'frames': 5,
},
{
'id': 2,
'name': 'helicopter',
'frames': 8,
},
{
'id': 3,
'name': 'airplane',
'frames': 11,
},
{
'id': 4,
'name': 'boat',
'frames': 8,
},
{
'id': 5,
'name': 'monster',
'frames': 16,
},
{
'id': 6,
'name': 'tornado',
'frames': 3,
},
{
'id': 7,
'name': 'explosion',
'frames': 6,
},
{
'id': 8,
'name': 'bus',
'frames': 4,
},
]
for spriteData in Sprites:
images = []
spriteData['images'] = images
for i in range(0, spriteData['frames']):
fileName = 'images/micropolisEngine/obj%d-%d.png' % (
spriteData['id'],
i,
)
fileName = os.path.join(os.path.dirname(__file__), "../.." , fileName)
fileName = os.path.abspath(fileName)
image = cairo.ImageSurface.create_from_png(fileName)
images.append(image)
########################################################################
# Utilities
def PRINT(*args):
print args
########################################################################
class MicropolisDrawingArea(tiledrawingarea.TileDrawingArea):
def __init__(
self,
engine=None,
interests=('city', 'tick'),
sprite=micropolisengine.SPRITE_NOTUSED,
showData=True,
showRobots=True,
showSprites=True,
showChalk=True,
mapStyle='all',
overlayAlpha=0.5,
engaged=True,
**args):
args['tileCount'] = micropolisengine.TILE_COUNT
args['sourceTileSize'] = micropolisengine.BITS_PER_TILE
args['worldCols'] = micropolisengine.WORLD_W
args['worldRows'] = micropolisengine.WORLD_H
self.engine = engine
self.showData = showData
self.showRobots = showRobots
self.showSprites = showSprites
self.showChalk = showChalk
self.mapStyle = mapStyle
self.overlayAlpha = overlayAlpha
self.engaged = engaged
tiledrawingarea.TileDrawingArea.__init__(self, **args)
self.sprite = sprite
engine.expressInterest(
self,
interests)
engine.addView(self)
self.blinkFlag = True
self.reset()
def update(self, name, *args):
#print "MicropolisDrawingArea update", self, name, args
self.queue_draw()
def makeTileMap(self):
tiledrawingarea.TileDrawingArea.makeTileMap(self)
if False:
# Remap some of the tiles so we can see them for debugging.
self.tileMap[micropolisengine.REDGE] = micropolisengine.FIRE
self.tileMap[micropolisengine.CHANNEL] = micropolisengine.RADTILE
def reset(self):
self.selectToolByName('Bulldozer')
def configTileEngine(self, tengine):
engine = self.engine
buffer = engine.getMapBuffer()
#print "Map buffer", buffer
tengine.setBuffer(buffer)
tengine.width = micropolisengine.WORLD_W
tengine.height = micropolisengine.WORLD_H
from micropolisengine import ZONEBIT, PWRBIT, ALLBITS, LIGHTNINGBOLT
def tileFunction(col, row, tile):
if (tile & ZONEBIT) and not (tile & PWRBIT) and random.random() < 0.5:
tile = LIGHTNINGBOLT | (tile & ALLBITS)
return tile
self.tileFunction = tileFunction
# Unsigned short tile values, in column major order.
tengine.tileFormat = tileengine.TILE_FORMAT_SHORT_UNSIGNED
tengine.colBytes = micropolisengine.BYTES_PER_TILE * micropolisengine.WORLD_H
tengine.rowBytes = micropolisengine.BYTES_PER_TILE
tengine.tileMask = micropolisengine.LOMASK
def getCell(self, col, row):
return self.engine.getTile(col, row)
def beforeDraw(
self):
engine = self.engine
self.blinkFlag = (engine.tickCount() % 60) < 30
def drawOverlays(
self,
ctx):
if self.showData:
self.drawData(ctx)
if self.showRobots:
self.drawRobots(ctx)
if self.showSprites:
self.drawSprites(ctx)
if self.showChalk:
self.drawChalk(ctx)
if self.showCursor:
tool = self.getActiveTool()
if tool:
tool.drawCursor(self, ctx)
def setMapStyle(self, mapStyle):
self.mapStyle = mapStyle
def drawData(self, ctx):
mapStyle = self.mapStyle
engine = self.engine
dataImage, dataAlpha, width, height = \
engine.getDataImageAlphaSize(mapStyle)
if not dataImage:
return
width = 1.0 / width
height = 1.0 / height
ctx.save()
tileSize = self.tileSize
ctx.translate(self.panX, self.panY)
ctx.scale(
self.worldCols * tileSize,
self.worldRows * tileSize)
ctx.rectangle(0, 0, 1, 1)
ctx.clip()
imageWidth = dataImage.get_width()
imageHeight = dataImage.get_height()
ctx.scale(
width / imageWidth,
height / imageHeight)
ctx.set_source_surface(
dataImage,
0,
0)
ctx.paint_with_alpha(dataAlpha)
ctx.restore()
def drawSprites(self, ctx):
engine = self.engine
sprite = engine.spriteList
while True:
if not sprite:
break
self.drawSprite(ctx, sprite)
sprite = sprite.next
def drawSprite(self, ctx, sprite):
spriteType = sprite.type
spriteFrame = sprite.frame
if (spriteFrame == 0 or
spriteType == micropolisengine.SPRITE_NOTUSED or
spriteType >= micropolisengine.SPRITE_COUNT):
return
ctx.save()
x = sprite.x
y = sprite.y
width = sprite.width
height = sprite.height
tileSize = self.tileSize
ctx.translate(self.panX, self.panY)
ctx.scale(tileSize / 16.0, tileSize / 16.0)
ctx.translate(x + sprite.xOffset, y + sprite.yOffset)
image = Sprites[spriteType - 1]['images'][spriteFrame - 1]
ctx.set_source_surface(
image,
0,
0)
#ctx.rectangle(0, 0, 1, 1)
ctx.paint()
ctx.restore()
def drawRobots(self, ctx):
engine = self.engine
robots = engine.robots
if not robots:
return
ctx.save()
tileSize = self.tileSize
ctx.translate(self.panX, self.panY)
ctx.scale(tileSize / 16.0, tileSize / 16.0)
for robot in robots:
robot.draw(ctx)
ctx.restore()
def drawChalk(self, ctx):
pass # TODO: drawChalk
def tickEngine(self):
# Don't do anything! The engine ticks itself.
return
def makePie(self):
pie = micropolispiemenus.MakePie(lambda toolName: self.selectToolByName(toolName))
self.pie = pie
def handleButtonPress(
self,
widget,
event):
self.handlePieButtonPress(
widget,
event)
def handleKey(
self,
key):
if key == 'm':
self.engine.heatSteps = 1
self.engine.heatRule = 0
return True
elif key == 'n':
self.engine.heatSteps = 1
self.engine.heatRule = 1
return True
elif key == 'o':
self.engine.heatSteps = 0
return True
return False
def engage(self):
self.engaged = True
def disengage(self):
self.engaged = False
########################################################################
class EditableMicropolisDrawingArea(MicropolisDrawingArea):
pass
########################################################################
class NoticeMicropolisDrawingArea(MicropolisDrawingArea):
def __init__(
self,
follow=None,
centerOnTileHandler=None,
**args):
args['keyable'] = False
args['clickable'] = False
args['zoomable'] = False
args['pannable'] = False
args['menuable'] = False
args['showCursor'] = False
args['scale'] = 2
MicropolisDrawingArea.__init__(self, **args)
self.follow = follow
self.centerOnTileHandler = centerOnTileHandler
def handleMouseHover(
self,
event):
pass
def handleButtonPress(
self,
widget,
event):
centerOnTileHandler = self.centerOnTileHandler
if centerOnTileHandler:
centerX, centerY = self.getCenterTile()
centerOnTileHandler(centerX, centerY)
def handleMouseDrag(
self,
event):
pass
def handleButtonRelease(
self,
widget,
event):
pass
def handleMouseScroll(
self,
widget,
event):
pass
def beforeDraw(
self):
MicropolisDrawingArea.beforeDraw(self)
engine = self.engine
self.blinkFlag = (engine.tickCount() % 30) < 15
sprite = self.sprite
if sprite != micropolisengine.SPRITE_NOTUSED:
s = engine.getSprite(sprite)
if s:
fudge = 8
x = ((s.x + s.xHot + fudge) / 16.0)
y = ((s.y + s.yHot + fudge) / 16.0)
self.centerOnTile(x, y)
########################################################################
class NavigationMicropolisDrawingArea(MicropolisDrawingArea):
def __init__(
self,
**args):
args['keyable'] = False
args['clickable'] = False
args['zoomable'] = False
args['pannable'] = False
args['menuable'] = False
args['showCursor'] = False
args['showRobots'] = False
args['showSprites'] = False
args['scale'] = 1.0 / micropolisengine.EDITOR_TILE_SIZE
args['overlayAlpha'] = 0.8
MicropolisDrawingArea.__init__(self, **args)
self.currentView = None
self.panning = False
self.panningView = None
self.panningStartCursorX = 0
self.panningStartCursorY = 0
self.panningStartPanX = 0
self.panningStartPanY = 0
def drawOverlays(self, ctx):
MicropolisDrawingArea.drawOverlays(self, ctx)
self.drawOtherViews(ctx)
def getViewBox(self, view):
viewRect = view.get_allocation()
viewWidth = viewRect.width
viewHeight = viewRect.height
tileSize = self.tileSize
# @todo Validate the view.tileSize before using it. View might not be drawn yet, and we get the wrong size.
viewTileSize = view.tileSize
viewScale = float(tileSize) / float(viewTileSize)
x = self.panX - (view.panX * viewScale)
y = self.panY - (view.panY * viewScale)
width = viewWidth * viewScale
height = viewHeight * viewScale
#print "GETVIEWBOX", "view", view, "pan", view.panX, view.panY, "tileSize", view.tileSize, "pos", x, y, "size", width, height
return x, y, width, height
def drawOtherViews(self, ctx):
if self.panning:
currentView = self.panningView
else:
currentView = self.currentView
views = self.engine.views
#print "drawOtherViews", views
for view in views:
if not view.pannable:
continue
x, y, width, height = self.getViewBox(view)
if view == currentView:
pad = 4
ctx.rectangle(
x - pad,
y - pad,
width + (pad * 2),
height + (pad * 2))
ctx.set_line_width(
pad * 2)
ctx.set_source_rgb(
0.0,
0.0,
1.0)
ctx.stroke_preserve()
ctx.set_line_width(
pad)
ctx.set_source_rgb(
1.0,
1.0,
0.0)
ctx.stroke()
else:
pad = 2
ctx.rectangle(
x - pad,
y - pad,
width + (pad * 2),
height + (pad * 2))
ctx.set_line_width(
pad * 2)
ctx.set_source_rgb(
1.0,
1.0,
1.0)
ctx.stroke_preserve()
ctx.set_line_width(
pad)
ctx.set_source_rgb(
0.0,
0.0,
0.0)
ctx.stroke()
def getCursorPosition(
self,
event):
if not event:
x, y, state = self.window.get_pointer()
elif (hasattr(event, 'is_hint') and
event.is_hint):
x, y, state = event.window.get_pointer()
else:
x = event.x
y = event.y
state = event.state
return x, y
def handleMouseHover(
self,
event):
x, y = self.getCursorPosition(event)
views = self.engine.views
found = []
for view in views:
if not view.pannable:
continue
viewX, viewY, viewWidth, viewHeight = self.getViewBox(view)
if ((x >= viewX) and
(x < (viewX + viewWidth)) and
(y >= viewY) and
(y < (viewY + viewHeight))):
found.append(view)
if found:
self.currentView = found[-1]
else:
self.currentView = None
def handleButtonPress(
self,
widget,
event):
if not self.currentView:
self.panning = False
self.down = False
return
x, y = self.getCursorPosition(event)
view = self.currentView
self.down = True
self.panning = True
self.panningView = view
self.panningStartCursorX = x
self.panningStartCursorY = y
self.panningStartPanX = view.panX
self.panningStartPanY = view.panY
def handleMouseDrag(
self,
event):
if not self.panning:
return
x, y = self.getCursorPosition(event)
view = self.panningView
dx = self.panningStartCursorX - x
dy = self.panningStartCursorY - y
scale = view.tileSize / self.tileSize
dx *= scale
dy *= scale
view.panX = self.panningStartPanX + dx
view.panY = self.panningStartPanY + dy
view.updateView()
def handleButtonRelease(
self,
widget,
event):
if not self.panning:
return
self.handleMouseDrag(
event)
self.down = False
self.panning = False
self.panningView = None
def handleMouseScroll(
self,
widget,
event):
view = self.currentView
        if (not view) or (not view.zoomable):
            return
direction = event.direction
if direction == gtk.gdk.SCROLL_UP:
view.changeScale(view.scale * view.scrollWheelZoomScale)
elif direction == gtk.gdk.SCROLL_DOWN:
view.changeScale(view.scale / view.scrollWheelZoomScale)
########################################################################
class PreviewMicropolisDrawingArea(MicropolisDrawingArea):
def __init__(
self,
**args):
args['keyable'] = False
args['clickable'] = True
args['zoomable'] = False
args['pannable'] = False
args['menuable'] = False
args['showCursor'] = False
args['showRobots'] = False
args['showSprites'] = False
args['scale'] = 3.0 / micropolisengine.EDITOR_TILE_SIZE
args['overlayAlpha'] = 0.8
MicropolisDrawingArea.__init__(self, **args)
def handleMouseHover(
self,
event):
pass
def handleButtonPress(
self,
widget,
event):
pass
def handleMouseDrag(
self,
event):
pass
def handleButtonRelease(
self,
widget,
event):
pass
def handleMouseScroll(
self,
widget,
event):
pass
########################################################################
|
sdk/webpubsub/azure-messaging-webpubsubservice/azure/messaging/webpubsubservice/__init__.py
|
rsdoherty/azure-sdk-for-python
| 207 |
95423
|
<reponame>rsdoherty/azure-sdk-for-python
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
__all__ = ["build_authentication_token", "WebPubSubServiceClient"]
from copy import deepcopy
from datetime import datetime, timedelta
from typing import TYPE_CHECKING
import jwt
import six
import azure.core.credentials as corecredentials
import azure.core.pipeline as corepipeline
import azure.core.pipeline.policies as corepolicies
import azure.core.pipeline.transport as coretransport
# Temporary location for types that eventually graduate to Azure Core
from .core import rest as corerest
from ._version import VERSION as _VERSION
from ._policies import JwtCredentialPolicy
from ._utils import UTC as _UTC
if TYPE_CHECKING:
from azure.core.pipeline.policies import HTTPPolicy, SansIOHTTPPolicy
from typing import Any, List, cast, Type, TypeVar
ClientType = TypeVar("ClientType", bound="WebPubSubServiceClient")
def _parse_connection_string(connection_string, **kwargs):
for segment in connection_string.split(";"):
if "=" in segment:
key, value = segment.split("=", maxsplit=1)
key = key.lower()
if key not in ("version", ):
kwargs.setdefault(key, value)
elif segment:
raise ValueError(
"Malformed connection string - expected 'key=value', found segment '{}' in '{}'".format(
segment, connection_string
)
)
if "endpoint" not in kwargs:
raise ValueError("connection_string missing 'endpoint' field")
if "accesskey" not in kwargs:
raise ValueError("connection_string missing 'accesskey' field")
return kwargs
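# Example (hypothetical values): _parse_connection_string(
#     "Endpoint=https://contoso.com;AccessKey=abc123;Version=1.0")
# returns {"endpoint": "https://contoso.com", "accesskey": "abc123"}; the version
# segment is recognised but deliberately dropped.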
def build_authentication_token(endpoint, hub, **kwargs):
"""Build an authentication token for the given endpoint, hub using the provided key.
    :keyword endpoint: connection string or HTTP or HTTPS endpoint for the WebPubSub service instance.
:type endpoint: ~str
:keyword hub: The hub to give access to.
:type hub: ~str
:keyword accesskey: Key to sign the token with. Required if endpoint is not a connection string
:type accesskey: ~str
:keyword ttl: Optional ttl timedelta for the token. Default is 1 hour.
:type ttl: ~datetime.timedelta
:keyword user: Optional user name (subject) for the token. Default is no user.
:type user: ~str
:keyword roles: Roles for the token.
:type roles: typing.List[str]. Default is no roles.
:returns: ~dict containing the web socket endpoint, the token and a url with the generated access token.
:rtype: ~dict
Example:
    >>> build_authentication_token(endpoint='https://contoso.com/api/webpubsub', hub='theHub', accesskey='123')
{
'baseUrl': 'wss://contoso.com/api/webpubsub/client/hubs/theHub',
'token': '<KEY>...',
'url': 'wss://contoso.com/api/webpubsub/client/hubs/theHub?access_token=<KEY>...'
}
"""
if 'accesskey' not in kwargs:
kwargs = _parse_connection_string(endpoint, **kwargs)
endpoint = kwargs.pop('endpoint')
user = kwargs.pop("user", None)
key = kwargs.pop("accesskey")
ttl = kwargs.pop("ttl", timedelta(hours=1))
roles = kwargs.pop("roles", [])
endpoint = endpoint.lower()
if not endpoint.startswith("http://") and not endpoint.startswith("https://"):
raise ValueError(
"Invalid endpoint: '{}' has unknown scheme - expected 'http://' or 'https://'".format(
endpoint
)
)
# Ensure endpoint has no trailing slash
endpoint = endpoint.rstrip("/")
# Switch from http(s) to ws(s) scheme
client_endpoint = "ws" + endpoint[4:]
client_url = "{}/client/hubs/{}".format(client_endpoint, hub)
audience = "{}/client/hubs/{}".format(endpoint, hub)
payload = {
"aud": audience,
"iat": datetime.now(tz=_UTC),
"exp": datetime.now(tz=_UTC) + ttl,
}
if user:
payload["sub"] = user
if roles:
payload["role"] = roles
token = six.ensure_str(jwt.encode(payload, key, algorithm="HS256"))
return {
"baseUrl": client_url,
"token": token,
"url": "{}?access_token={}".format(client_url, token),
}
class WebPubSubServiceClient(object):
def __init__(self, endpoint, credential, **kwargs):
# type: (str, corecredentials.AzureKeyCredential, Any) -> None
"""Create a new WebPubSubServiceClient instance
:param endpoint: Endpoint to connect to.
:type endpoint: ~str
:param credential: Credentials to use to connect to endpoint.
:type credential: ~azure.core.credentials.AzureKeyCredential
:keyword api_version: Api version to use when communicating with the service.
:type api_version: str
:keyword user: User to connect as. Optional.
:type user: ~str
"""
self.endpoint = endpoint.rstrip("/")
transport = kwargs.pop("transport", None) or coretransport.RequestsTransport(
**kwargs
)
kwargs.setdefault(
"sdk_moniker", "messaging-webpubsubservice/{}".format(_VERSION)
)
policies = [
corepolicies.HeadersPolicy(**kwargs),
corepolicies.UserAgentPolicy(**kwargs),
corepolicies.RetryPolicy(**kwargs),
corepolicies.ProxyPolicy(**kwargs),
corepolicies.CustomHookPolicy(**kwargs),
corepolicies.RedirectPolicy(**kwargs),
JwtCredentialPolicy(credential, kwargs.get("user", None)),
corepolicies.NetworkTraceLoggingPolicy(**kwargs),
] # type: Any
self._pipeline = corepipeline.Pipeline(
transport,
policies,
) # type: corepipeline.Pipeline
@classmethod
def from_connection_string(cls, connection_string, **kwargs):
# type: (Type[ClientType], str, Any) -> ClientType
"""Create a new WebPubSubServiceClient from a connection string.
:param connection_string: Connection string
:type connection_string: ~str
:rtype: WebPubSubServiceClient
"""
kwargs = _parse_connection_string(connection_string, **kwargs)
kwargs["credential"] = corecredentials.AzureKeyCredential(
kwargs.pop("accesskey")
)
return cls(**kwargs)
def __repr__(self):
return "<WebPubSubServiceClient> endpoint:'{}'".format(self.endpoint)
def _format_url(self, url):
# type: (str) -> str
assert self.endpoint[-1] != "/", "My endpoint should not have a trailing slash"
return "/".join([self.endpoint, url.lstrip("/")])
def send_request(self, http_request, **kwargs):
# type: (corerest.HttpRequest, Any) -> corerest.HttpResponse
"""Runs the network request through the client's chained policies.
We have helper methods to create requests specific to this service in `azure.messaging.webpubsub.rest`.
Use these helper methods to create the request you pass to this method. See our example below:
>>> from azure.messaging.webpubsub.rest import build_healthapi_get_health_status_request
>>> request = build_healthapi_get_health_status_request(api_version)
<HttpRequest [HEAD], url: '/api/health'>
>>> response = client.send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/llcwiki
For advanced cases, you can also create your own :class:`~azure.messaging.webpubsub.core.rest.HttpRequest`
and pass it in.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.messaging.webpubsub.core.rest.HttpRequest
:keyword bool stream_response: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.messaging.webpubsub.core.rest.HttpResponse
"""
request_copy = deepcopy(http_request)
request_copy.url = self._format_url(request_copy.url)
        # Can't do StreamContextManager yet. This client doesn't have a pipeline client,
        # and StreamContextManager requires a pipeline client. Will look more into it
# if kwargs.pop("stream_response", False):
# return corerest._StreamContextManager(
# client=self._client,
# request=request_copy,
# )
pipeline_response = self._pipeline.run(request_copy._internal_request, **kwargs) # pylint: disable=protected-access
response = corerest.HttpResponse(
status_code=pipeline_response.http_response.status_code,
request=request_copy,
_internal_response=pipeline_response.http_response,
)
response.read()
return response
|
bin/api_connector_splunk/jsl/fields/util.py
|
CyberGRX/api-connector-splunk
| 237 |
95425
|
# coding: utf-8
import re
import sre_constants
from ..roles import Resolvable
def validate_regex(regex):
"""
:param str regex: A regular expression to validate.
:raises: ValueError
"""
try:
re.compile(regex)
except sre_constants.error as e:
raise ValueError('Invalid regular expression: {0}'.format(e))
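# Example: validate_regex(r'\d+') compiles cleanly and returns None, while
# validate_regex('[') raises ValueError because '[' is not a valid regular expression.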
def validate(value_or_var, validator):
if isinstance(value_or_var, Resolvable):
for value in value_or_var.iter_possible_values():
validator(value)
else:
validator(value_or_var)
|
Chapter10/c10_13_fig.py
|
John-ye666/Python-for-Finance-Second-Edition
| 236 |
95447
|
<reponame>John-ye666/Python-for-Finance-Second-Edition
# -*- coding: utf-8 -*-
"""
Name : c10_13_fig.py
Book : Python for Finance (2nd ed.)
Publisher: Packt Publishing Ltd.
Author : <NAME>
Date : 6/6/2017
email : <EMAIL>
<EMAIL>
"""
from scipy import exp,sqrt,stats,arange,ones
from matplotlib import pyplot as plt
import numpy as np
z=0.325
def f(x):
return stats.norm.cdf(x)
x = arange(-3,3,0.1)
y1=f(x)
y2=ones(len(x))*0.5
x3=[0,0]
y3=[0,1]
plt.plot(x,y1)
plt.plot(x, y2, 'b-')
plt.plot(x3,y3)
plt.annotate('f(z)=f('+str(z)+') is '+str(np.round(f(z),4)),xy=(z,f(z)), xytext=(z-3,f(z)), arrowprops=dict(facecolor='red',shrink=0.01))
plt.annotate('z is '+str(z),xy=(z,0),xytext=(1.5,0.3), arrowprops=dict(facecolor='blue',shrink=0.01))
plt.show()
|
gui_cvs/event.py
|
Misaka17032/AidLearning-FrameWork
| 5,245 |
95460
|
<filename>gui_cvs/event.py<gh_stars>1000+
from cvs import *
class MyApp(App):
def __init__(self, *args):
super(MyApp, self).__init__(*args)
def main(self):
container = gui.VBox(width=120, height=100)
self.lbl = gui.Label('Hello world!')
self.bt = gui.Button('Hello name!')
self.bt2 = gui.Button('Hello name surname!')
# setting the listener for the onclick event of the buttons
self.bt.onclick.do(self.on_button_pressed, "Name")
self.bt2.onclick.do(self.on_button_pressed, "Name", "Surname")
# appending a widget to another
container.append(self.lbl)
container.append(self.bt)
container.append(self.bt2)
# returning the root widget
return container
# listener function
def on_button_pressed(self, widget, name='', surname=''):
self.lbl.set_text('Button pressed!')
widget.set_text('Hello ' + name + ' ' + surname)
# starts the gui
initcv(cvs.openwin)
startcv(MyApp)
|
align/pdk/finfet/transistor_array.py
|
pretl/ALIGN-public
| 119 |
95517
|
<reponame>pretl/ALIGN-public
import math
from itertools import cycle, islice
from align.cell_fabric import transformation
from align.schema.transistor import Transistor, TransistorArray
from . import CanvasPDK, MOS
import logging
logger = logging.getLogger(__name__)
logger_func = logger.debug
class MOSGenerator(CanvasPDK):
def __init__(self, *args, **kwargs):
super().__init__()
self.instantiated_cells = []
def addNMOSArray(self, x_cells, y_cells, pattern, vt_type, ports, **parameters):
self.mos_array_temporary_wrapper(x_cells, y_cells, pattern, vt_type, ports, **parameters)
def addPMOSArray(self, x_cells, y_cells, pattern, vt_type, ports, **parameters):
self.mos_array_temporary_wrapper(x_cells, y_cells, pattern, vt_type, ports, **parameters)
def mos_array_temporary_wrapper(self, x_cells, y_cells, pattern, vt_type, ports, **parameters):
logger_func(f'x_cells={x_cells}, y_cells={y_cells}, pattern={pattern}, ports={ports}, parameters={parameters}')
#################################################################################################
# TODO: All of below goes away when TransistorArray is passed to mos_array as shown below
for key in ['M', 'real_inst_type']:
assert key in parameters, f'Missing transistor parameter {key}'
assert 'NF' in parameters or 'STACK' in parameters, 'Missing transistor parameter nf or stack'
if 'STACK' in parameters and int(parameters['STACK']) > 1:
nf = 'STACK'
device_type = 'stack'
elif 'NF' in parameters and int(parameters['NF']) > 1:
nf = 'NF'
device_type = 'parallel'
else:
nf = device_type = None
assert False, f'Either nf>1 or stack>1 parameter should be defined {parameters}'
if 'W' in parameters:
nfin = int(float(parameters['W']) * 1e10) // self.pdk['Fin']['Pitch']
# w in the netlist is the effective total width for a single transistor
nfin = nfin // int(parameters[nf])
elif 'NFIN' in parameters:
nfin = int(parameters['NFIN'])
else:
assert False, f'Either nfin or w parameter should be defined {parameters}'
unit_transistor = Transistor(device_type=device_type,
nf=int(parameters[nf]),
nfin=nfin,
model_name=parameters['real_inst_type'].lower())
def find_ports(p, i):
d = {}
for (k, v) in p.items():
for t in v:
if t[0] == i:
d[t[1]] = k
return d
p1 = find_ports(ports, 'M1')
p = {1: p1}
m = {1: int(parameters['M'])}
p2 = find_ports(ports, 'M2')
if len(p2) > 1:
m[2] = int(parameters['M'])
p[2] = p2
self.transistor_array = TransistorArray(
unit_transistor=unit_transistor,
m=m,
ports=p,
n_rows=y_cells
)
# TODO: All of above goes away when TransistorArray is passed to mos_array as shown below
#################################################################################################
m = 2*int(parameters['M']) if pattern > 0 else int(parameters['M'])
self.n_row, self.n_col = self.validate_array(m, y_cells, x_cells)
logger_func(f'x_cells={self.n_col}, y_cells={self.n_row} after legalization')
if self.n_row * self.n_col != m:
assert False, f'y_cells {self.n_row} by x_cells {self.n_col} not equal to m {m}'
self.ports = ports
self.mos_array()
def mos_array(self):
assert len(self.transistor_array.m) <= 2, f'Arrays of more than 2 devices not supported yet'
if len(self.transistor_array.m) == 1:
is_dual = False
else:
is_dual = True
if 'B' in self.transistor_array.ports[1]:
tap_map = {'B': self.transistor_array.ports[1]['B']}
else:
tap_map = {'B': 'B'}
# Assign M2 tracks to prevent adjacent V2 violation
track_pattern_1 = {'G':[6], 'S':[4], 'D':[2]}
mg = MOS()
tx_a_1 = mg.mos(self.transistor_array.unit_transistor, track_pattern=track_pattern_1)
if is_dual:
track_pattern_2 = {}
if self.transistor_array.ports[2]['G'] == self.transistor_array.ports[1]['G']:
track_pattern_2['G'] = [6]
elif self.transistor_array.ports[2]['G'] == self.transistor_array.ports[1]['S']:
track_pattern_2['G'] = [4]
else:
track_pattern_2['G'] = [5]
if self.transistor_array.ports[2]['S'] == self.transistor_array.ports[1]['S']:
track_pattern_2['S'] = [4]
elif self.transistor_array.ports[2]['S'] == self.transistor_array.ports[1]['D']:
track_pattern_2['S'] = [2]
else:
track_pattern_2['S'] = [3]
if self.transistor_array.ports[2]['D'] == self.transistor_array.ports[1]['D']:
track_pattern_2['D'] = [2]
else:
track_pattern_2['D'] = [1]
# Alternate m2 tracks for device A and device B for improved matching
mg = MOS()
tx_a_2 = mg.mos(self.transistor_array.unit_transistor, track_pattern=track_pattern_2)
mg = MOS()
tx_b_1 = mg.mos(self.transistor_array.unit_transistor, track_pattern=track_pattern_1)
mg = MOS()
tx_b_2 = mg.mos(self.transistor_array.unit_transistor, track_pattern=track_pattern_2)
tg = MOS()
tp = tg.tap(self.transistor_array.unit_transistor)
fill = MOS().fill(1, self.transistor_array.unit_transistor.nfin)
# Define the interleaving array (aka array logic)
if is_dual:
interleave = self.interleave_pattern(self.n_row, self.n_col)
else:
interleave = [1]*(self.n_row*self.n_col)
cnt = 0
cnt_tap = 0
rows = []
for y in range(self.n_row):
# tap row
if y == 0:
row = []
row.append([fill, f't{cnt_tap}', {}, 1])
cnt_tap += 1
for _ in range(self.n_col):
row.append([tp, f't{cnt_tap}', tap_map, 1])
cnt_tap += 1
rows.append(row)
row.append([fill, f't{cnt_tap}', {}, 1])
cnt_tap += 1
row = []
row.append([fill, f't{cnt_tap}', {}, 1])
cnt_tap += 1
for _ in range(self.n_col):
pin_map = self.transistor_array.ports[interleave[cnt]]
flip_x = 1
if not is_dual:
tx = tx_a_1
else:
if interleave[cnt] == 2:
if y % 2 == 0:
tx = tx_b_2
else:
tx = tx_b_1
else:
if y % 2 == 0:
tx = tx_a_1
else:
tx = tx_a_2
row.append([tx, f'm{cnt}', pin_map, flip_x])
cnt += 1
row.append([fill, f't{cnt_tap}', {}, 1])
cnt_tap += 1
rows.append(row)
# Stamp the instances
self.place(rows)
# Route
self.route()
self.terminals = self.removeDuplicates()
def stamp_cell(self, template, instance_name, pin_map, x_offset, y_offset, flip_x):
bbox = template['bbox']
# bounding box as visual aid
t = {'layer': 'Boundary', 'netName': None,
'rect': [bbox[0]+x_offset, bbox[1]+y_offset, bbox[2]+x_offset, bbox[3]+y_offset], 'netType': 'drawing'}
self.terminals.append(t)
if flip_x < 0:
x_offset += bbox[2] - bbox[1]
# append terminals
for term in template['terminals']:
t = {}
r = term['rect'].copy()
if flip_x < 0:
t['rect'] = [x_offset-r[2], r[1]+y_offset, x_offset-r[0], r[3]+y_offset]
else:
t['rect'] = [x_offset+r[0], r[1]+y_offset, x_offset+r[2], r[3]+y_offset]
t['layer'] = term['layer']
t['netName'] = pin_map.get(term['netName'], None)
t['netType'] = term['netType']
self.terminals.append(t)
def place(self, rows):
x_offset = 0
y_offset = 0
x_offset += 0*self.pdk['Poly']['Pitch'] # whitespace for feol rules
for row in rows:
x_offset = 0
for device in row:
[cell, instance_name, pin_map, flip_x] = device
self.stamp_cell(cell, instance_name, pin_map, x_offset, y_offset, flip_x)
x_offset += cell['bbox'][2] - cell['bbox'][0]
y_offset += cell['bbox'][3] - cell['bbox'][1]
x_offset += 0*self.pdk['Poly']['Pitch'] # whitespace for feol rules
self.bbox = transformation.Rect(*[0, 0, x_offset, y_offset])
logger_func(f'bounding box: {self.bbox}')
def route(self):
self.join_wires(self.m1)
self.join_wires(self.m2)
def _stretch_m2_wires():
x_min = self.bbox.urx
x_max = self.bbox.lly
for term in self.terminals:
if term['layer'] == self.m2.layer:
if term['rect'][0] < x_min:
x_min = term['rect'][0]
if term['rect'][2] > x_max:
x_max = term['rect'][2]
for term in self.terminals:
if term['layer'] == self.m2.layer:
if term['rect'][0] > x_min:
term['rect'][0] = x_min
if term['rect'][2] < x_max:
term['rect'][2] = x_max
# M3
self.terminals = self.removeDuplicates(silence_errors=True)
if len(self.rd.opens) > 0:
open_pins = set()
for t in self.rd.opens:
open_pins.add(t[0])
x_mid = (self.bbox.llx + self.bbox.urx)//2
(c_idx, _) = self.m3.clg.inverseBounds(x_mid)
c_idx = c_idx[0] - len(open_pins)//2
def _find_y_bounds(pin, wire):
y_min = self.bbox.ury
y_max = self.bbox.lly
for term in self.terminals:
if term['layer'] == wire.layer and term['netName'] in pin:
if term['rect'][1] < y_min:
y_min = term['rect'][1]
if term['rect'][3] > y_max:
y_max = term['rect'][3]
return y_min, y_max
y_min, y_max = _find_y_bounds(open_pins, self.m2)
for pin in sorted(open_pins):
if len(self.transistor_array.m)==1:
y_min, y_max = _find_y_bounds(pin, self.m2)
(b1, b2) = self.m3.spg.inverseBounds(y_min)
(e1, e2) = self.m3.spg.inverseBounds(y_max)
if b1[0] + 1 == e2[0]:
b1 = (b1[0]-1, b1[1]) # Satisfy min length
self.addWire(self.m3, pin, c_idx, b1, e2)
c_idx +=1
self.drop_via(self.v2)
self.terminals = self.removeDuplicates(silence_errors=True)
if len(self.rd.opens) > 0:
_stretch_m2_wires()
self.drop_via(self.v2)
# Expose pins
for term in self.terminals:
if term['netName'] is not None and term['layer'] in ['M2', 'M3']:
term['netType'] = 'pin'
@staticmethod
def validate_array(m, n_row, n_col):
m = int(m)
n_row = int(n_row)
n_col = int(n_col)
if n_row * n_col == m:
return n_row, n_col
else:
y_sqrt = math.floor(math.sqrt(m))
for y in range(y_sqrt, 0, -1):
if y == 1:
return 1, m
elif m % y == 0:
return y, m//y
@staticmethod
def interleave_pattern(n_row, n_col):
"""
n_col odd:
A B A
B A B
n_col even:
A B A B
B A B A
"""
if n_row * n_col > 1:
assert (n_row * n_col) % 2 == 0, f'Odd number of transistors: {n_row}, {n_col}'
if n_row == 1:
assert n_col >= 2, 'Illegal combination'
lst = []
for y in range(n_row):
if y % 2 == 0:
lst.extend([k for k in islice(cycle([1, 2]), n_col)])
else:
lst.extend([k for k in islice(cycle([2, 1]), n_col)])
return lst
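    # Hedged example (editor's addition): for a 2-row by 3-column array the pattern
    # alternates as described in the docstring above:
    #   MOSGenerator.interleave_pattern(2, 3) -> [1, 2, 1, 2, 1, 2]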
|
assembly/scaffold/views/views.py
|
chermed/assembly
| 176 |
95519
|
# -*- coding: utf-8 -*-
"""
Assembly: %MODULE_NAME%.py
"""
from assembly import (Assembly,
asm,
models,
request,
response,
HTTPError)
# ------------------------------------------------------------------------------
@request.route("/%ROUTE_NAME%/")
class Index(Assembly):
def index(self):
return
|
Funções Analíticas/Virtualenv/Lib/site-packages/setuptools/tests/textwrap.py
|
Leonardo-Maciel/PSO_Maciel
| 1,744 |
95580
|
<reponame>Leonardo-Maciel/PSO_Maciel<gh_stars>1000+
import textwrap
def DALS(s):
"dedent and left-strip"
return textwrap.dedent(s).lstrip()
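# Hedged example (editor's addition): DALS normalises an indented triple-quoted block so it
# can be compared literally, e.g.
#   DALS("""
#       first
#       second
#       """) == "first\nsecond\n"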
|
tests/api/v1/test_fields.py
|
omertuc/CTFd
| 3,592 |
95597
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from CTFd.models import Fields, TeamFieldEntries, Teams, UserFieldEntries, Users
from tests.helpers import (
create_ctfd,
destroy_ctfd,
gen_field,
gen_team,
login_as_user,
register_user,
)
def test_api_custom_fields():
app = create_ctfd()
with app.app_context():
register_user(app)
gen_field(app.db, name="CustomField1")
gen_field(app.db, name="CustomField2")
with login_as_user(app) as user:
r = user.get("/api/v1/configs/fields", json="")
assert r.status_code == 403
with login_as_user(app, name="admin") as admin:
r = admin.get("/api/v1/configs/fields", json="")
resp = r.get_json()
assert resp == {
"success": True,
"data": [
{
"public": True,
"required": True,
"type": "user",
"editable": True,
"id": 1,
"field_type": "text",
"description": "CustomFieldDescription",
"name": "CustomField1",
},
{
"public": True,
"required": True,
"type": "user",
"editable": True,
"id": 2,
"field_type": "text",
"description": "CustomFieldDescription",
"name": "CustomField2",
},
],
}
r = admin.post(
"/api/v1/configs/fields",
json={
"public": True,
"required": True,
"editable": True,
"id": 2,
"type": "user",
"field_type": "text",
"description": "CustomFieldDescription",
"name": "CustomField3",
},
)
assert r.status_code == 200
r = admin.get("/api/v1/configs/fields", json="")
resp = r.get_json()
assert resp == {
"success": True,
"data": [
{
"public": True,
"required": True,
"type": "user",
"editable": True,
"id": 1,
"field_type": "text",
"description": "CustomFieldDescription",
"name": "CustomField1",
},
{
"public": True,
"required": True,
"type": "user",
"editable": True,
"id": 2,
"field_type": "text",
"description": "CustomFieldDescription",
"name": "CustomField2",
},
{
"public": True,
"required": True,
"editable": True,
"id": 3,
"type": "user",
"field_type": "text",
"description": "CustomFieldDescription",
"name": "CustomField3",
},
],
}
r = admin.patch(
"/api/v1/configs/fields/3",
json={
"public": False,
"required": False,
"editable": False,
"id": 4,
"type": "user",
"field_type": "text",
"description": "CustomFieldDescription",
"name": "PatchedCustomField3",
},
)
assert r.status_code == 200
assert r.get_json()["data"] == {
"public": False,
"required": False,
"editable": False,
"id": 3,
"type": "user",
"field_type": "text",
"description": "CustomFieldDescription",
"name": "PatchedCustomField3",
}
r = admin.get("/api/v1/configs/fields/3", json="")
assert r.status_code == 200
assert r.get_json()["data"] == {
"public": False,
"required": False,
"editable": False,
"id": 3,
"type": "user",
"field_type": "text",
"description": "CustomFieldDescription",
"name": "PatchedCustomField3",
}
r = admin.delete("/api/v1/configs/fields/3", json="")
assert r.status_code == 200
r = admin.get("/api/v1/configs/fields/3", json="")
assert r.status_code == 404
destroy_ctfd(app)
def test_api_self_fields_permissions():
app = create_ctfd()
with app.app_context():
gen_field(app.db, name="CustomField1", public=False, editable=False)
gen_field(app.db, name="CustomField2", public=True, editable=True)
with app.test_client() as client:
client.get("/register")
with client.session_transaction() as sess:
data = {
"name": "user",
"email": "<EMAIL>",
"password": "password",
"nonce": sess.get("nonce"),
"fields[1]": "CustomValue1",
"fields[2]": "CustomValue2",
}
r = client.post("/register", data=data)
with client.session_transaction() as sess:
assert sess["id"]
with login_as_user(app) as user, login_as_user(app, name="admin") as admin:
r = user.get("/api/v1/users/me")
resp = r.get_json()
assert resp["data"]["fields"] == [
{
"value": "CustomValue2",
"name": "CustomField2",
"description": "CustomFieldDescription",
"type": "text",
"field_id": 2,
}
]
r = admin.get("/api/v1/users/2")
resp = r.get_json()
assert len(resp["data"]["fields"]) == 2
field = Fields.query.filter_by(id=1).first()
field.public = True
app.db.session.commit()
r = user.get("/api/v1/users/me")
resp = r.get_json()
assert len(resp["data"]["fields"]) == 2
destroy_ctfd(app)
def test_partial_field_update():
app = create_ctfd()
with app.app_context():
register_user(app)
gen_field(app.db, name="CustomField1")
gen_field(app.db, name="CustomField2")
with login_as_user(app) as user:
r = user.patch(
"/api/v1/users/me",
json={
"fields": [
{"field_id": 1, "value": "CustomValue1"},
{"field_id": 2, "value": "CustomValue2"},
]
},
)
assert r.status_code == 200
assert UserFieldEntries.query.count() == 2
r = user.patch(
"/api/v1/users/me",
json={"fields": [{"field_id": 2, "value": "NewCustomValue2"}]},
)
assert r.status_code == 200
assert UserFieldEntries.query.count() == 2
assert (
UserFieldEntries.query.filter_by(field_id=1, user_id=2).first().value
== "CustomValue1"
)
assert (
UserFieldEntries.query.filter_by(field_id=2, user_id=2).first().value
== "NewCustomValue2"
)
with login_as_user(app, name="admin") as admin:
r = admin.patch(
"/api/v1/users/2",
json={"fields": [{"field_id": 2, "value": "AdminNewCustomValue2"}]},
)
assert r.status_code == 200
assert UserFieldEntries.query.count() == 2
assert (
UserFieldEntries.query.filter_by(field_id=1, user_id=2).first().value
== "CustomValue1"
)
assert (
UserFieldEntries.query.filter_by(field_id=2, user_id=2).first().value
== "AdminNewCustomValue2"
)
destroy_ctfd(app)
def test_api_team_self_fields_permissions():
app = create_ctfd(user_mode="teams")
with app.app_context():
register_user(app)
team = gen_team(app.db)
user = Users.query.filter_by(id=2).first()
user.team_id = team.id
app.db.session.commit()
team = Teams.query.filter_by(id=1).first()
team.captain_id = 2
app.db.session.commit()
gen_field(
app.db, name="CustomField1", type="team", public=False, editable=False
)
gen_field(app.db, name="CustomField2", type="team", public=True, editable=True)
app.db.session.add(
TeamFieldEntries(type="team", value="CustomValue1", team_id=1, field_id=1)
)
app.db.session.add(
TeamFieldEntries(type="team", value="CustomValue2", team_id=1, field_id=2)
)
app.db.session.commit()
assert len(team.field_entries) == 2
with login_as_user(app) as user, login_as_user(app, name="admin") as admin:
r = user.get("/api/v1/teams/me")
resp = r.get_json()
assert resp["data"]["fields"] == [
{
"value": "CustomValue2",
"name": "CustomField2",
"description": "CustomFieldDescription",
"type": "text",
"field_id": 2,
}
]
assert len(resp["data"]["fields"]) == 1
# Admin gets data and should see all fields
r = admin.get("/api/v1/teams/1")
resp = r.get_json()
assert len(resp["data"]["fields"]) == 2
r = user.patch(
"/api/v1/teams/me",
json={
"fields": [
{"field_id": 1, "value": "NewCustomValue1"},
{"field_id": 2, "value": "NewCustomValue2"},
]
},
)
assert r.get_json() == {
"success": False,
"errors": {"fields": ["Field 'CustomField1' cannot be editted"]},
}
assert r.status_code == 400
assert (
TeamFieldEntries.query.filter_by(id=1).first().value == "CustomValue1"
)
assert (
TeamFieldEntries.query.filter_by(id=2).first().value == "CustomValue2"
)
# After making the field public the user should see both fields
field = Fields.query.filter_by(id=1).first()
field.public = True
app.db.session.commit()
r = user.get("/api/v1/teams/me")
resp = r.get_json()
assert len(resp["data"]["fields"]) == 2
# Captain should be able to edit their values after it's made editable
field = Fields.query.filter_by(id=1).first()
field.editable = True
app.db.session.commit()
r = user.patch(
"/api/v1/teams/me",
json={
"fields": [
{"field_id": 1, "value": "NewCustomValue1"},
{"field_id": 2, "value": "NewCustomValue2"},
]
},
)
print(r.get_json())
assert r.status_code == 200
assert (
TeamFieldEntries.query.filter_by(id=1).first().value
== "NewCustomValue1"
)
assert (
TeamFieldEntries.query.filter_by(id=2).first().value
== "NewCustomValue2"
)
destroy_ctfd(app)
def test_team_partial_field_update():
app = create_ctfd(user_mode="teams")
with app.app_context():
register_user(app)
team = gen_team(app.db)
user = Users.query.filter_by(id=2).first()
user.team_id = team.id
team = Teams.query.filter_by(id=1).first()
team.captain_id = 2
app.db.session.commit()
gen_field(app.db, name="CustomField1", type="team")
gen_field(app.db, name="CustomField2", type="team")
with login_as_user(app) as user:
r = user.patch(
"/api/v1/teams/me",
json={
"fields": [
{"field_id": 1, "value": "CustomValue1"},
{"field_id": 2, "value": "CustomValue2"},
]
},
)
assert r.status_code == 200
assert TeamFieldEntries.query.count() == 2
r = user.patch(
"/api/v1/teams/me",
json={"fields": [{"field_id": 2, "value": "NewCustomValue2"}]},
)
assert r.status_code == 200
assert TeamFieldEntries.query.count() == 2
assert (
TeamFieldEntries.query.filter_by(field_id=1, team_id=1).first().value
== "CustomValue1"
)
assert (
TeamFieldEntries.query.filter_by(field_id=2, team_id=1).first().value
== "NewCustomValue2"
)
with login_as_user(app, name="admin") as admin:
r = admin.patch(
"/api/v1/teams/1",
json={"fields": [{"field_id": 2, "value": "AdminNewCustomValue2"}]},
)
assert r.status_code == 200
assert TeamFieldEntries.query.count() == 2
assert (
TeamFieldEntries.query.filter_by(field_id=1, team_id=1).first().value
== "CustomValue1"
)
assert (
TeamFieldEntries.query.filter_by(field_id=2, team_id=1).first().value
== "AdminNewCustomValue2"
)
destroy_ctfd(app)
|
zeus/testutils/pytest.py
|
conrad-kronos/zeus
| 221 |
95605
|
<gh_stars>100-1000
import os
import pytest
import responses
from sqlalchemy import event
from sqlalchemy.orm import Session
from sqlalchemy.testing import assertions, assertsql
from zeus import auth, config
from zeus.storage.mock import FileStorageCache
class CountStatementsWithDebug(assertsql.AssertRule):
def __init__(self, count):
self.count = count
self.statements = []
# TODO(dcramer): it'd be nice to capture the last in_app frame here
# TODO(dcramer): even better, it'd be nice to snapshot network requests
# similar to Jest, and just ensure they haven't changed
def process_statement(self, execute_observed):
self.statements.extend(execute_observed.statements)
def no_more_statements(self):
statement_count = len(self.statements)
if self.count != statement_count:
assert False, "desired statement count %d does not match %d:\n%s" % (
self.count,
statement_count,
"\n".join(
(" {}. {}".format(k + 1, v) for k, v in enumerate(self.statements))
),
)
class AssertionHelper(object):
def __init__(self, db):
self.db = db
self.mgr = assertions.AssertsExecutionResults()
def assert_statement_count(self, count):
return self.mgr.assert_execution(
self.db.engine, CountStatementsWithDebug(count)
)
@pytest.fixture(scope="session")
def session_config(request):
return {"db_name": "test_zeus", "db_host": "127.0.0.1", "db_user": "postgres"}
@pytest.fixture(scope="session")
def app(request, session_config):
app = config.create_app(
_read_config=False,
SQLALCHEMY_DATABASE_URI="postgresql://{}@{}/{}".format(
session_config["db_user"],
session_config["db_host"],
session_config["db_name"],
),
FILE_STORAGE={"backend": "zeus.storage.mock.FileStorageCache"},
SECRET_KEY=os.urandom(24),
GITHUB_CLIENT_ID="github.client-id",
GITHUB_CLIENT_SECRET="github.client-secret",
MAIL_SUPPRESS_SEND=True,
NPLUSONE_RAISE=True,
)
app.testing = True
yield app
@pytest.fixture(scope="session", autouse=True)
def db(request, app, session_config):
db_name = session_config["db_name"]
db_host = session_config["db_host"]
db_user = session_config["db_user"]
with app.app_context():
# Postgres 9.1 does not support --if-exists
if (
os.system(
"psql -U {} -h {} -l | grep '{}'".format(db_user, db_host, db_name)
)
== 0
):
assert not os.system(
"dropdb -U {} -h {} {}".format(db_user, db_host, db_name)
)
assert not os.system(
"createdb -U {} -E utf-8 -h {} {}".format(db_user, db_host, db_name)
)
config.alembic.upgrade()
# TODO: need to kill db connections in order to drop database
# config.db.drop_all()
# os.system('dropdb %s' % db_name)
return config.db
@pytest.fixture(scope="session")
def sqla_assertions(db):
return AssertionHelper(db)
@event.listens_for(Session, "after_transaction_end")
def restart_savepoint(session, transaction):
if transaction.nested and not transaction._parent.nested:
session.begin_nested()
@pytest.fixture(scope="function")
def req_ctx(request, app):
with app.test_request_context() as req_ctx:
yield req_ctx
@pytest.fixture(scope="function", autouse=True)
def db_session(request, req_ctx, db):
db.session.begin_nested()
yield db.session
# transaction.rollback()
# connection.close()
# db.session.remove()
@pytest.fixture(scope="function", autouse=True)
def filestorage(app):
FileStorageCache.clear()
yield FileStorageCache
@pytest.fixture(scope="function", autouse=True)
def redis(app):
config.redis.flushdb()
yield config.redis
@pytest.fixture(scope="function")
def client(app):
with app.test_client() as client:
yield client
@pytest.fixture(scope="function", autouse=True)
def outbox(app):
with config.mail.record_messages() as ob:
yield ob
@pytest.fixture
def private_key():
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend
return rsa.generate_private_key(
backend=default_backend(), public_exponent=65537, key_size=2048
)
@pytest.fixture
def public_key(private_key):
return private_key.public_key()
@pytest.fixture
def public_key_bytes(public_key):
from cryptography.hazmat.primitives import serialization
return public_key.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo,
)
@pytest.fixture
def default_tenant(default_repo):
auth.set_current_tenant(auth.RepositoryTenant(repository_id=default_repo.id))
@pytest.fixture
def mock_vcs_server():
with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps:
rsps.add(rsps.GET, "http://localhost:8070/stmt/log", json={"log": []})
rsps.add(
rsps.GET,
"http://localhost:8070/stmt/resolve",
status=400,
json={"error": "invalid_ref", "ref": "TEST_FIXTURE"},
)
rsps.add(
rsps.GET,
"http://localhost:8070/stmt/branches",
json={"branches": ["master"]},
)
yield rsps
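# Hedged example (editor's addition): a test in this suite would simply request the fixtures
# above by name, e.g.
#
#   def test_vcs_branches_mocked(client, mock_vcs_server):
#       resp = client.get("/")            # endpoint is illustrative only
#       assert resp.status_code < 500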
|
train.py
|
bckim92/sequential-knowledge-transformer
| 135 |
95617
|
import os
import math
from pprint import PrettyPrinter
import random
import numpy as np
import torch # Torch must be imported before sklearn and tf
import sklearn
import tensorflow as tf
import better_exceptions
from tqdm import tqdm, trange
import colorlog
import colorful
from utils.etc_utils import set_logger, set_tcmalloc, set_gpus, check_none_gradients
from utils import config_utils, custom_argparsers
from models import MODELS
from modules.checkpoint_tracker import CheckpointTracker
from modules.trainer import run_wow_evaluation, Trainer
from modules.from_parlai import download_from_google_drive, unzip
from data.wizard_of_wikipedia import WowDatasetReader
from data.holle import HolleDatasetReader
better_exceptions.hook()
_command_args = config_utils.CommandArgs()
pprint = PrettyPrinter().pprint
pformat = PrettyPrinter().pformat
BEST_N_CHECKPOINTS = 5
def main():
# Argument passing/parsing
args, model_args = config_utils.initialize_argparser(
MODELS, _command_args, custom_argparsers.DialogArgumentParser)
hparams, hparams_dict = config_utils.create_or_load_hparams(
args, model_args, args.cfg)
pprint(hparams_dict)
# Set environment variables & gpus
set_logger()
set_gpus(hparams.gpus)
set_tcmalloc()
gpus = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_visible_devices(gpus, 'GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
# Set random seed
tf.random.set_seed(hparams.random_seed)
np.random.seed(hparams.random_seed)
random.seed(hparams.random_seed)
# For multi-gpu
if hparams.num_gpus > 1:
mirrored_strategy = tf.distribute.MirroredStrategy() # NCCL will be used as default
else:
mirrored_strategy = None
# Download BERT pretrained model
if not os.path.exists(hparams.bert_dir):
os.makedirs(hparams.bert_dir)
fname = 'uncased_L-12_H-768_A-12.zip'
gd_id = '17rfV9CleFBwwfS7m5Yd72vvxdPLWBHl6'
download_from_google_drive(gd_id, os.path.join(hparams.bert_dir, fname))
unzip(hparams.bert_dir, fname)
# Make dataset reader
os.makedirs(hparams.cache_dir, exist_ok=True)
if hparams.data_name == "wizard_of_wikipedia":
reader_cls = WowDatasetReader
elif hparams.data_name == "holle":
reader_cls = HolleDatasetReader
else:
raise ValueError("data_name must be one of 'wizard_of_wikipedia' and 'holle'")
reader = reader_cls(
hparams.batch_size, hparams.num_epochs,
buffer_size=hparams.buffer_size,
bucket_width=hparams.bucket_width,
max_length=hparams.max_length,
max_episode_length=hparams.max_episode_length,
max_knowledge=hparams.max_knowledge,
knowledge_truncate=hparams.knowledge_truncate,
cache_dir=hparams.cache_dir,
bert_dir=hparams.bert_dir,
)
train_dataset, iters_in_train = reader.read('train', mirrored_strategy)
test_dataset, iters_in_test = reader.read('test', mirrored_strategy)
if hparams.data_name == 'wizard_of_wikipedia':
unseen_dataset, iters_in_unseen = reader.read('test_unseen', mirrored_strategy)
vocabulary = reader.vocabulary
# Build model & optimizer & trainer
if mirrored_strategy:
with mirrored_strategy.scope():
model = MODELS[hparams.model](hparams, vocabulary)
optimizer = tf.keras.optimizers.Adam(learning_rate=hparams.init_lr,
clipnorm=hparams.clipnorm)
else:
model = MODELS[hparams.model](hparams, vocabulary)
optimizer = tf.keras.optimizers.Adam(learning_rate=hparams.init_lr,
clipnorm=hparams.clipnorm)
trainer = Trainer(model, optimizer, mirrored_strategy,
hparams.enable_function,
reader_cls.remove_pad)
# misc (tensorboard, checkpoints)
file_writer = tf.summary.create_file_writer(hparams.checkpoint_dir)
file_writer.set_as_default()
global_step = tf.compat.v1.train.get_or_create_global_step()
checkpoint = tf.train.Checkpoint(optimizer=optimizer,
model=model,
optimizer_step=global_step)
checkpoint_manager = tf.train.CheckpointManager(checkpoint,
directory=hparams.checkpoint_dir,
max_to_keep=hparams.max_to_keep)
checkpoint_tracker = CheckpointTracker(
hparams.checkpoint_dir, max_to_keep=BEST_N_CHECKPOINTS)
# Main loop!
train_dataset_iter = iter(train_dataset)
for epoch in range(hparams.num_epochs):
print(hparams.checkpoint_dir)
base_description = f"(Train) Epoch {epoch}, GPU {hparams.gpus}"
train_tqdm = trange(iters_in_train, ncols=120, desc=base_description)
for current_step in train_tqdm:
example = next(train_dataset_iter)
global_step.assign_add(1)
_global_step = int(global_step)
# Train
output_dict = trainer.train_step(example)
# Print model
if _global_step == 1:
model.print_model()
loss_str = str(output_dict['loss'].numpy())
train_tqdm.set_description(f"{base_description}, Loss {loss_str}")
with file_writer.as_default():
if _global_step % int(hparams.logging_step) == 0:
tf.summary.histogram('train/vocab', output_dict['sample_ids'], step=_global_step)
tf.summary.scalar('train/loss', output_dict['loss'], step=_global_step)
tf.summary.scalar('train/gen_loss', output_dict['gen_loss'], step=_global_step)
tf.summary.scalar('train/knowledge_loss', output_dict['knowledge_loss'], step=_global_step)
tf.summary.scalar('train/kl_loss', output_dict['kl_loss'], step=_global_step)
# Test
if _global_step % int(iters_in_train * hparams.evaluation_epoch) == 0:
checkpoint_manager.save(global_step)
test_loop_outputs = trainer.test_loop(test_dataset, iters_in_test, epoch, 'seen')
if hparams.data_name == 'wizard_of_wikipedia':
unseen_loop_outputs = trainer.test_loop(unseen_dataset, iters_in_unseen, epoch, 'unseen')
test_summaries, log_dict = run_wow_evaluation(
test_loop_outputs, hparams.checkpoint_dir, 'seen')
if hparams.data_name == 'wizard_of_wikipedia':
unseen_summaries, unseen_log_dict = run_wow_evaluation(
unseen_loop_outputs, hparams.checkpoint_dir, 'unseen')
# Logging
tqdm.write(colorful.bold_green("seen").styled_string)
tqdm.write(colorful.bold_red(pformat(log_dict)).styled_string)
if hparams.data_name == 'wizard_of_wikipedia':
tqdm.write(colorful.bold_green("unseen").styled_string)
tqdm.write(colorful.bold_red(pformat(unseen_log_dict)).styled_string)
with file_writer.as_default():
for family, test_summary in test_summaries.items():
for key, value in test_summary.items():
tf.summary.scalar(f'{family}/{key}', value, step=_global_step)
if hparams.data_name == 'wizard_of_wikipedia':
for family, unseen_summary in unseen_summaries.items():
for key, value in unseen_summary.items():
tf.summary.scalar(f'{family}/{key}', value, step=_global_step)
if hparams.keep_best_checkpoint:
current_score = log_dict["rouge1"]
checkpoint_tracker.update(current_score, _global_step)
if __name__ == '__main__':
main()
|
share/lib/python/neuron/expect_hocerr.py
|
niltonlk/nrn
| 203 |
95622
|
<filename>share/lib/python/neuron/expect_hocerr.py<gh_stars>100-1000
import sys
from io import StringIO
import inspect
from neuron import h
pc = h.ParallelContext()
nhost = pc.nhost()
quiet = True
def set_quiet(b):
global quiet
old = quiet
quiet = b
return old
def printerr(e):
if not quiet:
print(e)
def checking(s):
if not quiet:
print("CHECKING: " + s)
def expect_hocerr(callable, args, sec=None):
"""
Execute callable(args) and assert that it generated an error.
If sec is not None, executes callable(args, sec=sec)
Skips if nhost > 1 as all hoc_execerror end in MPI_ABORT
Does not work well with nrniv launch since hoc_execerror messages do not
pass through sys.stderr.
"""
if nhost > 1:
return
original_stderr = sys.stderr
sys.stderr = my_stderr = StringIO()
err = 0
pyerrmes = False
try:
if sec:
callable(*args, sec=sec)
else:
callable(*args)
printerr("expect_hocerr: no err for %s%s" % (str(callable), str(args)))
except Exception as e:
err = 1
errmes = my_stderr.getvalue()
if errmes:
errmes = errmes.splitlines()[0]
errmes = errmes[(errmes.find(":") + 2) :]
printerr("expect_hocerr: %s" % errmes)
elif e:
printerr(e)
finally:
sys.stderr = original_stderr
assert err
def expect_err(stmt):
"""
expect_err('stmt')
stmt is expected to raise an error
"""
here = inspect.currentframe()
caller = here.f_back
err = 0
checking(stmt)
try:
exec(stmt, caller.f_globals, caller.f_locals)
printerr("expect_err: no err for-- " + stmt)
except Exception as e:
err = 1
printerr(e)
assert err
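# Hedged usage sketch (editor's addition): any statement that raises satisfies expect_err.
if __name__ == "__main__":
    set_quiet(False)
    expect_err("1/0")  # ZeroDivisionError is caught, printed, and asserted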
|
modules/nltk_contrib/lpath/treecanvasnode.py
|
h4ck3rm1k3/NLP-project
| 123 |
95624
|
from qt import *
from qtcanvas import *
from lpathtree_qt import *
class Point:
def __init__(self, *args):
if len(args) == 2 and \
(isinstance(args[0],int) or isinstance(args[0],float)) and \
(isinstance(args[1],int) or isinstance(args[0],float)):
self.x = float(args[0])
self.y = float(args[1])
elif len(args) == 1 and \
isinstance(args[0],QPoint):
self.x = float(args[0].x())
self.y = float(args[0].y())
else:
raise TypeError("invalid argument type")
def __add__(self, p):
if not isinstance(p,Point):
raise TypeError("invalid argument type")
return Point(self.x+p.x, self.y+p.y)
def __sub__(self, p):
if not isinstance(p,Point):
raise TypeError("invalid argument type")
return Point(self.x-p.x, self.y-p.y)
def __mul__(self, n):
if not isinstance(n,int) and \
not isinstance(n,float):
raise TypeError("invalid argument type")
n = float(n)
return Point(self.x*n,self.y*n)
def __div__(self, n):
if not isinstance(n,int) and \
not isinstance(n,float):
raise TypeError("invalid argument type")
n = float(n)
return Point(self.x/n,self.y/n)
class TreeCanvasNode(QCanvasText):
def __init__(self, node=None, canvas=None):
assert(isinstance(node,LPathTreeModel))
if 'label' in node.data and node.data['label']:
QCanvasText.__init__(self, node.data['label'], canvas)
else:
QCanvasText.__init__(self, '', canvas)
node.gui = self
self.numberWidget = QCanvasText(canvas)
self.numberWidget.setColor(Qt.lightGray)
self.numberHidden = True
self.node = node
self.triangle = QCanvasPolygon(canvas)
self.triangle.setBrush(QBrush(Qt.gray))
def hide(self):
self.numberWidget.hide()
self.triangle.hide()
QCanvasText.hide(self)
def draw(self, painter):
self.updateNumber()
alignment = self.node.lpAlignment()
if alignment == self.node.AlignLeft:
self.setText('^'+self.node.data['label'])
elif alignment == self.node.AlignRight:
self.setText(self.node.data['label']+'$')
elif alignment == self.node.AlignBoth:
self.setText("^%s$" % self.node.data['label'])
elif self.node.data['label']:
self.setText(self.node.data['label'])
else:
self.setText('')
if self.node.collapsed:
dw = self.width() / 2.0
x1 = self.x() + dw
y1 = self.y() + self.height()
pa = QPointArray(3)
pa.setPoint(0, x1,y1)
pa.setPoint(1, x1-dw,y1+self.height())
pa.setPoint(2, x1+dw,y1+self.height())
self.triangle.setPoints(pa)
self.triangle.show()
else:
self.triangle.hide()
QCanvasText.draw(self, painter)
def clear(self):
f = self.font()
f.setUnderline(False)
self.setFont(f)
def width(self):
return self.boundingRect().width()
def height(self):
return self.boundingRect().height()
def intersection(self, item):
p = Point(item.boundingRect().center())
box = self.boundingRect()
c = Point(box.center())
v = p - c
if self == item:
return c
elif v.x != 0:
v = v / abs(v.x)
elif v.y > 0:
return Point(c.x,box.bottom())
else:
return Point(c.x,box.top())
v1 = Point(box.bottomRight() - box.topLeft())
if v1.x > 0.0:
v1 = v1 / v1.x
if abs(v.y) < v1.y:
dx = box.width() / 2.0
x = c.x + dx * v.x
y = c.y + dx * v.y
else:
if v.y != 0:
v = v / abs(v.y)
dy = box.height() / 2.0
x = c.x + dy * v.x
y = c.y + dy * v.y
elif v.x > 0:
x = box.right()
y = c.y
else:
x = box.left()
y = c.y
return Point(x, y)
def connectingLine(self, item):
p1 = self.intersection(item)
p2 = item.intersection(self)
return p1.x,p1.y,p2.x,p2.y
def updateNumber(self):
if self.node.lpIsolated():
self.numberHidden = True
self.numberWidget.hide()
else:
number = self.node.lpScopeDepth()
c = self.canvas()
w = self.numberWidget
c.setChanged(w.boundingRect())
w.setText("%d" % number)
r = self.boundingRect()
wr = w.boundingRect()
wy = r.top() - wr.height()
wx = r.left() + (r.width() - wr.width()) / 2.0
w.move(wx,wy)
c.setChanged(w.boundingRect())
self.numberHidden = False
w.show()
def getNumber(self):
self.node.lpScopeDepth()
def updateTrace(self):
f = self.font()
f.setUnderline(self.node.filterExpression is not None)
self.setFont(f)
self.canvas().update()
if __name__ == "__main__":
from qt import *
app = QApplication([])
c = QCanvas(100,100)
c.setBackgroundColor(Qt.blue)
w = QCanvasView(c)
n = TreeCanvasNode("test",c)
n.setColor(Qt.red)
n.show()
app.setMainWidget(w)
w.show()
app.exec_loop()
|
autogl/module/nas/space/single_path.py
|
THUMNLab/AutoGL
| 824 |
95625
|
<filename>autogl/module/nas/space/single_path.py
from autogl.module.nas.space.operation import gnn_map
import typing as _typ
import torch
import torch.nn.functional as F
from . import register_nas_space
from .base import apply_fixed_architecture
from .base import BaseSpace
from ...model import BaseModel
from ....utils import get_logger
from ...model import AutoGCN
@register_nas_space("singlepath")
class SinglePathNodeClassificationSpace(BaseSpace):
def __init__(
self,
hidden_dim: _typ.Optional[int] = 64,
layer_number: _typ.Optional[int] = 2,
dropout: _typ.Optional[float] = 0.2,
input_dim: _typ.Optional[int] = None,
output_dim: _typ.Optional[int] = None,
ops: _typ.Tuple = ["GCNConv", "GATConv"],
):
super().__init__()
self.layer_number = layer_number
self.hidden_dim = hidden_dim
self.input_dim = input_dim
self.output_dim = output_dim
self.ops = ops
self.dropout = dropout
def instantiate(
self,
hidden_dim: _typ.Optional[int] = None,
layer_number: _typ.Optional[int] = None,
input_dim: _typ.Optional[int] = None,
output_dim: _typ.Optional[int] = None,
ops: _typ.Tuple = None,
dropout=None,
):
super().instantiate()
self.hidden_dim = hidden_dim or self.hidden_dim
self.layer_number = layer_number or self.layer_number
self.input_dim = input_dim or self.input_dim
self.output_dim = output_dim or self.output_dim
self.ops = ops or self.ops
self.dropout = dropout or self.dropout
for layer in range(self.layer_number):
setattr(
self,
f"op_{layer}",
self.setLayerChoice(
layer,
[
op(
self.input_dim if layer == 0 else self.hidden_dim,
self.output_dim
if layer == self.layer_number - 1
else self.hidden_dim,
)
if isinstance(op, type)
else gnn_map(
op,
self.input_dim if layer == 0 else self.hidden_dim,
self.output_dim
if layer == self.layer_number - 1
else self.hidden_dim,
)
for op in self.ops
],
),
)
self._initialized = True
def forward(self, data):
x, edges = data.x, data.edge_index
for layer in range(self.layer_number):
x = getattr(self, f"op_{layer}")(x, edges)
if layer != self.layer_number - 1:
x = F.leaky_relu(x)
x = F.dropout(x, p=self.dropout, training=self.training)
return F.log_softmax(x, dim=1)
def parse_model(self, selection, device) -> BaseModel:
# return AutoGCN(self.input_dim, self.output_dim, device)
return self.wrap(device).fix(selection)
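# Hedged usage sketch (editor's addition): dimensions are illustrative (e.g. Cora-sized
# features/classes) and `data` is assumed to be a torch_geometric-style object:
#
#   space = SinglePathNodeClassificationSpace(hidden_dim=64, layer_number=2)
#   space.instantiate(input_dim=1433, output_dim=7)
#   log_probs = space(data)   # data provides .x and .edge_index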
|
uncalled/sim_utils.py
|
skovaka/nanopore_aligner
| 489 |
95636
|
<gh_stars>100-1000
#!/usr/bin/env python
import sys, os
import numpy as np
import argparse
from uncalled.pafstats import parse_paf
from time import time
from collections import Counter
SAMP_RATE = 4000
CHS = np.arange(512)+1
NCHS = len(CHS)
NTICKS = 32
TICK_MOD = NCHS / NTICKS
PROG = " progress "
SP = '-'*( (NTICKS - len(PROG))//2 - 1)
PROG_HEADER = '|'+SP+PROG+SP+"|\n"
def find_scans(sts,ens,mxs,max_block_gap=1,max_intv_gap=20,min_mux_frac=0.95):
i = np.argsort(sts)
sts = sts[i]
ens = ens[i]
mxs = mxs[i]
blocks = list()
bst = sts[0]
ben = ens[0]
for rst,ren in zip(sts[1:], ens[1:]):
if rst - ben > max_block_gap:
blocks.append( (bst,ben) )
bst = rst
ben = ren
else:
ben = max(ren,ben)
#ben = ren
blocks.append((bst,ben))
scan_segs = list()
scan = list()
scan_gaps = list()
prev_en = 0
for bst,ben in blocks:
if len(scan) > 0 and bst - scan[-1][1] > max_intv_gap:
if len(scan) == 4:
scan_segs.append(scan)
scan = list()
mux_counts = Counter(mxs[(sts >= bst) & (sts < ben)])
mux_counts = [(c,m) for m,c in mux_counts.items()]
top_count, top_mux = max(mux_counts)
if top_count / sum((c for c,m in mux_counts)) >= min_mux_frac:
if top_mux != 4 and len(scan) == 4:
scan_segs.append(scan)
scan_gaps.append((gap1,
bst - scan[-1][1]))
scan = list()
if len(scan) > 0 and top_mux == len(scan):
if ben - scan[-1][1] < max_intv_gap:
scan[-1] = (scan[-1][0],ben)
elif top_mux == 1:
scan[0] = (bst,ben)
gap1 = bst - prev_en
elif top_mux-1 == len(scan):
scan.append( (bst,ben) )
if len(scan) == 1:
gap1 = bst - prev_en
else:
scan = list()
else:
if len(scan) == 4:
scan_segs.append(scan)
scan_gaps.append((gap1,
bst - scan[-1][1]))
scan = list()
prev_en = ben
scans = list()
for segs,gaps in zip(scan_segs, scan_gaps):
scans.append((segs[0][0]-gaps[0], segs[-1][1]+gaps[1]))
return scans
class SeqsumProfile:
PROPS=['chs','sts','lns','mxs',
'ids','ens','glns','gsts','tms','tds','bps']
def __init__(self, fname, min_st=0, max_en=np.inf):
infile = open(fname)
header = infile.readline().split()
ch_i = header.index("channel")
st_i = header.index("start_time")
ln_i = header.index("duration")
mx_i = header.index("mux")
id_i = header.index("read_id")
tm_i = header.index("template_start")
td_i = header.index("template_duration")
bp_i = header.index("sequence_length_template")
ids=list()
chs=list()
sts=list()
mxs=list()
lns=list()
ens=list()
tms=list()
tds=list()
bps=list()
SIZE = os.path.getsize(fname)
MOD = SIZE / NTICKS
bts = 0
sys.stderr.write("=")
for line in infile:
bts += len(line)
if bts > MOD:
bts = 0
sys.stderr.write("=")
sys.stderr.flush()
tabs = line.split()
st = float(tabs[st_i])
ln = float(tabs[ln_i])
en = st+ln
if st < min_st or en > max_en: continue
sts.append(st)
lns.append(ln)
ens.append(en)
chs.append(int(tabs[ch_i]))
mxs.append(int(tabs[mx_i]))
ids.append(tabs[id_i])
tms.append(float(tabs[tm_i])-st)
tds.append(float(tabs[td_i]))
bps.append(int(tabs[bp_i]))
sys.stderr.write("\n")
sys.stderr.write("Procesing run...................\n")
ids,chs,sts,mxs,lns,ens,tms,tds,bps=map(np.array, (ids,chs,sts,mxs,lns,ens,tms,tds,bps))
self.ids,self.chs,self.sts,self.mxs,self.lns,self.ens,self.tms,self.tds,self.bps = ids,chs,sts,mxs,lns,ens,tms,tds,bps
self.sort(np.argsort(sts))
self.chodr = CHS
self.chcts = np.array([np.sum(self.chs==ch)
for ch in CHS])
self.duraiton = np.max(ens)
def rm_scans(self):
scans = find_scans(self.sts, self.ens, self.mxs)
bounds = list()
sh = 0
for st,en in (scans):
m = np.flatnonzero((self.sts+sh >= st) & (self.ens+sh <= en))
for pr in SeqsumProfile.PROPS:
a = getattr(self, pr, None)
if a is not None:
setattr(self, pr, np.delete(a,m))
l = en-st
bounds.append(st-sh)
self.sts[self.sts+sh >= st] -= l
self.ens[self.ens+sh >= st] -= l
sh += l
bounds.append(np.max(self.ens))
self.chcts = np.array([np.sum(self.chs==ch)
for ch in CHS])
return np.array(bounds)
def compute_eject_delays(self, fname):
self.dls = np.zeros(len(self.sts))
self.dls.fill(np.inf)
idxs = {self.ids[i] : i for i in range(len(self.ids))}
tlns = self.lns - self.tms
for p in parse_paf(open(fname)):
i = idxs.get(p.qr_name, None)
if i is not None and ('ej' in p.tags or 'ub' in p.tags):
ej = (p.tags['ej'] if 'ej' in p.tags else p.tags['ub'])[0]
self.dls[i] = max(0, tlns[i] - ((p.qr_len/450.0)+ej))
def compute_gaps(self):
self.gsts = np.zeros(len(self.ids))
self.glns = np.zeros(len(self.ids))
for ch in CHS:
cids = self.ids[self.chs==ch]
csts = self.sts[self.chs==ch]
cens = self.ens[self.chs==ch]
gsts = np.insert(cens[:-1], 0, 0)
glns = csts - gsts
self.gsts[self.chs==ch] = gsts
self.glns[self.chs==ch] = glns
def chsort(self, odr):
self.chodr = self.chodr[odr]
self.chcts = self.chcts[odr]
def sort(self, order=None):
for pr in SeqsumProfile.PROPS:
a = getattr(self, pr, None)
if a is not None:
setattr(self, pr, a[order])
def __len__(self):
return len(self.sts)
def sec_to_samp(sec, coef=1.0):
return int(np.round(sec*SAMP_RATE*coef))
#def write_itv(out, ch, sc, st_sec, en_sec, time_scale):
# st_samp = np.round(st_sec*SAMP_RATE*time_scale)
# en_samp = np.round(en_sec*SAMP_RATE*time_scale)
# itvs_out.write("%d\t%d\t%d\t%d\n" % (ch,sc,st_samp,en_samp))
#
#def write_gap(out, ch, sc, ln_sec):
# ln_samp = np.round(ln_sec*SAMP_RATE)
# out.write("%d\t%d\t%d\n" % (ch, sc, ln_samp))
def load_sim(client, conf):
t0 = time()
sys.stderr.write("Loading UNCALLED PAF............\n")
unc = SeqsumProfile(conf.unc_seqsum)
unc_scans = unc.rm_scans()
unc.compute_gaps()
unc.compute_eject_delays(conf.unc_paf)
delays = unc.dls[unc.dls != np.inf]
DELAY = np.median(delays)
unc.chsort(np.argsort(unc.chcts))
sys.stderr.write("Generating pattern..............\n")
ACTIVE_THRESH = np.median(unc.glns)+np.std(unc.glns)
for ch in CHS:
if (ch-1) % TICK_MOD == 0:
sys.stderr.write("=")
sys.stderr.flush()
ch_i = unc.chs==ch
if not np.any(ch_i):
continue
gsts = unc.gsts[ch_i]
glns = unc.glns[ch_i]
ch_intervals = list()
#break scan intervals at long gaps
sc = 0
itv_st = 0
for br in np.flatnonzero(glns >= ACTIVE_THRESH):
act_en = gsts[br] #find first long gap (break)
#print(ch, itv_st, _en)
#add all full intervals preceding break
while unc_scans[sc+1] < act_en:
itv_en = conf.scan_intv_time
st_samp = sec_to_samp(itv_st-unc_scans[sc], conf.sim_speed)
en_samp = sec_to_samp(itv_en, conf.sim_speed)
client.add_intv(ch,sc,st_samp,en_samp)
#write_itv(itvs_out, ch, sc, itv_st-unc_scans[sc], itv_en, conf.sim_speed)
itv_st = unc_scans[sc+1]
sc += 1
#add partial intervals before break
if itv_st != act_en:
#if np.any((glns < ACTIVE_THRESH) & (gsts > itv_st) & (gsts < act_en)):
st_samp = sec_to_samp(itv_st-unc_scans[sc], conf.sim_speed)
en_samp = sec_to_samp(act_en-unc_scans[sc], conf.sim_speed)
client.add_intv(ch,sc,st_samp,en_samp)
#write_itv(itvs_out, ch, sc, itv_st-unc_scans[sc], act_en-unc_scans[sc], conf.sim_speed)
itv_st = act_en + glns[br]
#skip intervals before interval starts
while unc_scans[sc+1] < itv_st:
sc += 1
last = np.max(unc.ens[ch_i]) #time of last read
#add intervals between last break and final read
while sc < len(unc_scans)-1 and unc_scans[sc] < last:
itv_en = min(last - unc_scans[sc], conf.scan_intv_time)
st_samp = sec_to_samp(itv_st-unc_scans[sc], conf.sim_speed)
en_samp = sec_to_samp(itv_en, conf.sim_speed)
client.add_intv(ch,sc,st_samp,en_samp)
#write_itv(itvs_out,ch,sc,itv_st-unc_scans[sc],itv_en, conf.sim_speed)
itv_st = unc_scans[sc+1]
sc += 1
#write short gaps within each interval
for sc in range(len(unc_scans)-1):
sc_i = (gsts > unc_scans[sc]) & ((gsts+glns) <= unc_scans[sc+1])
for ln in glns[sc_i]:
if ln < ACTIVE_THRESH and ln > 0:
client.add_gap(ch, sc, sec_to_samp(ln))
#write_gap(gaps_out, ch, sc, ln)
for dl in unc.dls[ch_i][sc_i]:
if dl != np.inf:
#write_gap(delays_out, ch, sc, dl)
#write_gap(delays_out, ch, sc, DELAY)
client.add_delay(ch, sc, sec_to_samp(DELAY))
sys.stderr.write("\n\n")
#gaps_out.close()
#itvs_out.close()
#delays_out.close()
sys.stderr.write("Loading control PAF.............\n")
ctl = SeqsumProfile(conf.ctl_seqsum)
ctl.rm_scans()
ctl.chsort(np.argsort(ctl.chcts))
sys.stderr.write("Ordering reads..................\n")
#sys.stderr.write(PROG_HEADER)
#Channels with any reads receive minimum read count
min_const = np.zeros(NCHS)
min_const[unc.chcts > 0] = conf.min_ch_reads
tgt_total = np.sum(ctl.chcts)
#Max reads proportional to unc read counts
max_prpl = tgt_total * unc.chcts / np.sum(unc.chcts)
#Combine min const with re-scaled proportional counts to fit max reads
remain = max_prpl - min_const
remain_clp = np.clip(remain, 0, np.inf)
tgt_counts = min_const + (np.sum(remain) * remain_clp / np.sum(remain_clp))
#Round and adjust for error
tgt_counts = np.round(tgt_counts).astype(int)
dr = -1 if np.sum(tgt_counts) > tgt_total else 1
i = len(tgt_counts)-1
while np.sum(tgt_counts) != tgt_total:
tgt_counts[i] += dr
i -= 1
diff = ctl.chcts - tgt_counts
odr = np.flip(np.argsort(diff),0)
diff = diff[odr]
tgt_counts = tgt_counts[odr]
ctl.chsort(odr)
unc.chsort(odr)
sim_reads = [None for c in CHS]
extra = list()
e = 0
for i in range(NCHS):
if i % TICK_MOD == 0:
sys.stderr.write("=")
sys.stderr.flush()
j = ctl.chs==ctl.chodr[i]
ctl_reads = list(zip(ctl.ids[j], ctl.tms[j]))
new_reads = None
tgt = tgt_counts[i]
if diff[i] >= 0:
new_reads = ctl_reads[:tgt]
if diff[i] > 0:
extra.append(ctl_reads[tgt:])
else:
if e >= len(extra):
sys.stderr.write("Not enough reads? maybe should haved checked earlier\n")
sys.exit(1)
new_reads = ctl_reads
while len(new_reads) < tgt and e < len(extra):
needed = tgt-len(new_reads)
if len(extra[e]) > needed:
new_reads += extra[e][:needed]
extra[e] = extra[e][needed:]
else:
new_reads += extra[e]
e += 1
if len(new_reads) < tgt:
sys.stderr.write("Not enough reads? again? not sure I should be here\n")
sys.exit(1)
sim_reads[unc.chodr[i]-1] = new_reads
sys.stderr.write("\n")
for ch in CHS:
for rd,tm in sim_reads[ch-1]:
client.add_read(ch, rd, sec_to_samp(tm))
#reads_out.write("%3d %s %d\n" % (ch, rd, np.round(tm*SAMP_RATE)))
#reads_out.write("%3d %s %d\n" % (ch, rd, 0))
#reads_out.close()
|
Python/Algorithms/SearchingAlgorithms/SequentialSearch.py
|
m-payal/AlgorithmsAndDataStructure
| 195 |
95669
|
<filename>Python/Algorithms/SearchingAlgorithms/SequentialSearch.py
# Implementation of SequentialSearch in python
# Python3 code to sequentially search for a key in a list.
# If the key is present, return its position; otherwise return -1.
# If the return value is -1, print "Not found!";
# otherwise print the position at which the element was found.
def Sequential_Search(dlist, item):
pos = 0
found = False
while pos < len(dlist) and not found:
if dlist[pos] == item:
# check if element at current position in list matches entered key
found = True
else:
pos += 1
if found:
return pos
else:
return -1
# Driver Code
list = input("Enter list elements (space seperated): ").split()
list = [int(i) for i in list]
key = int(input("Enter key to search: "))
res = Sequential_Search(list, key)
if res >= 0:
print(f"Found element at position: {res}")
else:
print("Not found!")
|
tests/requests/valid/028.py
|
bojiang/gunicorn
| 6,851 |
95682
|
<reponame>bojiang/gunicorn
from gunicorn.config import Config
cfg = Config()
cfg.set("strip_header_spaces", True)
request = {
"method": "GET",
"uri": uri("/stuff/here?foo=bar"),
"version": (1, 1),
"headers": [
("CONTENT-LENGTH", "3"),
],
"body": b"xyz"
}
|
angr/engines/pcode/arch/ArchPcode_PIC_17_LE_16_PIC_17C7xx.py
|
matthewpruett/angr
| 6,132 |
95696
|
<reponame>matthewpruett/angr
###
### This file was automatically generated
###
from archinfo.arch import register_arch, Endness, Register
from .common import ArchPcode
class ArchPcode_PIC_17_LE_16_PIC_17C7xx(ArchPcode):
name = 'PIC-17:LE:16:PIC-17C7xx'
pcode_arch = 'PIC-17:LE:16:PIC-17C7xx'
description = 'PIC-17C7xx'
bits = 16
ip_offset = 0x0
sp_offset = 0x4
bp_offset = sp_offset
instruction_endness = Endness.LE
register_list = [
Register('indf0', 1, 0x0),
Register('fsr0', 1, 0x1),
Register('pclat', 2, 0x2),
Register('pcl', 1, 0x2),
Register('pclath', 1, 0x3),
Register('alusta', 1, 0x4),
Register('t0sta', 1, 0x5),
Register('cpusta', 1, 0x6),
Register('intsta', 1, 0x7),
Register('indf1', 1, 0x8),
Register('fsr1', 1, 0x9),
Register('tmr0l', 1, 0xb),
Register('tmr0h', 1, 0xc),
Register('tblptr', 2, 0xd),
Register('tblptrl', 1, 0xd),
Register('tblptrh', 1, 0xe),
Register('bsr', 1, 0xf),
Register('porta', 1, 0x10),
Register('ddrb', 1, 0x11),
Register('portb', 1, 0x12),
Register('rcsta1', 1, 0x13),
Register('rcreg1', 1, 0x14),
Register('txsta1', 1, 0x15),
Register('txreg1', 1, 0x16),
Register('spbrg1', 1, 0x17),
Register('prod', 2, 0x18),
Register('prodl', 1, 0x18),
Register('prodh', 1, 0x19),
Register('ddrc', 1, 0x110),
Register('portc', 1, 0x111),
Register('ddrd', 1, 0x112),
Register('portd', 1, 0x113),
Register('ddre', 1, 0x114),
Register('porte', 1, 0x115),
Register('pir1', 1, 0x116),
Register('pie1', 1, 0x117),
Register('tmr1', 1, 0x210),
Register('tmr2', 1, 0x211),
Register('tmr3l', 1, 0x212),
Register('tmr3h', 1, 0x213),
Register('pr1', 1, 0x214),
Register('pr2', 1, 0x215),
Register('pr3lca1l', 1, 0x216),
Register('pr3hca1h', 1, 0x217),
Register('pw1dcl', 1, 0x310),
Register('pw2dcl', 1, 0x311),
Register('pw1dch', 1, 0x312),
Register('pw2dch', 1, 0x313),
Register('ca2l', 1, 0x314),
Register('ca2h', 1, 0x315),
Register('tcon1', 1, 0x316),
Register('tcon2', 1, 0x317),
Register('pir2', 1, 0x410),
Register('pie2', 1, 0x411),
Register('rcsta2', 1, 0x413),
Register('rcreg2', 1, 0x414),
Register('txsta2', 1, 0x415),
Register('txreg2', 1, 0x416),
Register('spbrg2', 1, 0x417),
Register('ddrf', 1, 0x510),
Register('portf', 1, 0x511),
Register('ddrg', 1, 0x512),
Register('portg', 1, 0x513),
Register('adcon0', 1, 0x514),
Register('adcon1', 1, 0x515),
Register('adres', 2, 0x516),
Register('adresl', 1, 0x516),
Register('adresh', 1, 0x517),
Register('sspadd', 1, 0x610),
Register('sspcon1', 1, 0x611),
Register('sspcon2', 1, 0x612),
Register('sspstat', 1, 0x613),
Register('sspbuf', 1, 0x614),
Register('pw3dcl', 1, 0x710),
Register('pw3dch', 1, 0x711),
Register('ca3l', 1, 0x712),
Register('ca3h', 1, 0x713),
Register('ca4l', 1, 0x714),
Register('ca4h', 1, 0x715),
Register('tcon3', 1, 0x716),
Register('ddrh', 1, 0x810),
Register('porth', 1, 0x811),
Register('ddrj', 1, 0x812),
Register('portj', 1, 0x813),
Register('pc', 2, 0x0, alias_names=('ip',)),
Register('stkptr', 1, 0x4),
Register('fs32', 1, 0x5),
Register('fs10', 1, 0x6),
Register('ov', 1, 0x7),
Register('z', 1, 0x8),
Register('dc', 1, 0x9),
Register('c', 1, 0xa),
Register('tblat', 2, 0x10),
Register('tblatl', 1, 0x10),
Register('tblath', 1, 0x11),
Register('wreg', 1, 0x20)
]
register_arch(['pic-17:le:16:pic-17c7xx'], 16, Endness.LE, ArchPcode_PIC_17_LE_16_PIC_17C7xx)
|
ursina/scripts/chunk_mesh.py
|
DropBear586/ursina
| 1,431 |
95705
|
<reponame>DropBear586/ursina<filename>ursina/scripts/chunk_mesh.py<gh_stars>1000+
from ursina import *
from math import floor
app = Ursina()
t = time.time()
application.asset_folder = application.asset_folder.parent.parent
terrain = Entity(model=Terrain('grass_fields_heightmap', skip=8), texture='grass', texture_scale=(3,3), scale=256)
# grid = [[[None for z in range(8)] for y in range(1)] for x in range(8)] # make 2d array of entities
grid = [[None for z in range(8)] for x in range(8)] # make 2d array of entities
x_slices = 8
# y = 1
z_slices = 8
for z in range(z_slices):
for x in range(x_slices):
part = Entity(
parent=terrain,
x=(x/x_slices) - .5 + (1/x_slices/2),
z=(z/z_slices) - .5 + (1/z_slices/2),
color=color.random_color(),
model=Mesh(),
always_on_top=True
)
grid[x][z] = part
terrain.model.generated_vertices = [v+Vec3(.5,0.5) for v in terrain.model.generated_vertices]
for i in range(0, len(terrain.model.generated_vertices), 3):
v = terrain.model.generated_vertices[i]
x = floor(v.x * x_slices)
z = floor(v.z * z_slices)
x = min(x, x_slices-1)
z = min(z, z_slices-1)
offset = Vec3(- (x/x_slices) - (1/x_slices/2), -.5, -(z/z_slices) - (1/x_slices/2))
grid[x][z].model.vertices.extend([
terrain.model.generated_vertices[i] + offset,
terrain.model.generated_vertices[i+1] + offset,
terrain.model.generated_vertices[i+2] + offset,
])
for z in range(z_slices):
for x in range(x_slices):
grid[x][z].model.generate()
# Entity(parent=grid[x][z], model='cube', scale=.01, color=color.red, always_on_top=True)
# grid[x][z].enabled = False
grid[x][z].collider = 'mesh'
# grid[x][z].model = None
from ursina.prefabs.first_person_controller import FirstPersonController
player = FirstPersonController(position=(0,200,0))
player.add_script(NoclipMode())
# player = EditorCamera(rotation_x=90, y=128)
def update():
for part in terrain.children:
part.enabled = distance_xz(part.world_position, player.world_position) < 256/8
# print(distance_xz(part.world_position, camera.world_position), 256/4)
app.run()
|
metadata-ingestion/src/datahub/ingestion/extractor/mce_extractor.py
|
pramodbiligiri/datahub
| 3,586 |
95723
|
from typing import Iterable, Union
from datahub.emitter.mce_builder import get_sys_time
from datahub.emitter.mcp import MetadataChangeProposalWrapper
from datahub.ingestion.api import RecordEnvelope
from datahub.ingestion.api.common import PipelineContext
from datahub.ingestion.api.source import Extractor, WorkUnit
from datahub.ingestion.api.workunit import MetadataWorkUnit, UsageStatsWorkUnit
from datahub.metadata.com.linkedin.pegasus2avro.mxe import (
MetadataChangeEvent,
MetadataChangeProposal,
SystemMetadata,
)
from datahub.metadata.schema_classes import UsageAggregationClass
class WorkUnitRecordExtractor(Extractor):
"""An extractor that simply returns the data inside workunits back as records."""
ctx: PipelineContext
def configure(self, config_dict: dict, ctx: PipelineContext) -> None:
self.ctx = ctx
def get_records(
self, workunit: WorkUnit
) -> Iterable[
RecordEnvelope[
Union[
MetadataChangeEvent,
MetadataChangeProposal,
MetadataChangeProposalWrapper,
UsageAggregationClass,
]
]
]:
if isinstance(workunit, MetadataWorkUnit):
if isinstance(workunit.metadata, MetadataChangeEvent):
mce = workunit.metadata
mce.systemMetadata = SystemMetadata(
lastObserved=get_sys_time(), runId=self.ctx.run_id
)
if len(mce.proposedSnapshot.aspects) == 0:
raise AttributeError("every mce must have at least one aspect")
if not workunit.metadata.validate():
raise ValueError(
f"source produced an invalid metadata work unit: {workunit.metadata}"
)
yield RecordEnvelope(
workunit.metadata,
{
"workunit_id": workunit.id,
},
)
elif isinstance(workunit, UsageStatsWorkUnit):
if not workunit.usageStats.validate():
raise ValueError(
f"source produced an invalid usage stat: {workunit.usageStats}"
)
yield RecordEnvelope(
workunit.usageStats,
{
"workunit_id": workunit.id,
},
)
else:
raise ValueError(f"unknown WorkUnit type {type(workunit)}")
def close(self):
pass
|
uliweb/contrib/datadict/commands.py
|
timgates42/uliweb
| 202 |
95745
|
from uliweb.core.commands import Command, CommandManager, get_commands
from optparse import make_option
class DataDictCommand(CommandManager):
#change the name to real command name, such as makeapp, makeproject, etc.
name = 'datadict'
#help information
help = "Data dict tool, create index, validate models' of apps or tables"
#args information, used to display the command usage message
args = ''
#if True, it'll check that the current directory has an apps directory
check_apps_dirs = True
#if True, it'll check that the args parameters are valid app names
check_apps = False
#if True, it'll skip parameters not predefined in options_list, otherwise it'll
#complain about parameters the command doesn't recognize; it's used in subcommands or
#when passing extra parameters to a special command
skip_options = True
def get_commands(self, global_options):
import datadict_subcommands as subcommands
cmds = get_commands(subcommands)
return cmds
|
lib/layers/diffeq_layers/__init__.py
|
arnabgho/ffjord
| 518 |
95752
|
from .container import *
from .resnet import *
from .basic import *
from .wrappers import *
|
var/spack/repos/builtin/packages/libestr/package.py
|
kkauder/spack
| 2,360 |
95757
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libestr(AutotoolsPackage):
"""C library for string handling (and a bit more)."""
homepage = "https://libestr.adiscon.com/"
url = "https://github.com/rsyslog/libestr/archive/v0.1.11.tar.gz"
version('0.1.11', sha256='46b53b80f875fd82981d927a45f0c9df9d17ee1d0e29efab76aaa9cd54a46bb4')
version('0.1.10', sha256='e8756b071540314abef25c044f893d6b5d249e46709329a4b3e7361403c29a1e')
version('0.1.9', sha256='efa0b90b5fe22844bac26042f988de6e8b2770e28dbd84bf49b9982d9c3e34f8')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('m4', type='build')
|
smartnlp/custom/layer/attention.py
|
msgi/nlp-tour
| 1,559 |
95758
|
# coding=utf-8
# created by msgi on 2020/4/1 7:23 PM
import tensorflow as tf
# simple attention mechanism
class BahdanauAttention(tf.keras.layers.Layer):
def __init__(self, units):
super(BahdanauAttention, self).__init__()
self.W1 = tf.keras.layers.Dense(units)
self.W2 = tf.keras.layers.Dense(units)
self.V = tf.keras.layers.Dense(1)
def call(self, query, values):
# query shape: (batch_size, hidden_size)
# values shape: (batch_size, max_len, hidden_size)
# hidden_with_time_axis shape: (batch_size,1,hidden_size)
hidden_with_time_axis = tf.expand_dims(query, 1)
score = self.V(tf.nn.tanh(self.W1(values) + self.W2(hidden_with_time_axis)))
# attention_weights shape: (batch_size,max_len,1)
attention_weights = tf.nn.softmax(score, axis=1)
# context vector shape: (batch_size, hidden_size)
context_vector = attention_weights * values
context_vector = tf.reduce_sum(context_vector, axis=1)
return context_vector, attention_weights
class LuongAttention(tf.keras.layers.Layer):
def __init__(self, units):
super(LuongAttention, self).__init__()
self.W1 = tf.keras.layers.Dense(units)
def call(self, query, values):
# query shape: (batch_size, hidden_size)
# values shape: (batch_size, max_len, hidden_size)
# score shape: (batch_size, 1, max_len)
# hidden_with_time_axis shape: (batch_size, 1, hidden_size)
hidden_with_time_axis = tf.expand_dims(query, 1)
score = tf.matmul(hidden_with_time_axis, self.W1(values), transpose_b=True)
attention_weights = tf.nn.softmax(score, axis=2)
context_vector = tf.matmul(attention_weights, values)
return context_vector, attention_weights
class VanillaRNNAttention(tf.keras.layers.Layer):
def __init__(self, attention_size):
super(VanillaRNNAttention, self).__init__()
self.attention_size = attention_size
self.W = tf.keras.layers.Dense(attention_size, activation='tanh')
self.U = tf.keras.layers.Dense(1)
def call(self, x, mask=None):
# et shape: (batch_size, max_len, attention_size)
et = self.W(x)
# at shape: (batch_size, max_len)
at = tf.nn.softmax(tf.squeeze(self.U(et), axis=-1))
if mask is not None:
at *= tf.cast(mask, tf.float32)
# atx shape: (batch_size, max_len, 1)
atx = tf.expand_dims(at, -1)
# sum result shape: (batch_size, attention_size)
sum_result = tf.reduce_sum(atx * x, axis=1)
return sum_result
def compute_output_shape(self, input_shape):
return input_shape[0], input_shape[-1]
# Multi-head (self-)attention mechanism
class MultiHeadAttention(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads):
super(MultiHeadAttention, self).__init__()
self.num_heads = num_heads
self.d_model = d_model
assert d_model % self.num_heads == 0
self.depth = d_model // self.num_heads
self.wq = tf.keras.layers.Dense(d_model)
self.wk = tf.keras.layers.Dense(d_model)
self.wv = tf.keras.layers.Dense(d_model)
self.dense = tf.keras.layers.Dense(d_model)
@staticmethod
def scaled_dot_product_attention(q, k, v, mask):
"""计算注意力权重。
q, k, v 必须具有匹配的前置维度。
k, v 必须有匹配的倒数第二个维度,例如:seq_len_k = seq_len_v。
虽然 mask 根据其类型(填充或前瞻)有不同的形状,
但是 mask 必须能进行广播转换以便求和。
参数:
q: 请求的形状 == (..., seq_len_q, depth)
k: 主键的形状 == (..., seq_len_k, depth)
v: 数值的形状 == (..., seq_len_v, depth_v)
mask: Float 张量,其形状能转换成
(..., seq_len_q, seq_len_k)。默认为None。
返回值:
输出,注意力权重
"""
matmul_qk = tf.matmul(q, k, transpose_b=True)
# Scale matmul_qk by 1/sqrt(d_k)
dk = tf.cast(tf.shape(k)[-1], tf.float32)
scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
# Add the mask to the scaled tensor.
if mask is not None:
scaled_attention_logits += (mask * -1e9)
# softmax normalizes over the last axis (seq_len_k), so the scores sum to 1.
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1)
output = tf.matmul(attention_weights, v)
return output, attention_weights
def split_heads(self, x, batch_size):
"""
Split the last dimension into (num_heads, depth).
Transpose the result so the shape is (batch_size, num_heads, seq_len, depth).
"""
x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
return tf.transpose(x, perm=[0, 2, 1, 3])
def call(self, v, k, q, mask):
batch_size = tf.shape(q)[0]
q = self.wq(q)
k = self.wk(k)
v = self.wv(v)
q = self.split_heads(q, batch_size)
k = self.split_heads(k, batch_size)
v = self.split_heads(v, batch_size)
scaled_attention, attention_weights = self.scaled_dot_product_attention(q, k, v, mask)
scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3])
concat_attention = tf.reshape(scaled_attention, (batch_size, -1, self.d_model))
output = self.dense(concat_attention)
return output, attention_weights
@staticmethod
def point_wise_feed_forward_network(d_model, dff):
"""
:param d_model:
:param dff:
:return: A feed forward nn to be stacked after each attention layer.
"""
return tf.keras.Sequential([
tf.keras.layers.Dense(dff, activation='relu'), # (batch_size, seq_len, dff)
tf.keras.layers.Dense(d_model) # (batch_size, seq_len, d_model)
])
if __name__ == '__main__':
temp_mha = MultiHeadAttention(d_model=512, num_heads=8)
y = tf.random.uniform((1, 60, 512)) # (batch_size, encoder_sequence, d_model)
out, attn = temp_mha(y, k=y, q=y, mask=None)
print(out.shape, attn.shape)
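# A minimal usage sketch (added for illustration, not part of the original
# module): run dummy encoder outputs and a dummy decoder state through
# BahdanauAttention and LuongAttention to show the expected tensor shapes.
# The batch size, sequence length and unit sizes below are arbitrary.
if __name__ == '__main__':
    dummy_values = tf.random.uniform((4, 10, 32))  # (batch_size, max_len, hidden_size)
    dummy_query = tf.random.uniform((4, 32))       # (batch_size, hidden_size)
    bahdanau = BahdanauAttention(units=16)
    context, weights = bahdanau(dummy_query, dummy_values)
    print(context.shape, weights.shape)  # (4, 32) (4, 10, 1)
    luong = LuongAttention(units=32)     # units must match hidden_size here
    context, weights = luong(dummy_query, dummy_values)
    print(context.shape, weights.shape)  # (4, 1, 32) (4, 1, 10)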
|
tests/test_mailjet_backend.py
|
bhumikapahariapuresoftware/django-anymail
| 1,324 |
95763
|
import json
from base64 import b64encode
from decimal import Decimal
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from django.core import mail
from django.core.exceptions import ImproperlyConfigured
from django.test import SimpleTestCase, override_settings, tag
from anymail.exceptions import AnymailAPIError, AnymailSerializationError, AnymailUnsupportedFeature
from anymail.message import attach_inline_image_file
from .mock_requests_backend import RequestsBackendMockAPITestCase, SessionSharingTestCases
from .utils import sample_image_content, sample_image_path, SAMPLE_IMAGE_FILENAME, AnymailTestMixin, decode_att
@tag('mailjet')
@override_settings(EMAIL_BACKEND='anymail.backends.mailjet.EmailBackend',
ANYMAIL={
'MAILJET_API_KEY': 'API KEY HERE',
'MAILJET_SECRET_KEY': 'SECRET KEY HERE'
})
class MailjetBackendMockAPITestCase(RequestsBackendMockAPITestCase):
DEFAULT_RAW_RESPONSE = b"""{
"Messages": [{
"Status": "success",
"To": [{
"Email": "<EMAIL>",
"MessageUUID": "cb927469-36fd-4c02-bce4-0d199929a207",
"MessageID": 70650219165027410,
"MessageHref": "https://api.mailjet.com/v3/message/70650219165027410"
}]
}]
}"""
def setUp(self):
super().setUp()
# Simple message useful for many tests
self.message = mail.EmailMultiAlternatives('Subject', 'Text Body', '<EMAIL>', ['<EMAIL>'])
@tag('mailjet')
class MailjetBackendStandardEmailTests(MailjetBackendMockAPITestCase):
"""Test backend support for Django standard email features"""
def test_send_mail(self):
"""Test basic API for simple send"""
mail.send_mail('Subject here', 'Here is the message.',
'<EMAIL>', ['<EMAIL>'], fail_silently=False)
self.assert_esp_called('/v3.1/send')
auth = self.get_api_call_auth()
self.assertEqual(auth, ('API KEY HERE', 'SECRET KEY HERE'))
data = self.get_api_call_json()
self.assertEqual(len(data['Messages']), 1)
message = data['Messages'][0]
self.assertEqual(data['Globals']['Subject'], "Subject here")
self.assertEqual(data['Globals']['TextPart'], "Here is the message.")
self.assertEqual(data['Globals']['From'], {"Email": "<EMAIL>"})
self.assertEqual(message['To'], [{"Email": "<EMAIL>"}])
def test_name_addr(self):
"""Make sure RFC2822 name-addr format (with display-name) is allowed
(Test both sender and recipient addresses)
"""
msg = mail.EmailMessage(
'Subject', 'Message', 'From Name <<EMAIL>>',
['"Recipient, #1" <<EMAIL>>', '<EMAIL>'],
cc=['Carbon Copy <<EMAIL>>', '<EMAIL>'],
bcc=['Blind Copy <<EMAIL>>', '<EMAIL>'])
msg.send()
data = self.get_api_call_json()
self.assertEqual(len(data['Messages']), 1)
message = data['Messages'][0]
self.assertEqual(data['Globals']['From'], {"Email": "<EMAIL>", "Name": "From Name"})
self.assertEqual(message['To'], [{"Email": "<EMAIL>", "Name": "Recipient, #1"},
{"Email": "<EMAIL>"}])
self.assertEqual(data['Globals']['Cc'], [{"Email": "<EMAIL>", "Name": "Carbon Copy"},
{"Email": "<EMAIL>"}])
self.assertEqual(data['Globals']['Bcc'], [{"Email": "<EMAIL>", "Name": "Blind Copy"},
{"Email": "<EMAIL>"}])
def test_email_message(self):
email = mail.EmailMessage(
'Subject', 'Body goes here', '<EMAIL>',
['<EMAIL>', 'Also To <<EMAIL>>'],
bcc=['<EMAIL>', 'Also BCC <<EMAIL>>'],
cc=['<EMAIL>', 'Also CC <<EMAIL>>'],
headers={'Reply-To': '<EMAIL>',
'X-MyHeader': 'my value'})
email.send()
data = self.get_api_call_json()
self.assertEqual(len(data['Messages']), 1)
message = data['Messages'][0]
self.assertEqual(data['Globals']['Subject'], "Subject")
self.assertEqual(data['Globals']['TextPart'], "Body goes here")
self.assertEqual(data['Globals']['From'], {"Email": "<EMAIL>"})
self.assertEqual(message['To'], [{"Email": "<EMAIL>"},
{"Email": "<EMAIL>", "Name": "Also To"}])
self.assertEqual(data['Globals']['Cc'], [{"Email": "<EMAIL>"},
{"Email": "<EMAIL>", "Name": "Also CC"}])
self.assertEqual(data['Globals']['Bcc'], [{"Email": "<EMAIL>"},
{"Email": "<EMAIL>", "Name": "Also BCC"}])
self.assertEqual(data['Globals']['Headers'],
{'X-MyHeader': 'my value'}) # Reply-To should be moved to own param
self.assertEqual(data['Globals']['ReplyTo'], {"Email": "<EMAIL>"})
def test_html_message(self):
text_content = 'This is an important message.'
html_content = '<p>This is an <strong>important</strong> message.</p>'
email = mail.EmailMultiAlternatives('Subject', text_content,
'<EMAIL>', ['<EMAIL>'])
email.attach_alternative(html_content, "text/html")
email.send()
data = self.get_api_call_json()
self.assertEqual(len(data['Messages']), 1)
self.assertEqual(data['Globals']['TextPart'], text_content)
self.assertEqual(data['Globals']['HTMLPart'], html_content)
# Don't accidentally send the html part as an attachment:
self.assertNotIn('Attachments', data['Globals'])
def test_html_only_message(self):
html_content = '<p>This is an <strong>important</strong> message.</p>'
email = mail.EmailMessage('Subject', html_content, '<EMAIL>', ['<EMAIL>'])
email.content_subtype = "html" # Main content is now text/html
email.send()
data = self.get_api_call_json()
self.assertNotIn('TextPart', data['Globals'])
self.assertEqual(data['Globals']['HTMLPart'], html_content)
def test_extra_headers(self):
self.message.extra_headers = {'X-Custom': 'string', 'X-Num': 123}
self.message.send()
data = self.get_api_call_json()
self.assertEqual(data['Globals']['Headers'], {
'X-Custom': 'string',
'X-Num': 123,
})
def test_extra_headers_serialization_error(self):
self.message.extra_headers = {'X-Custom': Decimal(12.5)}
with self.assertRaisesMessage(AnymailSerializationError, "Decimal"):
self.message.send()
@override_settings(ANYMAIL_IGNORE_UNSUPPORTED_FEATURES=True) # Mailjet only allows single reply-to
def test_reply_to(self):
email = mail.EmailMessage('Subject', 'Body goes here', '<EMAIL>', ['<EMAIL>'],
reply_to=['<EMAIL>', 'Other <<EMAIL>>'],
headers={'X-Other': 'Keep'})
email.send()
data = self.get_api_call_json()
self.assertEqual(data['Globals']['ReplyTo'], {"Email": "<EMAIL>"}) # only the first reply_to
self.assertEqual(data['Globals']['Headers'], {
'X-Other': 'Keep'
}) # don't lose other headers
def test_attachments(self):
text_content = "* Item one\n* Item two\n* Item three"
self.message.attach(filename="test.txt", content=text_content, mimetype="text/plain")
# Should guess mimetype if not provided...
png_content = b"PNG\xb4 pretend this is the contents of a png file"
self.message.attach(filename="test.png", content=png_content)
# Should work with a MIMEBase object (also tests no filename)...
pdf_content = b"PDF\xb4 pretend this is valid pdf data"
mimeattachment = MIMEBase('application', 'pdf')
mimeattachment.set_payload(pdf_content)
self.message.attach(mimeattachment)
self.message.send()
data = self.get_api_call_json()
attachments = data['Globals']['Attachments']
self.assertEqual(len(attachments), 3)
self.assertEqual(attachments[0]["Filename"], "test.txt")
self.assertEqual(attachments[0]["ContentType"], "text/plain")
self.assertEqual(decode_att(attachments[0]["Base64Content"]).decode('ascii'), text_content)
self.assertNotIn('ContentID', attachments[0])
self.assertEqual(attachments[1]["ContentType"], "image/png") # inferred from filename
self.assertEqual(attachments[1]["Filename"], "test.png")
self.assertEqual(decode_att(attachments[1]["Base64Content"]), png_content)
self.assertNotIn('ContentID', attachments[1]) # make sure image not treated as inline
self.assertEqual(attachments[2]["ContentType"], "application/pdf")
self.assertEqual(attachments[2]["Filename"], "") # none
self.assertEqual(decode_att(attachments[2]["Base64Content"]), pdf_content)
self.assertNotIn('ContentID', attachments[2])
self.assertNotIn('InlinedAttachments', data['Globals'])
def test_unicode_attachment_correctly_decoded(self):
self.message.attach("Une pièce jointe.html", '<p>\u2019</p>', mimetype='text/html')
self.message.send()
data = self.get_api_call_json()
self.assertEqual(data['Globals']['Attachments'], [{
'Filename': 'Une pièce jointe.html',
'ContentType': 'text/html',
'Base64Content': b64encode('<p>\u2019</p>'.encode('utf-8')).decode('ascii')
}])
def test_embedded_images(self):
image_filename = SAMPLE_IMAGE_FILENAME
image_path = sample_image_path(image_filename)
image_data = sample_image_content(image_filename)
cid = attach_inline_image_file(self.message, image_path) # Read from a png file
html_content = '<p>This has an <img src="cid:%s" alt="inline" /> image.</p>' % cid
self.message.attach_alternative(html_content, "text/html")
self.message.send()
data = self.get_api_call_json()
self.assertEqual(data['Globals']['HTMLPart'], html_content)
attachments = data['Globals']['InlinedAttachments']
self.assertEqual(len(attachments), 1)
self.assertEqual(attachments[0]['Filename'], image_filename)
self.assertEqual(attachments[0]['ContentID'], cid)
self.assertEqual(attachments[0]['ContentType'], 'image/png')
self.assertEqual(decode_att(attachments[0]["Base64Content"]), image_data)
self.assertNotIn('Attachments', data['Globals'])
def test_attached_images(self):
image_filename = SAMPLE_IMAGE_FILENAME
image_path = sample_image_path(image_filename)
image_data = sample_image_content(image_filename)
self.message.attach_file(image_path) # option 1: attach as a file
image = MIMEImage(image_data) # option 2: construct the MIMEImage and attach it directly
self.message.attach(image)
image_data_b64 = b64encode(image_data).decode('ascii')
self.message.send()
data = self.get_api_call_json()
self.assertEqual(data['Globals']['Attachments'], [
{
'Filename': image_filename, # the named one
'ContentType': 'image/png',
'Base64Content': image_data_b64,
},
{
'Filename': '', # the unnamed one
'ContentType': 'image/png',
'Base64Content': image_data_b64,
},
])
def test_multiple_html_alternatives(self):
# Multiple alternatives not allowed
self.message.attach_alternative("<p>First html is OK</p>", "text/html")
self.message.attach_alternative("<p>But not second html</p>", "text/html")
with self.assertRaises(AnymailUnsupportedFeature):
self.message.send()
def test_html_alternative(self):
# Only html alternatives allowed
self.message.attach_alternative("{'not': 'allowed'}", "application/json")
with self.assertRaises(AnymailUnsupportedFeature):
self.message.send()
def test_alternatives_fail_silently(self):
# Make sure fail_silently is respected
self.message.attach_alternative("{'not': 'allowed'}", "application/json")
sent = self.message.send(fail_silently=True)
self.assert_esp_not_called("API should not be called when send fails silently")
self.assertEqual(sent, 0)
def test_suppress_empty_address_lists(self):
"""Empty to, cc, bcc, and reply_to shouldn't generate empty fields"""
self.message.send()
data = self.get_api_call_json()
self.assertNotIn('Cc', data['Globals'])
self.assertNotIn('Bcc', data['Globals'])
self.assertNotIn('ReplyTo', data['Globals'])
def test_empty_to_list(self):
# Mailjet v3.1 doesn't support cc-only or bcc-only messages
self.message.to = []
self.message.cc = ['<EMAIL>']
with self.assertRaisesMessage(AnymailUnsupportedFeature, "messages without any `to` recipients"):
self.message.send()
def test_api_failure(self):
self.set_mock_response(status_code=500)
with self.assertRaisesMessage(AnymailAPIError, "Mailjet API response 500"):
mail.send_mail('Subject', 'Body', '<EMAIL>', ['<EMAIL>'])
# Make sure fail_silently is respected
self.set_mock_response(status_code=500)
sent = mail.send_mail('Subject', 'Body', '<EMAIL>', ['<EMAIL>'], fail_silently=True)
self.assertEqual(sent, 0)
def test_api_error_includes_details(self):
"""AnymailAPIError should include ESP's error message"""
# JSON error response - global error:
error_response = json.dumps({
"ErrorIdentifier": "06df1144-c6f3-4ca7-8885-7ec5d4344113",
"ErrorCode": "mj-0002",
"ErrorMessage": "Helpful explanation from Mailjet.",
"StatusCode": 400
}).encode('utf-8')
self.set_mock_response(status_code=400, raw=error_response)
with self.assertRaisesMessage(AnymailAPIError, "Helpful explanation from Mailjet"):
self.message.send()
# Non-JSON error response:
self.set_mock_response(status_code=500, raw=b"Ack! Bad proxy!")
with self.assertRaisesMessage(AnymailAPIError, "Ack! Bad proxy!"):
self.message.send()
# No content in the error response:
self.set_mock_response(status_code=502, raw=None)
with self.assertRaises(AnymailAPIError):
self.message.send()
@tag('mailjet')
class MailjetBackendAnymailFeatureTests(MailjetBackendMockAPITestCase):
"""Test backend support for Anymail added features"""
def test_envelope_sender(self):
self.message.envelope_sender = "<EMAIL>"
self.message.send()
data = self.get_api_call_json()
self.assertEqual(data['Globals']['Sender'], {"Email": "<EMAIL>"})
def test_metadata(self):
# Mailjet expects the payload to be a single string
# https://dev.mailjet.com/guides/#tagging-email-messages
self.message.metadata = {'user_id': "12345", 'items': 6}
self.message.send()
data = self.get_api_call_json()
self.assertJSONEqual(data['Globals']['EventPayload'], {"user_id": "12345", "items": 6})
def test_send_at(self):
self.message.send_at = 1651820889 # 2022-05-06 07:08:09 UTC
with self.assertRaisesMessage(AnymailUnsupportedFeature, 'send_at'):
self.message.send()
def test_tags(self):
self.message.tags = ["receipt"]
self.message.send()
data = self.get_api_call_json()
self.assertEqual(data['Globals']['CustomCampaign'], "receipt")
self.message.tags = ["receipt", "repeat-user"]
with self.assertRaisesMessage(AnymailUnsupportedFeature, 'multiple tags'):
self.message.send()
def test_track_opens(self):
self.message.track_opens = True
self.message.send()
data = self.get_api_call_json()
self.assertEqual(data['Globals']['TrackOpens'], 'enabled')
def test_track_clicks(self):
self.message.track_clicks = True
self.message.send()
data = self.get_api_call_json()
self.assertEqual(data['Globals']['TrackClicks'], 'enabled')
self.message.track_clicks = False
self.message.send()
data = self.get_api_call_json()
self.assertEqual(data['Globals']['TrackClicks'], 'disabled')
def test_template(self):
# template_id can be str or int (but must be numeric ID -- not the template's name)
self.message.template_id = '1234567'
self.message.merge_global_data = {'name': "Alice", 'group': "Developers"}
self.message.send()
data = self.get_api_call_json()
self.assertEqual(data['Globals']['TemplateID'], 1234567) # must be integer
self.assertEqual(data['Globals']['TemplateLanguage'], True) # required to use variables
self.assertEqual(data['Globals']['Variables'], {'name': "Alice", 'group': "Developers"})
def test_template_populate_from_sender(self):
# v3.1 API allows omitting From param to use template's sender
self.message.template_id = '1234567'
self.message.from_email = None # must set to None after constructing EmailMessage
self.message.send()
data = self.get_api_call_json()
self.assertNotIn('From', data['Globals']) # use template's sender as From
def test_merge_data(self):
self.message.to = ['<EMAIL>', 'Bob <<EMAIL>>']
self.message.merge_data = {
'<EMAIL>': {'name': "Alice", 'group': "Developers"},
'<EMAIL>': {'name': "Bob"},
}
self.message.merge_global_data = {'group': "Default Group", 'global': "Global value"}
self.message.send()
data = self.get_api_call_json()
messages = data['Messages']
self.assertEqual(len(messages), 2) # with merge_data, each 'to' gets separate message
self.assertEqual(messages[0]['To'], [{"Email": "<EMAIL>"}])
self.assertEqual(messages[1]['To'], [{"Email": "<EMAIL>", "Name": "Bob"}])
# global merge_data is sent in Globals
self.assertEqual(data['Globals']['Variables'], {'group': "Default Group", 'global': "Global value"})
# per-recipient merge_data is sent in Messages (and Mailjet will merge with Globals)
self.assertEqual(messages[0]['Variables'], {'name': "Alice", 'group': "Developers"})
self.assertEqual(messages[1]['Variables'], {'name': "Bob"})
def test_merge_metadata(self):
self.message.to = ['<EMAIL>', 'Bob <<EMAIL>>']
self.message.merge_metadata = {
'<EMAIL>': {'order_id': 123, 'tier': 'premium'},
'<EMAIL>': {'order_id': 678},
}
self.message.metadata = {'notification_batch': 'zx912'}
self.message.send()
data = self.get_api_call_json()
messages = data['Messages']
self.assertEqual(len(messages), 2)
self.assertEqual(messages[0]['To'][0]['Email'], "<EMAIL>")
# metadata and merge_metadata[recipient] are combined:
self.assertJSONEqual(messages[0]['EventPayload'],
{'order_id': 123, 'tier': 'premium', 'notification_batch': 'zx912'})
self.assertEqual(messages[1]['To'][0]['Email'], "<EMAIL>")
self.assertJSONEqual(messages[1]['EventPayload'],
{'order_id': 678, 'notification_batch': 'zx912'})
def test_default_omits_options(self):
"""Make sure by default we don't send any ESP-specific options.
Options not specified by the caller should be omitted entirely from
the API call (*not* sent as False or empty). This ensures
that your ESP account settings apply by default.
"""
self.message.send()
data = self.get_api_call_json()
self.assertNotIn('CustomCampaign', data["Globals"])
self.assertNotIn('EventPayload', data["Globals"])
self.assertNotIn('HTMLPart', data["Globals"])
self.assertNotIn('TemplateID', data["Globals"])
self.assertNotIn('TemplateLanguage', data["Globals"])
self.assertNotIn('Variables', data["Globals"])
self.assertNotIn('TrackOpens', data["Globals"])
self.assertNotIn('TrackClicks', data["Globals"])
def test_esp_extra(self):
# Anymail deep merges Mailjet esp_extra into the v3.1 Send API payload.
# Most options you'd want to override are in Globals, though a few are
# at the root. Note that it's *not* possible to merge into Messages
# (though you could completely replace it).
self.message.esp_extra = {
'Globals': {
'TemplateErrorDeliver': True,
'TemplateErrorReporting': '<EMAIL>',
},
'SandboxMode': True,
}
self.message.send()
data = self.get_api_call_json()
self.assertEqual(data["Globals"]['TemplateErrorDeliver'], True)
self.assertEqual(data["Globals"]['TemplateErrorReporting'], '<EMAIL>')
self.assertIs(data['SandboxMode'], True)
# Make sure the backend params are also still there
self.assertEqual(data["Globals"]['Subject'], "Subject")
# noinspection PyUnresolvedReferences
def test_send_attaches_anymail_status(self):
""" The anymail_status should be attached to the message when it is sent """
response_content = json.dumps({
"Messages": [{
"Status": "success",
"To": [{
"Email": "<EMAIL>",
"MessageUUID": "cb927469-36fd-4c02-bce4-0d199929a207",
"MessageID": 12345678901234500,
"MessageHref": "https://api.mailjet.com/v3/message/12345678901234500"
}]
}]
}).encode('utf-8')
self.set_mock_response(raw=response_content)
msg = mail.EmailMessage('Subject', 'Message', '<EMAIL>', ['<EMAIL>'])
sent = msg.send()
self.assertEqual(sent, 1)
self.assertEqual(msg.anymail_status.status, {'sent'})
self.assertEqual(msg.anymail_status.message_id, "12345678901234500")
self.assertEqual(msg.anymail_status.recipients['<EMAIL>'].status, 'sent')
self.assertEqual(msg.anymail_status.recipients['<EMAIL>'].message_id, "12345678901234500")
self.assertEqual(msg.anymail_status.esp_response.content, response_content)
# noinspection PyUnresolvedReferences
def test_mixed_status(self):
"""The status should include an entry for each recipient"""
# Mailjet's v3.1 API will partially fail a batch send, allowing valid emails to go out.
# The API response doesn't identify the failed email addresses; make sure we represent
# them correctly in the anymail_status.
response_content = json.dumps({
"Messages": [{
"Status": "success",
"CustomID": "",
"To": [{
"Email": "<EMAIL>",
"MessageUUID": "556e896a-e041-4836-bb35-8bb75ee308c5",
"MessageID": 12345678901234500,
"MessageHref": "https://api.mailjet.com/v3/REST/message/12345678901234500"
}],
"Cc": [],
"Bcc": []
}, {
"Errors": [{
"ErrorIdentifier": "f480a5a2-0334-4e08-b2b7-f372ce5669e0",
"ErrorCode": "mj-0013",
"StatusCode": 400,
"ErrorMessage": "\"[email protected]\" is an invalid email address.",
"ErrorRelatedTo": ["To[0].Email"]
}],
"Status": "error"
}]
}).encode('utf-8')
self.set_mock_response(raw=response_content, status_code=400) # Mailjet uses 400 for partial success
msg = mail.EmailMessage('Subject', 'Message', '<EMAIL>', ['<EMAIL>', '[email protected]'])
sent = msg.send()
self.assertEqual(sent, 1)
self.assertEqual(msg.anymail_status.status, {'sent', 'failed'})
self.assertEqual(msg.anymail_status.recipients['<EMAIL>'].status, 'sent')
self.assertEqual(msg.anymail_status.recipients['<EMAIL>'].message_id, "12345678901234500")
self.assertEqual(msg.anymail_status.recipients['<EMAIL>'].status, 'failed')
self.assertEqual(msg.anymail_status.recipients['<EMAIL>'].message_id, None)
self.assertEqual(msg.anymail_status.message_id, {"12345678901234500", None})
self.assertEqual(msg.anymail_status.esp_response.content, response_content)
# noinspection PyUnresolvedReferences
def test_send_failed_anymail_status(self):
""" If the send fails, anymail_status should contain initial values"""
self.set_mock_response(status_code=500)
sent = self.message.send(fail_silently=True)
self.assertEqual(sent, 0)
self.assertIsNone(self.message.anymail_status.status)
self.assertIsNone(self.message.anymail_status.message_id)
self.assertEqual(self.message.anymail_status.recipients, {})
self.assertIsNone(self.message.anymail_status.esp_response)
# noinspection PyUnresolvedReferences
def test_send_unparsable_response(self):
"""If the send succeeds, but a non-JSON API response, should raise an API exception"""
mock_response = self.set_mock_response(status_code=200,
raw=b"yikes, this isn't a real response")
with self.assertRaises(AnymailAPIError):
self.message.send()
self.assertIsNone(self.message.anymail_status.status)
self.assertIsNone(self.message.anymail_status.message_id)
self.assertEqual(self.message.anymail_status.recipients, {})
self.assertEqual(self.message.anymail_status.esp_response, mock_response)
def test_json_serialization_errors(self):
"""Try to provide more information about non-json-serializable data"""
self.message.tags = [Decimal('19.99')] # yeah, don't do this
with self.assertRaises(AnymailSerializationError) as cm:
self.message.send()
print(self.get_api_call_json())
err = cm.exception
self.assertIsInstance(err, TypeError) # compatibility with json.dumps
self.assertIn("Don't know how to send this data to Mailjet", str(err)) # our added context
self.assertRegex(str(err), r"Decimal.*is not JSON serializable") # original message
def test_merge_data_null_values(self):
# Mailjet doesn't accept None (null) as a merge value;
# returns "HTTP/1.1 500 Cannot convert data from Null value"
self.message.merge_global_data = {'Some': None}
self.set_mock_response(status_code=500, reason="Cannot convert data from Null value", raw=None)
with self.assertRaisesMessage(AnymailAPIError, "Cannot convert data from Null value"):
self.message.send()
@tag('mailjet')
class MailjetBackendSessionSharingTestCase(SessionSharingTestCases, MailjetBackendMockAPITestCase):
"""Requests session sharing tests"""
pass # tests are defined in SessionSharingTestCases
@tag('mailjet')
@override_settings(EMAIL_BACKEND="anymail.backends.mailjet.EmailBackend")
class MailjetBackendImproperlyConfiguredTests(AnymailTestMixin, SimpleTestCase):
"""Test ESP backend without required settings in place"""
def test_missing_api_key(self):
with self.assertRaises(ImproperlyConfigured) as cm:
mail.send_mail('Subject', 'Message', '<EMAIL>', ['<EMAIL>'])
errmsg = str(cm.exception)
self.assertRegex(errmsg, r'\bMAILJET_API_KEY\b')
@override_settings(ANYMAIL={'MAILJET_API_KEY': 'dummy'})
def test_missing_secret_key(self):
with self.assertRaises(ImproperlyConfigured) as cm:
mail.send_mail('Subject', 'Message', '<EMAIL>', ['<EMAIL>'])
errmsg = str(cm.exception)
self.assertRegex(errmsg, r'\bMAILJET_SECRET_KEY\b')
|
src/application-insights/azext_applicationinsights/vendored_sdks/applicationinsights/models/events_exception_info_py3.py
|
Mannan2812/azure-cli-extensions
| 2,728 |
95771
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class EventsExceptionInfo(Model):
"""The exception info.
:param severity_level: The severity level of the exception
:type severity_level: int
:param problem_id: The problem ID of the exception
:type problem_id: str
:param handled_at: Indicates where the exception was handled at
:type handled_at: str
:param assembly: The assembly which threw the exception
:type assembly: str
:param method: The method that threw the exception
:type method: str
:param message: The message of the exception
:type message: str
:param type: The type of the exception
:type type: str
:param outer_type: The outer type of the exception
:type outer_type: str
:param outer_method: The outer method of the exception
:type outer_method: str
:param outer_assembly: The outer assembly of the exception
:type outer_assembly: str
:param outer_message: The outer message of the exception
:type outer_message: str
:param innermost_type: The inner most type of the exception
:type innermost_type: str
:param innermost_message: The inner most message of the exception
:type innermost_message: str
:param innermost_method: The inner most method of the exception
:type innermost_method: str
:param innermost_assembly: The inner most assembly of the exception
:type innermost_assembly: str
:param details: The details of the exception
:type details:
list[~azure.applicationinsights.models.EventsExceptionDetail]
"""
_attribute_map = {
'severity_level': {'key': 'severityLevel', 'type': 'int'},
'problem_id': {'key': 'problemId', 'type': 'str'},
'handled_at': {'key': 'handledAt', 'type': 'str'},
'assembly': {'key': 'assembly', 'type': 'str'},
'method': {'key': 'method', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'outer_type': {'key': 'outerType', 'type': 'str'},
'outer_method': {'key': 'outerMethod', 'type': 'str'},
'outer_assembly': {'key': 'outerAssembly', 'type': 'str'},
'outer_message': {'key': 'outerMessage', 'type': 'str'},
'innermost_type': {'key': 'innermostType', 'type': 'str'},
'innermost_message': {'key': 'innermostMessage', 'type': 'str'},
'innermost_method': {'key': 'innermostMethod', 'type': 'str'},
'innermost_assembly': {'key': 'innermostAssembly', 'type': 'str'},
'details': {'key': 'details', 'type': '[EventsExceptionDetail]'},
}
def __init__(self, *, severity_level: int=None, problem_id: str=None, handled_at: str=None, assembly: str=None, method: str=None, message: str=None, type: str=None, outer_type: str=None, outer_method: str=None, outer_assembly: str=None, outer_message: str=None, innermost_type: str=None, innermost_message: str=None, innermost_method: str=None, innermost_assembly: str=None, details=None, **kwargs) -> None:
super(EventsExceptionInfo, self).__init__(**kwargs)
self.severity_level = severity_level
self.problem_id = problem_id
self.handled_at = handled_at
self.assembly = assembly
self.method = method
self.message = message
self.type = type
self.outer_type = outer_type
self.outer_method = outer_method
self.outer_assembly = outer_assembly
self.outer_message = outer_message
self.innermost_type = innermost_type
self.innermost_message = innermost_message
self.innermost_method = innermost_method
self.innermost_assembly = innermost_assembly
self.details = details
|
tools/turbinia_job_graph.py
|
sa3eed3ed/turbinia
| 559 |
95772
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Graph to visualise job/evidence relationships."""
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import graphviz
import sys
from turbinia.jobs import manager as jobs_manager
try:
unicode
except NameError:
unicode = str # pylint: disable=redefined-builtin
def create_graph():
"""Create graph of relationships between Turbinia jobs and evidence.
Returns:
Instance of graphviz.dot.Digraph
"""
dot = graphviz.Digraph(comment='Turbinia Evidence graph', format='png')
for _, job in jobs_manager.JobsManager.GetJobs():
dot.node(job.NAME)
for evidence in job.evidence_input:
dot.node(evidence.__name__, shape='box')
dot.edge(evidence.__name__, job.NAME)
for evidence in job.evidence_output:
dot.node(evidence.__name__, shape='box')
dot.edge(job.NAME, evidence.__name__)
return dot
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Create Turbinia evidence graph.')
parser.add_argument(
'-f', '--format', default='png',
help='The format of the output file you wish to generate. Specify '
'"list" to list out the available output types. More info is here: '
'http://www.graphviz.org/doc/info/output.html')
parser.add_argument(
'-e', '--engine', default='dot',
help='The graphviz engine used to generate the graph layout. Specify '
'"list" to list out the available engines.')
parser.add_argument('filename', type=unicode, help='where to save the file')
args = parser.parse_args()
if args.format == 'list':
formats = ' '.join(graphviz.FORMATS)
print('Available format types: {0:s}'.format(formats))
sys.exit(0)
if args.format not in graphviz.FORMATS:
print('Format type {0:s} is not supported'.format(args.format))
sys.exit(1)
if args.engine == 'list':
engines = ' '.join(graphviz.ENGINES)
print('Available graph layout engines: {0:s}'.format(engines))
sys.exit(0)
if args.engine not in graphviz.ENGINES:
print('Layout engine type {0:s} is not supported'.format(args.engine))
sys.exit(1)
graph = create_graph()
graph.engine = args.engine
output_file = args.filename.replace('.png', '')
try:
rendered_graph = graph.render(
filename=output_file, format=args.format, cleanup=True)
print('Graph generated and saved to: {0}'.format(rendered_graph))
except graphviz.ExecutableNotFound:
print('Graphviz is not installed - Run: apt-get install graphviz')
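# Example invocations (hypothetical output path, added for illustration):
#   python turbinia_job_graph.py /tmp/turbinia_jobs
#   python turbinia_job_graph.py -f svg -e neato /tmp/turbinia_jobs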
|
examples/fci/10-spin.py
|
robert-anderson/pyscf
| 501 |
95789
|
#!/usr/bin/env python
#
# Author: <NAME> <<EMAIL>>
#
'''
Assign spin state for FCI wavefunction.
By default, the FCI solver will take Mole attribute spin for the spin state.
It can be overwritten by passing kwarg ``nelec`` to the kernel function of FCI
solver. The nelec argument is a two-element tuple. The first is the number
of alpha electrons; the second is the number of beta electrons.
If spin-contamination is observed on FCI wavefunction, we can use the
decoration function :func:`fci.addons.fix_spin_` to level shift the energy of
states which do not have the target spin.
'''
import numpy
from pyscf import gto, scf, fci
mol = gto.M(atom='Ne 0 0 0', basis='631g', spin=2)
m = scf.RHF(mol)
m.kernel()
norb = m.mo_energy.size
fs = fci.FCI(mol, m.mo_coeff)
e, c = fs.kernel()
print('E = %.12f 2S+1 = %.7f' %
(e, fs.spin_square(c, norb, (6,4))[1]))
e, c = fs.kernel(nelec=(5,5))
print('E = %.12f 2S+1 = %.7f' %
(e, fs.spin_square(c, norb, (5,5))[1]))
fs = fci.addons.fix_spin_(fci.FCI(mol, m.mo_coeff), shift=.5)
e, c = fs.kernel()
print('E = %.12f 2S+1 = %.7f' %
(e, fs.spin_square(c, norb, (6,4))[1]))
#
# Example 2: Oxygen molecule singlet state
#
nelec = (8,8)
mol = gto.M(atom='O 0 0 0; O 0 0 1.2', spin=2, basis='sto3g',
symmetry=1, verbose=0)
mf = scf.RHF(mol).run()
mci = fci.FCI(mol, mf.mo_coeff)
mci.wfnsym = 'A1g'
mci = fci.addons.fix_spin_(mci, ss=0)
# Use keyword argument nelec to explicitly control the spin. Otherwise
# mol.spin is applied.
e, civec = mci.kernel(nelec=nelec)
print('A1g singlet E = %.12f 2S+1 = %.7f' %
(e, mci.spin_square(civec, mf.mo_coeff.shape[1], nelec)[1]))
mci.wfnsym = 'A2g'
mci = fci.addons.fix_spin_(mci, ss=0)
e, civec = mci.kernel(nelec=nelec)
print('A2g singlet E = %.12f 2S+1 = %.7f' %
(e, mci.spin_square(civec, mf.mo_coeff.shape[1], nelec)[1]))
mol = gto.M(atom='O 0 0 0; O 0 0 1.2', spin=2, basis='sto3g',
verbose=0)
mf = scf.RHF(mol).run()
mci = fci.FCI(mol, mf.mo_coeff)
mci = fci.addons.fix_spin_(mci, ss=0)
e, civec = mci.kernel(nelec=nelec)
print('Singlet E = %.12f 2S+1 = %.7f' %
(e, mci.spin_square(civec, mf.mo_coeff.shape[1], nelec)[1]))
#
# Example 3: Triplet and Sz=0
#
# It's common that the default initial guess has no overlap with the ground
# state. In this example, the default initial guess is singlet. Computing
# multiple roots to overcome the initial guess issue.
#
mol = gto.M(verbose=0,
atom = '''
H 1 -1. 0
H 0 -1. -1
H 0 -0.5 -0
H 0 -0. -1
H 1 -0.5 0
H 0 1. 1''',
basis='sto-3g')
mf = scf.RHF(mol).run()
mci = fci.FCI(mol, mf.mo_coeff, singlet=False)
mci = fci.addons.fix_spin_(mci, ss=2)
e, civec = mci.kernel(nroots=2)
nelec = (3,3)
print('Triplet E = %.12f 2S+1 = %.7f' %
(e[0], mci.spin_square(civec[0], mf.mo_coeff.shape[1], nelec)[1]))
#
# Be careful with the trick of energy penalty. Numerical problems may be
# observed when function fix_spin_ was applied.
#
mol = gto.M(atom='O 0 0 0', basis='6-31G')
m = scf.RHF(mol).run()
norb = m.mo_coeff.shape[1]
nelec = mol.nelec
fs = fci.addons.fix_spin_(fci.FCI(mol, m.mo_coeff), .5)
fs.nroots = 15
e, fcivec = fs.kernel(verbose=5)
# The first 5 states should be degenerated. The degeneracy may be broken.
for i, c in enumerate(fcivec):
print('state = %d, E = %.9f, S^2=%.4f' %
(i, e[i], fci.spin_op.spin_square(c, norb, nelec)[0]))
|
django_linter/transformers/models.py
|
enefeq/django_linter
| 101 |
95795
|
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from astroid import MANAGER, Class, Instance, Function, Arguments, Pass
def transform_model_class(cls):
if cls.is_subtype_of('django.db.models.base.Model'):
core_exceptions = MANAGER.ast_from_module_name('django.core.exceptions')
# add DoesNotExist exception
DoesNotExist = Class('DoesNotExist', None)
DoesNotExist.bases = core_exceptions.lookup('ObjectDoesNotExist')[1]
cls.locals['DoesNotExist'] = [DoesNotExist]
# add MultipleObjectsReturned exception
MultipleObjectsReturned = Class('MultipleObjectsReturned', None)
MultipleObjectsReturned.bases = core_exceptions.lookup(
'MultipleObjectsReturned')[1]
cls.locals['MultipleObjectsReturned'] = [MultipleObjectsReturned]
# add objects manager
if 'objects' not in cls.locals:
try:
Manager = MANAGER.ast_from_module_name(
'django.db.models.manager').lookup('Manager')[1][0]
QuerySet = MANAGER.ast_from_module_name(
'django.db.models.query').lookup('QuerySet')[1][0]
except IndexError:
pass
else:
if isinstance(Manager.body[0], Pass):
# for django >= 1.7
for func_name, func_list in QuerySet.locals.items():
if (not func_name.startswith('_') and
func_name not in Manager.locals):
func = func_list[0]
if (isinstance(func, Function) and
'queryset_only' not in func.instance_attrs):
f = Function(func_name, None)
f.args = Arguments()
Manager.locals[func_name] = [f]
cls.locals['objects'] = [Instance(Manager)]
# add id field
if 'id' not in cls.locals:
try:
AutoField = MANAGER.ast_from_module_name(
'django.db.models.fields').lookup('AutoField')[1][0]
except IndexError:
pass
else:
cls.locals['id'] = [Instance(AutoField)]
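# Sketch of how such a transform is typically registered (the actual wiring
# lives elsewhere in this package; the `register` hook below is an assumption):
#
# def register(linter):
#     MANAGER.register_transform(Class, transform_model_class)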
|
4_Classification/Classification_Comparison/classification_Comparison.py
|
labaran1/100DaysofMLCode
| 236 |
95804
|
# Classification Comparison
# Importing the libraries
import pandas as pd
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
from sklearn.preprocessing import LabelEncoder
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
def main():
# Importing the dataset
dataset = pd.read_csv('Social_Network_Ads.csv')
# Convert gender into numeric
labelEncoder_gender = LabelEncoder()
dataset.iloc[:, 1] = labelEncoder_gender.fit_transform(dataset.iloc[:, 1])
# creating dependent and independent variables
X = dataset.iloc[:, 1:4].values
y = dataset.iloc[:, 4].values
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
#### Fitting different classifiers to the Training set ###
###########################################################
classifiers = {
"Decision Tree": DecisionTreeClassifier(criterion='entropy', random_state=0),
"K Nearest Neighbors": KNeighborsClassifier(n_neighbors=5, metric='minkowski', p=2),
"Kernel SVM": SVC(kernel='rbf', random_state=0),
"Logistic Regression": LogisticRegression(random_state=0, solver='lbfgs'),
"Naive Bayes": GaussianNB(),
"Random Forest": RandomForestClassifier(n_estimators=10, criterion='entropy', random_state=0),
"Support Vector Machine": SVC(kernel='linear', random_state=0)
}
adict = {}
# Fitting classifiers to the Training set
for key in classifiers:
classifiers[key].fit(X_train, y_train)
# Predicting the Test set results
y_pred = classifiers[key].predict(X_test)
# Making the Confusion Matrix
cm = confusion_matrix(y_test, y_pred)
TP = cm[1, 1]
TN = cm[0, 0]
FP = cm[0, 1]
FN = cm[1, 0]
accuracy = accuracy_score(y_test, y_pred)
error = 1 - accuracy
Sensitivity = recall_score(y_test, y_pred)
Specificity = TN / (TN + FP)
False_Positive_Rate = 1 - Specificity
Precision = precision_score(y_test, y_pred)
adict[key] = [accuracy, round(Precision, 3), round(Sensitivity, 3), round(Specificity, 3), round(error, 4),
round(False_Positive_Rate, 3)]
print(pd.DataFrame.from_dict(adict, orient='index',
columns=['Accuracy', 'Precision', 'Sensitivity', 'Specificity', 'Error',
'False_Positive_Rate']))
if __name__ == '__main__':
main()
|
Data Structure/Matrix/Addition of Two Matrices/SolutionByRiya.py
|
rajethanm4/Programmers-Community
| 261 |
95830
|
rows= int(input("Enter the number of rows: "))
cols= int(input("Enter the number of columns: "))
matrixA=[]
print("Enter the entries rowwise for matrix A: ")
for i in range(rows):
a=[]
for j in range(cols):
a.append(int(input()))
matrixA.append(a)
matrixB=[]
print("Enter the entries rowwise for matrix B: ")
for i in range(rows):
b=[]
for j in range(cols):
b.append(int(input()))
matrixB.append(b)
matrixResultant=[[0 for j in range(cols)] for i in range(rows)]
for i in range(rows):
for j in range(cols):
matrixResultant[i][j]=matrixA[i][j]+matrixB[i][j]
for r in matrixResultant:
print (r)
|
pingo/examples/pushbutton_led/pushbutton_led.py
|
pingo-io/pingo-py
| 116 |
95904
|
"""Pushbutton led.
The led comes on when you press the button.
Connections example found on ./button.png
"""
# -*- coding: utf-8 -*-
import pingo
import sys
try:
print("Loading board...")
board = pingo.detect.get_board()
print("Its ok...")
except Exception as e:
print("Error on get_board: {}".format(e))
sys.exit(1)
led_pin = board.pins[13]
led_pin.mode = pingo.OUT
button_pin = board.pins[5]
button_pin.mode = pingo.IN
while True:
if button_pin.state == pingo.HIGH:
led_pin.hi()
else:
led_pin.lo()
|
examples/datasets/plot_vertex.py
|
mvdoc/pycortex
| 423 |
95909
|
"""
================
Plot Vertex Data
================
This plots example vertex data onto an example subject, S1, onto a flatmap
using quickflat. In order for this to run, you have to have a flatmap for
this subject in the pycortex filestore.
The cortex.Vertex object is instantiated with a numpy array of the same size
as the total number of vertices in that subject's flatmap. Each pixel is
colored according to the value given for the nearest vertex in the flatmap.
Instead of the random test data, you can replace this with any array that is
the length of all of the vertices in the subject.
Additionally, if you create a Vertex object using only the number of vertices
that exist in the left hemisphere of the brain, the right hemisphere is
filled in with zeros.
"""
import cortex
import cortex.polyutils
import numpy as np
np.random.seed(1234)
import matplotlib.pyplot as plt
subject = 'S1'
# In order to get the number of vertices in this subject's cortical surface
# we have to load in their surfaces and get the number of points in each
surfs = [cortex.polyutils.Surface(*d)
for d in cortex.db.get_surf(subject, "fiducial")]
# This is the total number of vertices in both hemispheres combined
num_verts = surfs[0].pts.shape[0] + surfs[1].pts.shape[0]
# Creating a random dataset with one entry for each vertex
test_data = np.random.randn(num_verts)
# This creates a Vertex object for our subject and test dataset
vertex_data = cortex.Vertex(test_data, subject)
# And now we can display it on a flatmap
cortex.quickshow(vertex_data)
plt.show()
# We can also plot just the left hemisphere data
numl = surfs[0].pts.shape[0]
# This creates a Vertex object with an array only as long as the number of
# vertices in the left hemisphere, and the right hemisphere will be filled
# in with zeros
vertex_data_left = cortex.Vertex(test_data[:numl], subject)
cortex.quickshow(vertex_data_left)
plt.show()
|
Training Utility/somatictrainer/gestures.py
|
ZackFreedman/Somatic
| 328 |
95925
|
import logging
import os
import time
from typing import List
import numpy as np
import uuid
import pickle
import bisect
standard_gesture_length = 50 # Number of (yaw, pitch) coordinates per gesture fed into the ML algorithm
_log_level = logging.DEBUG
class Gesture:
logger = logging.getLogger(__name__)
logger.setLevel(_log_level)
def __init__(self, glyph, bearings, raw_data, gesture_uuid=None):
"""
:param bearings: Standardized ordered list of (yaw/pitch) coordinates
:type bearings: np.array
:param raw_data: Raw data collected during training,
in case we make a processing boo-boo and need to retroactively fix things
:type raw_data: list
:param glyph: Which letter or opcode this gesture represents
:type glyph: str
:param gesture_uuid: A unique identifier used to tie the gesture to UI elements
:type gesture_uuid: uuid.UUID
"""
if bearings.shape != (standard_gesture_length, 2):
raise AttributeError('Data invalid - got {} orientations instead of {}'
.format(len(bearings), standard_gesture_length))
self.bearings = bearings
self.raw_data = raw_data
self.glyph = glyph
if gesture_uuid is not None:
self.uuid = gesture_uuid
else:
self.uuid = uuid.uuid4()
def to_dict(self):
datastore = {
'g': self.glyph,
'b': self.bearings.tolist(),
'r': self.raw_data,
'id': str(self.uuid)
}
return datastore
@staticmethod
def from_dict(datastore):
try:
if 'id' in datastore:
gesture_uuid = uuid.UUID(datastore['id'])
else:
gesture_uuid = None
glyph = datastore['g']
bearings = np.array(datastore['b'])
assert len(bearings) == standard_gesture_length
raw_data = datastore['r']
return Gesture(glyph, bearings, raw_data, gesture_uuid)
except (AssertionError, AttributeError, KeyError):
Gesture.logger.exception('Gesture class: Error parsing dict {}...'.format(str(datastore)[:20]))
return None
class GestureTrainingSet:
examples: List[Gesture]
big_ole_list_o_glyphs = '\x08\n !"#$\'+,-./0123456789?@ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
short_glyphs = '., \x08l-/'
current_version = 3 # For deleting old saves
logger = logging.getLogger(__name__)
logger.setLevel(_log_level)
def __init__(self):
self.target_examples_per_glyph = 100
self.examples = []
self.glyphs_represented = []
# self.unidentified_examples = []
@staticmethod
def load(pathspec):
with open(pathspec, 'rb') as f:
output = GestureTrainingSet()
output.examples = pickle.load(f)
output.glyphs_represented = np.unique([x.glyph for x in output.examples])
GestureTrainingSet.logger.debug('GestureTrainingSet class: Loaded {}'.format(output))
return output
def save(self, pathspec):
t = time.perf_counter()
GestureTrainingSet.logger.debug('Generating save dict took {}'.format(time.perf_counter() - t))
t = time.perf_counter()
# Save unidentified samples here?
with open(pathspec + '.tmp', 'wb') as f:
pickle.dump(self.examples, f)
GestureTrainingSet.logger.debug('Saving took {}'.format(time.perf_counter() - t))
if os.path.exists(pathspec):
os.remove(pathspec)
os.rename(pathspec + '.tmp', pathspec)
def add(self, example: Gesture):
self.examples.append(example)
if example.glyph not in self.glyphs_represented:
bisect.insort(self.glyphs_represented, example.glyph)
def get_examples_for(self, glyph):
return [example for example in self.examples if example.glyph == glyph]
def count(self, glyph):
return len(self.get_examples_for(glyph))
def summarize(self):
return {glyph: self.count(glyph) for glyph in self.big_ole_list_o_glyphs}
def remove(self, example_or_uuid):
if type(example_or_uuid) is Gesture:
example = example_or_uuid
if example in self.examples:
self.examples.remove(example)
if not self.count(example.glyph):
self.glyphs_represented.remove(example.glyph)
elif type(example_or_uuid) is uuid.UUID:
gesture_uuid = example_or_uuid
for example in self.examples:
if gesture_uuid == example.uuid:
self.examples.remove(example)
if not self.count(example.glyph):
self.glyphs_represented.remove(example.glyph)
break
def move(self, example, new_glyph):
if example in self.examples:
example.glyph = new_glyph
def remove_at(self, glyph, index):
count = self.count(glyph)
if index < count:
self.examples.remove(self.get_examples_for(glyph)[index])
def get_character_map(self, type='decoding'):
if type == 'decoding':
return {i: self.glyphs_represented[i] for i in range(len(self.glyphs_represented))}
elif type == 'encoding':
return {self.glyphs_represented[i]: i for i in range(len(self.glyphs_represented))}
else:
raise AttributeError('Char map type must be "encoding" or "decoding"')
def to_training_set(self):
char_map = self.get_character_map(type='encoding')
data = []
labels = []
for example in self.examples:
data.append(example.bearings)
labels.append(char_map[example.glyph])
return np.array(data), np.array(labels)
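# Minimal usage sketch (added for illustration; the glyph and the all-zero
# bearings below are dummy values, not real training data):
if __name__ == '__main__':
    demo_bearings = np.zeros((standard_gesture_length, 2))
    demo_gesture = Gesture('a', demo_bearings, raw_data=[])
    demo_set = GestureTrainingSet()
    demo_set.add(demo_gesture)
    data, labels = demo_set.to_training_set()
    print(data.shape, labels, demo_set.get_character_map())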
|
agents/ppo2_agent.py
|
christopherhesse/retro-baselines
| 134 |
95943
|
#!/usr/bin/env python
"""
Train an agent on Sonic using PPO2 from OpenAI Baselines.
"""
import tensorflow as tf
from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
import baselines.ppo2.ppo2 as ppo2
import baselines.ppo2.policies as policies
import gym_remote.exceptions as gre
from sonic_util import make_env
def main():
"""Run PPO until the environment throws an exception."""
config = tf.ConfigProto()
config.gpu_options.allow_growth = True # pylint: disable=E1101
with tf.Session(config=config):
# Take more timesteps than we need to be sure that
# we stop due to an exception.
ppo2.learn(policy=policies.CnnPolicy,
env=DummyVecEnv([make_env]),
nsteps=4096,
nminibatches=8,
lam=0.95,
gamma=0.99,
noptepochs=3,
log_interval=1,
ent_coef=0.01,
lr=lambda _: 2e-4,
cliprange=lambda _: 0.1,
total_timesteps=int(1e7))
if __name__ == '__main__':
try:
main()
except gre.GymRemoteError as exc:
print('exception', exc)
|
analysis_engine/load_algo_dataset_from_file.py
|
virdesai/stock-analysis-engine
| 819 |
95949
|
"""
Helper for loading datasets from a file
**Supported environment variables**
::
# to show debug, trace logging please export ``SHARED_LOG_CFG``
# to a debug logger json file. To turn on debugging for this
# library, you can export this variable to the repo's
# included file with the command:
export SHARED_LOG_CFG=/opt/sa/analysis_engine/log/debug-logging.json
"""
import analysis_engine.consts as ae_consts
import analysis_engine.prepare_dict_for_algo as prepare_utils
import spylunking.log.setup_logging as log_utils
log = log_utils.build_colorized_logger(name=__name__)
def load_algo_dataset_from_file(
path_to_file,
serialize_datasets=ae_consts.DEFAULT_SERIALIZED_DATASETS,
compress=True,
encoding='utf-8'):
"""load_algo_dataset_from_file
Load an algorithm-ready dataset for algorithm backtesting
from a local file
:param path_to_file: string - path to file holding an
algorithm-ready dataset
:param serialize_datasets: optional - list of dataset names to
deserialize in the dataset
:param compress: optional - boolean flag for decompressing
the contents of the ``path_to_file`` if necessary
(default is ``True`` and algorithms
use ``zlib`` for compression)
:param encoding: optional - string for data encoding
"""
log.info(
f'start: {path_to_file}')
data_from_file = None
file_args = 'rb'
if not compress:
file_args = 'r'
with open(path_to_file, file_args) as cur_file:
data_from_file = cur_file.read()
if not data_from_file:
log.error(f'missing data from file={path_to_file}')
return None
return prepare_utils.prepare_dict_for_algo(
data=data_from_file,
compress=compress,
convert_to_dict=True,
encoding=encoding)
# end of load_algo_dataset_from_file
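# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): the file path below is
# a made-up placeholder; only the keyword arguments documented above are used.
if __name__ == '__main__':
    algo_data = load_algo_dataset_from_file(
        path_to_file='/tmp/algo-ready-dataset.json',  # hypothetical, uncompressed dump
        compress=False)
    log.info(f'loaded dataset keys: {list(algo_data) if algo_data else None}')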
|
statsforecast/core.py
|
Nixtla/statsforecast
| 483 |
95973
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/core.ipynb (unless otherwise specified).
__all__ = ['StatsForecast']
# Cell
import inspect
import logging
from functools import partial
from os import cpu_count
import numpy as np
import pandas as pd
# Internal Cell
logging.basicConfig(
format='%(asctime)s %(name)s %(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
)
logger = logging.getLogger(__name__)
# Internal Cell
class GroupedArray:
def __init__(self, data, indptr):
self.data = data
self.indptr = indptr
self.n_groups = self.indptr.size - 1
def __getitem__(self, idx):
if isinstance(idx, int):
return self.data[self.indptr[idx] : self.indptr[idx + 1]]
elif isinstance(idx, slice):
idx = slice(idx.start, idx.stop + 1, idx.step)
new_indptr = self.indptr[idx].copy()
new_data = self.data[new_indptr[0] : new_indptr[-1]].copy()
new_indptr -= new_indptr[0]
return GroupedArray(new_data, new_indptr)
raise ValueError(f'idx must be either int or slice, got {type(idx)}')
def __len__(self):
return self.n_groups
def __repr__(self):
return f'GroupedArray(n_data={self.data.size:,}, n_groups={self.n_groups:,})'
def __eq__(self, other):
if not hasattr(other, 'data') or not hasattr(other, 'indptr'):
return False
return np.allclose(self.data, other.data) and np.array_equal(self.indptr, other.indptr)
def compute_forecasts(self, h, func, xreg=None, level=None, *args):
has_level = 'level' in inspect.signature(func).parameters and level is not None
if has_level:
out = np.full((h * self.n_groups, 2 * len(level) + 1), np.nan, dtype=np.float32)
func = partial(func, level=level)
else:
out = np.full(h * self.n_groups, np.nan, dtype=np.float32)
xr = None
keys = None
for i, grp in enumerate(self):
if xreg is not None:
xr = xreg[i]
res = func(grp, h, xr, *args)
if has_level:
if keys is None:
keys = list(res.keys())
for j, key in enumerate(keys):
out[h * i : h * (i + 1), j] = res[key]
else:
out[h * i : h * (i + 1)] = res
return out, keys
def split(self, n_chunks):
return [self[x[0] : x[-1] + 1] for x in np.array_split(range(self.n_groups), n_chunks) if x.size]
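# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original notebook export): how the
# data/indptr layout above is read. The numbers are made up.
if __name__ == '__main__':
    _ga = GroupedArray(np.arange(6, dtype=np.float32).reshape(-1, 1),
                       np.array([0, 2, 6], dtype=np.int32))
    print(len(_ga))        # 2 groups
    print(_ga[0].ravel())  # first group holds rows data[0:2]
    print(_ga.split(2))    # one single-group GroupedArray per chunk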
# Internal Cell
def _grouped_array_from_df(df):
df = df.set_index('ds', append=True)
if not df.index.is_monotonic_increasing:
df = df.sort_index()
data = df.values.astype(np.float32)
indices_sizes = df.index.get_level_values('unique_id').value_counts(sort=False)
indices = indices_sizes.index
sizes = indices_sizes.values
cum_sizes = sizes.cumsum()
dates = df.index.get_level_values('ds')[cum_sizes - 1]
indptr = np.append(0, cum_sizes).astype(np.int32)
return GroupedArray(data, indptr), indices, dates
# Internal Cell
def _build_forecast_name(model, *args) -> str:
model_name = f'{model.__name__}'
func_params = inspect.signature(model).parameters
func_args = list(func_params.items())[3:] # remove input array, horizon and xreg
changed_params = [
f'{name}-{value}'
for value, (name, arg) in zip(args, func_args)
if arg.default != value
]
if changed_params:
model_name += '_' + '_'.join(changed_params)
return model_name
# Internal Cell
def _as_tuple(x):
if isinstance(x, tuple):
return x
return (x,)
# Internal Cell
def _get_n_jobs(n_groups, n_jobs, ray_address):
if ray_address is not None:
        logger.info(
            'Using ray address, '
            'using available resources instead of `n_jobs`'
        )
try:
import ray
except ModuleNotFoundError as e:
            msg = (
                f'{e}. To use a ray cluster you have to install '
                'ray. Please run `pip install ray`. '
            )
raise ModuleNotFoundError(msg) from e
if not ray.is_initialized():
ray.init(ray_address, ignore_reinit_error=True)
actual_n_jobs = int(ray.available_resources()['CPU'])
else:
if n_jobs == -1 or (n_jobs is None):
actual_n_jobs = cpu_count()
else:
actual_n_jobs = n_jobs
return min(n_groups, actual_n_jobs)
# Cell
class StatsForecast:
def __init__(self, df, models, freq, n_jobs=1, ray_address=None):
self.ga, self.uids, self.last_dates = _grouped_array_from_df(df)
self.models = models
self.freq = pd.tseries.frequencies.to_offset(freq)
self.n_jobs = _get_n_jobs(len(self.ga), n_jobs, ray_address)
self.ray_address = ray_address
def forecast(self, h, xreg=None, level=None):
if xreg is not None:
expected_shape = (h * len(self.ga), self.ga.data.shape[1])
if xreg.shape != expected_shape:
raise ValueError(f'Expected xreg to have shape {expected_shape}, but got {xreg.shape}')
xreg, _, _ = _grouped_array_from_df(xreg)
if self.n_jobs == 1:
fcsts = self._sequential_forecast(h, xreg, level)
else:
fcsts = self._data_parallel_forecast(h, xreg, level)
if issubclass(self.last_dates.dtype.type, np.integer):
last_date_f = lambda x: np.arange(x + 1, x + 1 + h, dtype=self.last_dates.dtype)
else:
last_date_f = lambda x: pd.date_range(x + self.freq, periods=h, freq=self.freq)
if len(np.unique(self.last_dates)) == 1:
dates = np.tile(last_date_f(self.last_dates[0]), len(self.ga))
else:
dates = np.hstack([
last_date_f(last_date)
for last_date in self.last_dates
])
idx = pd.Index(np.repeat(self.uids, h), name='unique_id')
return pd.DataFrame({'ds': dates, **fcsts}, index=idx)
def _sequential_forecast(self, h, xreg, level):
fcsts = {}
logger.info('Computing forecasts')
for model_args in self.models:
model, *args = _as_tuple(model_args)
model_name = _build_forecast_name(model, *args)
values, keys = self.ga.compute_forecasts(h, model, xreg, level, *args)
if keys is not None:
for j, key in enumerate(keys):
fcsts[f'{model_name}_{key}'] = values[:, j]
else:
fcsts[model_name] = values
logger.info(f'Computed forecasts for {model_name}.')
return fcsts
def _data_parallel_forecast(self, h, xreg, level):
fcsts = {}
logger.info('Computing forecasts')
gas = self.ga.split(self.n_jobs)
if xreg is not None:
xregs = xreg.split(self.n_jobs)
else:
from itertools import repeat
xregs = repeat(None)
if self.ray_address is not None:
try:
from ray.util.multiprocessing import Pool
except ModuleNotFoundError as e:
msg = (
f'{e}. To use a ray cluster you have to install '
'ray. Please run `pip install ray`. '
)
raise ModuleNotFoundError(msg) from e
kwargs = dict(ray_address=self.ray_address)
else:
from multiprocessing import Pool
kwargs = dict()
with Pool(self.n_jobs, **kwargs) as executor:
for model_args in self.models:
model, *args = _as_tuple(model_args)
model_name = _build_forecast_name(model, *args)
futures = []
for ga, xr in zip(gas, xregs):
future = executor.apply_async(ga.compute_forecasts, (h, model, xr, level, *args,))
futures.append(future)
values, keys = list(zip(*[f.get() for f in futures]))
keys = keys[0]
if keys is not None:
values = np.vstack(values)
for j, key in enumerate(keys):
fcsts[f'{model_name}_{key}'] = values[:, j]
else:
values = np.hstack(values)
fcsts[model_name] = values
logger.info(f'Computed forecasts for {model_name}.')
return fcsts
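# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original notebook export): a toy series
# and a hand-rolled `naive` model. Only the (y, h, xreg, *args) calling
# convention used by GroupedArray.compute_forecasts above is relied upon; the
# dataframe layout (a 'unique_id' index plus 'ds'/'y' columns) mirrors
# _grouped_array_from_df, and everything else is made up.
if __name__ == '__main__':
    def naive(y, h, xreg):
        # repeat the last observed value h times
        return np.repeat(y[-1], h)

    toy = pd.DataFrame(
        {'ds': pd.date_range('2000-01-01', periods=10, freq='D'), 'y': np.arange(10.0)},
        index=pd.Index(['series_1'] * 10, name='unique_id'),
    )
    fcst = StatsForecast(toy, models=[naive], freq='D', n_jobs=1)
    print(fcst.forecast(h=3))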
|
src/pymap3d/mathfun.py
|
scivision/pymap3d
| 108 |
95979
|
"""
import from Numpy, and if not available fallback to math stdlib
"""
try:
from numpy import (
sin,
cos,
sqrt,
exp,
log,
inf,
isnan,
radians,
tan,
arctan as atan,
hypot,
degrees,
arctan2 as atan2,
arcsin as asin,
arcsinh as asinh,
arctanh as atanh,
power,
)
except ImportError:
from math import sin, cos, sqrt, exp, log, inf, isnan, radians, tan, atan, hypot, degrees, atan2, asin, asinh, atanh # type: ignore
def power(x, y): # type: ignore
return pow(x, y)
|
recipes/Python/577867_Make_Class_Available_its_Own/recipe-577867.py
|
tdiprima/code
| 2,023 |
95989
|
class InwardMeta(type):
@classmethod
def __prepare__(meta, name, bases, **kwargs):
cls = super().__new__(meta, name, bases, {})
return {"__newclass__": cls}
def __new__(meta, name, bases, namespace):
cls = namespace["__newclass__"]
del namespace["__newclass__"]
        for attr in namespace:
            setattr(cls, attr, namespace[attr])
return cls
|
esphome/helpers.py
|
OttoWinter/esphomeyaml
| 249 |
96001
|
import codecs
from contextlib import suppress
import logging
import os
from pathlib import Path
from typing import Union
import tempfile
_LOGGER = logging.getLogger(__name__)
def ensure_unique_string(preferred_string, current_strings):
test_string = preferred_string
current_strings_set = set(current_strings)
tries = 1
while test_string in current_strings_set:
tries += 1
test_string = f"{preferred_string}_{tries}"
return test_string
def indent_all_but_first_and_last(text, padding=" "):
lines = text.splitlines(True)
if len(lines) <= 2:
return text
return lines[0] + "".join(padding + line for line in lines[1:-1]) + lines[-1]
def indent_list(text, padding=" "):
return [padding + line for line in text.splitlines()]
def indent(text, padding=" "):
return "\n".join(indent_list(text, padding))
# From https://stackoverflow.com/a/14945195/8924614
def cpp_string_escape(string, encoding="utf-8"):
def _should_escape(byte): # type: (int) -> bool
if not 32 <= byte < 127:
return True
if byte in (ord("\\"), ord('"')):
return True
return False
if isinstance(string, str):
string = string.encode(encoding)
result = ""
for character in string:
if _should_escape(character):
result += f"\\{character:03o}"
else:
result += chr(character)
return f'"{result}"'
def run_system_command(*args):
import subprocess
with subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as p:
stdout, stderr = p.communicate()
rc = p.returncode
return rc, stdout, stderr
def mkdir_p(path):
if not path:
# Empty path - means create current dir
return
try:
os.makedirs(path)
except OSError as err:
import errno
if err.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
from esphome.core import EsphomeError
raise EsphomeError(f"Error creating directories {path}: {err}") from err
def is_ip_address(host):
parts = host.split(".")
if len(parts) != 4:
return False
try:
for p in parts:
int(p)
return True
except ValueError:
return False
def _resolve_with_zeroconf(host):
from esphome.core import EsphomeError
from esphome.zeroconf import EsphomeZeroconf
try:
zc = EsphomeZeroconf()
except Exception as err:
raise EsphomeError(
"Cannot start mDNS sockets, is this a docker container without "
"host network mode?"
) from err
try:
info = zc.resolve_host(f"{host}.")
except Exception as err:
raise EsphomeError(f"Error resolving mDNS hostname: {err}") from err
finally:
zc.close()
if info is None:
raise EsphomeError(
"Error resolving address with mDNS: Did not respond. "
"Maybe the device is offline."
)
return info
def resolve_ip_address(host):
from esphome.core import EsphomeError
import socket
errs = []
if host.endswith(".local"):
try:
return _resolve_with_zeroconf(host)
except EsphomeError as err:
errs.append(str(err))
try:
return socket.gethostbyname(host)
except OSError as err:
errs.append(str(err))
    raise EsphomeError(f"Error resolving IP address: {', '.join(errs)}")
def get_bool_env(var, default=False):
return bool(os.getenv(var, default))
def is_ha_addon():
return get_bool_env("ESPHOME_IS_HA_ADDON")
def walk_files(path):
for root, _, files in os.walk(path):
for name in files:
yield os.path.join(root, name)
def read_file(path):
try:
with codecs.open(path, "r", encoding="utf-8") as f_handle:
return f_handle.read()
except OSError as err:
from esphome.core import EsphomeError
raise EsphomeError(f"Error reading file {path}: {err}") from err
except UnicodeDecodeError as err:
from esphome.core import EsphomeError
raise EsphomeError(f"Error reading file {path}: {err}") from err
def _write_file(path: Union[Path, str], text: Union[str, bytes]):
"""Atomically writes `text` to the given path.
Automatically creates all parent directories.
"""
if not isinstance(path, Path):
path = Path(path)
data = text
if isinstance(text, str):
data = text.encode()
directory = path.parent
directory.mkdir(exist_ok=True, parents=True)
tmp_path = None
try:
with tempfile.NamedTemporaryFile(
mode="wb", dir=directory, delete=False
) as f_handle:
tmp_path = f_handle.name
f_handle.write(data)
# Newer tempfile implementations create the file with mode 0o600
os.chmod(tmp_path, 0o644)
# If destination exists, will be overwritten
os.replace(tmp_path, path)
finally:
if tmp_path is not None and os.path.exists(tmp_path):
try:
os.remove(tmp_path)
except OSError as err:
_LOGGER.error("Write file cleanup failed: %s", err)
def write_file(path: Union[Path, str], text: str):
try:
_write_file(path, text)
except OSError as err:
from esphome.core import EsphomeError
raise EsphomeError(f"Could not write file at {path}") from err
def write_file_if_changed(path: Union[Path, str], text: str) -> bool:
"""Write text to the given path, but not if the contents match already.
Returns true if the file was changed.
"""
if not isinstance(path, Path):
path = Path(path)
src_content = None
if path.is_file():
src_content = read_file(path)
if src_content == text:
return False
write_file(path, text)
return True
def copy_file_if_changed(src: os.PathLike, dst: os.PathLike) -> None:
import shutil
if file_compare(src, dst):
return
mkdir_p(os.path.dirname(dst))
try:
shutil.copyfile(src, dst)
except OSError as err:
if isinstance(err, PermissionError):
# Older esphome versions copied over the src file permissions too.
# So when the dst file had 444 permissions, the dst file would have those
# too and subsequent writes would fail
# -> delete file (it would be overwritten anyway), and try again
# if that fails, use normal error handler
with suppress(OSError):
os.unlink(dst)
shutil.copyfile(src, dst)
return
from esphome.core import EsphomeError
raise EsphomeError(f"Error copying file {src} to {dst}: {err}") from err
def list_starts_with(list_, sub):
return len(sub) <= len(list_) and all(list_[i] == x for i, x in enumerate(sub))
def file_compare(path1: os.PathLike, path2: os.PathLike) -> bool:
"""Return True if the files path1 and path2 have the same contents."""
import stat
try:
stat1, stat2 = os.stat(path1), os.stat(path2)
except OSError:
# File doesn't exist or another error -> not equal
return False
if (
stat.S_IFMT(stat1.st_mode) != stat.S_IFREG
or stat.S_IFMT(stat2.st_mode) != stat.S_IFREG
):
# At least one of them is not a regular file (or does not exist)
return False
if stat1.st_size != stat2.st_size:
# Different sizes
return False
bufsize = 8 * 1024
# Read files in blocks until a mismatch is found
with open(path1, "rb") as fh1, open(path2, "rb") as fh2:
while True:
blob1, blob2 = fh1.read(bufsize), fh2.read(bufsize)
if blob1 != blob2:
# Different content
return False
if not blob1:
# Reached end
return True
# A dict of types that need to be converted to heaptypes before a class can be added
# to the object
_TYPE_OVERLOADS = {
int: type("EInt", (int,), {}),
float: type("EFloat", (float,), {}),
str: type("EStr", (str,), {}),
    dict: type("EDict", (dict,), {}),
list: type("EList", (list,), {}),
}
# cache created classes here
_CLASS_LOOKUP = {}
def add_class_to_obj(value, cls):
"""Add a class to a python type.
This function modifies value so that it has cls as a basetype.
The value itself may be modified by this action! You must use the return
value of this function however, since some types need to be copied first (heaptypes).
"""
if isinstance(value, cls):
# If already is instance, do not add
return value
try:
orig_cls = value.__class__
key = (orig_cls, cls)
new_cls = _CLASS_LOOKUP.get(key)
if new_cls is None:
new_cls = orig_cls.__class__(orig_cls.__name__, (orig_cls, cls), {})
_CLASS_LOOKUP[key] = new_cls
value.__class__ = new_cls
return value
except TypeError:
# Non heap type, look in overloads dict
for type_, func in _TYPE_OVERLOADS.items():
# Use type() here, we only need to trigger if it's the exact type,
# as otherwise we don't need to overload the class
if type(value) is type_: # pylint: disable=unidiomatic-typecheck
return add_class_to_obj(func(value), cls)
raise
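# ---------------------------------------------------------------------------
# Hedged illustration (not part of esphome): what add_class_to_obj does with a
# non-heap builtin such as int. The _SourceMarker class is hypothetical.
if __name__ == "__main__":

    class _SourceMarker:
        pass

    _tagged = add_class_to_obj(42, _SourceMarker)
    print(isinstance(_tagged, int))            # True - still behaves like an int
    print(isinstance(_tagged, _SourceMarker))  # True - marker base was mixed in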
|
PhysicsTools/JetExamples/test/printJetFlavour.py
|
ckamtsikis/cmssw
| 852 |
96002
|
<reponame>ckamtsikis/cmssw
import FWCore.ParameterSet.Config as cms
process = cms.Process("testJET")
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True)
)
process.load("FWCore.MessageService.MessageLogger_cfi")
process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(5)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
# /TTJets_MassiveBinDECAY_TuneZ2star_8TeV-madgraph-tauola/Summer12_DR53X-PU_S10_START53_V7C-v1/AODSIM
'/store/mc/Summer12_DR53X/TTJets_MassiveBinDECAY_TuneZ2star_8TeV-madgraph-tauola/AODSIM/PU_S10_START53_V7C-v1/00000/008BD264-1526-E211-897A-00266CFFA7BC.root'
)
)
process.printList = cms.EDAnalyzer("ParticleListDrawer",
src = cms.InputTag("genParticles"),
maxEventsToPrint = cms.untracked.int32(1)
)
process.myPartons = cms.EDProducer("PartonSelector",
src = cms.InputTag("genParticles"),
withLeptons = cms.bool(False)
)
process.flavourByRef = cms.EDProducer("JetPartonMatcher",
jets = cms.InputTag("ak5PFJets"),
coneSizeToAssociate = cms.double(0.3),
partons = cms.InputTag("myPartons")
)
process.flavourByVal = cms.EDProducer("JetFlavourIdentifier",
srcByReference = cms.InputTag("flavourByRef"),
physicsDefinition = cms.bool(False)
)
process.printEvent = cms.EDAnalyzer("printJetFlavour",
srcSelectedPartons = cms.InputTag("myPartons"),
srcByReference = cms.InputTag("flavourByRef"),
srcByValue = cms.InputTag("flavourByVal")
)
process.p = cms.Path(process.printList*process.myPartons*process.flavourByRef*process.flavourByVal*process.printEvent)
#process.MessageLogger.cout = dict(enable = True, threshold = 'ERROR')
|
components/detector.py
|
awesome-archive/FooProxy
| 235 |
96078
|
# coding:utf-8
"""
@author : linkin
@email : <EMAIL>
@date : 2018-10-07
"""
import time
import asyncio
import logging
from components.dbhelper import Database
from config.DBsettings import _DB_SETTINGS
from config.DBsettings import _TABLE
from config.config import DETECT_HIGH_AMOUNT
from config.config import DETECT_LOCAL
from config.config import DETECT_AMOUNT
from config.config import STABLE_MIN_RATE
from config.config import STABLE_MIN_COUNT
from config.config import DELETE_COMBO
logger = logging.getLogger('Detector')
class Detector(object):
"""
本地检测器,主要职责有三:
1. 负责检测本地standby数据库中存入的有效代理IP数据是否有符合高分稳定条件的,
有则存入高分稳定数据库stable数据库
2. 检测standby数据库的同时,如果符合高分条件的代理已经在stable中,则将standby中
该代理的最新数据同步更新到stable数据库中
3. 负责检测stable数据库中的高分稳定代理是否有不符合高分条件的,有则从stable中删除
"""
def __init__(self):
self.standbyDB = Database(_DB_SETTINGS)
self.stableDB = Database(_DB_SETTINGS)
self.standbyDB.table = _TABLE['standby']
self.stableDB.table = _TABLE['stable']
self.standby_data = []
self.stable_data = []
def begin(self):
self.stableDB.connect()
self.standbyDB.connect()
def end(self):
self.standbyDB.close()
self.stableDB.close()
def run(self):
"""
运行本地检测器,利用asyncio提供的异步读写
"""
logger.info('Running Detector.')
self.begin()
loop = asyncio.get_event_loop()
while 1:
try:
self.detect_standby(loop)
self.detect_stable(loop)
time.sleep(DETECT_LOCAL)
except Exception as e:
logger.error('Error class : %s , msg : %s ' % (e.__class__, e))
self.end()
loop.close()
logger.info('Detector shuts down.')
return
def detect_standby(self,loop):
"""
检测standby数据库
:param loop: 异步事件循环
"""
if self.standby_data:
pen = len(self.standby_data)
            logger.info('Imported the "standby" database\'s data, length: %d' % pen)
            pop_len = pen if pen <= DETECT_AMOUNT else DETECT_AMOUNT
            logger.info('Start to detect the local valid data, amount: %d' % pop_len)
            standby_data = [self.standby_data.pop() for i in range(pop_len)]
            tasks = [self._detect_standby(i) for i in standby_data]
            loop.run_until_complete(asyncio.gather(*tasks))
            logger.info('Detection finished. Left standby data length: %d' % len(self.standby_data))
else:
self.standby_data = self.standbyDB.all()
def detect_stable(self,loop):
"""
检测stable数据库
:param loop: 异步事件循环
"""
if self.stable_data:
pen = len(self.stable_data)
            logger.info('Imported the "stable" database\'s data, length: %d' % pen)
            pop_len = pen if pen <= DETECT_HIGH_AMOUNT else DETECT_HIGH_AMOUNT
            logger.info('Start to detect the high scored data, amount: %d' % pop_len)
            stable_data = [self.stable_data.pop() for i in range(pop_len)]
            tasks = [self._detect_stable(i) for i in stable_data]
            loop.run_until_complete(asyncio.gather(*tasks))
            logger.info('Detection finished. Left stable data length: %d' % len(self.stable_data))
else:
self.stable_data = self.stableDB.all()
async def _detect_standby(self,data):
"""
异步协程,对单个standby数据库中的数据文档进行检测
其中的
data['test_count']<STABLE_MIN_COUNT
表示 测试总数小于config中配置的数值
round(float(data['success_rate'].replace('%',''))/100,4)< STABLE_MIN_RATE
表示 成功率小于config中配置的数值
data['combo_fail'] >= DELETE_COMBO
表示 连续失败数 超过或等于config中配置的数值
:param data: standby中的单个数据文档 ,dict类型
"""
del data['_id']
ip = data['ip']
port = data['port']
proxy = ':'.join([ip,port])
if data['test_count']<STABLE_MIN_COUNT or round(float(data['success_rate'].replace('%',''))/100,4)\
< STABLE_MIN_RATE or data['combo_fail'] >= DELETE_COMBO:
return
condition = {'ip':ip,'port':port}
_one_data = self.stableDB.select(condition)
if _one_data:
self.stableDB.update(condition,data)
else:
self.stableDB.save(data)
logger.info('Find a stable proxy: %s , put it into the stable database.' % proxy)
async def _detect_stable(self,data):
"""
异步协程,对单个stable数据库中的数据文档进行检测
其中的
round(float(_one_data['success_rate'].replace('%',''))/100,4)< STABLE_MIN_RATE
表示 成功率小于config中配置的数值
_one_data['combo_fail'] >= DELETE_COMBO
表示 连续失败数 超过或等于config中配置的数值
:param data: stable中的单个数据文档 ,dict类型
"""
ip = data['ip']
port = data['port']
proxy = ':'.join([ip,port])
condition = {'ip':ip,'port':port}
res = self.standbyDB.select(condition)
_one_data = res[0] if res else None
if not bool(_one_data):
self.stableDB.delete(condition)
            logger.warning(
                'The high scored proxy: %s had been deleted from the standby database. It\'s unavailable.' % proxy)
else:
if round(float(_one_data['success_rate'].replace('%',''))/100,4) < STABLE_MIN_RATE or _one_data['combo_fail'] >= DELETE_COMBO:
self.stableDB.delete(condition)
                logger.warning(
                    'The high scored proxy: %s is not that stable now. It\'s removed.' % proxy)
else:
del _one_data['_id']
self.stableDB.update(condition,_one_data)
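# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): the detector is a
# long-running loop, normally launched by the project's scheduler; running the
# module directly like this is only an illustration.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    Detector().run()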
|
reactivated/widgets.py
|
silviogutierrez/reactivated
| 178 |
96081
|
from typing import Any, Dict, Optional, cast
from django import forms
from django.core.exceptions import ValidationError
from django.forms.models import ModelChoiceIterator
class Autocomplete(forms.Select):
template_name = "reactivated/autocomplete"
def get_context(
self, name: str, value: Any, attrs: Optional[Dict[str, Any]]
) -> Dict[str, Any]:
choices = cast(ModelChoiceIterator, self.choices)
assert choices.queryset is not None
assert hasattr(
choices.queryset, "autocomplete"
), "Models marked for autocompletion must implement autocomplete(query: str) at the manager level"
to_field_name = choices.field.to_field_name or "pk"
# context = forms.Widget.get_context(self, name, value, attrs)
# self.choices.queryset = self.choices.queryset._clone()[:10]
# context = super().get_context(name, value, attrs)
context = forms.Widget.get_context(self, name, value, attrs)
try:
selected = choices.field.to_python(value)
except ValidationError:
selected = None
if selected is not None:
context["widget"]["selected"] = {
"value": getattr(selected, to_field_name),
"label": str(selected),
}
else:
context["widget"]["selected"] = None
return context
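# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). The model, manager and
# form below are hypothetical; the only contract taken from the assert above is
# that the queryset's manager exposes autocomplete(query: str). Kept as comments
# because model classes need a configured Django app to be importable.
#
# from django.db import models
#
# class CountryManager(models.Manager):
#     def autocomplete(self, query: str):
#         return self.filter(name__icontains=query)[:10]
#
# class Country(models.Model):
#     name = models.CharField(max_length=100)
#     objects = CountryManager()
#
# class CountryForm(forms.Form):
#     country = forms.ModelChoiceField(
#         queryset=Country.objects.all(),
#         widget=Autocomplete,
#     )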
|
cupcake2/ice2/IceArrowAll2.py
|
ArthurDondi/cDNA_Cupcake
| 205 |
96128
|
<filename>cupcake2/ice2/IceArrowAll2.py
from cupcake2.tofu2.ToFuOptions2 import add_sge_arguments, \
add_tmp_dir_argument, add_cluster_summary_report_arguments, \
add_ice_post_arrow_hq_lq_arguments2, \
add_cluster_root_dir_as_positional_argument, \
add_fofn_arguments
from cupcake2.ice2.IceArrow2 import IceArrow2
from cupcake2.ice2.IceArrowPostProcess2 import IceArrowPostProcess2
from cupcake2.ice2.__init__ import ICE_ARROW_PY
class IceArrowAll2(object):
"""
IceArrowAll2
"""
desc = "After assigning all non-full-length reads to unpolished " + \
"consensus isoforms (e.g., 'run_IcePartials2.py all' is done), " + \
"polish these isoforms by using Arrow for Sequel data " + \
"then output high QV and low QV isoforms."
prog = "%s all " % ICE_ARROW_PY
def __init__(self, root_dir, subread_xml, sge_opts, ipq_opts,
report_fn=None, summary_fn=None, tmp_dir=None, prog_name=None):
prog_name = prog_name if prog_name is not None else "IceArrowAll2"
self.root_dir = root_dir
self.subread_xml = subread_xml
self.report_fn = report_fn
self.summary_fn = summary_fn
self.sge_opts = sge_opts
self.ipq_opts = ipq_opts
self.tmp_dir = tmp_dir
def cmd_str(self):
return self._cmd_str(root_dir=self.root_dir, subread_xml=self.subread_xml,
sge_opts=self.sge_opts,
ipq_opts=self.ipq_opts, report_fn=self.report_fn,
summary_fn=self.summary_fn, tmp_dir=self.tmp_dir)
def _cmd_str(self, root_dir, subread_xml, sge_opts, ipq_opts,
report_fn, summary_fn, tmp_dir):
"""Return a cmd string. ($ICE_ARROW_PY all)."""
cmd = self.prog + \
"{d} ".format(d=root_dir) + \
"--subread_xml={f} ".format(f=subread_xml)
if tmp_dir is not None:
cmd += "--tmp_dir={d} ".format(d=tmp_dir)
if report_fn is not None:
cmd += "--report={f} ".format(f=report_fn)
if summary_fn is not None:
cmd += "--summary={f} ".format(f=summary_fn)
cmd += sge_opts.cmd_str(show_blasr_nproc=True, show_arrow_nproc=True)
cmd += ipq_opts.cmd_str()
return cmd
def run(self):
"""Run"""
iceq = IceArrow2(root_dir=self.root_dir, subread_xml=self.subread_xml,
sge_opts=self.sge_opts,
tmp_dir=self.tmp_dir)
iceq.validate_inputs()
iceq.run()
icepq = IceArrowPostProcess2(root_dir=self.root_dir,
quit_if_not_done=False,
ipq_opts=self.ipq_opts)
icepq.run()
return 0
def add_ice_arrow_all_arguments(parser):
arg_parser = parser #parser.arg_parser.parser
arg_parser = add_cluster_root_dir_as_positional_argument(arg_parser)
arg_parser = add_fofn_arguments(arg_parser, subread_xml=True)
arg_parser = add_cluster_summary_report_arguments(arg_parser)
arg_parser = add_ice_post_arrow_hq_lq_arguments2(arg_parser)
arg_parser = add_sge_arguments(arg_parser, blasr_nproc=True, arrow_nproc=True)
arg_parser = add_tmp_dir_argument(arg_parser)
return arg_parser
|
examples/image/cath/datasets/to_ignore/modelnet/modelnet.py
|
mariogeiger/se3cnn
| 170 |
96130
|
<reponame>mariogeiger/se3cnn
# pylint: disable=E1101,R,C
import glob
import os
import numpy as np
import torch
import torch.utils.data
import shutil
from functools import partial
from scipy.ndimage import affine_transform
from se3cnn.SO3 import rot
def get_modelnet_loader(root_dir, dataset, mode, size, data_loader_kwargs, args):
if dataset == 'ModelNet10':
classes = ["bathtub", "bed", "chair", "desk", "dresser", "monitor", "night_stand", "sofa", "table", "toilet"]
else:
raise NotImplementedError('Other datasets than ModelNet10 not fully implemented yet')
def _get_trafo(size, args):
trafos = []
if args.add_z_axis:
trafos.append(AddZAxis(zmin=-size/2, zmax=size/2))
affine_trafo_args = {'scale': (1,args.augment_scales) if args.augment_scales is not False else False,
'flip': args.augment_flip,
'translate': args.augment_translate,
'rotate': args.augment_rotate}
if not all(False for val in affine_trafo_args.values()):
trafos.append(RandomAffine3d(vol_shape=(size,size,size), **affine_trafo_args))
if len(trafos) == 0:
return None
else:
from torchvision.transforms import Compose
return Compose(trafos)
transform = _get_trafo(size, args)
dataset = ModelNet(root_dir, dataset, mode, size, classes, transform)
data_loader = torch.utils.data.DataLoader(dataset, **data_loader_kwargs)
return dataset, data_loader
class ModelNet(torch.utils.data.Dataset):
''' '''
def __init__(self, root_dir, dataset, mode, size, classes, transform=None, target_transform=None):
'''
:param root: directory to store dataset in
:param dataset:
:param mode: dataset to load: 'train', 'validation', 'test' or 'train_full'
the validation set is split from the train set, the full train set can be accessed via 'train_full'
:param transform: transformation applied to image in __getitem__
currently used to load cached file from string
:param target_transform: transformation applied to target in __getitem__
'''
self.root = os.path.expanduser(root_dir)
assert dataset in ['ModelNet10', 'ModelNet40']
self.dataset = dataset
assert mode in ['train', 'validation', 'test', 'train_full']
self.mode = mode
self.size = size
self.classes = classes
self.transform = transform
self.target_transform = target_transform
if mode == 'train_full':
self.file_names = sorted(glob.glob(os.path.join(self.root, self.dataset, '*', 'train', '*_size{}.npy'.format(self.size))))
self.file_names += sorted(glob.glob(os.path.join(self.root, self.dataset, '*', 'validation', '*_size{}.npy'.format(self.size))))
else:
self.file_names = sorted(glob.glob(os.path.join(self.root, self.dataset, '*', self.mode, '*_size{}.npy'.format(self.size))))
assert self.__len__() > 0
print('Loaded dataset \'{}\', size \'{}\' in mode \'{}\' with {} elements'.format(self.dataset, self.size, self.mode, self.__len__()))
def __getitem__(self, index):
img_fname = self.file_names[index]
img = np.load(img_fname).astype(np.int8).reshape((1, self.size, self.size, self.size))
target_class_string = img_fname.split(os.path.sep)[-3]
target = self.classes.index(target_class_string)
if self.transform is not None:
img = self.transform(img)
img = torch.from_numpy(img.astype(np.float32))
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
return len(self.file_names)
class AddZAxis(object):
''' add z-axis as second channel to volume
the scale of the z-axis can be set freely
if the volume tensor does not contain a channel dimension, add it
the z axis is assumed to be the last axis of the volume tensor
'''
def __init__(self, zmin, zmax):
        ''' :param zmin: min z-value
            :param zmax: max z-value
'''
self.zmin = zmin
self.zmax = zmax
def __call__(self, sample):
assert sample.ndim in (3,4)
if sample.ndim == 3:
sample = sample[np.newaxis,...]
broadcast_shape = list(sample.shape)
broadcast_shape[0] = 1
zsize = sample.shape[-1]
zcoords = np.linspace(self.zmin, self.zmax, num=zsize, endpoint=True)
zcoords = np.broadcast_to(zcoords, broadcast_shape)
return np.concatenate([sample,zcoords], axis=0)
class RandomAffine3d(object):
''' random affine transformation applied to volume center
assumes volume with channel dimension, shape (C,X,Y,Z)
'''
def __init__(self, vol_shape, scale=(.9,1.1), flip=True, translate=True, rotate=True):
''' :param vol_shape: shape of the volumes (X,Y,Z), needed to compute center
:param scale: False or tuple giving min and max scale value
VALUES <1 ZOOM IN !!!
:param flip: bool controlling random reflection with p=0.5
            :param translate: bool controlling uniform random translations in (-.5, .5) on all axes
:param rotate: bool controlling uniform random rotations
'''
self.vol_shape = np.array(vol_shape)
self.scale = scale if scale is not False else (1,1)
self.flip = flip
self.translate = translate
self.rotate = rotate
def __call__(self, sample):
assert sample.ndim == 4
trafo = self._get_random_affine_trafo()
return np.stack([trafo(channel) for channel in sample])
def _get_random_affine_trafo(self):
if self.rotate:
alpha,beta,gamma = np.pi*np.array([2,1,2])*np.random.rand(3)
aff = rot(alpha,beta,gamma)
else:
aff = np.eye(3) # only non-homogeneous coord part
fl = (-1)**np.random.randint(low=0, high=2) if self.flip else 1
        if self.scale is not None:
            sx,sy,sz = np.random.uniform(low=self.scale[0], high=self.scale[1], size=3)
        else:
            sx = sy = sz = 1
aff[:,0] *= sx*fl
aff[:,1] *= sy
aff[:,2] *= sz
center = self.vol_shape/2
offset = center - [email protected] # correct offset to apply trafo around center
if self.translate:
offset += np.random.uniform(low=-.5, high=.5, size=3)
return partial(affine_transform, matrix=aff, offset=offset)
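# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): composing the two
# transforms above on a fake occupancy volume. The volume contents, size and
# transform settings are made up; rotate is switched off so the sketch stays
# on the pure-numpy code path.
if __name__ == '__main__':
    from torchvision.transforms import Compose
    fake_volume = np.zeros((1, 32, 32, 32), dtype=np.int8)
    transform = Compose([
        AddZAxis(zmin=-16, zmax=16),
        RandomAffine3d(vol_shape=(32, 32, 32), scale=(0.9, 1.1), rotate=False),
    ])
    out = transform(fake_volume)
    print(out.shape)  # (2, 32, 32, 32): occupancy channel plus z-coordinate channel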
|
TopQuarkAnalysis/TopJetCombination/python/TtFullLepHypKinSolution_cfi.py
|
ckamtsikis/cmssw
| 852 |
96166
|
<reponame>ckamtsikis/cmssw
import FWCore.ParameterSet.Config as cms
#
# module to make the kinematic solution hypothesis
#
ttFullLepHypKinSolution = cms.EDProducer("TtFullLepHypKinSolution",
electrons = cms.InputTag("selectedPatElectrons"),
muons = cms.InputTag("selectedPatMuons"),
jets = cms.InputTag("selectedPatJets"),
mets = cms.InputTag("patMETs"),
match = cms.InputTag("kinSolutionTtFullLepEventHypothesis"),
Neutrinos = cms.InputTag("kinSolutionTtFullLepEventHypothesis","fullLepNeutrinos"),
NeutrinoBars = cms.InputTag("kinSolutionTtFullLepEventHypothesis","fullLepNeutrinoBars"),
solutionWeight = cms.InputTag("kinSolutionTtFullLepEventHypothesis","solWeight"),
jetCorrectionLevel = cms.string("L3Absolute")
)
|
tests/guinea-pigs/unittest/test_changes_name.py
|
Tirzono/teamcity-messages
| 105 |
96167
|
<filename>tests/guinea-pigs/unittest/test_changes_name.py
import unittest
from teamcity.unittestpy import TeamcityTestRunner
class Foo(unittest.TestCase):
a = 1
def test_aa(self): pass
def shortDescription(self):
s = str(self.a)
self.a += 10
return s
unittest.main(testRunner=TeamcityTestRunner())
|
pyats-to-netbox/netbox_utils.py
|
fallenfuzz/netdevops_demos
| 104 |
96172
|
"""Library of functions used to work with netbox.
"""
import pynetbox
import os
# Common mappings for device types and roles
device_roles = {
"CSR1000v": "router",
"ASAv": "firewall",
"NX-OSv 9000": "switch",
"IOSvL2": "switch",
"OTHER": "other",
}
# Constants for Interface Form Factor IDs
FF_1000BASE_T = 1000
FF_SFPPLUS = 1200
FF_OTHER = 32767
# Pull in details about the netbox environment to use
netbox_token = os.getenv("NETBOX_TOKEN")
netbox_url = os.getenv("NETBOX_URL")
netbox_site_name = os.getenv("NETBOX_SITE")
# Create netbox API object
netbox = pynetbox.api(netbox_url, token=netbox_token)
def netbox_manufacturer(name):
nb_manufacturer = netbox.dcim.manufacturers.get(name=name)
if nb_manufacturer is None:
# Create a slug from the name
slug = (
name.lower()
.replace(" ", "-")
.replace(",", "-")
.replace(".", "_")
.replace("(", "_")
.replace(")", "_")
)
nb_manufacturer = netbox.dcim.manufacturers.create(
name=name, slug=slug
)
return nb_manufacturer
def netbox_device(genie_device):
"""Get or Create a device in netbox based on a genie device object.
"""
# See if device exists, if not create one.
nb_device = netbox.dcim.devices.get(name=genie_device.name)
if nb_device is None:
nb_manufacturer = netbox_manufacturer("Cisco")
# Verify Device Type Exists, if not create one.
# ToDo: refactor to function
nb_device_type = netbox.dcim.device_types.get(model=genie_device.type)
if nb_device_type is None:
device_slug=(
str(genie_device.type).lower()
.replace(" ", "-")
.replace(",", "-")
.replace(".", "_")
.replace("(", "_")
.replace(")", "_")
)
nb_device_type = netbox.dcim.device_types.create(
manufacturer=nb_manufacturer.id,
model=genie_device.type,
slug=device_slug,
u_height=1,
)
# Get the device role based on type. If not defined, set to "OTHER"
if genie_device.type in device_roles:
nb_device_role = netbox_device_role(
device_roles[genie_device.type]
)
else:
nb_device_role = netbox_device_role(device_roles["OTHER"])
nb_site = netbox_site(netbox_site_name)
# Create the device in netbox
nb_device = netbox.dcim.devices.create(
name=genie_device.name,
device_type=netbox.dcim.device_types.get(
model=genie_device.type
).id,
device_role=nb_device_role.id,
site=nb_site.id,
status=1,
tags=[],
)
return nb_device
def netbox_site(name):
"""Get or Create a netbox site object."""
nb_site = netbox.dcim.sites.get(name=name)
if nb_site is None:
# Create a slug from the name
slug = (
name.lower()
.replace(" ", "-")
.replace(",", "-")
.replace(".", "_")
.replace("(", "_")
.replace(")", "_")
)
nb_site = netbox.dcim.sites.create(
name=name, slug=slug, status=1
)
return nb_site
def netbox_device_role(name):
"""Get or Create a netbox device role."""
nb_role = netbox.dcim.device_roles.get(name=name)
if nb_role is None:
# Create a slug from the name
slug = (
name.lower()
.replace(" ", "-")
.replace(",", "-")
.replace(".", "_")
.replace("(", "_")
.replace(")", "_")
)
nb_role = netbox.dcim.device_roles.create(
name=name, slug=slug, color="c0c0c0"
)
return nb_role
def update_netbox_device(nb_device, genie_platform):
"""Update device details in netbox based on Genie platform info.
"""
    # Device serial number
nb_device.serial = genie_platform.chassis_sn
# Device Platform - That is OS and Version
platform_name = "{} {}".format(genie_platform.os, genie_platform.version)
nb_platform = netbox_device_platform(platform_name)
nb_device.platform = nb_platform.id
# Save changes to netbox
nb_device.save()
return nb_device
def netbox_device_platform(name):
"""Get or Create a netbox Device Platform"""
nb_platform = netbox.dcim.platforms.get(name=name)
if nb_platform is None:
# Create slug from name
slug = (
name.lower()
.replace(" ", "-")
.replace(",", "-")
.replace(".", "_")
.replace("(", "_")
.replace(")", "_")
)
nb_platform = netbox.dcim.platforms.create(name=name, slug=slug)
return nb_platform
def netbox_interface(
nb_device, interface_name, interface_description="", mgmt_only=False
):
"""Create and update a netbox interface object for a device."""
# See if the interface exists
nb_interface = netbox.dcim.interfaces.filter(
device=nb_device.name, name=interface_name
)
# See if single item returned, if so, set to value
if len(nb_interface) == 1:
nb_interface = nb_interface[0]
# Create Interface
elif nb_interface is None or len(nb_interface) == 0:
# Create New Interface
nb_interface = netbox.dcim.interfaces.create(
device=nb_device.id,
name=interface_name,
form_factor=FF_OTHER,
enabled=True,
mgmt_only=mgmt_only,
description=interface_description,
)
else:
print("More than one interface found.. that is odd.")
return nb_interface
def netbox_vrf(vrf_name):
"""Get or Create a netbox VRF."""
    # Management VRFs are named different things on platforms
# For lack of better option, creating a common "oob_mgmt" vrf to use
if vrf_name in ["Mgmt-intf", "Mgmt-vrf", "management"]:
vrf = "oob_mgmt"
else:
vrf = vrf_name
# Get vrf if present, create if not
nb_vrf = netbox.ipam.vrfs.get(name=vrf)
if nb_vrf is None:
nb_vrf = netbox.ipam.vrfs.create(name=vrf)
return nb_vrf
def netbox_ip4(
address=None, prefix_len=None, prefix=None, nb_vrf=None, description=""
):
"""Get or Create a netbox IP address object."""
# Normalize input to slash notation
if prefix is None:
if address is not None and prefix_len is not None:
prefix = "{}/{}".format(address, prefix_len)
else:
raise (
ValueError("Need address and prefix_len if prefix not given")
)
# See if IP exists already
# Question: What about tenant?
# Question: Searches across VRFs? Global?
# Process if more than one is returned
nb_ip4 = netbox.ipam.ip_addresses.get(address=prefix)
    # VRF Id
    try:
        vrf_id = nb_vrf.id
    except AttributeError:
        vrf_id = None
if nb_ip4 is None:
# Create it
nb_ip4 = netbox.ipam.ip_addresses.create(
address=prefix, vrf=vrf_id, description=description
)
return nb_ip4
def netbox_device_interface_genie(nb_device, genie_interface):
"""Get or Create and Update device interface based on Genie model"""
# If no description present, set to blank string
if "description" in genie_interface["details"].keys():
description = genie_interface["details"]["description"]
else:
description = ""
    # ToDo: Management check
mgmt_only = False
# ToDo: Add other details MTU, MAC
# ToDo: For port-channels, bring in LAG info
# Get a netbox interface object
nb_interface = netbox_interface(
nb_device,
interface_name=genie_interface["name"],
interface_description=description,
mgmt_only=mgmt_only,
)
# Get vrf if present
if "vrf" in genie_interface["details"].keys():
nb_vrf = netbox_vrf(genie_interface["details"]["vrf"])
else:
nb_vrf = None
# If IP listed on interface, add to it
if "ipv4" in genie_interface["details"].keys():
for prefix, prefix_details in genie_interface["details"][
"ipv4"
].items():
# Get netbox IP object
nb_ip4 = netbox_ip4(
prefix=prefix,
nb_vrf=nb_vrf,
description="Configured on {} interface {}".format(
nb_device.name, nb_interface.name
),
)
# Update IP with interface link
nb_ip4.interface = nb_interface.id
nb_ip4.save()
# Identify Primary IPs - based on presence of Mgmt VRF
if nb_vrf is not None and nb_vrf.name == "oob_mgmt":
nb_device.primary_ip4 = nb_ip4.id
nb_device.save()
# ToDo: Add looking for address listed in Testbed file - VIRL uses console
# return the nb_interface
return nb_interface
def netbox_device_interface_testbed(nb_device, testbed_interface):
"""Get or Create and Update Device Interface based on Testbed details
- Mostly for ASAv until ASA parsers/models available
"""
description = testbed_interface["details"]["link"]
    # ToDo: Management check
mgmt_only = False
# Get netbox interface object
nb_interface = netbox_interface(
nb_device,
interface_name=testbed_interface["name"],
interface_description=description,
mgmt_only=mgmt_only,
)
# Identify management interface to set VRF
if (
"management" in testbed_interface["name"].lower()
or "mgmt" in testbed_interface["name"].lower()
):
nb_vrf = netbox_vrf("management")
else:
nb_vrf = None
# If IP listed on interface, add to it
if "ipv4" in testbed_interface["details"].keys():
# Get netbox IP object
nb_ip4 = netbox_ip4(
prefix=testbed_interface["details"]["ipv4"],
nb_vrf=nb_vrf,
description="Configured on {} interface {}".format(
nb_device.name, nb_interface.name
),
)
# Add interface to ip object
nb_ip4.interface = nb_interface.id
nb_ip4.save()
# Identify Primary IPs
if nb_vrf is not None and nb_vrf.name == "oob_mgmt":
nb_device.primary_ip4 = nb_ip4.id
nb_device.save()
# return the nb_interface
return nb_interface
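# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a quick tour of the
# get-or-create helpers defined above. It assumes NETBOX_URL / NETBOX_TOKEN
# (read at import time) point at a reachable netbox instance; the names below
# are placeholders.
if __name__ == '__main__':
    site = netbox_site("Example Lab")
    role = netbox_device_role("router")
    vrf = netbox_vrf("Mgmt-intf")   # normalized to the common "oob_mgmt" VRF above
    print(site.name, role.slug, vrf.name)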
|
gpytorch/variational/unwhitened_variational_strategy.py
|
jrg365/gpytorch
| 188 |
96175
|
#!/usr/bin/env python3
import math
import torch
from gpytorch.variational.cholesky_variational_distribution import CholeskyVariationalDistribution
from .. import settings
from ..distributions import MultivariateNormal
from ..lazy import (
CholLazyTensor,
DiagLazyTensor,
PsdSumLazyTensor,
RootLazyTensor,
TriangularLazyTensor,
ZeroLazyTensor,
delazify,
)
from ..utils.broadcasting import _mul_broadcast_shape
from ..utils.cholesky import psd_safe_cholesky
from ..utils.errors import NotPSDError
from ..utils.memoize import add_to_cache, cached
from ._variational_strategy import _VariationalStrategy
class UnwhitenedVariationalStrategy(_VariationalStrategy):
r"""
Similar to :obj:`~gpytorch.variational.VariationalStrategy`, but does not perform the
whitening operation. In almost all cases :obj:`~gpytorch.variational.VariationalStrategy`
is preferable, with a few exceptions:
- When the inducing points are exactly equal to the training points (i.e. :math:`\mathbf Z = \mathbf X`).
Unwhitened models are faster in this case.
- When the number of inducing points is very large (e.g. >2000). Unwhitened models can use CG for faster
computation.
:param ~gpytorch.models.ApproximateGP model: Model this strategy is applied to.
Typically passed in when the VariationalStrategy is created in the
__init__ method of the user defined model.
:param torch.Tensor inducing_points: Tensor containing a set of inducing
points to use for variational inference.
:param ~gpytorch.variational.VariationalDistribution variational_distribution: A
VariationalDistribution object that represents the form of the variational distribution :math:`q(\mathbf u)`
:param bool learn_inducing_points: (optional, default True): Whether or not
the inducing point locations :math:`\mathbf Z` should be learned (i.e. are they
parameters of the model).
"""
has_fantasy_strategy = True
@cached(name="cholesky_factor", ignore_args=True)
def _cholesky_factor(self, induc_induc_covar):
# Maybe used - if we're not using CG
L = psd_safe_cholesky(delazify(induc_induc_covar))
return TriangularLazyTensor(L)
@property
@cached(name="prior_distribution_memo")
def prior_distribution(self):
out = self.model.forward(self.inducing_points)
res = MultivariateNormal(out.mean, out.lazy_covariance_matrix.add_jitter())
return res
@property
@cached(name="pseudo_points_memo")
def pseudo_points(self):
# TODO: implement for other distributions
# retrieve the variational mean, m and covariance matrix, S.
if not isinstance(self._variational_distribution, CholeskyVariationalDistribution):
raise NotImplementedError(
"Only CholeskyVariationalDistribution has pseudo-point support currently, ",
"but your _variational_distribution is a ",
                type(self._variational_distribution).__name__,
)
# retrieve the variational mean, m and covariance matrix, S.
var_cov_root = TriangularLazyTensor(self._variational_distribution.chol_variational_covar)
var_cov = CholLazyTensor(var_cov_root)
var_mean = self.variational_distribution.mean # .unsqueeze(-1)
if var_mean.shape[-1] != 1:
var_mean = var_mean.unsqueeze(-1)
# R = K - S
Kmm = self.model.covar_module(self.inducing_points)
res = Kmm - var_cov
cov_diff = res
# D_a = (S^{-1} - K^{-1})^{-1} = S + S R^{-1} S
# note that in the whitened case R = I - S, unwhitened R = K - S
# we compute (R R^{T})^{-1} R^T S for stability reasons as R is probably not PSD.
eval_lhs = var_cov.evaluate()
eval_rhs = cov_diff.transpose(-1, -2).matmul(eval_lhs)
inner_term = cov_diff.matmul(cov_diff.transpose(-1, -2))
# TODO: flag the jitter here
inner_solve = inner_term.add_jitter(1e-3).inv_matmul(eval_rhs, eval_lhs.transpose(-1, -2))
inducing_covar = var_cov + inner_solve
# mean term: D_a S^{-1} m
# unwhitened: (S - S R^{-1} S) S^{-1} m = (I - S R^{-1}) m
rhs = cov_diff.transpose(-1, -2).matmul(var_mean)
inner_rhs_mean_solve = inner_term.add_jitter(1e-3).inv_matmul(rhs)
pseudo_target_mean = var_mean + var_cov.matmul(inner_rhs_mean_solve)
# ensure inducing covar is psd
try:
pseudo_target_covar = CholLazyTensor(inducing_covar.add_jitter(1e-3).cholesky()).evaluate()
except NotPSDError:
from gpytorch.lazy import DiagLazyTensor
evals, evecs = inducing_covar.symeig(eigenvectors=True)
pseudo_target_covar = evecs.matmul(DiagLazyTensor(evals + 1e-4)).matmul(evecs.transpose(-1, -2)).evaluate()
return pseudo_target_covar, pseudo_target_mean
def forward(self, x, inducing_points, inducing_values, variational_inducing_covar=None):
# If our points equal the inducing points, we're done
if torch.equal(x, inducing_points):
if variational_inducing_covar is None:
raise RuntimeError
else:
return MultivariateNormal(inducing_values, variational_inducing_covar)
# Otherwise, we have to marginalize
num_induc = inducing_points.size(-2)
full_inputs = torch.cat([inducing_points, x], dim=-2)
full_output = self.model.forward(full_inputs)
full_mean, full_covar = full_output.mean, full_output.lazy_covariance_matrix
# Mean terms
test_mean = full_mean[..., num_induc:]
induc_mean = full_mean[..., :num_induc]
mean_diff = (inducing_values - induc_mean).unsqueeze(-1)
# Covariance terms
induc_induc_covar = full_covar[..., :num_induc, :num_induc].add_jitter()
induc_data_covar = full_covar[..., :num_induc, num_induc:].evaluate()
data_data_covar = full_covar[..., num_induc:, num_induc:]
# Compute Cholesky factorization of inducing covariance matrix
if settings.fast_computations.log_prob.off() or (num_induc <= settings.max_cholesky_size.value()):
induc_induc_covar = CholLazyTensor(self._cholesky_factor(induc_induc_covar))
# If we are making predictions and don't need variances, we can do things very quickly.
if not self.training and settings.skip_posterior_variances.on():
self._mean_cache = induc_induc_covar.inv_matmul(mean_diff).detach()
predictive_mean = torch.add(
test_mean, induc_data_covar.transpose(-2, -1).matmul(self._mean_cache).squeeze(-1)
)
predictive_covar = ZeroLazyTensor(test_mean.size(-1), test_mean.size(-1))
return MultivariateNormal(predictive_mean, predictive_covar)
# Expand everything to the right size
shapes = [mean_diff.shape[:-1], induc_data_covar.shape[:-1], induc_induc_covar.shape[:-1]]
if variational_inducing_covar is not None:
root_variational_covar = variational_inducing_covar.root_decomposition().root.evaluate()
shapes.append(root_variational_covar.shape[:-1])
shape = _mul_broadcast_shape(*shapes)
mean_diff = mean_diff.expand(*shape, mean_diff.size(-1))
induc_data_covar = induc_data_covar.expand(*shape, induc_data_covar.size(-1))
induc_induc_covar = induc_induc_covar.expand(*shape, induc_induc_covar.size(-1))
if variational_inducing_covar is not None:
root_variational_covar = root_variational_covar.expand(*shape, root_variational_covar.size(-1))
# Cache the kernel matrix with the cached CG calls
if self.training:
prior_dist = MultivariateNormal(induc_mean, induc_induc_covar)
add_to_cache(self, "prior_distribution_memo", prior_dist)
# Compute predictive mean
if variational_inducing_covar is None:
left_tensors = mean_diff
else:
left_tensors = torch.cat([mean_diff, root_variational_covar], -1)
inv_products = induc_induc_covar.inv_matmul(induc_data_covar, left_tensors.transpose(-1, -2))
predictive_mean = torch.add(test_mean, inv_products[..., 0, :])
# Compute covariance
if self.training:
interp_data_data_var, _ = induc_induc_covar.inv_quad_logdet(
induc_data_covar, logdet=False, reduce_inv_quad=False
)
data_covariance = DiagLazyTensor((data_data_covar.diag() - interp_data_data_var).clamp(0, math.inf))
else:
neg_induc_data_data_covar = torch.matmul(
induc_data_covar.transpose(-1, -2).mul(-1), induc_induc_covar.inv_matmul(induc_data_covar)
)
data_covariance = data_data_covar + neg_induc_data_data_covar
predictive_covar = PsdSumLazyTensor(RootLazyTensor(inv_products[..., 1:, :].transpose(-1, -2)), data_covariance)
# Done!
return MultivariateNormal(predictive_mean, predictive_covar)
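# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a minimal approximate
# GP wired up with UnwhitenedVariationalStrategy, following the standard
# ApproximateGP pattern. The kernel/mean choices and the inducing-point layout
# are illustrative assumptions, not recommendations.
if __name__ == "__main__":
    import torch

    import gpytorch

    class _ExampleSVGP(gpytorch.models.ApproximateGP):
        def __init__(self, inducing_points):
            variational_distribution = gpytorch.variational.CholeskyVariationalDistribution(
                inducing_points.size(-2)
            )
            variational_strategy = gpytorch.variational.UnwhitenedVariationalStrategy(
                self, inducing_points, variational_distribution
            )
            super().__init__(variational_strategy)
            self.mean_module = gpytorch.means.ConstantMean()
            self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())

        def forward(self, x):
            return gpytorch.distributions.MultivariateNormal(self.mean_module(x), self.covar_module(x))

    model = _ExampleSVGP(inducing_points=torch.linspace(0, 1, 32).unsqueeze(-1))
    print(model(torch.linspace(0, 1, 5).unsqueeze(-1)).mean.shape)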
|
docs/tutorial_data-6.py
|
ankitshah009/dcase_util
| 122 |
96202
|
import dcase_util
data = dcase_util.utils.Example.feature_container()
data_aggregator = dcase_util.data.Aggregator(
recipe=['flatten'],
win_length_frames=10,
hop_length_frames=1,
)
data_aggregator.aggregate(data)
data.plot()
|
foliant/backends/base.py
|
foliant-docs/foliant
| 105 |
96208
|
from importlib import import_module
from shutil import copytree
from datetime import date
from logging import Logger
from foliant.utils import spinner
class BaseBackend():
'''Base backend. All backends must inherit from this one.'''
targets = ()
required_preprocessors_before = ()
required_preprocessors_after = ()
def __init__(self, context: dict, logger: Logger, quiet=False, debug=False):
self.project_path = context['project_path']
self.config = context['config']
self.context = context
self.logger = logger
self.quiet = quiet
self.debug = debug
self.working_dir = self.project_path / self.config['tmp_dir']
def get_slug(self) -> str:
'''Generate a slug from the project title and version and the current date.
Spaces in title are replaced with underscores, then the version and the current date
are appended.
'''
if 'slug' in self.config:
return self.config['slug']
components = []
components.append(self.config['title'].replace(' ', '_'))
version = self.config.get('version')
if version:
components.append(str(version))
components.append(str(date.today()))
return '-'.join(components)
def apply_preprocessor(self, preprocessor: str or dict):
'''Apply preprocessor.
:param preprocessor: Preprocessor name or a dict of the preprocessor name and its options
'''
if isinstance(preprocessor, str):
preprocessor_name, preprocessor_options = preprocessor, {}
elif isinstance(preprocessor, dict):
(preprocessor_name, preprocessor_options), = (*preprocessor.items(),)
with spinner(
f'Applying preprocessor {preprocessor_name}',
self.logger,
self.quiet,
self.debug
):
try:
preprocessor_module = import_module(f'foliant.preprocessors.{preprocessor_name}')
preprocessor_module.Preprocessor(
self.context,
self.logger,
self.quiet,
self.debug,
preprocessor_options
).apply()
except ModuleNotFoundError:
raise ModuleNotFoundError(f'Preprocessor {preprocessor_name} is not installed')
except Exception as exception:
raise RuntimeError(
f'Failed to apply preprocessor {preprocessor_name}: {exception}'
)
def preprocess_and_make(self, target: str) -> str:
'''Apply preprocessors required by the selected backend and defined in the config file,
then run the ``make`` method.
:param target: Output format: pdf, docx, html, etc.
:returns: Result as returned by the ``make`` method
'''
src_path = self.project_path / self.config['src_dir']
copytree(src_path, self.working_dir)
common_preprocessors = (
*self.required_preprocessors_before,
*self.config.get('preprocessors', ()),
*self.required_preprocessors_after
)
if self.config.get('escape_code', False):
if isinstance(self.config['escape_code'], dict):
escapecode_preprocessor = {
'escapecode': self.config['escape_code'].get('options', {})
}
else:
escapecode_preprocessor = 'escapecode'
preprocessors = (
escapecode_preprocessor,
*common_preprocessors,
'unescapecode'
)
elif self.config.get('disable_implicit_unescape', False):
preprocessors = common_preprocessors
else:
preprocessors = (
*common_preprocessors,
'_unescape'
)
for preprocessor in preprocessors:
self.apply_preprocessor(preprocessor)
return self.make(target)
def make(self, target: str) -> str:
'''Make the output from the source. Must be implemented by every backend.
:param target: Output format: pdf, docx, html, etc.
:returns: Typically, the path to the output file, but in general any string
'''
raise NotImplementedError
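# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original module): the smallest conceivable
# backend built on BaseBackend, mainly to show which attributes and methods a
# subclass is expected to provide. The target name and output file are made up.
class _EchoBackend(BaseBackend):
    '''Toy backend that writes a single text file into the working dir.'''

    targets = ('echo',)

    def make(self, target: str) -> str:
        result = self.working_dir / f'{self.get_slug()}.{target}'
        result.write_text(f'Built target {target} from {self.working_dir}')
        return str(result)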
|
deps/cndict/bundle_friso.py
|
rrelledge/RediSearch
| 2,098 |
96230
|
<gh_stars>1000+
#!/usr/bin/env python
"""
This script gathers settings and dictionaries from friso (a chinese
tokenization library) and generates a C source file that can later be
compiled into RediSearch, allowing the module to have a built-in chinese
dictionary. By default this script will generate a C source file of
compressed data but there are other options to control output (mainly for
debugging).
The `read_friso` script can be used to analyze the dumped data for debugging
purposes
"""
import zlib
import errno
import os
import re
import struct
import sys
import time
import string
from argparse import ArgumentParser
# Load the ini file
ap = ArgumentParser()
ap.add_argument('-i', '--ini', default='friso/friso.ini',
help='ini file to use for initialization')
ap.add_argument('-m', '--mode', default='c', help='output mode',
choices=['c', 'raw_z', 'raw_u'])
ap.add_argument('-d', '--dir', default='.',
help='Override directory of lex files')
ap.add_argument('-o', '--out', help='Name of destination directory',
default='cndict_generated')
opts = ap.parse_args()
lexdir = opts.dir
DICT_VARNAME = 'ChineseDict'
SIZE_COMP_VARNAME = 'ChineseDictCompressedLength'
SIZE_FULL_VARNME = 'ChineseDictFullLength'
class ConfigEntry(object):
def __init__(self, srcname, dstname, pytype):
self.srcname = srcname
self.dstname = dstname
self.pytype = pytype
self.value = None
configs = [
ConfigEntry('max_len', 'max_len', int),
ConfigEntry('r_name', 'r_name', int),
ConfigEntry('mix_len', 'mix_len', int),
ConfigEntry('lna_len', 'lna_len', int),
ConfigEntry('add_syn', 'add_syn', int),
ConfigEntry('clr_stw', 'clr_stw', int),
ConfigEntry('keep_urec', 'keep_urec', int),
ConfigEntry('spx_out', 'spx_out', int),
ConfigEntry('nthreshold', 'nthreshold', int),
ConfigEntry('mode', 'mode', int),
ConfigEntry('charset', 'charset', int),
ConfigEntry('en_sseg', 'en_sseg', int),
ConfigEntry('st_minl', 'st_minl', int),
ConfigEntry('kpuncs', 'kpuncs', str)
]
def write_config_init(varname, configs):
ret = []
for config in configs:
if config.value is None:
continue
if config.srcname == 'mode':
ret.append('friso_set_mode({},{});'.format(varname, config.value))
elif config.dstname == 'kpuncs':
ret.append('strcpy({}->kpuncs, "{}");'.format(varname, config.value))
elif config.dstname == 'charset':
pass
# Skip
elif config.pytype == int:
ret.append('{}->{} = {};'.format(varname, config.dstname, config.value))
else:
raise ValueError("Don't understand config!", config)
return ret
def set_key_value(name, value):
for config in configs:
name = name.lower().replace("friso.", "").strip()
# print name, config.srcname
if config.srcname == name:
config.value = config.pytype(value)
return
raise ValueError('Bad config key', name)
with open(opts.ini, 'r') as fp:
for line in fp:
line = line.strip()
if not line or line.startswith('#'):
continue
key, value = line.split('=')
key = key.strip()
value = value.strip()
if key == 'friso.lex_dir':
if not lexdir:
lexdir = value
else:
set_key_value(key, value)
# Parse the header snippet in order to emit the correct constant.
_LEXTYPE_MAP_STRS = \
r'''
__LEX_CJK_WORDS__ = 0,
__LEX_CJK_UNITS__ = 1,
__LEX_ECM_WORDS__ = 2, //english and chinese mixed words.
__LEX_CEM_WORDS__ = 3, //chinese and english mixed words.
__LEX_CN_LNAME__ = 4,
__LEX_CN_SNAME__ = 5,
__LEX_CN_DNAME1__ = 6,
__LEX_CN_DNAME2__ = 7,
__LEX_CN_LNA__ = 8,
__LEX_STOPWORDS__ = 9,
__LEX_ENPUN_WORDS__ = 10,
__LEX_EN_WORDS__ = 11,
__LEX_OTHER_WORDS__ = 15,
__LEX_NCSYN_WORDS__ = 16,
__LEX_PUNC_WORDS__ = 17, //punctuations
__LEX_UNKNOW_WORDS__ = 18 //unrecognized words.
'''
LEXTYPE_MAP = {}
for m in re.findall('\s*(__[^=]*__)\s*=\s*([\d]*)', _LEXTYPE_MAP_STRS):
LEXTYPE_MAP[m[0]] = int(m[1])
# Lex type currently occupies
TYPE_MASK = 0x1F
F_SYNS = 0x01 << 5
F_FREQS = 0x02 << 5
class LexBuffer(object):
    # Size of input buffer before flushing to a zlib block
    CHUNK_SIZE = 65536
    VERSION = 0

    def __init__(self, fp, use_compression=True):
        self._buf = bytearray()
        self._fp = fp
        self._compressor = zlib.compressobj(-1)
        self._use_compression = use_compression
        # Write the file header
        self._fp.write(struct.pack("!I", self.VERSION))
        self._fp.flush()
        self.compressed_size = 0
        self.full_size = 4  # For the 4-byte version field

    def _write_data(self, data):
        self._fp.write(data)
        self.compressed_size += len(data)

    def flush(self, is_final=False):
        if not self._use_compression:
            self._write_data(self._buf)
        else:
            # Flush any outstanding data in the buffer
            self._write_data(self._compressor.compress(bytes(self._buf)))
            if is_final:
                self._write_data(self._compressor.flush(zlib.Z_FINISH))
        self._fp.flush()
        self.full_size += len(self._buf)
        self._buf = bytearray()

    def _maybe_flush(self):
        if len(self._buf) > self.CHUNK_SIZE:
            self.flush()

    def add_entry(self, lextype, term, syns, freq):
        # Perform the encoding...
        header = LEXTYPE_MAP[lextype]
        if syns:
            header |= F_SYNS
        if freq:
            header |= F_FREQS
        self._buf.append(header)
        self._buf += term
        self._buf.append(0)  # NUL terminator
        if syns:
            self._buf += struct.pack("!h", len(syns))
            for syn in syns:
                self._buf += syn
                self._buf.append(0)
        if freq:
            self._buf += struct.pack("!I", freq)
        self._maybe_flush()
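# Hex digits are escaped below as well, presumably so that a literal digit can
# never be absorbed into a preceding \xNN escape inside the generated C string.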
def encode_pair(c):
    if c in string.hexdigits:
        return '\\x{0:x}'.format(ord(c))
    elif c in ('"', '\\', '?'):
        return '\\' + c
    else:
        return repr('%c' % (c,))[1:-1]
    # return '\\x{0:x}'.format(ord(c)) if _needs_escape(c) else c
class SourceEncoder(object):
    LINE_LEN = 40

    def __init__(self, fp):
        self._fp = fp
        self._curlen = 0

    def write(self, blob):
        blob = buffer(blob)
        while len(blob):
            chunk = buffer(blob, 0, self.LINE_LEN)
            blob = buffer(blob, len(chunk), len(blob) - len(chunk))
            encoded = ''.join([encode_pair(c) for c in chunk])
            self._fp.write('"' + encoded + '"\n')
        return len(blob)

    def flush(self):
        self._fp.flush()

    def close(self):
        pass
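# Each line of a lexicon file has the form "term/syn1,syn2,...,synN/freq"; the
# synonym list and the frequency are optional, and a synonym list of "null"
# means the term has no synonyms.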
def process_lex_entry(type, file, buf):
    print type, file
    with open(file, 'r') as fp:
        for line in fp:
            line = line.strip()
            comps = line.split('/')
            # print comps
            term = comps[0]
            syns = comps[1].split(',') if len(comps) > 1 else []
            if len(syns) == 1 and syns[0].lower() == 'null':
                syns = []
            freq = int(comps[2]) if len(comps) > 2 else 0
            buf.add_entry(type, term, syns, freq)
            # print "Term:", term, "Syns:", syns, "Freq", freq
# Now dump the dictionary: parse the lexicon index and serialize every lex file
# it references.
def strip_comment_lines(blob):
    lines = [line.strip() for line in blob.split('\n')]
    lines = [line for line in lines if line and not line.startswith('#')]
    return lines
def sanitize_file_entry(typestr, filestr):
    typestr = strip_comment_lines(typestr)[0]
    filestr = strip_comment_lines(filestr)
    filestr = [f.rstrip(';') for f in filestr]
    return typestr, filestr
lexre = re.compile(r'([^:]+)\s*:\s*\[([^\]]*)\]', re.MULTILINE)
lexindex = os.path.join(lexdir, 'friso.lex.ini')
lexinfo = open(lexindex, 'r').read()
matches = lexre.findall(lexinfo)
# print matches
dstdir = opts.out
if opts.mode == 'c':
    dstfile = 'cndict_data.c'
else:
    dstfile = 'cndict_data.out'
try:
    os.makedirs(dstdir)
except OSError as e:
    if e.errno != errno.EEXIST:
        raise
dstfile = os.path.join(dstdir, dstfile)
ofp = open(dstfile, 'w')
if opts.mode == 'c':
    ofp.write(r'''
// Compressed chinese dictionary
// Generated by {}
// at {}
#include "friso/friso.h"
#include <stdlib.h>
#include <string.h>
const char {}[] =
'''.format(' '.join(sys.argv), time.ctime(), DICT_VARNAME))
    ofp.flush()
lexout = SourceEncoder(ofp)
lexbuf = LexBuffer(lexout)
for m in matches:
    typestr, filestr = sanitize_file_entry(m[0], m[1])
    # print typestr
    # print filestr
    for filename in filestr:
        filename = os.path.join(os.path.dirname(lexindex), filename)
        process_lex_entry(typestr, filename, lexbuf)
lexbuf.flush(is_final=True)
ofp.write(';\n')
ofp.write('const size_t {} = {};\n'.format(SIZE_COMP_VARNAME, lexbuf.compressed_size))
ofp.write('const size_t {} = {};\n'.format(SIZE_FULL_VARNME, lexbuf.full_size))
config_lines = write_config_init('frisoConfig', configs)
config_fn = '\n'.join(config_lines)
friso_config_txt = '''
void ChineseDictConfigure(friso_t friso, friso_config_t frisoConfig) {
'''
friso_config_txt += config_fn
friso_config_txt += '\n}\n'
ofp.write(friso_config_txt)
ofp.flush()
ofp.close()
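# In the default 'c' mode the generated source now holds three symbols: the
# escaped, zlib-compressed dictionary blob, its compressed and uncompressed
# sizes, and a ChineseDictConfigure() helper that replays the friso.ini
# settings at load time.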
# hdrfile = os.path.join(dstdir, 'cndict_data.h')
# hdrfp = open(hdrfile, 'w')
# hdrfp.write(r'''
#ifndef CNDICT_DATA_H
#define CNDICT_DATA_H
# extern const char {data_var}[];
# extern const size_t {uncomp_len_var};
# extern const size_t {comp_len_var};
# {config_fn_txt}
# #endif
# '''.format(
# data_var=DICT_VARNAME,
# uncomp_len_var=SIZE_FULL_VARNME,
# comp_len_var=SIZE_COMP_VARNAME,
# config_fn_txt=friso_config_txt
# ))
# hdrfp.flush()
|
textract/async-form-table/lambda-process-response/helper/parser.py
|
srcecde/aws-tutorial-code
| 105 |
96243
|
<reponame>srcecde/aws-tutorial-code
"""
-*- coding: utf-8 -*-
========================
AWS Lambda
========================
Contributor: <NAME> (<NAME>)
========================
"""
import uuid
class Parse:
    def __init__(self, page, get_table, get_kv, get_text):
        self.response = page
        self.word_map = {}
        self.table_page_map = {}
        self.key_map_list = []
        self.value_map = {}
        self.final_map_list = []
        self.line_text = {}
        self.get_table = get_table
        self.get_kv = get_kv
        self.get_text = get_text
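    # word_map:        block Id -> WORD text or SELECTION_ELEMENT status
    # line_text:       'page_<n>' -> list of LINE texts
    # table_page_map:  page number -> {table_key: [[cell, ...], ...]}
    # key_map_list:    one {key_text: [VALUE block Ids]} dict per page
    # value_map:       VALUE block Id -> joined child text
    # final_map_list:  one {key_text: value_text} dict per page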
    def extract_text(self, extract_by="LINE"):
        for block in self.response["Blocks"]:
            if block["BlockType"] == extract_by:
                page_key = f'page_{block["Page"]}'
                if page_key in self.line_text.keys():
                    self.line_text[page_key].append(block["Text"])
                else:
                    self.line_text[page_key] = [block["Text"]]
        return self.line_text
    def map_word_id(self):
        # self.word_map = {}
        for block in self.response["Blocks"]:
            if block["BlockType"] == "WORD":
                self.word_map[block["Id"]] = block["Text"]
            if block["BlockType"] == "SELECTION_ELEMENT":
                self.word_map[block["Id"]] = block["SelectionStatus"]
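    # Tables are collected per page: CELL blocks are grouped into rows keyed by
    # RowIndex, each TABLE block gets a uuid-based key, and the per-page dict is
    # flushed whenever the page number changes (and once more at the last block).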
    def extract_table_info(self):
        row = []
        table = {}
        table_page_map = {}
        ri = 0
        flag = False
        page = self.response["Blocks"][0]['Page']
        response_block_len = len(self.response["Blocks"]) - 1
        for n, block in enumerate(self.response["Blocks"]):
            if block["BlockType"] == "TABLE":
                key = f"table_{uuid.uuid4().hex}_page_{block['Page']}"
                table_n = +1
                temp_table = []
            if block["BlockType"] == "CELL":
                if block["RowIndex"] != ri:
                    flag = True
                    row = []
                    ri = block["RowIndex"]
                if "Relationships" in block:
                    for relation in block["Relationships"]:
                        if relation["Type"] == "CHILD":
                            row.append(" ".join([self.word_map[i] for i in relation["Ids"]]))
                else:
                    row.append(" ")
                if flag:
                    temp_table.append(row)
                    table[key] = temp_table
                    flag = False
            if table:
                if block['Page'] != page:
                    self.table_page_map[page] = table
                    page = block['Page']
                    table = {}
                if response_block_len == n:
                    self.table_page_map[page] = table
        return self.table_page_map
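    # Textract models form fields as KEY_VALUE_SET blocks: a KEY block points to
    # its VALUE block through a "VALUE" relationship and to its WORD children
    # through "CHILD" relationships. The three methods below join these into one
    # {key_text: value_text} dict per page.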
    def get_key_map(self):
        key_map = {}
        page = self.response["Blocks"][0]['Page']
        response_block_len = len(self.response["Blocks"]) - 1
        for n, block in enumerate(self.response["Blocks"]):
            if block["BlockType"] == "KEY_VALUE_SET" and "KEY" in block["EntityTypes"]:
                for relation in block["Relationships"]:
                    if relation["Type"] == "VALUE":
                        value_id = relation["Ids"]
                    if relation["Type"] == "CHILD":
                        v = " ".join([self.word_map[i] for i in relation["Ids"]])
                        key_map[v] = value_id
            if key_map:
                if block['Page'] != page:
                    self.key_map_list.append(key_map)
                    page = block['Page']
                    key_map = {}
                if response_block_len == n:
                    self.key_map_list.append(key_map)
    def get_value_map(self):
        for block in self.response["Blocks"]:
            if block["BlockType"] == "KEY_VALUE_SET" and "VALUE" in block["EntityTypes"]:
                if "Relationships" in block:
                    for relation in block["Relationships"]:
                        if relation["Type"] == "CHILD":
                            v = " ".join([self.word_map[i] for i in relation["Ids"]])
                            self.value_map[block["Id"]] = v
                else:
                    self.value_map[block["Id"]] = "VALUE_NOT_FOUND"
    def get_kv_map(self):
        final_map = {}
        for key in self.key_map_list:
            for i, j in key.items():
                final_map[i] = "".join(["".join(self.value_map[k]) for k in j])
            if final_map:
                self.final_map_list.append(final_map)
                final_map = {}
        return self.final_map_list
    def process_response(self):
        final_map, table_info, text = None, None, None
        self.map_word_id()
        if self.get_text:
            text = self.extract_text()
        if self.get_kv:
            self.get_key_map()
            self.get_value_map()
            final_map = self.get_kv_map()
        if self.get_table:
            table_info = self.extract_table_info()
        return table_info, final_map, text
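# Minimal usage sketch (assumes `response` is one page of a Textract
# GetDocumentAnalysis result):
#   parser = Parse(response, get_table=True, get_kv=True, get_text=True)
#   table_info, kv_pairs, line_text = parser.process_response()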
|