code
stringlengths 22
1.05M
| apis
sequencelengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
---|---|---|
"""Classification methods."""
import numpy as np
from machine_learning.constants import N_CLASSES, FOLDS, MAX_K, RANDOM_SEED
from machine_learning.utilities import k_fold_split_indexes, get_k_nn
def classification(method, error_func, train, test, **kwargs):
"""Perform classification for data and return error.
Arguments:
method {function} -- Classification method.
error_func {function} -- Error function.
train {DataTuple} -- Training data.
test {DataTuple} -- Test data.
All extra keyword arguments are passed to method.
Returns:
float -- Error value returned by error_func.
"""
y_pred = method(train, test, **kwargs)
return error_func(y_pred, test.y.values)
def max_classifier(train, test):
"""Maximum classifier.
Classifies using the most common class in training data.
Arguments:
train {DataTuple} -- Training data.
test {DataTuple} -- Test data.
Returns:
ndarray -- Predicted values.
"""
max_category = max_classifier_fit(train.X, train.y)
y_pred = max_classifier_predict(test.X, max_category)
return y_pred
def max_classifier_fit(X, y):
"""Determines the most common class in input.
Arguments:
X {DataFrame} -- Indendent variables.
y {DataFrame} -- Dependent variable.
Returns:
int -- Most common class.
"""
y = y.values
max_category = np.bincount(y.astype(int)).argmax()
return max_category
def max_classifier_predict(X, max_category):
"""Classify using max classifier.
Arguments:
X {DataFrame} -- Independent variables.
max_category {int} -- Class to classify to.
Returns:
ndarray -- Predicted values.
"""
y_pred = np.ones((X.shape[0], 1), dtype=np.int) * max_category
return y_pred
def multinomial_naive_bayes_classifier(train, test, n_classes=N_CLASSES):
"""Multinomial naive bayes classifier.
See more at:
https://en.wikipedia.org/wiki/Naive_Bayes_classifier#Multinomial_naive_Bayes
Arguments:
train {DataTuple} -- Training data.
test {DataTuple} -- Test data.
Keyword Arguments:
n_classes {int} -- Number of classes. (default: {N_CLASSES})
Returns:
ndarray -- Predicted values.
"""
train_X = train.X.values
train_y = train.y.values
test_X = test.X.values
class_priors, feature_likelihoods = mnb_classifier_fit(train_X, train_y,
n_classes)
y_pred = mnb_classifier_predict(test_X, class_priors, feature_likelihoods)
return y_pred
def mnb_classifier_fit(X, y, n_classes):
"""Fit MNB classifier.
Calculates class priors and feature likelihoods.
Arguments:
X {ndarray} -- Independent variables.
y {ndarray} -- Dependent variables.
n_classes {int} -- Number of classes.
Returns:
ndarray -- Class priors.
ndarray -- Feature likelihoods.
"""
class_priors = mnb_class_priors(y, n_classes)
feature_likelihoods = mnb_feature_likelihoods(X, y, n_classes)
return class_priors, feature_likelihoods
def mnb_class_priors(y, n_classes):
"""Calculates the logaritm of the probability of belonging to each class.
Arguments:
y {ndarray} -- Class labels.
n_classes {int} -- Number of class labels.
Returns:
ndarray -- Log of prior probabilities.
"""
priors = np.zeros(n_classes)
for c in range(n_classes):
priors[c] = np.log(np.sum(y == c) / y.size)
return priors
def mnb_feature_likelihoods(X, y, n_classes):
"""Calculates the probability of feature j, given class k, using Laplace smoothing.
Arguments:
X {ndarray} -- Features.
y {ndarray} -- Class labels.
n_classes {int} -- Number of classes.
Returns:
ndarray -- Logs of feature likelihoods.
"""
n_features = X.shape[1]
p_ij = np.zeros((n_classes, n_features))
for c in range(n_classes):
Fc_sum = np.sum(X[y == c, :])
for j in range(n_features):
Fnc = np.sum(X[y == c, j])
p_ij[c, j] = np.log((1.0 + Fnc) / (n_features + Fc_sum))
return p_ij
def mnb_classifier_predict(X, class_priors, feature_likelihoods):
"""Classify using MNB classifier.
Arguments:
X {ndarray} -- Independent variables.
class_priors {ndarray} -- Class priors.
feature_likelihoods {ndarray} -- Feature likelihoods.
Returns:
ndarray -- Predicted values.
"""
n_classes = class_priors.size
N = X.shape[0]
posterior = np.zeros((N, n_classes))
for i in range(N):
posterior[i, :] = feature_likelihoods.dot(X[i, :])
for c in range(n_classes):
posterior[:, c] = posterior[:, c] + class_priors[c]
y_pred = np.argmax(posterior, axis=1)
return y_pred
def k_nn_classifier(train, test, k):
"""K-nearest neighbors classifier.
Arguments:
train {DataTuple} -- Training data.
test {DataTuple} -- Test data.
k {int} -- Value for k.
Returns:
ndarray -- Predicted values.
"""
y_pred = k_nn_classifier_predict(test.X, train.X, train.y, k)
return y_pred
def k_nn_classifier_fit(train, n_folds=FOLDS, max_k=MAX_K):
"""'Fit' K-nearest neighbors classifier by finding optimal value for k using cross validation.
Arguments:
train {DataTuple} -- Training data.
Keyword Arguments:
n_folds {int} -- Number of folds to use for validation. (default: {FOLDS})
max_k {int} -- Maximum value for k. (default: {MAX_K})
Returns:
int -- Optimal value for k.
float -- Error for selected k.
"""
# TODO: combine with k_nn_regression_fit()?
X = train.X.values
y = train.y.values
N = X.shape[0]
folds = k_fold_split_indexes(N, n_folds)
min_error = np.infty
best_k = 1
for k in range(1, max_k):
errors = np.zeros(n_folds)
for i in range(n_folds):
tmp_folds = folds[:]
valid_ix = tmp_folds.pop(i)
train_ix = np.concatenate(tmp_folds)
y_pred = k_nn_classifier_predict(X[valid_ix, :], X[train_ix, :],
y[train_ix], k)
error = classification_error(y_pred, y[valid_ix])
errors[i] = (valid_ix.size * error)
mean_error = np.sum(errors) / N
if mean_error < min_error:
min_error = mean_error
best_k = k
return int(best_k), min_error
def k_nn_classifier_predict(X, X_train, y_train, k, n_classes=N_CLASSES):
"""Classify using K-nearest neighbors classifier.
Assigns class labels based on the most common class in k-nearest neighbors.
Arguments:
X {DataFrame} -- Independent variables.
X_train {DataFrame} -- Independent training variables.
y_train {DataFrame} -- Dependent training variables.
k {int} -- Value of k.
Keyword Arguments:
n_classes {int} -- Number of classes. (default: {N_CLASSES})
Returns:
ndarray -- Predicted variables.
"""
try:
X = X.values
except AttributeError:
pass
try:
X_train = X_train.values
except AttributeError:
pass
try:
y_train = y_train.values
except AttributeError:
pass
assert X.shape[1] == X_train.shape[1]
N = X.shape[0]
y_pred = np.zeros((N, 1))
for i in range(N):
point = X[i, :]
neighbors, _ = get_k_nn(point, X_train, k)
train_labels = y_train[neighbors]
class_sums = [np.sum(train_labels == i) for i in range(n_classes)]
y_pred[i] = k_nn_assign_label(class_sums)
return y_pred
def k_nn_assign_label(class_sums):
"""Assing label according the most common class.
If there are multiple candidates, pick one randomly.
Arguments:
class_sums {list} -- Class frequencies.
Returns:
int -- Assinged class label.
"""
order = np.argsort(class_sums)[::-1]
candidates = [x for x in order if x == order[0]]
return np.random.RandomState(RANDOM_SEED).choice(candidates)
def classification_error(y_pred, y_true):
"""Return classification error.
Sum of incorrectly assinged classes divided by the number of points.
Arguments:
y_pred {ndarray} -- Predicted values.
y_true {ndarray} -- True values.
Returns:
float -- Error.
"""
y_true = y_true.reshape(y_pred.shape)
return np.sum(y_pred.astype(np.int)
!= y_true.astype(np.int)) / float(y_pred.size)
| [
"numpy.sum",
"numpy.log",
"numpy.argmax",
"machine_learning.utilities.k_fold_split_indexes",
"numpy.zeros",
"numpy.ones",
"numpy.random.RandomState",
"numpy.argsort",
"machine_learning.utilities.get_k_nn",
"numpy.concatenate"
] | [((3476, 3495), 'numpy.zeros', 'np.zeros', (['n_classes'], {}), '(n_classes)\n', (3484, 3495), True, 'import numpy as np\n'), ((3975, 4008), 'numpy.zeros', 'np.zeros', (['(n_classes, n_features)'], {}), '((n_classes, n_features))\n', (3983, 4008), True, 'import numpy as np\n'), ((4645, 4669), 'numpy.zeros', 'np.zeros', (['(N, n_classes)'], {}), '((N, n_classes))\n', (4653, 4669), True, 'import numpy as np\n'), ((4856, 4884), 'numpy.argmax', 'np.argmax', (['posterior'], {'axis': '(1)'}), '(posterior, axis=1)\n', (4865, 4884), True, 'import numpy as np\n'), ((5870, 5902), 'machine_learning.utilities.k_fold_split_indexes', 'k_fold_split_indexes', (['N', 'n_folds'], {}), '(N, n_folds)\n', (5890, 5902), False, 'from machine_learning.utilities import k_fold_split_indexes, get_k_nn\n'), ((7472, 7488), 'numpy.zeros', 'np.zeros', (['(N, 1)'], {}), '((N, 1))\n', (7480, 7488), True, 'import numpy as np\n'), ((1769, 1807), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {'dtype': 'np.int'}), '((X.shape[0], 1), dtype=np.int)\n', (1776, 1807), True, 'import numpy as np\n'), ((4057, 4077), 'numpy.sum', 'np.sum', (['X[y == c, :]'], {}), '(X[y == c, :])\n', (4063, 4077), True, 'import numpy as np\n'), ((5990, 6007), 'numpy.zeros', 'np.zeros', (['n_folds'], {}), '(n_folds)\n', (5998, 6007), True, 'import numpy as np\n'), ((7559, 7586), 'machine_learning.utilities.get_k_nn', 'get_k_nn', (['point', 'X_train', 'k'], {}), '(point, X_train, k)\n', (7567, 7586), False, 'from machine_learning.utilities import k_fold_split_indexes, get_k_nn\n'), ((8055, 8077), 'numpy.argsort', 'np.argsort', (['class_sums'], {}), '(class_sums)\n', (8065, 8077), True, 'import numpy as np\n'), ((4132, 4152), 'numpy.sum', 'np.sum', (['X[y == c, j]'], {}), '(X[y == c, j])\n', (4138, 4152), True, 'import numpy as np\n'), ((4178, 4221), 'numpy.log', 'np.log', (['((1.0 + Fnc) / (n_features + Fc_sum))'], {}), '((1.0 + Fnc) / (n_features + Fc_sum))\n', (4184, 4221), True, 'import numpy as np\n'), ((6137, 6162), 'numpy.concatenate', 'np.concatenate', (['tmp_folds'], {}), '(tmp_folds)\n', (6151, 6162), True, 'import numpy as np\n'), ((6432, 6446), 'numpy.sum', 'np.sum', (['errors'], {}), '(errors)\n', (6438, 6446), True, 'import numpy as np\n'), ((7651, 7676), 'numpy.sum', 'np.sum', (['(train_labels == i)'], {}), '(train_labels == i)\n', (7657, 7676), True, 'import numpy as np\n'), ((8148, 8182), 'numpy.random.RandomState', 'np.random.RandomState', (['RANDOM_SEED'], {}), '(RANDOM_SEED)\n', (8169, 8182), True, 'import numpy as np\n'), ((3554, 3568), 'numpy.sum', 'np.sum', (['(y == c)'], {}), '(y == c)\n', (3560, 3568), True, 'import numpy as np\n')] |
"""Django database functions.
This module supplements Django's own coverage of Postgres
and PostGIS functions.
https://docs.djangoproject.com/en/4.0/ref/models/expressions/#func-expressions-1
"""
from django.contrib.gis.db.models import GeometryField, LineStringField, PointField
from django.db.models import FloatField, Func
class Box3D(Func):
"""Compute the 3D bounding box of a geometry."""
function = "Box3D"
output_field = GeometryField()
class XMax(Func):
"""Returns the X maxima of a 2D or 3D bounding box or a geometry."""
function = "ST_XMax"
output_field = FloatField()
class XMin(Func):
"""Returns the X minima of a 2D or 3D bounding box or a geometry."""
function = "ST_XMin"
output_field = FloatField()
class YMax(Func):
"""Returns the Y maxima of a 2D or 3D bounding box or a geometry."""
function = "ST_YMax"
output_field = FloatField()
class YMin(Func):
"""Returns the Y minima of a 2D or 3D bounding box or a geometry."""
function = "ST_YMin"
output_field = FloatField()
class ZMax(Func):
"""Returns the Z maxima of a 2D or 3D bounding box or a geometry."""
function = "ST_ZMax"
output_field = FloatField()
class ZMin(Func):
"""Returns the Z minima of a 2D or 3D bounding box or a geometry."""
function = "ST_ZMin"
output_field = FloatField()
class MakePoint(Func):
"""Compute the pixel type of the first band of a raster."""
function = "ST_MakePoint"
output_field = PointField(srid=0)
class MakeLine(Func):
"""Compute the pixel type of the first band of a raster."""
function = "ST_MakeLine"
output_field = LineStringField(srid=0)
| [
"django.contrib.gis.db.models.PointField",
"django.contrib.gis.db.models.GeometryField",
"django.db.models.FloatField",
"django.contrib.gis.db.models.LineStringField"
] | [((447, 462), 'django.contrib.gis.db.models.GeometryField', 'GeometryField', ([], {}), '()\n', (460, 462), False, 'from django.contrib.gis.db.models import GeometryField, LineStringField, PointField\n'), ((601, 613), 'django.db.models.FloatField', 'FloatField', ([], {}), '()\n', (611, 613), False, 'from django.db.models import FloatField, Func\n'), ((752, 764), 'django.db.models.FloatField', 'FloatField', ([], {}), '()\n', (762, 764), False, 'from django.db.models import FloatField, Func\n'), ((903, 915), 'django.db.models.FloatField', 'FloatField', ([], {}), '()\n', (913, 915), False, 'from django.db.models import FloatField, Func\n'), ((1054, 1066), 'django.db.models.FloatField', 'FloatField', ([], {}), '()\n', (1064, 1066), False, 'from django.db.models import FloatField, Func\n'), ((1205, 1217), 'django.db.models.FloatField', 'FloatField', ([], {}), '()\n', (1215, 1217), False, 'from django.db.models import FloatField, Func\n'), ((1356, 1368), 'django.db.models.FloatField', 'FloatField', ([], {}), '()\n', (1366, 1368), False, 'from django.db.models import FloatField, Func\n'), ((1508, 1526), 'django.contrib.gis.db.models.PointField', 'PointField', ([], {'srid': '(0)'}), '(srid=0)\n', (1518, 1526), False, 'from django.contrib.gis.db.models import GeometryField, LineStringField, PointField\n'), ((1664, 1687), 'django.contrib.gis.db.models.LineStringField', 'LineStringField', ([], {'srid': '(0)'}), '(srid=0)\n', (1679, 1687), False, 'from django.contrib.gis.db.models import GeometryField, LineStringField, PointField\n')] |
import numpy as np
import pandas as pd
from . import ResError
def remove_leap_day(timeseries):
"""Removes leap days from a given timeseries
Parameters
----------
timeseries : array_like
The time series data to remove leap days from
* If something array_like is given, the length must be 8784
* If a pandas DataFrame or Series is given, time indexes will be used
directly
Returns
-------
Array
"""
if isinstance(timeseries, np.ndarray):
if timeseries.shape[0] == 8760:
return timeseries
elif timeseries.shape[0] == 8784:
times = pd.date_range("01-01-2000 00:00:00",
"12-31-2000 23:00:00", freq="H")
sel = np.logical_and((times.day == 29), (times.month == 2))
if len(timeseries.shape) == 1:
return timeseries[~sel]
else:
return timeseries[~sel, :]
else:
raise ResError('Cannot handle array shape ' + str(timeseries.shape))
elif isinstance(timeseries, pd.Series) or isinstance(timeseries, pd.DataFrame):
times = timeseries.index
sel = np.logical_and((times.day == 29), (times.month == 2))
if isinstance(timeseries, pd.Series):
return timeseries[~sel]
else:
return timeseries.loc[~sel]
else:
return remove_leap_day(np.array(timeseries))
| [
"numpy.array",
"pandas.date_range",
"numpy.logical_and"
] | [((1197, 1246), 'numpy.logical_and', 'np.logical_and', (['(times.day == 29)', '(times.month == 2)'], {}), '(times.day == 29, times.month == 2)\n', (1211, 1246), True, 'import numpy as np\n'), ((650, 719), 'pandas.date_range', 'pd.date_range', (['"""01-01-2000 00:00:00"""', '"""12-31-2000 23:00:00"""'], {'freq': '"""H"""'}), "('01-01-2000 00:00:00', '12-31-2000 23:00:00', freq='H')\n", (663, 719), True, 'import pandas as pd\n'), ((772, 821), 'numpy.logical_and', 'np.logical_and', (['(times.day == 29)', '(times.month == 2)'], {}), '(times.day == 29, times.month == 2)\n', (786, 821), True, 'import numpy as np\n'), ((1429, 1449), 'numpy.array', 'np.array', (['timeseries'], {}), '(timeseries)\n', (1437, 1449), True, 'import numpy as np\n')] |
# Generated by Django 2.2.3 on 2019-07-27 06:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Article',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=1000)),
('desc', models.CharField(max_length=1000)),
],
),
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type_usr', models.IntegerField()),
('phone', models.CharField(max_length=15)),
],
),
migrations.AddField(
model_name='registration',
name='userType',
field=models.CharField(blank=True, max_length=50),
),
]
| [
"django.db.models.CharField",
"django.db.models.IntegerField",
"django.db.models.AutoField"
] | [((1016, 1059), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(50)'}), '(blank=True, max_length=50)\n', (1032, 1059), False, 'from django.db import migrations, models\n'), ((321, 414), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (337, 414), False, 'from django.db import migrations, models\n'), ((439, 472), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1000)'}), '(max_length=1000)\n', (455, 472), False, 'from django.db import migrations, models\n'), ((500, 533), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1000)'}), '(max_length=1000)\n', (516, 533), False, 'from django.db import migrations, models\n'), ((670, 763), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (686, 763), False, 'from django.db import migrations, models\n'), ((791, 812), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (810, 812), False, 'from django.db import migrations, models\n'), ((841, 872), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(15)'}), '(max_length=15)\n', (857, 872), False, 'from django.db import migrations, models\n')] |
import csv
import sys
def write_welsh_translations(csv_file_name, output_file_name):
with open(csv_file_name, newline='') as csv_file:
messages = csv.reader(csv_file, delimiter=',')
output_file = open(output_file_name, 'w+')
# skip headers
for i in range(2):
next(messages, None)
# write translations
try:
invalid = []
for message in messages:
key = message[0].strip()
welsh = message[2].strip()
if not key:
output_file.write('\n')
elif key.startswith('#'):
if len(key) == 1:
output_file.write(key + '===================================================\n')
else:
output_file.write(key + '\n')
elif len(welsh) > 0:
output_file.write('{}={}\n'.format(key, welsh))
else:
invalid.append(message)
print('Finished')
print('Invalid records: ', len(invalid))
for x in invalid:
print('* {}'.format(x))
except IOError:
print("Error writing translations")
output_file.close()
if __name__ == '__main__':
output_file_name = "messages.cy"
if len(sys.argv) < 2:
print('Error: please provide the source CSV file name including fullpath in command line arguments')
print('Usage: create_welsh_messages.py [CSV file name] [output file name]\n')
print('Note: output file name is optional, default name is "{}"'.format(output_file_name))
else:
write_welsh_translations(sys.argv[1], sys.argv[2] if len(sys.argv) == 3 else output_file_name)
| [
"csv.reader"
] | [((160, 195), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (170, 195), False, 'import csv\n')] |
from flask import Flask, render_template, request, redirect, Blueprint
from flask_jsonpify import jsonpify
from werkzeug import secure_filename
import urllib.request
import urllib.error
import urllib.parse
import os
import json
import sys
import ase.io.cif
from ase import Atoms
import zlib
import sqlite3
import ntpath
import numpy as np
from matplotlib import pyplot as plt
import sys
#so that we can import globals
sys.path.append('../..')
from globals import *
#backgroud save_cells_unitcells_data job
def save_tdmaps_setup_data( id, tdmap_setup_json, tdmapsDBPath, tdmapsDBSchema):
inserted_key = None
sqlite3_conn = create_or_open_db(tdmapsDBPath, tdmapsDBSchema)
sql = '''insert into tdmaps ( id ) VALUES( ? );'''
id_key = None
sqlite3_conn.execute( sql,[ id_key ] )
sqlite3_conn.commit()
cur = sqlite3_conn.cursor()
sql_select = "SELECT last_insert_rowid();"
cur.execute(sql_select)
result_string = cur.fetchone()
if result_string:
inserted_key = result_string[0]
sqlite3_conn.close()
return inserted_key
def get_tdmaps_data( id, tdmapsDBPath, tdmapsDBSchema ):
result = None
inserted_key = None
sqlite3_conn = create_or_open_db(tdmapsDBPath, tdmapsDBPath)
cur = sqlite3_conn.cursor()
sql = "SELECT id , exp_setup_conf_id, cells_conf_id, slices_conf_id, waves_conf_id, dats_conf_id, simgrids_conf_id FROM tdmaps WHERE id = {0}".format(id)
cur.execute(sql)
result_binary = cur.fetchone()
if result_binary:
result = {
'id': result_binary[0],
'exp_setup_conf_id': result_binary[1],
'cells_conf_id': result_binary[2],
'slices_conf_id': result_binary[3],
'waves_conf_id': result_binary[4],
'dats_conf_id': result_binary[5],
'simgrids_conf_id': result_binary[6]
}
sqlite3_conn.close()
return result
#####################################################
##################### BLUEPRINT #####################
#####################################################
tdmaps = Blueprint(
'tdmaps', #name of module
__name__,
template_folder='templates' # templates folder
)
@tdmaps.route('/')
def index_tdmaps():
return render_template('index_tdmaps.html')
@tdmaps.route('/<string:tdmapid>', methods = ['GET'])
def api_tdmaps_get(tdmapid):
global apiVersion
global tdmapsDBPath
global tdmapsDBSchema
status = None
result = None
data = get_tdmaps_data(tdmapid, tdmapsDBPath, tdmapsDBSchema)
if data is None:
result = {
"apiVersion": apiVersion,
"params": request.args,
"method": request.method,
"took": 0,
"error" : {
"code": 404,
"message": "Something went wrong.",
"url": request.url,
},
}
return_code = 404
else:
tdmap_link = "{0}api/tdmaps/{1}".format( request.host_url, tdmapid )
result = {
"apiVersion": apiVersion,
"params": request.args,
"method": request.method,
"took": 0,
"data" : data,
"links" : { "tdmap" : { "self" : tdmap_link } },
}
return_code = 200
return jsonpify(result), return_code
@tdmaps.route('/setup', methods = ['POST'])
def api_tdmaps_setup():
global apiVersion
global tdmapsDBPath
global tdmapsDBSchema
status = None
data_dict = None
if len(request.data) > 0:
data_dict = json.loads( request.data )
data = {}
inserted_tdmap_id = save_tdmaps_setup_data(None, data_dict, tdmapsDBPath, tdmapsDBSchema)
tdmap_link = "{0}api/tdmaps/{1}".format( request.host_url, inserted_tdmap_id )
status = True
data = { 'id' : inserted_tdmap_id }
if status is None:
result = {
"apiVersion": apiVersion,
"params": request.args,
"method": request.method,
"took": 0,
"error" : {
"code": 404,
"message": "Something went wrong.",
"url": request.url,
},
}
return_code = 404
else:
result = {
"apiVersion": apiVersion,
"params": request.args,
"method": request.method,
"took": 0,
"data" : data,
"links" : { "tdmap" : { "self" : tdmap_link } },
}
return_code = 200
return jsonpify(result), return_code
| [
"sys.path.append",
"flask.Blueprint",
"json.loads",
"flask.render_template",
"flask_jsonpify.jsonpify"
] | [((421, 445), 'sys.path.append', 'sys.path.append', (['"""../.."""'], {}), "('../..')\n", (436, 445), False, 'import sys\n'), ((1927, 1985), 'flask.Blueprint', 'Blueprint', (['"""tdmaps"""', '__name__'], {'template_folder': '"""templates"""'}), "('tdmaps', __name__, template_folder='templates')\n", (1936, 1985), False, 'from flask import Flask, render_template, request, redirect, Blueprint\n'), ((2086, 2122), 'flask.render_template', 'render_template', (['"""index_tdmaps.html"""'], {}), "('index_tdmaps.html')\n", (2101, 2122), False, 'from flask import Flask, render_template, request, redirect, Blueprint\n'), ((3048, 3064), 'flask_jsonpify.jsonpify', 'jsonpify', (['result'], {}), '(result)\n', (3056, 3064), False, 'from flask_jsonpify import jsonpify\n'), ((3285, 3309), 'json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (3295, 3309), False, 'import json\n'), ((4164, 4180), 'flask_jsonpify.jsonpify', 'jsonpify', (['result'], {}), '(result)\n', (4172, 4180), False, 'from flask_jsonpify import jsonpify\n')] |
from joblib import Memory
import math
import music21 as m21
import numpy as np
import os
from scipy.fftpack import fft, ifft
def get_composers():
return ["Haydn", "Mozart"]
def get_data_dir():
return "/scratch/vl1019/nemisig2018_data"
def get_dataset_name():
return "nemisig2018"
def concatenate_layers(Sx, depth):
layers = []
for m in range(depth+1):
layers.append(Sx[m].flatten())
return np.concatenate(layers)
def frequential_filterbank(dim, J_fr, xi=0.4, sigma=0.16):
N = 2**J_fr
filterbank = np.zeros((N, 1, 2*(J_fr-2)+1))
for j in range(J_fr-2):
xi_j = xi * 2**(-j)
sigma_j = sigma * 2**(-j)
center = xi_j * N
den = 2 * sigma_j * sigma_j * N * N
psi = morlet(center, den, N, n_periods=4)
filterbank[:, 0, j] = psi
for j in range(J_fr-2, 2*(J_fr-2)):
psi = filterbank[:, 0, j - (J_fr-2)]
rev_psi = np.concatenate((psi[0:1], psi[1:][::-1]))
filterbank[:, 0, j] = rev_psi
sigma_phi = 2.0 * sigma * 2**(-(J_fr-2))
center_phi = 0.0
den_phi = sigma_phi * sigma_phi * N * N
phi = gabor(center_phi, den_phi, N)
rev_phi = np.concatenate((phi[0:1], phi[1:][::-1]))
phi = phi + rev_phi
phi[0] = 1.0
filterbank[:, 0, -1] = phi
for m in range(dim):
filterbank = np.expand_dims(filterbank, axis=2)
return filterbank
def gabor(center, den, N):
omegas = np.array(range(N))
return gauss(omegas - center, den)
def gauss(omega, den):
return np.exp(- omega*omega / den)
def is_even(n):
return (n%2 == 0)
def morlet(center, den, N, n_periods):
half_N = N >> 1
p_start = - ((n_periods-1) >> 1) - is_even(n_periods)
p_stop = ((n_periods-1) >> 1) + 1
omega_start = p_start * N
omega_stop = p_stop * N
omegas = np.array(range(omega_start, omega_stop))
gauss_center = gauss(omegas - center, den)
corrective_gaussians = np.zeros((N*n_periods, n_periods))
for p in range(n_periods):
offset = (p_start + p) * N
corrective_gaussians[:, p] = gauss(omegas - offset, den)
p_range = range(p_start, p_stop)
b = np.array([gauss(p*N - center, den) for p in p_range])
A = np.array([gauss((q-p)*N, den)
for p in range(n_periods)
for q in range(n_periods)]).reshape(n_periods, n_periods)
corrective_factors = np.linalg.solve(A, b)
y = gauss_center - np.dot(corrective_gaussians, corrective_factors)
y = np.fft.fftshift(y)
y = np.reshape(y, (n_periods, N))
y = np.sum(y, axis=0)
return y
def scatter(U, filterbank, dim):
U_ft = fft(U, axis=dim)
U_ft = np.expand_dims(U_ft, axis=-1)
Y_ft = U_ft * filterbank
Y = ifft(Y_ft, axis=dim)
return Y
def setup_timefrequency_scattering(J_tm, J_fr, depth):
filterbanks_tm = []
filterbanks_fr = []
for m in range(depth):
filterbank_tm = temporal_filterbank(2*m, J_tm)
filterbank_fr = frequential_filterbank(2*m+1, J_fr)
filterbanks_tm.append(filterbank_tm)
filterbanks_fr.append(filterbank_fr)
return (filterbanks_tm, filterbanks_fr)
def temporal_filterbank(dim, J_tm, xi=0.4, sigma=0.16):
N = 2**J_tm
filterbank = np.zeros((1, N, J_tm-2))
for j in range(J_tm-2):
xi_j = xi * 2**(-j)
sigma_j = sigma * 2**(-j)
center = xi_j * N
den = 2 * sigma_j * sigma_j * N * N
psi = morlet(center, den, N, n_periods=4)
filterbank[0, :, j] = psi
for m in range(dim):
filterbank = np.expand_dims(filterbank, axis=2)
return filterbank
def temporal_scattering(pianoroll, filterbanks, nonlinearity):
depth = len(filterbanks)
Us = [pianoroll]
Ss = []
for m in range(depth):
U = Us[m]
S = np.sum(U, axis=(0, 1))
filterbank = filterbanks[m]
Y = scatter(U, filterbank, 1)
if nonlinearity == "abs":
U = np.abs(Y)
else:
raise NotImplementedError
Us.append(U)
Ss.append(S)
S = np.sum(U, axis=(0, 1))
Ss.append(S)
return Ss
def timefrequency_scattering(pianoroll, filterbanks, nonlinearity):
filterbanks_tm = filterbanks[0]
filterbanks_fr = filterbanks[1]
depth = len(filterbanks_tm)
Us = [pianoroll]
Ss = []
for m in range(depth):
U = Us[m]
S = np.sum(U, axis=(0,1))
filterbank_tm = filterbanks_tm[m]
filterbank_fr = filterbanks_fr[m]
Y_tm = scatter(U, filterbank_tm, 1)
Y_fr = scatter(Y_tm, filterbank_fr, 0)
if nonlinearity == "abs":
U = np.abs(Y_fr)
else:
raise NotImplementedError
Us.append(U)
Ss.append(S)
S = np.sum(U, axis=(0, 1))
Ss.append(S)
return Ss
| [
"numpy.sum",
"numpy.abs",
"numpy.zeros",
"numpy.expand_dims",
"scipy.fftpack.fft",
"scipy.fftpack.ifft",
"numpy.fft.fftshift",
"numpy.exp",
"numpy.reshape",
"numpy.dot",
"numpy.linalg.solve",
"numpy.concatenate"
] | [((429, 451), 'numpy.concatenate', 'np.concatenate', (['layers'], {}), '(layers)\n', (443, 451), True, 'import numpy as np\n'), ((546, 582), 'numpy.zeros', 'np.zeros', (['(N, 1, 2 * (J_fr - 2) + 1)'], {}), '((N, 1, 2 * (J_fr - 2) + 1))\n', (554, 582), True, 'import numpy as np\n'), ((1168, 1209), 'numpy.concatenate', 'np.concatenate', (['(phi[0:1], phi[1:][::-1])'], {}), '((phi[0:1], phi[1:][::-1]))\n', (1182, 1209), True, 'import numpy as np\n'), ((1521, 1549), 'numpy.exp', 'np.exp', (['(-omega * omega / den)'], {}), '(-omega * omega / den)\n', (1527, 1549), True, 'import numpy as np\n'), ((1932, 1968), 'numpy.zeros', 'np.zeros', (['(N * n_periods, n_periods)'], {}), '((N * n_periods, n_periods))\n', (1940, 1968), True, 'import numpy as np\n'), ((2378, 2399), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (2393, 2399), True, 'import numpy as np\n'), ((2480, 2498), 'numpy.fft.fftshift', 'np.fft.fftshift', (['y'], {}), '(y)\n', (2495, 2498), True, 'import numpy as np\n'), ((2507, 2536), 'numpy.reshape', 'np.reshape', (['y', '(n_periods, N)'], {}), '(y, (n_periods, N))\n', (2517, 2536), True, 'import numpy as np\n'), ((2545, 2562), 'numpy.sum', 'np.sum', (['y'], {'axis': '(0)'}), '(y, axis=0)\n', (2551, 2562), True, 'import numpy as np\n'), ((2622, 2638), 'scipy.fftpack.fft', 'fft', (['U'], {'axis': 'dim'}), '(U, axis=dim)\n', (2625, 2638), False, 'from scipy.fftpack import fft, ifft\n'), ((2650, 2679), 'numpy.expand_dims', 'np.expand_dims', (['U_ft'], {'axis': '(-1)'}), '(U_ft, axis=-1)\n', (2664, 2679), True, 'import numpy as np\n'), ((2717, 2737), 'scipy.fftpack.ifft', 'ifft', (['Y_ft'], {'axis': 'dim'}), '(Y_ft, axis=dim)\n', (2721, 2737), False, 'from scipy.fftpack import fft, ifft\n'), ((3223, 3249), 'numpy.zeros', 'np.zeros', (['(1, N, J_tm - 2)'], {}), '((1, N, J_tm - 2))\n', (3231, 3249), True, 'import numpy as np\n'), ((4038, 4060), 'numpy.sum', 'np.sum', (['U'], {'axis': '(0, 1)'}), '(U, axis=(0, 1))\n', (4044, 4060), True, 'import numpy as np\n'), ((4718, 4740), 'numpy.sum', 'np.sum', (['U'], {'axis': '(0, 1)'}), '(U, axis=(0, 1))\n', (4724, 4740), True, 'import numpy as np\n'), ((924, 965), 'numpy.concatenate', 'np.concatenate', (['(psi[0:1], psi[1:][::-1])'], {}), '((psi[0:1], psi[1:][::-1]))\n', (938, 965), True, 'import numpy as np\n'), ((1328, 1362), 'numpy.expand_dims', 'np.expand_dims', (['filterbank'], {'axis': '(2)'}), '(filterbank, axis=2)\n', (1342, 1362), True, 'import numpy as np\n'), ((2423, 2471), 'numpy.dot', 'np.dot', (['corrective_gaussians', 'corrective_factors'], {}), '(corrective_gaussians, corrective_factors)\n', (2429, 2471), True, 'import numpy as np\n'), ((3538, 3572), 'numpy.expand_dims', 'np.expand_dims', (['filterbank'], {'axis': '(2)'}), '(filterbank, axis=2)\n', (3552, 3572), True, 'import numpy as np\n'), ((3779, 3801), 'numpy.sum', 'np.sum', (['U'], {'axis': '(0, 1)'}), '(U, axis=(0, 1))\n', (3785, 3801), True, 'import numpy as np\n'), ((4356, 4378), 'numpy.sum', 'np.sum', (['U'], {'axis': '(0, 1)'}), '(U, axis=(0, 1))\n', (4362, 4378), True, 'import numpy as np\n'), ((3926, 3935), 'numpy.abs', 'np.abs', (['Y'], {}), '(Y)\n', (3932, 3935), True, 'import numpy as np\n'), ((4603, 4615), 'numpy.abs', 'np.abs', (['Y_fr'], {}), '(Y_fr)\n', (4609, 4615), True, 'import numpy as np\n')] |
## Comborbidities:
## Comborbidities:
## Asthma, Obesity, Smoking, Diabetes, Heart diseae, Hypertension
## Symptom list: Covid-Recovered, Covid-Positive, Taste, Fever, Headache,
# Pneumonia, Stomach, Myocarditis, Blood-Clots, Death
## Mild symptoms: Taste, Fever, Headache, Stomach
## Critical symptoms: Pneumonia, Myocarditis, Blood-Clots
import numpy as np
import pickle
class Person:
def __init__(self, pop):
self.genes = np.random.choice(2, size=pop.n_genes)
self.gender = np.random.choice(2, 1)
self.age = np.random.gamma(3, 11)
self.age_adj = self.age / 100 # age affects everything
self.income = np.random.gamma(1, 10000)
self.comorbidities = [0] * pop.n_comorbidities
self.comorbidities[0] = pop.asthma
self.comorbidities[1] = pop.obesity * self.age_adj
self.comorbidities[2] = pop.smoking
self.diab = pop.diabetes + self.comorbidities[1] * 0.5
self.HT = pop.htension + self.comorbidities[2] * 0.5
self.comorbidities[3] = self.diab
self.comorbidities[4] = pop.heart * self.age_adj
self.comorbidities[5] = self.HT * self.age_adj
for i in range(pop.n_comorbidities):
if (np.random.uniform() < self.comorbidities[i]):
self.comorbidities[i] = 1
else:
self.comorbidities[i] = 0
self.symptom_baseline = np.array(
[pop.historical_prevalence, pop.prevalence, 0.01, 0.05, 0.05, 0.01,
0.02, 0.001, 0.001, 0.001]);
self.symptom_baseline = np.array(
np.matrix(self.genes) * pop.G).flatten() * self.symptom_baseline
self.symptom_baseline[0] = pop.historical_prevalence;
self.symptom_baseline[1] = pop.prevalence;
if (self.gender == 1):
self.symptom_baseline[8] += 0.01
else:
self.symptom_baseline[7] += 0.01
self.symptom_baseline[9] += 0.01
# Initially no symptoms apart from Covid+/CovidPre
self.symptoms = [0] * pop.n_symptoms
if (np.random.uniform() <= self.symptom_baseline[0]):
self.symptoms[0] = 1
# increase symptom probabilities for symptoms when covid+
if (np.random.uniform() <= self.symptom_baseline[1]):
self.symptoms[1] = 1
self.symptom_baseline = np.array(
[pop.historical_prevalence, pop.prevalence, 0.3, 0.2, 0.05,
0.2, 0.02, 0.05, 0.2, 0.1]);
self.vaccines = [0] * pop.n_vaccines
# use vaccine = -1 if no vaccine is given
def vaccinate(self, vaccine_array, pop):
## Vaccinated
if (sum(vaccine_array) >= 0):
vaccinated = True
else:
vaccinated = False
if (vaccinated):
vaccine = np.argmax(vaccine_array)
self.vaccines = vaccine_array
self.symptom_baseline[1] *= pop.baseline_efficacy[vaccine]
if (vaccinated and self.symptoms[1] == 1):
self.symptom_baseline[[2, 3, 4, 6]] *= pop.mild_efficacy[vaccine]
self.symptom_baseline[[5, 7, 8]] *= pop.critical_efficacy[vaccine]
self.symptom_baseline[9] *= pop.death_efficacy[vaccine]
if (self.symptoms[0] == 1):
self.symptom_baseline *= 0.5
# baseline symptoms of non-covid patients
if (self.symptoms[0] == 0 and self.symptoms[1] == 0):
self.symptom_baseline = np.array(
[0, 0, 0.001, 0.01, 0.02, 0.002, 0.005, 0.001, 0.002, 0.0001])
## Common side-effects
if (vaccine == 1):
self.symptom_baseline[8] += 0.01
self.symptom_baseline[9] += 0.001
if (vaccine == 2):
self.symptom_baseline[7] += 0.01
if (vaccine >= 0):
self.symptom_baseline[3] += 0.2
self.symptom_baseline[4] += 0.1
# model long covid sufferers by increasing the chances of various
# symptoms slightly
if (self.symptoms[0] == 1 and self.symptoms[1] == 0):
self.symptom_baseline += np.array(
[0, 0, 0.06, 0.04, 0.01, 0.04, 0.004, 0.01, 0.04, 0.01]);
# genetic factors
self.symptom_baseline = np.array(
np.matrix(self.genes) * pop.G).flatten() * self.symptom_baseline
# print("V:", vaccine, symptom_baseline)
for s in range(2, pop.n_symptoms):
if (np.random.uniform() < self.symptom_baseline[s]):
self.symptoms[s] = 1
class Population:
def __init__(self, n_genes, n_vaccines, n_treatments):
self.n_genes = n_genes
self.n_comorbidities = 6;
self.n_symptoms = 10
self.n_vaccines = n_vaccines
self.n_treatments = n_treatments
self.G = np.random.uniform(size=[self.n_genes, self.n_symptoms])
self.G /= sum(self.G)
self.A = np.random.uniform(size=[self.n_treatments, self.n_symptoms])
self.asthma = 0.08
self.obesity = 0.3
self.smoking = 0.2
self.diabetes = 0.1
self.heart = 0.15
self.htension = 0.3
self.baseline_efficacy = [0.5, 0.6, 0.7]
self.mild_efficacy = [0.6, 0.7, 0.8]
self.critical_efficacy = [0.8, 0.75, 0.85]
self.death_efficacy = [0.9, 0.95, 0.9]
self.vaccination_rate = [0.7, 0.1, 0.1, 0.1]
self.prevalence = 0.1
self.historical_prevalence = 0.1
## Generates data with the following structure:
## X: characteristics before treatment, including whether or not they
# were vaccinated
## The generated population may already be vaccinated.
def generate(self, n_individuals):
"""Generate a population.
Call this function before anything else is done.
Calling this function again generates a completely new population
sample, purging the previous one from memory.
:param int n_individuals: the number of individuals to generate
"""
self.n_individuals = n_individuals
X = np.zeros([n_individuals,
3 + self.n_genes + self.n_comorbidities
+ self.n_vaccines + self.n_symptoms])
Y = np.zeros([n_individuals, self.n_treatments, self.n_symptoms])
self.persons = []
for t in range(n_individuals):
person = Person(self)
vaccine = np.random.choice(4, p=self.vaccination_rate) - 1
vaccine_array = np.zeros(self.n_vaccines)
if (vaccine >= 0):
vaccine_array[vaccine] = 1
person.vaccinate(vaccine_array, self)
self.persons.append(person)
x_t = np.concatenate(
[person.symptoms, [person.age, person.gender, person.income],
person.genes, person.comorbidities, person.vaccines])
X[t, :] = x_t
self.X = X
return X
def vaccinate(self, person_index, vaccine_array):
""" Give a vaccine to a specific person.
Args:
person_index (int array), indices of person in the population
vaccine_array (n*|A| array), array indicating which vaccines are to
be given to each patient
Returns:
The symptoms of the selected individuals
Notes:
Currently only one vaccine dose is implemented, but in the future
multiple doses may be modelled.
"""
outcome = np.zeros([len(person_index), self.n_symptoms])
i = 0
for t in person_index:
self.persons[t].vaccinate(vaccine_array[i], self)
outcome[i] = self.persons[i].symptoms
i += 1
return outcome
def treat(self, person_index, treatment):
""" Treat a patient.
Args:
person_index (int array), indices of persons in the population to treat
treatment_array (n*|A| array), array indicating which treatments are
to be given to each patient
Returns:
The symptoms of the selected individuals
"""
N = len(person_index)
result = np.zeros([N, self.n_symptoms])
# use i to index the treated
# use t to index the original population
# print(treatment)
for i in range(N):
t = person_index[i]
r = np.array(np.matrix(treatment[i]) * self.A).flatten()
for k in range(self.n_symptoms):
if (k <= 1):
result[i, k] = self.X[t, k]
else:
if (np.random.uniform() < r[k]):
result[i, k] = 0
else:
result[i, k] = self.X[t, k]
return result
def get_features(self, person_index):
x_t = np.concatenate([self.persons[t].symptoms,
[self.persons[t].age, self.persons[t].gender,
self.persons[t].income], self.persons[t].genes,
self.persons[t].comorbidities,
self.persons[t].vaccines])
return x_t
## Treats a population
def treatment(self, X, policy):
treatments = np.zeros([X.shape[0], self.n_treatments])
result = np.zeros([X.shape[0], self.n_symptoms])
for t in range(X.shape[0]):
# print ("X:", result[t])
treatments[t][policy.get_action(X[t])] = 1
r = np.array(np.matrix(treatments[t]) * self.A).flatten()
for k in range(self.n_symptoms):
if (k <= 1):
result[t, k] = X[t, k]
else:
if (np.random.uniform() < r[k]):
result[t, k] = 0
else:
result[t, k] = X[t, k]
##print("X:", X[t,:self.n_symptoms] , "Y:", result[t])
return treatments, result
# main
if __name__ == "__main__":
import pandas
try:
import policy
except:
import project2.src.covid.policy
n_symptoms = 10
n_genes = 128
n_vaccines = 3
n_treatments = 4
pop = Population(n_genes, n_vaccines, n_treatments)
n_observations = 1000
X_observation = pop.generate(n_observations)
pandas.DataFrame(X_observation).to_csv('observation_features.csv',
header=False, index=False)
n_treated = 1000
X_treatment = pop.generate(n_treated)
X_treatment = X_treatment[X_treatment[:, 1] == 1]
print("Generating treatment outcomes")
a, y = pop.treatment(X_treatment, policy.RandomPolicy(n_treatments))
pandas.DataFrame(X_treatment).to_csv('treatment_features.csv',
header=False, index=False)
pandas.DataFrame(a).to_csv('treatment_actions.csv', header=False,
index=False)
pandas.DataFrame(y).to_csv('treatment_outcomes.csv', header=False,
index=False)
| [
"pandas.DataFrame",
"numpy.random.uniform",
"numpy.matrix",
"policy.RandomPolicy",
"numpy.argmax",
"numpy.zeros",
"policy.get_action",
"numpy.random.gamma",
"numpy.array",
"numpy.random.choice",
"numpy.concatenate"
] | [((440, 477), 'numpy.random.choice', 'np.random.choice', (['(2)'], {'size': 'pop.n_genes'}), '(2, size=pop.n_genes)\n', (456, 477), True, 'import numpy as np\n'), ((500, 522), 'numpy.random.choice', 'np.random.choice', (['(2)', '(1)'], {}), '(2, 1)\n', (516, 522), True, 'import numpy as np\n'), ((542, 564), 'numpy.random.gamma', 'np.random.gamma', (['(3)', '(11)'], {}), '(3, 11)\n', (557, 564), True, 'import numpy as np\n'), ((651, 676), 'numpy.random.gamma', 'np.random.gamma', (['(1)', '(10000)'], {}), '(1, 10000)\n', (666, 676), True, 'import numpy as np\n'), ((1399, 1507), 'numpy.array', 'np.array', (['[pop.historical_prevalence, pop.prevalence, 0.01, 0.05, 0.05, 0.01, 0.02, \n 0.001, 0.001, 0.001]'], {}), '([pop.historical_prevalence, pop.prevalence, 0.01, 0.05, 0.05, 0.01,\n 0.02, 0.001, 0.001, 0.001])\n', (1407, 1507), True, 'import numpy as np\n'), ((4791, 4846), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '[self.n_genes, self.n_symptoms]'}), '(size=[self.n_genes, self.n_symptoms])\n', (4808, 4846), True, 'import numpy as np\n'), ((4894, 4954), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '[self.n_treatments, self.n_symptoms]'}), '(size=[self.n_treatments, self.n_symptoms])\n', (4911, 4954), True, 'import numpy as np\n'), ((6039, 6146), 'numpy.zeros', 'np.zeros', (['[n_individuals, 3 + self.n_genes + self.n_comorbidities + self.n_vaccines +\n self.n_symptoms]'], {}), '([n_individuals, 3 + self.n_genes + self.n_comorbidities + self.\n n_vaccines + self.n_symptoms])\n', (6047, 6146), True, 'import numpy as np\n'), ((6198, 6259), 'numpy.zeros', 'np.zeros', (['[n_individuals, self.n_treatments, self.n_symptoms]'], {}), '([n_individuals, self.n_treatments, self.n_symptoms])\n', (6206, 6259), True, 'import numpy as np\n'), ((8072, 8102), 'numpy.zeros', 'np.zeros', (['[N, self.n_symptoms]'], {}), '([N, self.n_symptoms])\n', (8080, 8102), True, 'import numpy as np\n'), ((8739, 8942), 'numpy.concatenate', 'np.concatenate', (['[self.persons[t].symptoms, [self.persons[t].age, self.persons[t].gender,\n self.persons[t].income], self.persons[t].genes, self.persons[t].\n comorbidities, self.persons[t].vaccines]'], {}), '([self.persons[t].symptoms, [self.persons[t].age, self.\n persons[t].gender, self.persons[t].income], self.persons[t].genes, self\n .persons[t].comorbidities, self.persons[t].vaccines])\n', (8753, 8942), True, 'import numpy as np\n'), ((9158, 9199), 'numpy.zeros', 'np.zeros', (['[X.shape[0], self.n_treatments]'], {}), '([X.shape[0], self.n_treatments])\n', (9166, 9199), True, 'import numpy as np\n'), ((9217, 9256), 'numpy.zeros', 'np.zeros', (['[X.shape[0], self.n_symptoms]'], {}), '([X.shape[0], self.n_symptoms])\n', (9225, 9256), True, 'import numpy as np\n'), ((10552, 10585), 'policy.RandomPolicy', 'policy.RandomPolicy', (['n_treatments'], {}), '(n_treatments)\n', (10571, 10585), False, 'import policy\n'), ((2060, 2079), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (2077, 2079), True, 'import numpy as np\n'), ((2222, 2241), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (2239, 2241), True, 'import numpy as np\n'), ((2341, 2442), 'numpy.array', 'np.array', (['[pop.historical_prevalence, pop.prevalence, 0.3, 0.2, 0.05, 0.2, 0.02, 0.05,\n 0.2, 0.1]'], {}), '([pop.historical_prevalence, pop.prevalence, 0.3, 0.2, 0.05, 0.2, \n 0.02, 0.05, 0.2, 0.1])\n', (2349, 2442), True, 'import numpy as np\n'), ((2794, 2818), 'numpy.argmax', 'np.argmax', (['vaccine_array'], {}), '(vaccine_array)\n', (2803, 2818), True, 'import numpy as np\n'), ((3436, 3507), 'numpy.array', 'np.array', (['[0, 0, 0.001, 0.01, 0.02, 0.002, 0.005, 0.001, 0.002, 0.0001]'], {}), '([0, 0, 0.001, 0.01, 0.02, 0.002, 0.005, 0.001, 0.002, 0.0001])\n', (3444, 3507), True, 'import numpy as np\n'), ((4099, 4164), 'numpy.array', 'np.array', (['[0, 0, 0.06, 0.04, 0.01, 0.04, 0.004, 0.01, 0.04, 0.01]'], {}), '([0, 0, 0.06, 0.04, 0.01, 0.04, 0.004, 0.01, 0.04, 0.01])\n', (4107, 4164), True, 'import numpy as np\n'), ((6458, 6483), 'numpy.zeros', 'np.zeros', (['self.n_vaccines'], {}), '(self.n_vaccines)\n', (6466, 6483), True, 'import numpy as np\n'), ((6667, 6801), 'numpy.concatenate', 'np.concatenate', (['[person.symptoms, [person.age, person.gender, person.income], person.genes,\n person.comorbidities, person.vaccines]'], {}), '([person.symptoms, [person.age, person.gender, person.income],\n person.genes, person.comorbidities, person.vaccines])\n', (6681, 6801), True, 'import numpy as np\n'), ((10217, 10248), 'pandas.DataFrame', 'pandas.DataFrame', (['X_observation'], {}), '(X_observation)\n', (10233, 10248), False, 'import pandas\n'), ((10591, 10620), 'pandas.DataFrame', 'pandas.DataFrame', (['X_treatment'], {}), '(X_treatment)\n', (10607, 10620), False, 'import pandas\n'), ((10726, 10745), 'pandas.DataFrame', 'pandas.DataFrame', (['a'], {}), '(a)\n', (10742, 10745), False, 'import pandas\n'), ((10840, 10859), 'pandas.DataFrame', 'pandas.DataFrame', (['y'], {}), '(y)\n', (10856, 10859), False, 'import pandas\n'), ((1218, 1237), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (1235, 1237), True, 'import numpy as np\n'), ((4437, 4456), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (4454, 4456), True, 'import numpy as np\n'), ((6381, 6425), 'numpy.random.choice', 'np.random.choice', (['(4)'], {'p': 'self.vaccination_rate'}), '(4, p=self.vaccination_rate)\n', (6397, 6425), True, 'import numpy as np\n'), ((9357, 9380), 'policy.get_action', 'policy.get_action', (['X[t]'], {}), '(X[t])\n', (9374, 9380), False, 'import policy\n'), ((8512, 8531), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (8529, 8531), True, 'import numpy as np\n'), ((9619, 9638), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (9636, 9638), True, 'import numpy as np\n'), ((1585, 1606), 'numpy.matrix', 'np.matrix', (['self.genes'], {}), '(self.genes)\n', (1594, 1606), True, 'import numpy as np\n'), ((4264, 4285), 'numpy.matrix', 'np.matrix', (['self.genes'], {}), '(self.genes)\n', (4273, 4285), True, 'import numpy as np\n'), ((8300, 8323), 'numpy.matrix', 'np.matrix', (['treatment[i]'], {}), '(treatment[i])\n', (8309, 8323), True, 'import numpy as np\n'), ((9411, 9435), 'numpy.matrix', 'np.matrix', (['treatments[t]'], {}), '(treatments[t])\n', (9420, 9435), True, 'import numpy as np\n')] |
from typing import Any, Dict
from asyncio import sleep
from logging import getLogger
import aioredis
log = getLogger(__name__)
class _ConnectionsPool(aioredis.ConnectionsPool):
def __init__(
self, *args: Any, retry_count: int = 5, retry_interval: int = 2, **kwargs: Any
) -> None:
super().__init__(*args, **kwargs)
self._retry_count = retry_count
self._retry_interval = retry_interval
async def execute(self, command: str, *args: Any, **kwargs: Any) -> Any:
exc: Exception
for i in range(self._retry_count):
try:
return await super().execute(command, *args, **kwargs)
except (
aioredis.ConnectionClosedError,
aioredis.PoolClosedError,
ConnectionRefusedError,
) as e:
log.debug(
f"Command {command} failed, remaining attempts: {self._retry_count - i}"
)
exc = e
await sleep(self._retry_interval)
log.error(f"Command {command} has failed after {self._retry_count} retries")
raise exc
async def create_redis_pool(config: Dict[str, Any]) -> _ConnectionsPool:
config = config.copy()
address = (config.pop("host"), config.pop("port"))
return await aioredis.create_pool(address, pool_cls=_ConnectionsPool, **config)
| [
"aioredis.create_pool",
"asyncio.sleep",
"logging.getLogger"
] | [((109, 128), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (118, 128), False, 'from logging import getLogger\n'), ((1326, 1392), 'aioredis.create_pool', 'aioredis.create_pool', (['address'], {'pool_cls': '_ConnectionsPool'}), '(address, pool_cls=_ConnectionsPool, **config)\n', (1346, 1392), False, 'import aioredis\n'), ((1018, 1045), 'asyncio.sleep', 'sleep', (['self._retry_interval'], {}), '(self._retry_interval)\n', (1023, 1045), False, 'from asyncio import sleep\n')] |
# <NAME>
# <EMAIL>
# MIT License
# As-simple-as-possible training loop for an autoencoder.
import torch
import numpy as np
import torch.optim as optim
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from model.shallow_autoencoder import ConvAutoencoder
# load model definition
model = ConvAutoencoder()
model = model.double() # tackles a type error
# define loss and optimizer
criterion = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
# Toy data:
# Using separate input and output variables to cover all cases,
# since Y could differ from X (e.g. for denoising autoencoders).
X = np.random.random((300, 1, 100))
Y = X
# prepare pytorch dataloader
dataset = TensorDataset(torch.tensor(X), torch.tensor(Y))
dataloader = DataLoader(dataset, batch_size=256, shuffle=True)
# Training loop
for epoch in range(200):
for x, y in dataloader:
optimizer.zero_grad()
# forward and backward pass
out = model(x)
loss = criterion(out, y)
loss.backward()
optimizer.step()
print(loss.item()) # loss should be decreasing
| [
"torch.nn.MSELoss",
"torch.utils.data.DataLoader",
"numpy.random.random",
"model.shallow_autoencoder.ConvAutoencoder",
"torch.tensor"
] | [((318, 335), 'model.shallow_autoencoder.ConvAutoencoder', 'ConvAutoencoder', ([], {}), '()\n', (333, 335), False, 'from model.shallow_autoencoder import ConvAutoencoder\n'), ((424, 436), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (434, 436), False, 'from torch import nn\n'), ((649, 680), 'numpy.random.random', 'np.random.random', (['(300, 1, 100)'], {}), '((300, 1, 100))\n', (665, 680), True, 'import numpy as np\n'), ((790, 839), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(256)', 'shuffle': '(True)'}), '(dataset, batch_size=256, shuffle=True)\n', (800, 839), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((743, 758), 'torch.tensor', 'torch.tensor', (['X'], {}), '(X)\n', (755, 758), False, 'import torch\n'), ((760, 775), 'torch.tensor', 'torch.tensor', (['Y'], {}), '(Y)\n', (772, 775), False, 'import torch\n')] |
import asyncio
import dataclasses
import enum
from typing import Dict, Optional, Set, Tuple
from supriya.clocks import AsyncTempoClock, Moment
from ..bases import Event
from .bases import ApplicationObject
from .parameters import ParameterGroup, ParameterObject
class Transport(ApplicationObject):
### CLASS VARIABLES ###
class EventType(enum.IntEnum):
CHANGE = 0
SCHEDULE = 1
MIDI_PERFORM = 2
DEVICE_NOTE_OFF = 3
DEVICE_NOTE_ON = 4
CLIP_LAUNCH = 5
CLIP_EDIT = 6
CLIP_PERFORM = 7
### INITIALIZER ###
def __init__(self):
ApplicationObject.__init__(self)
self._parameter_group = ParameterGroup()
self._parameters: Dict[str, ParameterObject] = {}
self._clock = AsyncTempoClock()
self._dependencies: Set[ApplicationObject] = set()
self._mutate(slice(None), [self._parameter_group])
self._tick_event_id = None
### PRIVATE METHODS ###
async def _application_perform_callback(self, clock_context, midi_message):
await self.application.perform(
[midi_message], moment=clock_context.current_moment
)
@classmethod
async def _deserialize(cls, data, transport_object):
await transport_object.set_tempo(data["spec"]["tempo"])
await transport_object.set_time_signature(*data["spec"]["time_signature"])
def _serialize(self):
return {
"kind": type(self).__name__,
"spec": {
"tempo": self._clock.beats_per_minute,
"time_signature": list(self._clock.time_signature),
},
}
def _tick_callback(self, clock_context):
self.application.pubsub.publish(TransportTicked(clock_context.desired_moment))
return 1 / clock_context.desired_moment.time_signature[1] / 4
### PUBLIC METHODS ###
async def cue(self, *args, **kwargs) -> int:
return self._clock.cue(*args, **kwargs)
async def cancel(self, *args, **kwargs) -> Optional[Tuple]:
return self._clock.cancel(*args, **kwargs)
async def perform(self, midi_messages):
if (
self.application is None
or self.application.status != self.application.Status.REALTIME
):
return
self._debug_tree(
self, "Perform", suffix=repr([type(_).__name__ for _ in midi_messages])
)
await self.schedule(self._application_perform_callback, args=midi_messages)
if not self.is_running:
await self.start()
async def reschedule(self, *args, **kwargs) -> Optional[int]:
return self._clock.reschedule(*args, **kwargs)
async def schedule(self, *args, **kwargs) -> int:
return self._clock.schedule(*args, **kwargs)
async def set_tempo(self, beats_per_minute: float):
self._clock.change(beats_per_minute=beats_per_minute)
async def set_time_signature(self, numerator, denominator):
self._clock.change(time_signature=[numerator, denominator])
async def start(self):
async with self.lock([self]):
self._tick_event_id = await self.cue(self._tick_callback)
await asyncio.gather(*[_._start() for _ in self._dependencies])
await self._clock.start()
self.application.pubsub.publish(TransportStarted())
async def stop(self):
await self._clock.stop()
async with self.lock([self]):
await asyncio.gather(*[_._stop() for _ in self._dependencies])
await self.application.flush()
await self.cancel(self._tick_event_id)
self.application.pubsub.publish(TransportStopped())
### PUBLIC PROPERTIES ###
@property
def clock(self):
return self._clock
@property
def is_running(self):
return self._clock.is_running
@property
def parameters(self):
return self._parameters
@dataclasses.dataclass
class TransportStarted(Event):
pass
@dataclasses.dataclass
class TransportStopped(Event):
pass
@dataclasses.dataclass
class TransportTicked(Event): # TODO: ClipView needs to know start delta
moment: Moment
| [
"supriya.clocks.AsyncTempoClock"
] | [((778, 795), 'supriya.clocks.AsyncTempoClock', 'AsyncTempoClock', ([], {}), '()\n', (793, 795), False, 'from supriya.clocks import AsyncTempoClock, Moment\n')] |
#
# twitter csv process
# write by @jiyang_viz
#
# require:
# https://github.com/edburnett/twitter-text-python
#
# download csv file from:
# https://github.com/bpb27/political_twitter_archive/tree/master/realdonaldtrump
#
import json
import csv
from ttp import ttp
from dateutil import parser as date_parser
# read csv to Dict
with open('realdonaldtrump.csv', 'r') as f:
reader = csv.DictReader(f, delimiter = ',')
data = list(reader)
# write to json file (same fields as csv)
with open('realdonaldtrump.json', 'w') as f:
for item in data:
f.write(json.dumps(item) + '\n')
# get more info from text message
parser = ttp.Parser()
for item in data:
result = parser.parse(item['text'])
item['tags'] = result.tags
item['users'] = result.users
item['reply'] = result.reply
item['tweet_time'] = str(date_parser.parse(item['created_at']))
# write to json file (more fields)
with open('realdonaldtrump_more.json', 'w') as f:
for item in data:
f.write(json.dumps(item) + '\n') | [
"csv.DictReader",
"dateutil.parser.parse",
"json.dumps",
"ttp.ttp.Parser"
] | [((641, 653), 'ttp.ttp.Parser', 'ttp.Parser', ([], {}), '()\n', (651, 653), False, 'from ttp import ttp\n'), ((387, 419), 'csv.DictReader', 'csv.DictReader', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (401, 419), False, 'import csv\n'), ((838, 875), 'dateutil.parser.parse', 'date_parser.parse', (["item['created_at']"], {}), "(item['created_at'])\n", (855, 875), True, 'from dateutil import parser as date_parser\n'), ((572, 588), 'json.dumps', 'json.dumps', (['item'], {}), '(item)\n', (582, 588), False, 'import json\n'), ((1001, 1017), 'json.dumps', 'json.dumps', (['item'], {}), '(item)\n', (1011, 1017), False, 'import json\n')] |
#!/usr/bin/env python
import optparse
import os
import shutil
import sys
def revert(path):
bkup = path + os.path.extsep + options.backup_suffix
if os.access(bkup, os.R_OK):
shutil.copy2(bkup, path)
os.remove(bkup)
def corrupt(path, offset, value):
if options.backup:
shutil.copy2(path, path + os.path.extsep + options.backup_suffix)
with open(path, 'r+b', buffering=0) as f:
f.seek(offset)
f.write(bytes(chr(value)))
parser = optparse.OptionParser(
usage='usage: %prog file byte-offset replacment-value')
parser.add_option("", "--revert",
action="store_true", default=False,
help="restore the path to pristine condition if possible.")
parser.add_option("-b", "--backup",
action="store_true", default=True, dest="backup",
help="create a backup of the uncorrupted original"
" [default: %default]")
parser.add_option("", "--no-backup",
action="store_false", dest="backup",
help="do not create a backup of the uncorrupted original.")
parser.add_option("", "--backup-suffix",
type="string", default="pristine",
help="suffix for uncorrupted copy of the file"
" [default: %default]")
(options, args) = parser.parse_args()
if options.revert:
if len(args) != 1:
parser.print_help()
sys.exit(1)
elif len(args) != 3:
parser.print_help()
sys.exit(1)
path = args[0]
revert(path)
if not options.revert:
offset = int(eval(args[1]))
value = int(eval(args[2]))
corrupt(path, offset, value)
| [
"os.remove",
"optparse.OptionParser",
"shutil.copy2",
"os.access",
"sys.exit"
] | [((488, 565), 'optparse.OptionParser', 'optparse.OptionParser', ([], {'usage': '"""usage: %prog file byte-offset replacment-value"""'}), "(usage='usage: %prog file byte-offset replacment-value')\n", (509, 565), False, 'import optparse\n'), ((157, 181), 'os.access', 'os.access', (['bkup', 'os.R_OK'], {}), '(bkup, os.R_OK)\n', (166, 181), False, 'import os\n'), ((191, 215), 'shutil.copy2', 'shutil.copy2', (['bkup', 'path'], {}), '(bkup, path)\n', (203, 215), False, 'import shutil\n'), ((224, 239), 'os.remove', 'os.remove', (['bkup'], {}), '(bkup)\n', (233, 239), False, 'import os\n'), ((307, 372), 'shutil.copy2', 'shutil.copy2', (['path', '(path + os.path.extsep + options.backup_suffix)'], {}), '(path, path + os.path.extsep + options.backup_suffix)\n', (319, 372), False, 'import shutil\n'), ((1450, 1461), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1458, 1461), False, 'import sys\n'), ((1511, 1522), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1519, 1522), False, 'import sys\n')] |
# Copyright 2017 <NAME>. See LICENSE.md file for terms.
import socket
import threading
import api
import usersim
TCP_IP = 'localhost'
TCP_PORT = 5005
def run_test():
telnet_config = {'type': 'telnet',
'config': {'host': TCP_IP,
'username': 'admin',
'password': 'password',
'commandlist': ['printstuff', 'do other stuff', 'do this thing'],
'port': TCP_PORT}}
sim = usersim.UserSim(True)
task_id = api.validate_config(telnet_config)
def start_server():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((TCP_IP, TCP_PORT))
s.listen(1)
conn, addr = s.accept()
print('Connection Address: ' + str(addr))
while True:
data = conn.recv(20)
if not data:
break
print('received data: ' + str(data))
conn.send(data)
conn.close()
if __name__ == '__main__':
run_test()
| [
"usersim.UserSim",
"socket.socket",
"api.validate_config"
] | [((527, 548), 'usersim.UserSim', 'usersim.UserSim', (['(True)'], {}), '(True)\n', (542, 548), False, 'import usersim\n'), ((564, 598), 'api.validate_config', 'api.validate_config', (['telnet_config'], {}), '(telnet_config)\n', (583, 598), False, 'import api\n'), ((628, 677), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (641, 677), False, 'import socket\n')] |
import copy
import json
import os
from urllib.parse import urlparse
import pkg_resources
from guillotina import app_settings
from guillotina import configure
from guillotina.api.service import Service
from guillotina.component import getMultiAdapter
from guillotina.interfaces import IAbsoluteURL
from guillotina.utils import get_authenticated_user
from guillotina.utils import get_full_content_path
from guillotina.utils import get_request_scheme
from guillotina.utils import get_security_policy
from guillotina.utils import resolve_dotted_name
from zope.interface import Interface
from zope.interface.interfaces import ComponentLookupError
here = os.path.dirname(os.path.realpath(__file__))
@configure.service(
method="GET",
context=Interface,
name="@swagger",
permission="guillotina.swagger.View",
ignore=True,
)
class SwaggerDefinitionService(Service):
__allow_access__ = True
def get_data(self, data):
if callable(data):
data = data(self.context)
return data
def load_swagger_info(self, api_def, path, method, tags, service_def):
path = path.rstrip("/")
if path not in api_def:
api_def[path or "/"] = {}
desc = self.get_data(service_def.get("description", ""))
swagger_conf = service_def.get("swagger", {})
if swagger_conf.get("display_permission", True):
if desc:
desc += f" 〜 permission: {service_def['permission']}"
else:
desc += f"permission: {service_def['permission']}"
api_def[path or "/"][method.lower()] = {
"tags": swagger_conf.get("tags", [""]) or tags,
"parameters": self.get_data(service_def.get("parameters", {})),
"produces": self.get_data(service_def.get("produces", [])),
"summary": self.get_data(service_def.get("summary", "")),
"description": desc,
"responses": self.get_data(service_def.get("responses", {})),
}
def get_endpoints(self, iface_conf, base_path, api_def, tags=None):
tags = tags or []
for method in iface_conf.keys():
if method == "endpoints":
for name in iface_conf["endpoints"]:
self.get_endpoints(
iface_conf["endpoints"][name],
os.path.join(base_path, name),
api_def,
tags=[name.strip("@")],
)
else:
if method.lower() == "options":
continue
service_def = iface_conf[method]
swagger_conf = service_def.get("swagger", {})
if (service_def.get("ignore") or
service_def.get("swagger_ignore") or swagger_conf.get("ignore")):
continue
if not self.policy.check_permission(
service_def["permission"], self.context
):
continue
for sub_path in [""] + swagger_conf.get("extra_paths", []):
path = os.path.join(base_path, sub_path)
if "traversed_service_definitions" in service_def:
trav_defs = service_def[
"traversed_service_definitions"
]
if isinstance(trav_defs, dict):
for sub_path, sub_service_def in trav_defs.items():
sub_service_def["permission"] = service_def[
"permission"
]
self.load_swagger_info(
api_def,
os.path.join(path, sub_path),
method,
tags,
sub_service_def,
)
else:
self.load_swagger_info(
api_def, path, method, tags, service_def
)
async def __call__(self):
user = get_authenticated_user()
self.policy = get_security_policy(user)
definition = copy.deepcopy(
app_settings["swagger"]["base_configuration"]
)
vhm = self.request.headers.get("X-VirtualHost-Monster")
if vhm:
parsed_url = urlparse(vhm)
definition["host"] = parsed_url.netloc
definition["schemes"] = [parsed_url.scheme]
definition["basePath"] = parsed_url.path
else:
definition["host"] = self.request.host
definition["schemes"] = [get_request_scheme(self.request)]
if 'version' not in definition['info']:
definition["info"]["version"] = pkg_resources.get_distribution(
"guillotina"
).version
api_defs = app_settings["api_definition"]
path = get_full_content_path(self.context)
for dotted_iface in api_defs.keys():
iface = resolve_dotted_name(dotted_iface)
if iface.providedBy(self.context):
iface_conf = api_defs[dotted_iface]
self.get_endpoints(iface_conf, path, definition["paths"])
definition["definitions"] = app_settings["json_schema_definitions"]
return definition
AUTH_HTML = '''
<form id='api_selector'>
<div id="auth_container">
<div>
<a class="authorize__btn" href="#">Authorize</a>
</div>
</div>
</form>
'''
@configure.service(
method="GET",
context=Interface,
name="@docs",
permission="guillotina.swagger.View",
ignore=True,
)
async def render_docs_index(context, request):
if app_settings['swagger'].get('index_html'):
index_file = app_settings['swagger']['index_html']
else:
index_file = os.path.join(here, "index.html")
with open(index_file) as fi:
html = fi.read()
swagger_settings = app_settings["swagger"]
url = swagger_settings["base_url"] or request.headers.get(
"X-VirtualHost-Monster"
)
if url is None:
try:
url = getMultiAdapter((context, request), IAbsoluteURL)()
except ComponentLookupError:
url = "{}://{}".format(get_request_scheme(request), request.host)
swagger_settings["initial_swagger_url"] = url
if swagger_settings['authentication_allowed']:
auth = AUTH_HTML
else:
auth = ''
return html.format(
app_settings=app_settings,
request=request,
swagger_settings=json.dumps(swagger_settings),
base_url=url,
static_url="{}/swagger_static/".format(url if url != "/" else ""),
auth=auth,
title=swagger_settings['base_configuration']['info']['title']
)
| [
"pkg_resources.get_distribution",
"guillotina.utils.get_request_scheme",
"copy.deepcopy",
"guillotina.utils.get_full_content_path",
"guillotina.utils.get_security_policy",
"os.path.realpath",
"json.dumps",
"guillotina.configure.service",
"guillotina.utils.get_authenticated_user",
"guillotina.component.getMultiAdapter",
"guillotina.utils.resolve_dotted_name",
"os.path.join",
"urllib.parse.urlparse"
] | [((700, 822), 'guillotina.configure.service', 'configure.service', ([], {'method': '"""GET"""', 'context': 'Interface', 'name': '"""@swagger"""', 'permission': '"""guillotina.swagger.View"""', 'ignore': '(True)'}), "(method='GET', context=Interface, name='@swagger',\n permission='guillotina.swagger.View', ignore=True)\n", (717, 822), False, 'from guillotina import configure\n'), ((5663, 5783), 'guillotina.configure.service', 'configure.service', ([], {'method': '"""GET"""', 'context': 'Interface', 'name': '"""@docs"""', 'permission': '"""guillotina.swagger.View"""', 'ignore': '(True)'}), "(method='GET', context=Interface, name='@docs', permission\n ='guillotina.swagger.View', ignore=True)\n", (5680, 5783), False, 'from guillotina import configure\n'), ((669, 695), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (685, 695), False, 'import os\n'), ((4218, 4242), 'guillotina.utils.get_authenticated_user', 'get_authenticated_user', ([], {}), '()\n', (4240, 4242), False, 'from guillotina.utils import get_authenticated_user\n'), ((4265, 4290), 'guillotina.utils.get_security_policy', 'get_security_policy', (['user'], {}), '(user)\n', (4284, 4290), False, 'from guillotina.utils import get_security_policy\n'), ((4312, 4372), 'copy.deepcopy', 'copy.deepcopy', (["app_settings['swagger']['base_configuration']"], {}), "(app_settings['swagger']['base_configuration'])\n", (4325, 4372), False, 'import copy\n'), ((5052, 5087), 'guillotina.utils.get_full_content_path', 'get_full_content_path', (['self.context'], {}), '(self.context)\n', (5073, 5087), False, 'from guillotina.utils import get_full_content_path\n'), ((5989, 6021), 'os.path.join', 'os.path.join', (['here', '"""index.html"""'], {}), "(here, 'index.html')\n", (6001, 6021), False, 'import os\n'), ((4500, 4513), 'urllib.parse.urlparse', 'urlparse', (['vhm'], {}), '(vhm)\n', (4508, 4513), False, 'from urllib.parse import urlparse\n'), ((5154, 5187), 'guillotina.utils.resolve_dotted_name', 'resolve_dotted_name', (['dotted_iface'], {}), '(dotted_iface)\n', (5173, 5187), False, 'from guillotina.utils import resolve_dotted_name\n'), ((6711, 6739), 'json.dumps', 'json.dumps', (['swagger_settings'], {}), '(swagger_settings)\n', (6721, 6739), False, 'import json\n'), ((4776, 4808), 'guillotina.utils.get_request_scheme', 'get_request_scheme', (['self.request'], {}), '(self.request)\n', (4794, 4808), False, 'from guillotina.utils import get_request_scheme\n'), ((4902, 4946), 'pkg_resources.get_distribution', 'pkg_resources.get_distribution', (['"""guillotina"""'], {}), "('guillotina')\n", (4932, 4946), False, 'import pkg_resources\n'), ((6280, 6329), 'guillotina.component.getMultiAdapter', 'getMultiAdapter', (['(context, request)', 'IAbsoluteURL'], {}), '((context, request), IAbsoluteURL)\n', (6295, 6329), False, 'from guillotina.component import getMultiAdapter\n'), ((3127, 3160), 'os.path.join', 'os.path.join', (['base_path', 'sub_path'], {}), '(base_path, sub_path)\n', (3139, 3160), False, 'import os\n'), ((6404, 6431), 'guillotina.utils.get_request_scheme', 'get_request_scheme', (['request'], {}), '(request)\n', (6422, 6431), False, 'from guillotina.utils import get_request_scheme\n'), ((2352, 2381), 'os.path.join', 'os.path.join', (['base_path', 'name'], {}), '(base_path, name)\n', (2364, 2381), False, 'import os\n'), ((3800, 3828), 'os.path.join', 'os.path.join', (['path', 'sub_path'], {}), '(path, sub_path)\n', (3812, 3828), False, 'import os\n')] |
import asyncio
import getpass
import json
import os
import websockets
from shape import S, Z, I, O, J, T, L, Shape
from search import *
async def agent_loop(server_address="localhost:8000", agent_name="student"):
async with websockets.connect(f"ws://{server_address}/player") as websocket:
# Receive information about static game properties
await websocket.send(json.dumps({"cmd": "join", "name": agent_name}))
initial_info = json.loads(
await websocket.recv()
) # receive game update, this must be called timely or your game will get out of sync with the server
shapes_keys = shapesKeys(SHAPES,initial_info)
A = -0.510066
B = -0.184483
C = -0.35663
D = 0.760666
variables = [A,B,C,D]
new_piece = True #variavel para saber é uma nova peça e, assim, calcular a search tree
keys = [] # isto pode ser um array de arrays, cada sub-array é o conjunto de chaves para uma das peças especificas no lookahead
first_piece = True #quando está é true, temos de usar o search() e calcular as keys consoante o lookahead
all_keys = []
# grid = {(tup[0],tup[1]) for tup in initial_info['grid']}
# x = max(grid, key = lambda coord : coord[0])[0] + 1
# y = max(grid, key = lambda coord : coord[1])[1]
# print(x,y)
while True:
try:
state = json.loads(
await websocket.recv()
) # receive game update, this must be called timely or your game will get out of sync with the server
if keys:
await websocket.send(
json.dumps({"cmd": "key", "key": keys.pop(0)})
)
# Peça recebida
if 'piece' in state:
piece = state['piece']
next_pieces = state['next_pieces'] # apenas a prineira peça
game_speed = state['game_speed']
else:
piece = None
# A peça foi encaixada, não existindo nenhuma nova, por agora
if piece is None:
new_piece = True
# Nova peça
elif new_piece:
# Caso a peça faça parte do lookahead de uma peça anterior (só verifica se keys existe porque o pop das keys ja acontece acima)
if not first_piece:
# se todas as chaves/keys do lookahead já foram enviadas, então acabou e a próxima peça recebida vai fazer o search
if not all_keys:
first_piece = True
else:
new_piece = False
keys = all_keys.pop(0)
# Encontrar a melhor solução para a nova peça
elif first_piece:
current_shape = findShape(piece)
next_shapes = [findShape(shape) for shape in next_pieces]
shapes = None
if game_speed <= 25:
# lookahead 3
shapes = [current_shape] + next_shapes[:]
elif game_speed > 25 and game_speed < 32:
#lookahead 2
shapes = [current_shape] + next_shapes[:-1]
elif game_speed >= 32:
#lookahead 1
shapes = [current_shape] + next_shapes[:-2]
#shapes = [current_shape] + next_shapes[:-2]
s = Search(state,initial_info,shapes,variables,shapes_keys)
s.search()
all_keys = None
try:
all_keys = [sol.keys for sol in s.best_solution.solutions]
except:
all_keys = [["s"]]*len(shapes)
keys = all_keys.pop(0)
new_piece = False
first_piece = False
except websockets.exceptions.ConnectionClosedOK:
print("Server has cleanly disconnected us")
return
def shapesKeys(shapes, initial_info):
grid = {(tup[0],tup[1]) for tup in initial_info['grid']}
x = max(grid, key = lambda coord : coord[0])[0] + 1
shapekeys = dict() #dicionario q guarda shape+rotation e todas as teclas possiveis no tabuleiro deça peça para essa rotaçao
for fshape in shapes: # para cada shape existente vai descobrir TODAS as combinaçoes de teclas q podem ser premidas
fshape.set_pos((x - fshape.dimensions.x) / 2, 0)
for rot in range(0, len(fshape.plan)): # loop para fazer cada rotaçao da peça atual
_fs = copy(fshape)
_fs.rotate(rot)
min_x = min(_fs.positions, key=lambda coords: coords[0])[0]
max_x = max(_fs.positions, key=lambda coords: coords[0])[0]
name = _fs.name + str(rot)
# percorrer colunas [1,8]
for a in range(1, x-1):
x_differential = a - min_x
# dispensa soluções não válidas
if (x_differential + max_x >= x - 1):
break
keys = ["w"]*rot
keys += ["a"]*abs(x_differential) + ["s"] if x_differential < 0 else ["d"]*abs(x_differential) + ["s"]
shapekeys.setdefault(name, []).append(keys)
return shapekeys
def findShape(piece):
#S (done)
if piece[0][0] == piece[1][0] and piece[1][1] == piece[2][1] and piece[2][0] == piece[3][0]:
fshape = Shape(S)
#Z (done)
elif piece[0][0] == piece[2][0] and piece[1][1] == piece[2][1] and piece[1][0] == piece[3][0]:
fshape = Shape(Z)
#I (done)
elif piece[0][1] == piece[1][1] and piece[1][1] == piece[2][1] and piece[2][1] == piece[3][1]:
fshape = Shape(I)
#O (done)
elif piece[0][0] == piece[2][0] and piece[0][1] == piece[1][1] and piece[1][0] == piece[3][0] and piece[2][1] == piece[3][1]:
fshape = Shape(O)
#J (done)
elif piece[0][1] == piece[1][1] and piece[0][0] == piece[2][0] and piece[2][0] == piece[3][0]:
fshape = Shape(J)
#T (done)
elif piece[0][0] == piece[1][0] and piece[1][1] == piece[2][1] and piece[1][0] == piece[3][0]:
fshape = Shape(T)
#L (done)
elif piece[0][0] == piece[1][0] and piece[1][0] == piece[2][0] and piece[2][1] == piece[3][1]:
fshape = Shape(L)
return fshape
# DO NOT CHANGE THE LINES BELLOW
# You can change the default values using the command line, example:
# $ NAME='arrumador' python3 client.py
loop = asyncio.get_event_loop()
SERVER = os.environ.get("SERVER", "localhost")
PORT = os.environ.get("PORT", "8000")
NAME = os.environ.get("NAME", getpass.getuser())
loop.run_until_complete(agent_loop(f"{SERVER}:{PORT}", NAME)) | [
"shape.Shape",
"getpass.getuser",
"asyncio.get_event_loop",
"websockets.connect",
"json.dumps",
"os.environ.get"
] | [((7052, 7076), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (7074, 7076), False, 'import asyncio\n'), ((7086, 7123), 'os.environ.get', 'os.environ.get', (['"""SERVER"""', '"""localhost"""'], {}), "('SERVER', 'localhost')\n", (7100, 7123), False, 'import os\n'), ((7131, 7161), 'os.environ.get', 'os.environ.get', (['"""PORT"""', '"""8000"""'], {}), "('PORT', '8000')\n", (7145, 7161), False, 'import os\n'), ((7192, 7209), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (7207, 7209), False, 'import getpass\n'), ((230, 281), 'websockets.connect', 'websockets.connect', (['f"""ws://{server_address}/player"""'], {}), "(f'ws://{server_address}/player')\n", (248, 281), False, 'import websockets\n'), ((5991, 5999), 'shape.Shape', 'Shape', (['S'], {}), '(S)\n', (5996, 5999), False, 'from shape import S, Z, I, O, J, T, L, Shape\n'), ((6131, 6139), 'shape.Shape', 'Shape', (['Z'], {}), '(Z)\n', (6136, 6139), False, 'from shape import S, Z, I, O, J, T, L, Shape\n'), ((385, 432), 'json.dumps', 'json.dumps', (["{'cmd': 'join', 'name': agent_name}"], {}), "({'cmd': 'join', 'name': agent_name})\n", (395, 432), False, 'import json\n'), ((6271, 6279), 'shape.Shape', 'Shape', (['I'], {}), '(I)\n', (6276, 6279), False, 'from shape import S, Z, I, O, J, T, L, Shape\n'), ((6442, 6450), 'shape.Shape', 'Shape', (['O'], {}), '(O)\n', (6447, 6450), False, 'from shape import S, Z, I, O, J, T, L, Shape\n'), ((6582, 6590), 'shape.Shape', 'Shape', (['J'], {}), '(J)\n', (6587, 6590), False, 'from shape import S, Z, I, O, J, T, L, Shape\n'), ((6722, 6730), 'shape.Shape', 'Shape', (['T'], {}), '(T)\n', (6727, 6730), False, 'from shape import S, Z, I, O, J, T, L, Shape\n'), ((6862, 6870), 'shape.Shape', 'Shape', (['L'], {}), '(L)\n', (6867, 6870), False, 'from shape import S, Z, I, O, J, T, L, Shape\n')] |
import os.path
from blaze.test_utils import temp_dir
import blaze.toplevel as toplevel
from blaze.params import params
from blaze import dshape
from blaze.sources.chunked import CArraySource, CTableSource
from blaze.eclass import eclass
def test_open_carray():
with temp_dir() as temp:
# Create an array on disk
array_filename = os.path.join(temp, 'carray')
p = params(storage=array_filename)
ds = dshape('1,int32')
a = CArraySource([2], dshape=ds, params=p)
del a
# Open array with open function
uri = 'carray://' + array_filename
c = toplevel.open(uri)
assert c.datashape == ds
# Test delayed mode
c = toplevel.open(uri, eclass=eclass.delayed)
assert c.datashape == ds
def test_open_ctable():
with temp_dir() as temp:
# Create an table on disk
table_filename = os.path.join(temp, 'ctable')
p = params(storage=table_filename)
ds = dshape('1,{ x: int32; y: int32 }')
t = CTableSource(data=[(1, 1), (2, 2)], dshape=ds, params=p)
del t
# Open table with open function
uri = 'ctable://' + table_filename
c = toplevel.open(uri)
assert c.datashape == ds
# Test delayed mode
c = toplevel.open(uri, eclass=eclass.delayed)
assert c.datashape == ds
| [
"blaze.test_utils.temp_dir",
"blaze.sources.chunked.CTableSource",
"blaze.sources.chunked.CArraySource",
"blaze.dshape",
"blaze.params.params",
"blaze.toplevel.open"
] | [((273, 283), 'blaze.test_utils.temp_dir', 'temp_dir', ([], {}), '()\n', (281, 283), False, 'from blaze.test_utils import temp_dir\n'), ((393, 423), 'blaze.params.params', 'params', ([], {'storage': 'array_filename'}), '(storage=array_filename)\n', (399, 423), False, 'from blaze.params import params\n'), ((437, 454), 'blaze.dshape', 'dshape', (['"""1,int32"""'], {}), "('1,int32')\n", (443, 454), False, 'from blaze import dshape\n'), ((467, 505), 'blaze.sources.chunked.CArraySource', 'CArraySource', (['[2]'], {'dshape': 'ds', 'params': 'p'}), '([2], dshape=ds, params=p)\n', (479, 505), False, 'from blaze.sources.chunked import CArraySource, CTableSource\n'), ((616, 634), 'blaze.toplevel.open', 'toplevel.open', (['uri'], {}), '(uri)\n', (629, 634), True, 'import blaze.toplevel as toplevel\n'), ((709, 750), 'blaze.toplevel.open', 'toplevel.open', (['uri'], {'eclass': 'eclass.delayed'}), '(uri, eclass=eclass.delayed)\n', (722, 750), True, 'import blaze.toplevel as toplevel\n'), ((818, 828), 'blaze.test_utils.temp_dir', 'temp_dir', ([], {}), '()\n', (826, 828), False, 'from blaze.test_utils import temp_dir\n'), ((938, 968), 'blaze.params.params', 'params', ([], {'storage': 'table_filename'}), '(storage=table_filename)\n', (944, 968), False, 'from blaze.params import params\n'), ((982, 1016), 'blaze.dshape', 'dshape', (['"""1,{ x: int32; y: int32 }"""'], {}), "('1,{ x: int32; y: int32 }')\n", (988, 1016), False, 'from blaze import dshape\n'), ((1029, 1085), 'blaze.sources.chunked.CTableSource', 'CTableSource', ([], {'data': '[(1, 1), (2, 2)]', 'dshape': 'ds', 'params': 'p'}), '(data=[(1, 1), (2, 2)], dshape=ds, params=p)\n', (1041, 1085), False, 'from blaze.sources.chunked import CArraySource, CTableSource\n'), ((1196, 1214), 'blaze.toplevel.open', 'toplevel.open', (['uri'], {}), '(uri)\n', (1209, 1214), True, 'import blaze.toplevel as toplevel\n'), ((1289, 1330), 'blaze.toplevel.open', 'toplevel.open', (['uri'], {'eclass': 'eclass.delayed'}), '(uri, eclass=eclass.delayed)\n', (1302, 1330), True, 'import blaze.toplevel as toplevel\n')] |
"""Module providing miscellaneous utilities used by cmkinitramfs"""
from __future__ import annotations
import functools
import hashlib
import os.path
# Function needed for python < 3.9
def removeprefix(string: str, prefix: str) -> str:
"""Remove a prefix from a string
Add support for :meth:`str.removeprefix` for Python < 3.9.
:param string: String to remove prefix from
:param prefix: Prefix to remove
"""
# return string.removeprefix(prefix)
if string.startswith(prefix):
return string[len(prefix):]
return string
def normpath(path: str) -> str:
"""Normalize path (actually eliminates double slashes)
:param path: Path to normalize
"""
return os.path.normpath(path).replace('//', '/')
@functools.lru_cache()
def hash_file(filepath: str, chunk_size: int = 65536) -> bytes:
"""Calculate the SHA512 of a file
:param filepath: Path of the file to hash
:param chunk_size: Number of bytes per chunk of file to hash
:return: File hash in a :class:`bytes` object
"""
sha512 = hashlib.sha512()
with open(filepath, 'rb') as src:
for chunk in iter(lambda: src.read(chunk_size), b''):
sha512.update(chunk)
return sha512.digest()
| [
"hashlib.sha512",
"functools.lru_cache"
] | [((756, 777), 'functools.lru_cache', 'functools.lru_cache', ([], {}), '()\n', (775, 777), False, 'import functools\n'), ((1063, 1079), 'hashlib.sha512', 'hashlib.sha512', ([], {}), '()\n', (1077, 1079), False, 'import hashlib\n')] |
# Generated by Django 3.2 on 2022-04-08 15:01
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('travellifestyleblog22', '0008_alter_category_image'),
]
operations = [
migrations.RemoveField(
model_name='category',
name='image',
),
]
| [
"django.db.migrations.RemoveField"
] | [((241, 300), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""category"""', 'name': '"""image"""'}), "(model_name='category', name='image')\n", (263, 300), False, 'from django.db import migrations\n')] |
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']= '2'
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.datasets import mnist
physical_devices = tf.config.list_physical_devices('GPU')
print(physical_devices)
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(-1, 28*28).astype("float32") / 255.0
x_test = x_test.reshape(-1, 28*28).astype("float32") / 255.0
#Sequential API
model = keras.Sequential(
[
layers.Dense(512,activation='relu'),
layers.Dense(216, activation = 'relu'),
layers.Dense(10)
]
)
model.compile(
loss= keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=keras.optimizers.Adam(lr=0.001),
metrics=["accuracy"],
)
model.fit(x_train,y_train,batch_size=32,epochs=5,verbose=2)
model.evaluate(x_test, y_test, batch_size= 32, verbose=2)
| [
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.keras.layers.Dense",
"tensorflow.config.list_physical_devices",
"tensorflow.keras.datasets.mnist.load_data",
"tensorflow.keras.optimizers.Adam"
] | [((204, 242), 'tensorflow.config.list_physical_devices', 'tf.config.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (235, 242), True, 'import tensorflow as tf\n'), ((308, 325), 'tensorflow.keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (323, 325), False, 'from tensorflow.keras.datasets import mnist\n'), ((501, 537), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(512)'], {'activation': '"""relu"""'}), "(512, activation='relu')\n", (513, 537), False, 'from tensorflow.keras import layers\n'), ((540, 576), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(216)'], {'activation': '"""relu"""'}), "(216, activation='relu')\n", (552, 576), False, 'from tensorflow.keras import layers\n'), ((582, 598), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(10)'], {}), '(10)\n', (594, 598), False, 'from tensorflow.keras import layers\n'), ((627, 687), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (669, 687), False, 'from tensorflow import keras\n'), ((700, 731), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (721, 731), False, 'from tensorflow import keras\n')] |
from django.conf.urls import url
import payments.views
urlpatterns = [
url(r"^webhook/$", payments.views.webhook,
name="webhook"),
]
| [
"django.conf.urls.url"
] | [((77, 134), 'django.conf.urls.url', 'url', (['"""^webhook/$"""', 'payments.views.webhook'], {'name': '"""webhook"""'}), "('^webhook/$', payments.views.webhook, name='webhook')\n", (80, 134), False, 'from django.conf.urls import url\n')] |
###############################################################
# SPDX-License-Identifier: BSD-2-Clause-Patent
# SPDX-FileCopyrightText: 2020 the prplMesh contributors (see AUTHORS.md)
# This code is subject to the terms of the BSD+Patent license.
# See LICENSE file for more details.
###############################################################
from .prplmesh_base_test import PrplMeshBaseTest
from boardfarm.exceptions import SkipTest
from capi import tlv
from opts import debug
import time
class BeaconReportQueryAndResponse(PrplMeshBaseTest):
''' This test verifies that a MAUT with an associated STA responds
to a Beacon Metrics Query by sending a Beacon Report request to its associated STA,
receiving a response from the STA, and sending the contents of that response
in a Beacon Metrics Response message to the Controller '''
def runTest(self):
# Locate test participants
try:
sta = self.dev.wifi
agent = self.dev.DUT.agent_entity
controller = self.dev.lan.controller_entity
except AttributeError as ae:
raise SkipTest(ae)
sniffer = self.dev.DUT.wired_sniffer
sniffer.start(self.__class__.__name__ + "-" + self.dev.DUT.name)
# Step 3. MAUT sends Association Response frame to STA
sta.wifi_connect_check(agent.radios[0].vaps[0])
time.sleep(1)
debug("Send Associated STA Link Metrics Query message")
mid = controller.ucc_socket.dev_send_1905(
agent.mac, self.ieee1905['eMessageType']['ASSOCIATED_STA_LINK_METRICS_QUERY_MESSAGE'],
tlv(self.ieee1905['eTlvTypeMap']['TLV_STAMAC_ADDRESS_TYPE'], sta.mac))
time.sleep(5)
debug("STA sends a valid Association Request frame to MAUT")
self.check_log(agent,
"Send AssociatedStaLinkMetrics to controller, mid = {}".format(mid),
timeout=20)
self.check_cmdu_type_single("Associated STA Link Metrics Response", 0x800E,
agent.mac, controller.mac, mid)
# Step 4. Send Beacon Metrics Query to agent.
agent.radios[0].send_bwl_event(
"DATA RRM-BEACON-REP-RECEIVED {} channel=1 dialog_token=0 measurement_rep_mode=0 \
op_class=0 duration=50 rcpi=-80 rsni=10 bssid=aa: bb:cc:11:00:10".format(sta.mac))
'''- Operating Class field equal to 115
- Channel Number field equal to 255
- BSSID field equal to wildcard (0xFFFFFFFFFFFF)
- Reporting Detail equal to 2
- SSID length field equal to 0 (SSID field missing)
- Number of AP Channel Reports equal to 1
- Length of AP Channel Report equal to 0x03
- Operating Class in AP Channel Report equal to 115
- Channel List in AP Channel Report equal to 36 and 48 '''
beacon_query_tlv_val = "{sta_mac} ".format(sta_mac=sta.mac)
beacon_query_tlv_val += "{0x73 0xFF 0xFFFFFFFFFFFF 0x02 0x00 0x01 0x03 0x73 0x24 0x30}"
debug("Send Beacon Metrics Query from controller to agent.")
mid = controller.ucc_socket.dev_send_1905(
agent.mac, self.ieee1905['eMessageType']['BEACON_METRICS_QUERY_MESSAGE'],
tlv(self.ieee1905['eTlvTypeMap']['TLV_BEACON_METRICS_QUERY'],
beacon_query_tlv_val))
# Step 5. Verify that MAUT sends a 1905 ACK to Controller.
time.sleep(1)
self.check_cmdu_type_single(
"ACK", self.ieee1905['eMessageType']['ACK_MESSAGE'], agent.mac, controller.mac, mid)
debug("Confirming ACK message was received.")
# Step 6. Verify that MAUT sends a correct Beacon request to STA.
time.sleep(1)
self.check_log(agent.radios[0], r"BEACON_METRICS_QUERY")
debug("Confirming that MAUT sends a Beacon request to STA.")
# Step 7. STA responds with Beacon report
time.sleep(1)
self.check_log(controller, r"got beacon response from STA. mid:", timeout=10)
# Step 8. MAUT sends Beacon Metrics Response to Controller
beacon_resp = self.check_cmdu_type_single(
"Agent send Beacon Response to controller.",
self.ieee1905['eMessageType']['BEACON_METRICS_RESPONSE_MESSAGE'],
agent.mac, controller.mac)
debug("Confirming MAUT sends Beacon Metrics Response to Controller.")
beacon_resp_tlv = self.check_cmdu_has_tlv_single(beacon_resp, 154)
''' Don't check Beacon Metrics measurement report, as it's always empty
https://jira.prplfoundation.org/browse/PPM-52 '''
assert beacon_resp_tlv.beacon_metrics_mac_addr == sta.mac, \
"Wrong beacon metrics mac addr in Beacon Respond"
sta.wifi_disconnect(agent.radios[0].vaps[0])
| [
"boardfarm.exceptions.SkipTest",
"opts.debug",
"capi.tlv",
"time.sleep"
] | [((1378, 1391), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1388, 1391), False, 'import time\n'), ((1400, 1455), 'opts.debug', 'debug', (['"""Send Associated STA Link Metrics Query message"""'], {}), "('Send Associated STA Link Metrics Query message')\n", (1405, 1455), False, 'from opts import debug\n'), ((1697, 1710), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1707, 1710), False, 'import time\n'), ((1719, 1779), 'opts.debug', 'debug', (['"""STA sends a valid Association Request frame to MAUT"""'], {}), "('STA sends a valid Association Request frame to MAUT')\n", (1724, 1779), False, 'from opts import debug\n'), ((3024, 3084), 'opts.debug', 'debug', (['"""Send Beacon Metrics Query from controller to agent."""'], {}), "('Send Beacon Metrics Query from controller to agent.')\n", (3029, 3084), False, 'from opts import debug\n'), ((3411, 3424), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3421, 3424), False, 'import time\n'), ((3567, 3612), 'opts.debug', 'debug', (['"""Confirming ACK message was received."""'], {}), "('Confirming ACK message was received.')\n", (3572, 3612), False, 'from opts import debug\n'), ((3696, 3709), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3706, 3709), False, 'import time\n'), ((3783, 3843), 'opts.debug', 'debug', (['"""Confirming that MAUT sends a Beacon request to STA."""'], {}), "('Confirming that MAUT sends a Beacon request to STA.')\n", (3788, 3843), False, 'from opts import debug\n'), ((3903, 3916), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3913, 3916), False, 'import time\n'), ((4304, 4373), 'opts.debug', 'debug', (['"""Confirming MAUT sends Beacon Metrics Response to Controller."""'], {}), "('Confirming MAUT sends Beacon Metrics Response to Controller.')\n", (4309, 4373), False, 'from opts import debug\n'), ((1618, 1687), 'capi.tlv', 'tlv', (["self.ieee1905['eTlvTypeMap']['TLV_STAMAC_ADDRESS_TYPE']", 'sta.mac'], {}), "(self.ieee1905['eTlvTypeMap']['TLV_STAMAC_ADDRESS_TYPE'], sta.mac)\n", (1621, 1687), False, 'from capi import tlv\n'), ((3234, 3321), 'capi.tlv', 'tlv', (["self.ieee1905['eTlvTypeMap']['TLV_BEACON_METRICS_QUERY']", 'beacon_query_tlv_val'], {}), "(self.ieee1905['eTlvTypeMap']['TLV_BEACON_METRICS_QUERY'],\n beacon_query_tlv_val)\n", (3237, 3321), False, 'from capi import tlv\n'), ((1118, 1130), 'boardfarm.exceptions.SkipTest', 'SkipTest', (['ae'], {}), '(ae)\n', (1126, 1130), False, 'from boardfarm.exceptions import SkipTest\n')] |
import unittest
import requests
from assertpy import assert_that
from requests.exceptions import Timeout
from unittest.mock import Mock, patch
from src.Api import Api
from src.todos import todos
class TestApiMonkeyPatch(unittest.TestCase):
@patch('src.Api.Api', autospec=True)
def test_method_api_delete_raises_timeout(self, mock_class):
mock_id = Mock()
mock_id.return_value = 1
mock_class.api_delete.side_effect = Timeout
with self.assertRaises(Timeout):
mock_class.api_delete(mock_id)
def test_method_api_delete_assert_that_called_once(self):
with patch('src.Api.Api', autospec=True) as mock_api:
mock_id = Mock()
mock_id.return_value = 1
mock_api.api_delete(mock_id)
mock_api.api_delete.assert_called_once()
def test_method_api_delete_assert_that_called(self):
with patch('src.Api.Api', autospec=True) as mock_api:
mock_id = Mock()
mock_id.return_value = 1
mock_id2 = Mock()
mock_id2.return_value = 2
mock_api.api_delete(mock_id)
mock_api.api_delete(mock_id2)
mock_api.api_delete.assert_called()
def test_method_api_delete_assert_that_not_called(self):
with patch('src.Api.Api', autospec=True) as mock_api:
mock_id = Mock()
mock_id.return_value = 1
mock_api.api_delete.assert_not_called()
def test_method_api_delete_assert_that_called_with_id_1(self):
with patch('src.Api.Api', autospec=True) as mock_api:
mock_id = Mock()
mock_id.return_value = 1
mock_api.api_delete(mock_id)
mock_api.api_delete.assert_called_with(mock_id)
def test_method_api_delete_assert_that_called_once_with_id_1(self):
with patch('src.Api.Api', autospec=True) as mock_api:
mock_id = Mock()
mock_id.return_value = 1
mock_api.api_delete(mock_id)
mock_api.api_delete.assert_called_once_with(mock_id)
def test_method_api_delete_assert_that_response_has_status_code_200(self):
with patch('src.Api.Api', autospec=True) as mock_api:
todo_id = 1
mock_api.api_delete.return_value = {"delete_id": todo_id,
"deleted_data": todos[todo_id - 1],
"status_code": 200}
response = mock_api.api_delete(todo_id)
assert_that(response).has_status_code(200)
def test_method_api_delete_assert_that_response_status_code_is_not_200(self):
with patch('src.Api.Api', autospec=True) as mock_api:
todo_id = 1
mock_api.api_delete.return_value = {"status_code": 408}
response = mock_api.api_delete(todo_id)
assert_that(response["status_code"]).is_not_equal_to(200)
def test_method_api_delete_assert_that_response_is_instance_of_dict(self):
with patch('src.Api.Api', autospec=True) as mock_api:
todo_id = 1
mock_api.api_delete.return_value = {"delete_id": todo_id,
"deleted_data": todos[todo_id - 1],
"status_code": 200}
response = mock_api.api_delete(todo_id)
assert_that(response).is_instance_of(dict)
def test_method_api_delete_assert_that_response_has_key_delete_id_1(self):
with patch('src.Api.Api', autospec=True) as mock_api:
todo_id = 1
mock_api.api_delete.return_value = {"delete_id": todo_id,
"deleted_data": todos[todo_id - 1],
"status_code": 200}
response = mock_api.api_delete(todo_id)
assert_that(response).has_delete_id(1)
def test_method_api_delete_assert_that_response_returns_deleted_data(self):
with patch('src.Api.Api', autospec=True) as mock_api:
todo_id = 1
mock_api.api_delete.return_value = {"delete_id": todo_id,
"deleted_data": todos[todo_id - 1],
"status_code": 200}
response = mock_api.api_delete(todo_id)
assert_that(response["deleted_data"]).is_equal_to(todos[0])
def test_method_api_delete_assert_that_response_deleted_data_contain_all_keys_userId_id_title_completed(self):
with patch('src.Api.Api', autospec=True) as mock_api:
todo_id = 1
mock_api.api_delete.return_value = {"delete_id": todo_id,
"deleted_data": todos[todo_id - 1],
"status_code": 200}
response = mock_api.api_delete(todo_id)
assert_that(response["deleted_data"]).contains_key("userId", "id", "title", "completed")
def test_method_api_delete_assert_that_not_called_exception(self):
with patch('src.Api.Api', autospec=True) as mock_api:
mock_id = Mock()
mock_id.return_value = 1
mock_api.api_delete(mock_id)
with self.assertRaises(AssertionError):
mock_api.api_delete.assert_not_called()
def test_method_api_delete_assert_that_called_once_exception(self):
with patch('src.Api.Api', autospec=True) as mock_api:
mock_id = Mock()
mock_id.return_value = 1
mock_id2 = Mock()
mock_id2.return_value = 2
mock_api.api_delete(mock_id)
mock_api.api_delete(mock_id2)
with self.assertRaises(AssertionError):
mock_api.api_delete.assert_called_once()
def test_method_api_delete_assert_that_called_with_id_1_exception(self):
with patch('src.Api.Api', autospec=True) as mock_api:
mock_id = Mock()
mock_id.return_value = 1
mock_id2 = Mock()
mock_id2.return_value = 2
mock_api.api_delete(mock_id2)
with self.assertRaises(AssertionError):
mock_api.api_delete.assert_called_with(mock_id)
def test_method_api_delete_assert_that_called_once_with_id_1_exception(self):
with patch('src.Api.Api', autospec=True) as mock_api:
mock_id = Mock()
mock_id.return_value = 1
mock_id2 = Mock()
mock_id2.return_value = 2
mock_api.api_delete(mock_id)
mock_api.api_delete(mock_id2)
with self.assertRaises(AssertionError):
mock_api.api_delete.assert_called_once_with(mock_id)
def test_method_api_delete_no_parameter_exception(self):
with patch('src.Api.Api', autospec=True) as mock_api:
with self.assertRaises(TypeError):
mock_api.api_delete()
def test_method_api_delete_assert_that_response_returns_ValueError_when_called_with_id_0_exception(self):
with patch('src.Api.Api', autospec=True) as mock_api:
todo_id = 0
mock_api.api_delete.return_value = {"status_code": 408}
mock_api.api_delete.side_effect = ValueError
assert_that(mock_api.api_delete).raises(ValueError).when_called_with(todo_id)
def test_method_api_delete_assert_that_response_returns_ValueError_when_called_with_id_300_exception(self):
with patch('src.Api.Api', autospec=True) as mock_api:
todo_id = 300
mock_api.api_delete.return_value = {"status_code": 408}
mock_api.api_delete.side_effect = ValueError
assert_that(mock_api.api_delete).raises(ValueError).when_called_with(todo_id)
def test_method_api_delete_assert_that_response_returns_TypeError_when_called_with_id_not_int_exception(self):
with patch('src.Api.Api', autospec=True) as mock_api:
todo_id = "1"
mock_api.api_delete.return_value = {"status_code": 408}
mock_api.api_delete.side_effect = TypeError
assert_that(mock_api.api_delete).raises(TypeError).when_called_with(todo_id)
def test_method_api_delete_assert_that_response_returns_AttributeError_when_called_with_None_exception(self):
with patch('src.Api.Api', autospec=True) as mock_api:
todo_id = None
mock_api.api_delete.return_value = {"status_code": 408}
mock_api.api_delete.side_effect = AttributeError
assert_that(mock_api.api_delete).raises(AttributeError).when_called_with(todo_id)
if __name__ == '__main__':
unittest.main() | [
"unittest.mock.patch",
"unittest.main",
"unittest.mock.Mock",
"assertpy.assert_that"
] | [((250, 285), 'unittest.mock.patch', 'patch', (['"""src.Api.Api"""'], {'autospec': '(True)'}), "('src.Api.Api', autospec=True)\n", (255, 285), False, 'from unittest.mock import Mock, patch\n'), ((8712, 8727), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8725, 8727), False, 'import unittest\n'), ((369, 375), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (373, 375), False, 'from unittest.mock import Mock, patch\n'), ((621, 656), 'unittest.mock.patch', 'patch', (['"""src.Api.Api"""'], {'autospec': '(True)'}), "('src.Api.Api', autospec=True)\n", (626, 656), False, 'from unittest.mock import Mock, patch\n'), ((692, 698), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (696, 698), False, 'from unittest.mock import Mock, patch\n'), ((901, 936), 'unittest.mock.patch', 'patch', (['"""src.Api.Api"""'], {'autospec': '(True)'}), "('src.Api.Api', autospec=True)\n", (906, 936), False, 'from unittest.mock import Mock, patch\n'), ((972, 978), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (976, 978), False, 'from unittest.mock import Mock, patch\n'), ((1039, 1045), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1043, 1045), False, 'from unittest.mock import Mock, patch\n'), ((1290, 1325), 'unittest.mock.patch', 'patch', (['"""src.Api.Api"""'], {'autospec': '(True)'}), "('src.Api.Api', autospec=True)\n", (1295, 1325), False, 'from unittest.mock import Mock, patch\n'), ((1361, 1367), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1365, 1367), False, 'from unittest.mock import Mock, patch\n'), ((1538, 1573), 'unittest.mock.patch', 'patch', (['"""src.Api.Api"""'], {'autospec': '(True)'}), "('src.Api.Api', autospec=True)\n", (1543, 1573), False, 'from unittest.mock import Mock, patch\n'), ((1609, 1615), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1613, 1615), False, 'from unittest.mock import Mock, patch\n'), ((1840, 1875), 'unittest.mock.patch', 'patch', (['"""src.Api.Api"""'], {'autospec': '(True)'}), "('src.Api.Api', autospec=True)\n", (1845, 1875), False, 'from unittest.mock import Mock, patch\n'), ((1911, 1917), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1915, 1917), False, 'from unittest.mock import Mock, patch\n'), ((2158, 2193), 'unittest.mock.patch', 'patch', (['"""src.Api.Api"""'], {'autospec': '(True)'}), "('src.Api.Api', autospec=True)\n", (2163, 2193), False, 'from unittest.mock import Mock, patch\n'), ((2665, 2700), 'unittest.mock.patch', 'patch', (['"""src.Api.Api"""'], {'autospec': '(True)'}), "('src.Api.Api', autospec=True)\n", (2670, 2700), False, 'from unittest.mock import Mock, patch\n'), ((3030, 3065), 'unittest.mock.patch', 'patch', (['"""src.Api.Api"""'], {'autospec': '(True)'}), "('src.Api.Api', autospec=True)\n", (3035, 3065), False, 'from unittest.mock import Mock, patch\n'), ((3534, 3569), 'unittest.mock.patch', 'patch', (['"""src.Api.Api"""'], {'autospec': '(True)'}), "('src.Api.Api', autospec=True)\n", (3539, 3569), False, 'from unittest.mock import Mock, patch\n'), ((4031, 4066), 'unittest.mock.patch', 'patch', (['"""src.Api.Api"""'], {'autospec': '(True)'}), "('src.Api.Api', autospec=True)\n", (4036, 4066), False, 'from unittest.mock import Mock, patch\n'), ((4588, 4623), 'unittest.mock.patch', 'patch', (['"""src.Api.Api"""'], {'autospec': '(True)'}), "('src.Api.Api', autospec=True)\n", (4593, 4623), False, 'from unittest.mock import Mock, patch\n'), ((5126, 5161), 'unittest.mock.patch', 'patch', (['"""src.Api.Api"""'], {'autospec': '(True)'}), "('src.Api.Api', autospec=True)\n", (5131, 5161), False, 'from unittest.mock import Mock, patch\n'), ((5197, 5203), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (5201, 5203), False, 'from unittest.mock import Mock, patch\n'), ((5476, 5511), 'unittest.mock.patch', 'patch', (['"""src.Api.Api"""'], {'autospec': '(True)'}), "('src.Api.Api', autospec=True)\n", (5481, 5511), False, 'from unittest.mock import Mock, patch\n'), ((5547, 5553), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (5551, 5553), False, 'from unittest.mock import Mock, patch\n'), ((5614, 5620), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (5618, 5620), False, 'from unittest.mock import Mock, patch\n'), ((5942, 5977), 'unittest.mock.patch', 'patch', (['"""src.Api.Api"""'], {'autospec': '(True)'}), "('src.Api.Api', autospec=True)\n", (5947, 5977), False, 'from unittest.mock import Mock, patch\n'), ((6013, 6019), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (6017, 6019), False, 'from unittest.mock import Mock, patch\n'), ((6080, 6086), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (6084, 6086), False, 'from unittest.mock import Mock, patch\n'), ((6379, 6414), 'unittest.mock.patch', 'patch', (['"""src.Api.Api"""'], {'autospec': '(True)'}), "('src.Api.Api', autospec=True)\n", (6384, 6414), False, 'from unittest.mock import Mock, patch\n'), ((6450, 6456), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (6454, 6456), False, 'from unittest.mock import Mock, patch\n'), ((6517, 6523), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (6521, 6523), False, 'from unittest.mock import Mock, patch\n'), ((6841, 6876), 'unittest.mock.patch', 'patch', (['"""src.Api.Api"""'], {'autospec': '(True)'}), "('src.Api.Api', autospec=True)\n", (6846, 6876), False, 'from unittest.mock import Mock, patch\n'), ((7103, 7138), 'unittest.mock.patch', 'patch', (['"""src.Api.Api"""'], {'autospec': '(True)'}), "('src.Api.Api', autospec=True)\n", (7108, 7138), False, 'from unittest.mock import Mock, patch\n'), ((7522, 7557), 'unittest.mock.patch', 'patch', (['"""src.Api.Api"""'], {'autospec': '(True)'}), "('src.Api.Api', autospec=True)\n", (7527, 7557), False, 'from unittest.mock import Mock, patch\n'), ((7950, 7985), 'unittest.mock.patch', 'patch', (['"""src.Api.Api"""'], {'autospec': '(True)'}), "('src.Api.Api', autospec=True)\n", (7955, 7985), False, 'from unittest.mock import Mock, patch\n'), ((8375, 8410), 'unittest.mock.patch', 'patch', (['"""src.Api.Api"""'], {'autospec': '(True)'}), "('src.Api.Api', autospec=True)\n", (8380, 8410), False, 'from unittest.mock import Mock, patch\n'), ((2522, 2543), 'assertpy.assert_that', 'assert_that', (['response'], {}), '(response)\n', (2533, 2543), False, 'from assertpy import assert_that\n'), ((2875, 2911), 'assertpy.assert_that', 'assert_that', (["response['status_code']"], {}), "(response['status_code'])\n", (2886, 2911), False, 'from assertpy import assert_that\n'), ((3394, 3415), 'assertpy.assert_that', 'assert_that', (['response'], {}), '(response)\n', (3405, 3415), False, 'from assertpy import assert_that\n'), ((3894, 3915), 'assertpy.assert_that', 'assert_that', (['response'], {}), '(response)\n', (3905, 3915), False, 'from assertpy import assert_that\n'), ((4395, 4432), 'assertpy.assert_that', 'assert_that', (["response['deleted_data']"], {}), "(response['deleted_data'])\n", (4406, 4432), False, 'from assertpy import assert_that\n'), ((4952, 4989), 'assertpy.assert_that', 'assert_that', (["response['deleted_data']"], {}), "(response['deleted_data'])\n", (4963, 4989), False, 'from assertpy import assert_that\n'), ((7318, 7350), 'assertpy.assert_that', 'assert_that', (['mock_api.api_delete'], {}), '(mock_api.api_delete)\n', (7329, 7350), False, 'from assertpy import assert_that\n'), ((7739, 7771), 'assertpy.assert_that', 'assert_that', (['mock_api.api_delete'], {}), '(mock_api.api_delete)\n', (7750, 7771), False, 'from assertpy import assert_that\n'), ((8166, 8198), 'assertpy.assert_that', 'assert_that', (['mock_api.api_delete'], {}), '(mock_api.api_delete)\n', (8177, 8198), False, 'from assertpy import assert_that\n'), ((8597, 8629), 'assertpy.assert_that', 'assert_that', (['mock_api.api_delete'], {}), '(mock_api.api_delete)\n', (8608, 8629), False, 'from assertpy import assert_that\n')] |
from functools import cmp_to_key
from Transaction import Transaction
class pEFIM():
highUtilityItemsets = []
candidateCount = 0
utilityBinArrayLU = {}
utilityBinArraySU = {}
# a temporary buffer
temp = []
for i in range(5000):
temp.append(0)
def __init__(self, mapItemsToneighbors, minUtility, itemsToExplore, itemsToKeep, transactions, newNamesToOldNames, oldNamesToNewNames):
self.minUtil = minUtility
self.Neighbours = mapItemsToneighbors
self.itemsToExplore = itemsToExplore
self.itemsToKeep = itemsToKeep
self.transactions = transactions
self.newNamesToOldNames = newNamesToOldNames
self.oldNamesToNewNames = oldNamesToNewNames
def runAlgo(self):
# now we will sort the transactions according to proposed total order on transaction
self.sortDatabase(self.transactions)
self.backtrackingEFIM(self.transactions, self.itemsToKeep, self.itemsToExplore, 0)
return (1, self.highUtilityItemsets)
def backtrackingEFIM(self, transactionsOfP, itemsToKeep, itemsToExplore, prefixLength):
self.candidateCount += len(itemsToExplore)
for idx, e in enumerate(itemsToExplore):
# caluclate the transactions containing p U {e}
# at the same time project transactions to keep what appears after e
transactionsPe = []
# variable to caluclate the utility of Pe
utilityPe = 0
# merging transactions
previousTransaction = transactionsOfP[0]
consecutiveMergeCount = 0
for transaction in transactionsOfP:
items = transaction.getItems()
if e in items:
# if e was found in the transaction
positionE = items.index(e)
if transaction.getLastPosition() == positionE:
utilityPe += transaction.getUtilities()[positionE] + transaction.prefixUtility
else:
projectedTransaction = transaction.projectTransaction(positionE)
utilityPe += projectedTransaction.prefixUtility
if previousTransaction == transactionsOfP[0]:
# if it is the first transactoin
previousTransaction = projectedTransaction
elif self.is_equal(projectedTransaction, previousTransaction):
if consecutiveMergeCount == 0:
# if the first consecutive merge
items = previousTransaction.items[previousTransaction.offset:]
utilities = previousTransaction.utilities[previousTransaction.offset:]
itemsCount = len(items)
positionPrevious = 0
positionProjection = projectedTransaction.offset
while positionPrevious < itemsCount:
utilities[positionPrevious] += projectedTransaction.utilities[positionProjection]
positionPrevious += 1
positionProjection += 1
previousTransaction.prefixUtility += projectedTransaction.prefixUtility
sumUtilities = previousTransaction.prefixUtility
previousTransaction = Transaction(items, utilities, previousTransaction.transactionUtility + projectedTransaction.transactionUtility)
previousTransaction.prefixUtility = sumUtilities
else:
positionPrevious = 0
positionProjected = projectedTransaction.offset
itemsCount = len(previousTransaction.items)
while positionPrevious < itemsCount:
previousTransaction.utilities[positionPrevious] += projectedTransaction.utilities[
positionProjected]
positionPrevious += 1
positionProjected += 1
previousTransaction.transactionUtility += projectedTransaction.transactionUtility
previousTransaction.prefixUtility += projectedTransaction.prefixUtility
consecutiveMergeCount += 1
else:
transactionsPe.append(previousTransaction)
previousTransaction = projectedTransaction
consecutiveMergeCount = 0
transaction.offset = positionE
if previousTransaction != transactionsOfP[0]:
transactionsPe.append(previousTransaction)
self.temp[prefixLength] = self.newNamesToOldNames[e]
if utilityPe >= self.minUtil:
self.highUtilityItemsets.append((utilityPe , self.temp[:prefixLength + 1]))
# caluclate the set which is intersection of all the neighbours of items present in P U {e}
neighbourhoodList = self.caluclateNeighbourIntersection(prefixLength)
# caluclate the local utility and subtree utility
self.useUtilityBinArraysToCalculateUpperBounds(transactionsPe, idx, itemsToKeep, neighbourhoodList)
newItemsToKeep = []
newItemsToExplore = []
for l in range(idx + 1, len(itemsToKeep)):
itemk = itemsToKeep[l]
if self.utilityBinArraySU[itemk] >= self.minUtil:
if itemk in neighbourhoodList:
newItemsToExplore.append(itemk)
newItemsToKeep.append(itemk)
elif self.utilityBinArrayLU[itemk] >= self.minUtil:
if itemk in neighbourhoodList:
newItemsToKeep.append(itemk)
self.backtrackingEFIM(transactionsPe, newItemsToKeep, newItemsToExplore, prefixLength + 1)
def intersection(self, lst1, lst2):
# Use of hybrid method
temp = set(lst2)
lst3 = [value for value in lst1 if value in temp]
return lst3
def caluclateNeighbourIntersection(self, prefixLength):
intersectionList = []
if self.temp[0] in self.Neighbours:
intersectionList = self.Neighbours[self.temp[0]]
else:
return intersectionList
for i in range(1, prefixLength+1):
if self.temp[i] in self.Neighbours:
intersectionList = self.intersection(self.Neighbours[self.temp[i]], intersectionList)
finalIntersectionList = []
for item in intersectionList:
if item in self.oldNamesToNewNames:
finalIntersectionList.append(self.oldNamesToNewNames[item])
return finalIntersectionList
def useUtilityBinArraysToCalculateUpperBounds(self, transactionsPe, j, itemsToKeep, neighbourhoodList):
for i in range(j + 1, len(itemsToKeep)):
item = itemsToKeep[i]
self.utilityBinArrayLU[item] = 0
self.utilityBinArraySU[item] = 0
for transaction in transactionsPe:
length = len(transaction.getItems())
i = length - 1
while i >= transaction.offset:
item = transaction.getItems()[i]
if item in itemsToKeep:
remainingUtility = 0
if self.newNamesToOldNames[item] in self.Neighbours:
item_neighbours = self.Neighbours[self.newNamesToOldNames[item]]
for k in range(i, length):
transaction_item = transaction.getItems()[k]
if self.newNamesToOldNames[transaction_item] in item_neighbours and transaction_item in neighbourhoodList:
remainingUtility += transaction.getUtilities()[k]
remainingUtility += transaction.getUtilities()[i]
self.utilityBinArraySU[item] += remainingUtility + transaction.prefixUtility
self.utilityBinArrayLU[item] += transaction.transactionUtility + transaction.prefixUtility
i -= 1
def is_equal(self, transaction1, transaction2):
length1 = len(transaction1.items) - transaction1.offset
length2 = len(transaction2.items) - transaction2.offset
if length1 != length2:
return False
position1 = transaction1.offset
position2 = transaction2.offset
while position1 < len(transaction1.items):
if transaction1.items[position1] != transaction2.items[position2]:
return False
position1 += 1
position2 += 1
return True
def sortDatabase(self, transactions):
cmp_items = cmp_to_key(self.sort_transaction)
transactions.sort(key=cmp_items)
def sort_transaction(self, trans1, trans2):
trans1_items = trans1.getItems()
trans2_items = trans2.getItems()
pos1 = len(trans1_items) - 1
pos2 = len(trans2_items) - 1
if len(trans1_items) < len(trans2_items):
while pos1 >= 0:
sub = trans2_items[pos2] - trans1_items[pos1]
if sub != 0:
return sub
pos1 -= 1
pos2 -= 1
return -1
elif len(trans1_items) > len(trans2_items):
while pos2 >= 0:
sub = trans2_items[pos2] - trans1_items[pos1]
if sub != 0:
return sub
pos1 -= 1
pos2 -= 1
return 1
else:
while pos2 >= 0:
sub = trans2_items[pos2] - trans1_items[pos1]
if sub != 0:
return sub
pos1 -= 1
pos2 -= 1
return 0
| [
"Transaction.Transaction",
"functools.cmp_to_key"
] | [((9074, 9107), 'functools.cmp_to_key', 'cmp_to_key', (['self.sort_transaction'], {}), '(self.sort_transaction)\n', (9084, 9107), False, 'from functools import cmp_to_key\n'), ((3546, 3661), 'Transaction.Transaction', 'Transaction', (['items', 'utilities', '(previousTransaction.transactionUtility + projectedTransaction.\n transactionUtility)'], {}), '(items, utilities, previousTransaction.transactionUtility +\n projectedTransaction.transactionUtility)\n', (3557, 3661), False, 'from Transaction import Transaction\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Liblouis test harness
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
#
# Copyright (c) 2012, liblouis team, <NAME>.
"""Liblouis test harness:
Please see the liblouis documentation for information of how to add a new harness or more tests for your braille table.
@author: <NAME> <<EMAIL>>
@author: <NAME> <<EMAIL>>
@author: <NAME> <<EMAIL>>
"""
import json
import os
import sys
import traceback
from glob import iglob
from louis import translate, backTranslateString, hyphenate
from louis import noContractions, compbrlAtCursor, dotsIO, comp8Dots, pass1Only, compbrlLeftCursor, otherTrans, ucBrl
try:
from nose.plugins import Plugin
from nose import run
except ImportError:
sys.stderr.write("The harness tests require nose. Skipping...\n")
sys.exit(0)
### Nosetest plugin for controlling the output format. ###
class Reporter(Plugin):
name = 'reporter'
def __init__(self):
super(Reporter, self).__init__()
self.res = []
self.stream = None
def setOutputStream(self, stream):
# grab for own use
self.stream = stream
# return dummy stream
class dummy:
def write(self, *arg):
pass
def writeln(self, *arg):
pass
def flush(self):
pass
d = dummy()
return d
def addError(self, test, err):
exctype, value, tb = err
errMsg = ''.join(traceback.format_exception(exctype, value, tb))
self.res.append("--- Error: ---\n%s\n--- end ---\n" % errMsg)
def addFailure(self, test, err):
exctype, value, tb = err
#errMsg = ''.join(traceback.format_exception(exctype, value, None))
self.res.append("%s\n" % value)
def finalize(self, result):
failures=len(result.failures)
errors=len(result.errors)
total=result.testsRun
percent_string = " ({percent}% success)".format(percent=round((total-failures-errors+0.0)/total*100,2)) if total > 0 else ""
self.res.append("Ran {total} tests{percent_string}, with {failures} failures and {errors} errors.\n".format(total=total, percent_string=percent_string, failures=failures, errors=errors))
self.stream.write("\n".join(self.res))
### End of nosetest plugin for controlling the output format. ###
PY2 = sys.version_info[0] == 2
def u(a):
if PY2:
return a.encode("utf-8")
return a
modes = {
'noContractions': noContractions,
'compbrlAtCursor': compbrlAtCursor,
'dotsIO': dotsIO,
'comp8Dots': comp8Dots,
'pass1Only': pass1Only,
'compbrlLeftCursor': compbrlLeftCursor,
'otherTrans': otherTrans,
'ucBrl': ucBrl
}
def showCurPos(length, pos1, marker1="^", pos2=None, marker2="*"):
"""A helper function to make a string to show the position of the given cursor."""
display = [" "] *length
display[pos1] = marker1
if pos2:
display[pos2] = marker2
return "".join(display)
class BrailleTest():
def __init__(self, harnessName, tables, input, output, outputUniBrl=False, mode=0, cursorPos=None, brlCursorPos=None, testmode='translate', comment=[]):
self.harnessName = harnessName
self.tables = tables
if outputUniBrl:
self.tables.insert(0, 'unicode.dis')
self.input = input
self.expectedOutput = output
self.mode = mode if not mode else modes[mode]
self.cursorPos = cursorPos
self.expectedBrlCursorPos = brlCursorPos
self.comment = comment
self.testmode = testmode
def __str__(self):
return "%s" % self.harnessName
def hyphenateword(self, tables, word, mode):
# FIXME: liblouis currently crashes if we dont add space at end of the word, probably due to a counter running past the end of the string.
# medium/longterm this hack should be removed, and the root of the problem found/resolved.
hyphen_mask=hyphenate(tables, word+' ', mode)
# FIXME: why on python 2 do we need to remove the last item, and on python3 it is needed?
# i.e. in python2 word and hyphen_mask not of the same length.
if PY2:
return "".join( map(lambda a,b: "-"+a if b=='1' else a, word, hyphen_mask)[:-1] )
else:
return "".join( list(map(lambda a,b: "-"+a if b=='1' else a, word, hyphen_mask)) )
def check_translate(self):
if self.cursorPos is not None:
tBrl, temp1, temp2, tBrlCurPos = translate(self.tables, self.input, mode=self.mode, cursorPos=self.cursorPos)
else:
tBrl, temp1, temp2, tBrlCurPos = translate(self.tables, self.input, mode=self.mode)
template = "%-25s '%s'"
tBrlCurPosStr = showCurPos(len(tBrl), tBrlCurPos)
report = [
"--- Braille Difference Failure: %s ---" % self.__str__(),
template % ("comment:", "".join(self.comment)),
template % ("input:", self.input),
template % ("expected brl:", self.expectedOutput),
template % ("actual brl:", tBrl),
"--- end ---",
]
assert tBrl == self.expectedOutput, u("\n".join(report))
def check_backtranslate(self):
backtranslate_output = backTranslateString(self.tables, self.input, None, mode=self.mode)
template = "%-25s '%s'"
report = [
"--- Backtranslate failure: %s ---" % self.__str__(),
template % ("comment:", "".join(self.comment)),
template % ("input:", self.input),
template % ("expected text:", self.expectedOutput),
template % ("actual backtranslated text:", backtranslate_output),
"--- end ---",
]
assert backtranslate_output == self.expectedOutput, u("\n".join(report))
def check_cursor(self):
tBrl, temp1, temp2, tBrlCurPos = translate(self.tables, self.input, mode=self.mode, cursorPos=self.cursorPos)
template = "%-25s '%s'"
etBrlCurPosStr = showCurPos(len(tBrl), tBrlCurPos, pos2=self.expectedBrlCursorPos)
report = [
"--- Braille Cursor Difference Failure: %s ---" %self.__str__(),
template % ("comment:", "".join(self.comment)),
template % ("input:", self.input),
template % ("received brl:", tBrl),
template % ("BRLCursorAt %d expected %d:" %(tBrlCurPos, self.expectedBrlCursorPos),
etBrlCurPosStr),
"--- end ---"
]
assert tBrlCurPos == self.expectedBrlCursorPos, u("\n".join(report))
def check_hyphenate(self):
hyphenated_word = self.hyphenateword(self.tables, self.input, mode=self.mode)
template = "%-25s '%s'"
report = [
"--- Hyphenation failure: %s ---" % self.__str__(),
template % ("input:", self.input),
template % ("expected hyphenated word:", self.expectedOutput),
template % ("actual hyphenated word:", hyphenated_word),
"--- end ---",
]
assert hyphenated_word == self.expectedOutput, u("\n".join(report))
def test_allCases():
if 'HARNESS_DIR' in os.environ:
# we assume that if HARNESS_DIR is set that we are invoked from
# the Makefile, i.e. all the paths to the Python test files and
# the test tables are set correctly.
harness_dir = os.environ['HARNESS_DIR']
else:
# we are not invoked via the Makefile, i.e. we have to set up the
# paths (LOUIS_TABLEPATH) manually.
harness_dir = "."
# make sure local test braille tables are found
os.environ['LOUIS_TABLEPATH'] = '../tables,../../tables'
testfiles=[]
if len(sys.argv)>1:
# grab the test files from the arguments
for test_file in sys.argv[1:]:
testfiles.extend(iglob(os.path.join(harness_dir, test_file)))
else:
# Process all *_harness.txt files in the harness directory.
testfiles=iglob(os.path.join(harness_dir, '*_harness.txt'))
for harness in testfiles:
f = open(harness, 'r')
try:
harnessModule = json.load(f, encoding="UTF-8")
except ValueError as e:
raise ValueError("%s doesn't look like a harness file, %s" %(harness, e.message))
f.close()
tableList = []
if isinstance(harnessModule['tables'], list):
tableList.extend(harnessModule['tables'])
else:
tableList.append(harnessModule['tables'])
origflags = {'testmode':'translate'}
for section in harnessModule['tests']:
flags = origflags.copy()
flags.update(section.get('flags', {}))
for testData in section['data']:
test = flags.copy()
testTables = tableList[:]
test.update(testData)
bt = BrailleTest(harness, testTables, **test)
if test['testmode'] == 'translate':
yield bt.check_translate
if 'cursorPos' in test:
yield bt.check_cursor
if test['testmode'] == 'backtranslate':
yield bt.check_backtranslate
if test['testmode'] == 'hyphenate':
yield bt.check_hyphenate
if __name__ == '__main__':
result = run(addplugins=[Reporter()], argv=['-v', '--with-reporter', sys.argv[0]], defaultTest=__name__)
# FIXME: Ideally the harness tests should return the result of the
# tests. However since there is no way to mark a test as expected
# failure ATM we would have to disable a whole file of tests. So,
# for this release we will pretend all tests succeeded and will
# add a @expected_test feature for the next release. See also
# http://stackoverflow.com/questions/9613932/nose-plugin-for-expected-failures
result = True
sys.exit(0 if result else 1)
| [
"louis.hyphenate",
"traceback.format_exception",
"json.load",
"louis.translate",
"louis.backTranslateString",
"sys.stderr.write",
"os.path.join",
"sys.exit"
] | [((10586, 10614), 'sys.exit', 'sys.exit', (['(0 if result else 1)'], {}), '(0 if result else 1)\n', (10594, 10614), False, 'import sys\n'), ((1419, 1484), 'sys.stderr.write', 'sys.stderr.write', (['"""The harness tests require nose. Skipping...\n"""'], {}), "('The harness tests require nose. Skipping...\\n')\n", (1435, 1484), False, 'import sys\n'), ((1489, 1500), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1497, 1500), False, 'import sys\n'), ((4659, 4694), 'louis.hyphenate', 'hyphenate', (['tables', "(word + ' ')", 'mode'], {}), "(tables, word + ' ', mode)\n", (4668, 4694), False, 'from louis import translate, backTranslateString, hyphenate\n'), ((5950, 6016), 'louis.backTranslateString', 'backTranslateString', (['self.tables', 'self.input', 'None'], {'mode': 'self.mode'}), '(self.tables, self.input, None, mode=self.mode)\n', (5969, 6016), False, 'from louis import translate, backTranslateString, hyphenate\n'), ((6571, 6647), 'louis.translate', 'translate', (['self.tables', 'self.input'], {'mode': 'self.mode', 'cursorPos': 'self.cursorPos'}), '(self.tables, self.input, mode=self.mode, cursorPos=self.cursorPos)\n', (6580, 6647), False, 'from louis import translate, backTranslateString, hyphenate\n'), ((2164, 2210), 'traceback.format_exception', 'traceback.format_exception', (['exctype', 'value', 'tb'], {}), '(exctype, value, tb)\n', (2190, 2210), False, 'import traceback\n'), ((5198, 5274), 'louis.translate', 'translate', (['self.tables', 'self.input'], {'mode': 'self.mode', 'cursorPos': 'self.cursorPos'}), '(self.tables, self.input, mode=self.mode, cursorPos=self.cursorPos)\n', (5207, 5274), False, 'from louis import translate, backTranslateString, hyphenate\n'), ((5334, 5384), 'louis.translate', 'translate', (['self.tables', 'self.input'], {'mode': 'self.mode'}), '(self.tables, self.input, mode=self.mode)\n', (5343, 5384), False, 'from louis import translate, backTranslateString, hyphenate\n'), ((8685, 8727), 'os.path.join', 'os.path.join', (['harness_dir', '"""*_harness.txt"""'], {}), "(harness_dir, '*_harness.txt')\n", (8697, 8727), False, 'import os\n'), ((8831, 8861), 'json.load', 'json.load', (['f'], {'encoding': '"""UTF-8"""'}), "(f, encoding='UTF-8')\n", (8840, 8861), False, 'import json\n'), ((8544, 8580), 'os.path.join', 'os.path.join', (['harness_dir', 'test_file'], {}), '(harness_dir, test_file)\n', (8556, 8580), False, 'import os\n')] |
'''
File: Viewer.py
Author: <NAME>. & <NAME>.
Date: 06/04/19
Description: This viewer can be run on a RaspberryPI, and pulls timelapse photos from a webserver hosted by Server.py
'''
from kivy.config import Config
import timelapseshare as tls
import PIL
import _thread
import time
import os
os.environ['KIVY_GL_BACKEND'] = 'gl' # FIXES A SEGFAULT ????
import urllib.request as urllib
#Config.set('graphics', 'fullscreen','auto')
Config.set('input', 'mouse', 'mouse,multitouch_on_demand')
#Config.set('kivy', 'exit_on_escape', '1')
from kivy.app import App
from kivy.core.window import Window
from kivy.uix.screenmanager import ScreenManager, Screen, NoTransition
from kivy.uix.label import Label
from kivy.uix.image import Image
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.button import Button
from kivy.graphics import Rectangle, Color
from kivy.clock import Clock
# ==========================
# Defaults
# ==========================
_SPEED = 1
_UPDATE_INTERVAL = 10 # every 10 seconds
_CLEAR_CACHE = False
_FILESERVER = "http://localhost:8000"
# ==========================
# Command-Line Arguments
# ==========================
import argparse
import platform
def dir_path(string):
if os.path.isdir(string):
return string
else:
raise
parser = argparse.ArgumentParser(description="Interactive Timelapse scroller")
parser.add_argument("-i", "--image_directory", type=dir_path, help="Sets the directory where the images are stored")
parser.add_argument("-pre", "--image_prefix", type=str, help="Sets the prefix of the image Eg. 'IMG'")
parser.add_argument("-post", "--image_postfix", type=str, help="Sets the postfix of the image Eg. '.jpg'")
parser.add_argument("-url", "--server_url", type=str, help="Sets the link to the server hosted by the webcam")
args = parser.parse_args()
if args.image_directory:
print("[*] SETTING IMAGE DIRECTORY : " + args.image_directory)
tls.setImageDirectory(args.image_directory)
if args.server_url:
print("[*] SETTING URL TO SERVER : " + args.server_url)
_FILESERVER = args.server_url
# ==========================
# Runtime Calculations
# ==========================
tls.updateStats()
def getImageDateTime(ID):
datafile = open(tls.getDataByID(ID))
teasis = datafile.read()
datafile.close()
return teasis
print("Highest: %d\nLowest: %d" % (tls.getMax(), tls.getMin()))
# ==========================
# WebServer stuff
# ==========================
def update_imgs(min_i, max_i):
global _CLEAR_CACHE
if tls._MIN > min_i and _CLEAR_CACHE:
for i in range(tls._MIN, min_i): # delete files in that range
try:
print("removing " + str(i))
os.remove(tls.getImageByID(i))
except:
print(str(i) + " doesn't exist!")
if tls._MAX < max_i:
for i in range(tls._MAX, max_i): # gets files in that range
try:
print("retrieving " + str(i))
urllib.urlretrieve(_FILESERVER + "/frame" + str(i) + ".jpg", tls.getImageByID(i))
except:
print(str(i) + " doesn't exist!")
tls.updateStatsManually(min_i, max_i)
def get_update():
try:
urllib.urlretrieve(_FILESERVER + "/index.txt", "index.txt")
indx = open("index.txt")
lines = indx.readlines()
mi = int(lines[0])
ma = int(lines[1])
update_imgs(mi, ma)
return True
except:
print("server down!")
return False
# ==========================
# Update thread
# ==========================
get_update()
def update_loop():
global _UPDATE_INTERVAL
while True:
time.sleep(_UPDATE_INTERVAL)
get_update()
_thread.start_new_thread(update_loop, ())
# ==========================
# User-Interface
# ==========================
class DebugScreen(Screen):
def __init__(self, *args, **kwargs):
super(DebugScreen, self).__init__(*args, **kwargs)
self.index = tls._MIN
master_layout = BoxLayout(orientation='vertical', size_hint=(1, 0.1))
self.title = Label(text='', font_size=80, size_hint=(1, 1))
master_layout.add_widget(self.title)
background_container = FloatLayout()
self.image = Image(source=tls.getImageByID(self.index), size_hint=(1, 0.9), nocache=True, allow_stretch=True)
background_container.add_widget(self.image)
background_container.add_widget(master_layout)
self.add_widget(background_container)
Clock.schedule_interval(self.updateScroll, 0.10)
Clock.schedule_interval(self.TLS_update, 1)
# Keyboard Input
self._keyboard = Window.request_keyboard(self._keyboard_closed, self)
self._keyboard.bind(on_key_down=self._on_keyboard_down, on_key_up=self._on_keyboard_up)
self.leftKey = False
self.rightKey = False
self.leftCount = 0
self.rightCount = 0
self.velo = 0
# Keyboard callbacks
def _keyboard_closed(self):
self._keyboard.unbind(on_key_down=self._on_keyboard_down)
self._keyboard = None
def _on_keyboard_down(self, keyboard, keycode, text, modifiers):
if keycode[1] == 'left':
self.leftKey = True
elif keycode[1] == 'right':
self.rightKey = True
return True
def _on_keyboard_up(self, keyboard, keycode):
if keycode[1] == 'left':
self.leftKey = False
elif keycode[1] == 'right':
self.rightKey = False
return True
# Mouse callbacks
def on_touch_down(self, touch):
if touch.is_mouse_scrolling:
if touch.button == 'scrolldown':
if self.index > tls._MIN:
self.index = self.index - _SPEED
elif touch.button == 'scrollup':
if self.index < tls._MAX:
self.index = self.index + _SPEED
GridLayout.on_touch_down(self, touch)
def updateScroll(self, *args):
app = App.get_running_app()
if self.leftKey:
if self.leftCount >= 4:
self.velo = -4
else:
self.velo = self.velo - 1
elif self.rightKey:
if self.rightCount >= 4:
self.velo = 4
else:
self.velo = self.velo + 1
else:
self.velo = 0
self.leftCount = 0
self.rightCount = 0
if (self.index+self.velo) > tls._MAX or (self.index+self.velo) < tls._MIN:
if (self.index+self.velo) > tls._MAX:
self.index = tls._MAX
elif (self.index+self.velo) < tls._MIN:
self.index = tls._MIN
else:
self.index = self.index+self.velo
#print("moving : " + str(self.index))
try:
self.title.text = tls.getTimeByID(self.index)
self.image.source = tls.getImageByID(self.index)
except:
pass
# Timelapse Share auto-updating stuff
def TLS_update(self, *args):
#tls.updateStats();
if self.index > tls._MAX:
self.index = tls._MAX
if self.index < tls._MIN:
self.index = tls._MIN
try:
self.title.text = tls.getTimeByID(self.index)
self.image.source = tls.getImageByID(self.index)
except:
pass
class ScreenManagement(ScreenManager):
def __init__(self, *args, **kwargs):
super(ScreenManagement, self).__init__(*args, **kwargs)
self.DBscreen = DebugScreen(name='scrollDebug')
self.add_widget(self.DBscreen)
self.current = 'scrollDebug'
class MainApp(App):
def build(self):
self.manager = ScreenManagement(transition=NoTransition())
return(self.manager)
# Start the app
MainApp().run()
| [
"kivy.config.Config.set",
"argparse.ArgumentParser",
"timelapseshare.getMax",
"kivy.uix.gridlayout.GridLayout.on_touch_down",
"timelapseshare.updateStatsManually",
"kivy.core.window.Window.request_keyboard",
"_thread.start_new_thread",
"timelapseshare.setImageDirectory",
"time.sleep",
"urllib.request.urlretrieve",
"kivy.uix.boxlayout.BoxLayout",
"kivy.uix.label.Label",
"timelapseshare.updateStats",
"kivy.uix.screenmanager.NoTransition",
"timelapseshare.getImageByID",
"kivy.uix.floatlayout.FloatLayout",
"os.path.isdir",
"timelapseshare.getDataByID",
"kivy.app.App.get_running_app",
"timelapseshare.getMin",
"kivy.clock.Clock.schedule_interval",
"timelapseshare.getTimeByID"
] | [((430, 488), 'kivy.config.Config.set', 'Config.set', (['"""input"""', '"""mouse"""', '"""mouse,multitouch_on_demand"""'], {}), "('input', 'mouse', 'mouse,multitouch_on_demand')\n", (440, 488), False, 'from kivy.config import Config\n'), ((1357, 1426), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Interactive Timelapse scroller"""'}), "(description='Interactive Timelapse scroller')\n", (1380, 1426), False, 'import argparse\n'), ((2220, 2237), 'timelapseshare.updateStats', 'tls.updateStats', ([], {}), '()\n', (2235, 2237), True, 'import timelapseshare as tls\n'), ((3546, 3587), '_thread.start_new_thread', '_thread.start_new_thread', (['update_loop', '()'], {}), '(update_loop, ())\n', (3570, 3587), False, 'import _thread\n'), ((1293, 1314), 'os.path.isdir', 'os.path.isdir', (['string'], {}), '(string)\n', (1306, 1314), False, 'import os\n'), ((1984, 2027), 'timelapseshare.setImageDirectory', 'tls.setImageDirectory', (['args.image_directory'], {}), '(args.image_directory)\n', (2005, 2027), True, 'import timelapseshare as tls\n'), ((3047, 3084), 'timelapseshare.updateStatsManually', 'tls.updateStatsManually', (['min_i', 'max_i'], {}), '(min_i, max_i)\n', (3070, 3084), True, 'import timelapseshare as tls\n'), ((2282, 2301), 'timelapseshare.getDataByID', 'tls.getDataByID', (['ID'], {}), '(ID)\n', (2297, 2301), True, 'import timelapseshare as tls\n'), ((3112, 3171), 'urllib.request.urlretrieve', 'urllib.urlretrieve', (["(_FILESERVER + '/index.txt')", '"""index.txt"""'], {}), "(_FILESERVER + '/index.txt', 'index.txt')\n", (3130, 3171), True, 'import urllib.request as urllib\n'), ((3501, 3529), 'time.sleep', 'time.sleep', (['_UPDATE_INTERVAL'], {}), '(_UPDATE_INTERVAL)\n', (3511, 3529), False, 'import time\n'), ((3826, 3879), 'kivy.uix.boxlayout.BoxLayout', 'BoxLayout', ([], {'orientation': '"""vertical"""', 'size_hint': '(1, 0.1)'}), "(orientation='vertical', size_hint=(1, 0.1))\n", (3835, 3879), False, 'from kivy.uix.boxlayout import BoxLayout\n'), ((3895, 3941), 'kivy.uix.label.Label', 'Label', ([], {'text': '""""""', 'font_size': '(80)', 'size_hint': '(1, 1)'}), "(text='', font_size=80, size_hint=(1, 1))\n", (3900, 3941), False, 'from kivy.uix.label import Label\n'), ((4008, 4021), 'kivy.uix.floatlayout.FloatLayout', 'FloatLayout', ([], {}), '()\n', (4019, 4021), False, 'from kivy.uix.floatlayout import FloatLayout\n'), ((4274, 4321), 'kivy.clock.Clock.schedule_interval', 'Clock.schedule_interval', (['self.updateScroll', '(0.1)'], {}), '(self.updateScroll, 0.1)\n', (4297, 4321), False, 'from kivy.clock import Clock\n'), ((4325, 4368), 'kivy.clock.Clock.schedule_interval', 'Clock.schedule_interval', (['self.TLS_update', '(1)'], {}), '(self.TLS_update, 1)\n', (4348, 4368), False, 'from kivy.clock import Clock\n'), ((4408, 4460), 'kivy.core.window.Window.request_keyboard', 'Window.request_keyboard', (['self._keyboard_closed', 'self'], {}), '(self._keyboard_closed, self)\n', (4431, 4460), False, 'from kivy.core.window import Window\n'), ((5445, 5482), 'kivy.uix.gridlayout.GridLayout.on_touch_down', 'GridLayout.on_touch_down', (['self', 'touch'], {}), '(self, touch)\n', (5469, 5482), False, 'from kivy.uix.gridlayout import GridLayout\n'), ((5524, 5545), 'kivy.app.App.get_running_app', 'App.get_running_app', ([], {}), '()\n', (5543, 5545), False, 'from kivy.app import App\n'), ((2398, 2410), 'timelapseshare.getMax', 'tls.getMax', ([], {}), '()\n', (2408, 2410), True, 'import timelapseshare as tls\n'), ((2412, 2424), 'timelapseshare.getMin', 'tls.getMin', ([], {}), '()\n', (2422, 2424), True, 'import timelapseshare as tls\n'), ((6157, 6184), 'timelapseshare.getTimeByID', 'tls.getTimeByID', (['self.index'], {}), '(self.index)\n', (6172, 6184), True, 'import timelapseshare as tls\n'), ((6208, 6236), 'timelapseshare.getImageByID', 'tls.getImageByID', (['self.index'], {}), '(self.index)\n', (6224, 6236), True, 'import timelapseshare as tls\n'), ((6484, 6511), 'timelapseshare.getTimeByID', 'tls.getTimeByID', (['self.index'], {}), '(self.index)\n', (6499, 6511), True, 'import timelapseshare as tls\n'), ((6535, 6563), 'timelapseshare.getImageByID', 'tls.getImageByID', (['self.index'], {}), '(self.index)\n', (6551, 6563), True, 'import timelapseshare as tls\n'), ((4050, 4078), 'timelapseshare.getImageByID', 'tls.getImageByID', (['self.index'], {}), '(self.index)\n', (4066, 4078), True, 'import timelapseshare as tls\n'), ((6919, 6933), 'kivy.uix.screenmanager.NoTransition', 'NoTransition', ([], {}), '()\n', (6931, 6933), False, 'from kivy.uix.screenmanager import ScreenManager, Screen, NoTransition\n'), ((2714, 2733), 'timelapseshare.getImageByID', 'tls.getImageByID', (['i'], {}), '(i)\n', (2730, 2733), True, 'import timelapseshare as tls\n'), ((2976, 2995), 'timelapseshare.getImageByID', 'tls.getImageByID', (['i'], {}), '(i)\n', (2992, 2995), True, 'import timelapseshare as tls\n')] |
from PyQt5 import QtGui
from PyQt5.QtWidgets import QApplication,QMainWindow, QWidget, QPushButton
from PyQt5.QtGui import QPainter,QBrush, QPen
from PyQt5.QtCore import Qt
from PyQt5.QtCore import QTimer
from PyQt5.QtGui import QTransform
from PyQt5.QtCore import QPointF
from CarMaintainer import CarMaintainer
from Algorithm import Algorithm
class Window(QMainWindow):
STATE_OF_EMERGENCY=1
def __init__(self):
super().__init__()
timer = QTimer(self)
timer.setInterval(20) # interval in ms
timer.timeout.connect(self.update)
timer.start(0)
self.title= "Emergency Response System"
self.top=100
self.left=100
self.width=500
self.height=500
#button = QPushButton('button', self)
#button.move(0,0)
#button.clicked.connect(self.on_click)
CarMaintainer()
Algorithm()
self.InitWindow()
def InitWindow(self):
self.setWindowIcon(QtGui.QIcon('icon.png'))
self.setWindowTitle(self.title)
self.setGeometry(self.top,self.left,self.width,self.height)
self.show()
def on_click(self):
Window.STATE_OF_EMERGENCY=1
def paintEvent(self, e):
painter= QPainter(self)
reflecting_axis= QTransform(1,0,0,0,-1,0,250,250,1) #translating the coordinate system to the middle of the screen and reflecting it about x axis to make positive y cooredinates above x axis
painter.setTransform(reflecting_axis)
painter.setPen(QPen(Qt.black,1,Qt.SolidLine))
painter.setBrush(QBrush(Qt.gray,Qt.SolidPattern))
painter.drawEllipse(QPointF(0,0),250,250) #draw outer lane
painter.setPen(QPen(Qt.yellow,5,Qt.DashLine))
painter.setBrush(QBrush(Qt.gray,Qt.SolidPattern))
painter.drawEllipse(QPointF(0,0),150,150) #draw inner lane
painter.setPen(QPen(Qt.black,2,Qt.SolidLine))
painter.setBrush(QBrush(Qt.black,Qt.SolidPattern))
painter.drawEllipse(QPointF(0,0),50,50) #black centre
# -------------------------------------------------------------------------------------------------------------
# Drawing lanes is complete. Now drawing cars
painter.setBrush(QBrush(Qt.green,Qt.SolidPattern))
counter=1
for point in Algorithm.run_algorithm(Window.STATE_OF_EMERGENCY):
if counter==1:
painter.drawEllipse(QPointF(point[0], point[1]),10,10)
counter=-1
else:
painter.drawEllipse(QPointF(point[0], point[1]),5,5)
counter=1
for a_car in CarMaintainer.Inner_Car_List:
if a_car.PSUEDO_CAR==False:
painter.drawEllipse(a_car.calculate_position(),a_car.CAR_GUI_RADIUS,a_car.CAR_GUI_RADIUS)
painter.drawText(a_car.calculate_position(),str(a_car.CarNumber))
else:
painter.setPen(QPen(Qt.red,1,Qt.DashLine)) #new paint settings for Psuedo car
painter.setBrush(QBrush(Qt.gray,Qt.NoBrush))
painter.drawEllipse(a_car.calculate_position(),a_car.CAR_GUI_RADIUS,a_car.CAR_GUI_RADIUS)
painter.drawText(a_car.calculate_position(),str(a_car.CarNumber))
painter.setPen(QPen(Qt.black,2,Qt.SolidLine)) # restore paint settings after drawing a psuedo car
painter.setBrush(QBrush(Qt.green,Qt.SolidPattern))
for a_car in CarMaintainer.Outer_Car_List:
if a_car.IS_AMBULANCE == False :
painter.drawEllipse(a_car.calculate_position(),a_car.CAR_GUI_RADIUS,a_car.CAR_GUI_RADIUS)
painter.drawText(a_car.calculate_position(),str(a_car.CarNumber))
else:
painter.setBrush(QBrush(Qt.red,Qt.SolidPattern))
painter.drawEllipse(a_car.calculate_position(),a_car.CAR_GUI_RADIUS,a_car.CAR_GUI_RADIUS)
painter.drawText(a_car.calculate_position(),str(a_car.CarNumber))
painter.setBrush(QBrush(Qt.green,Qt.SolidPattern))
for a_car in CarMaintainer.In_Transition_List:
painter.setBrush(QBrush(Qt.yellow,Qt.SolidPattern))
painter.drawEllipse(a_car.calculate_position(),a_car.CAR_GUI_RADIUS,a_car.CAR_GUI_RADIUS)
painter.drawText(a_car.calculate_position(),str(a_car.CarNumber))
painter.setBrush(QBrush(Qt.green,Qt.SolidPattern))
painter.setPen(QPen(Qt.red,1,Qt.SolidLine))
painter.setBrush(QBrush(Qt.green,Qt.NoBrush))
painter.drawEllipse(QPointF(0,0),100,100) #draw constuction line on inner lane
painter.drawEllipse(QPointF(0,0),200,200) #draw constuction line on outer lane
painter.setPen(QPen(Qt.red,1,Qt.SolidLine))
painter.setBrush(QBrush(Qt.red,Qt.SolidPattern))
# painter.drawEllipse(QPointF(100,0),5,5)
# painter.drawEllipse(QPointF(-100,0),5,5)
# painter.drawEllipse(QPointF(0,-100),5,5)
painter.drawEllipse(QPointF(0,0),10,10)
| [
"PyQt5.QtCore.QTimer",
"PyQt5.QtGui.QPainter",
"PyQt5.QtGui.QIcon",
"CarMaintainer.CarMaintainer",
"Algorithm.Algorithm",
"Algorithm.Algorithm.run_algorithm",
"PyQt5.QtGui.QPen",
"PyQt5.QtGui.QBrush",
"PyQt5.QtGui.QTransform",
"PyQt5.QtCore.QPointF"
] | [((485, 497), 'PyQt5.QtCore.QTimer', 'QTimer', (['self'], {}), '(self)\n', (491, 497), False, 'from PyQt5.QtCore import QTimer\n'), ((883, 898), 'CarMaintainer.CarMaintainer', 'CarMaintainer', ([], {}), '()\n', (896, 898), False, 'from CarMaintainer import CarMaintainer\n'), ((907, 918), 'Algorithm.Algorithm', 'Algorithm', ([], {}), '()\n', (916, 918), False, 'from Algorithm import Algorithm\n'), ((1276, 1290), 'PyQt5.QtGui.QPainter', 'QPainter', (['self'], {}), '(self)\n', (1284, 1290), False, 'from PyQt5.QtGui import QPainter, QBrush, QPen\n'), ((1318, 1360), 'PyQt5.QtGui.QTransform', 'QTransform', (['(1)', '(0)', '(0)', '(0)', '(-1)', '(0)', '(250)', '(250)', '(1)'], {}), '(1, 0, 0, 0, -1, 0, 250, 250, 1)\n', (1328, 1360), False, 'from PyQt5.QtGui import QTransform\n'), ((2423, 2473), 'Algorithm.Algorithm.run_algorithm', 'Algorithm.run_algorithm', (['Window.STATE_OF_EMERGENCY'], {}), '(Window.STATE_OF_EMERGENCY)\n', (2446, 2473), False, 'from Algorithm import Algorithm\n'), ((1001, 1024), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', (['"""icon.png"""'], {}), "('icon.png')\n", (1012, 1024), False, 'from PyQt5 import QtGui\n'), ((1562, 1593), 'PyQt5.QtGui.QPen', 'QPen', (['Qt.black', '(1)', 'Qt.SolidLine'], {}), '(Qt.black, 1, Qt.SolidLine)\n', (1566, 1593), False, 'from PyQt5.QtGui import QPainter, QBrush, QPen\n'), ((1618, 1650), 'PyQt5.QtGui.QBrush', 'QBrush', (['Qt.gray', 'Qt.SolidPattern'], {}), '(Qt.gray, Qt.SolidPattern)\n', (1624, 1650), False, 'from PyQt5.QtGui import QPainter, QBrush, QPen\n'), ((1680, 1693), 'PyQt5.QtCore.QPointF', 'QPointF', (['(0)', '(0)'], {}), '(0, 0)\n', (1687, 1693), False, 'from PyQt5.QtCore import QPointF\n'), ((1766, 1797), 'PyQt5.QtGui.QPen', 'QPen', (['Qt.yellow', '(5)', 'Qt.DashLine'], {}), '(Qt.yellow, 5, Qt.DashLine)\n', (1770, 1797), False, 'from PyQt5.QtGui import QPainter, QBrush, QPen\n'), ((1822, 1854), 'PyQt5.QtGui.QBrush', 'QBrush', (['Qt.gray', 'Qt.SolidPattern'], {}), '(Qt.gray, Qt.SolidPattern)\n', (1828, 1854), False, 'from PyQt5.QtGui import QPainter, QBrush, QPen\n'), ((1884, 1897), 'PyQt5.QtCore.QPointF', 'QPointF', (['(0)', '(0)'], {}), '(0, 0)\n', (1891, 1897), False, 'from PyQt5.QtCore import QPointF\n'), ((1969, 2000), 'PyQt5.QtGui.QPen', 'QPen', (['Qt.black', '(2)', 'Qt.SolidLine'], {}), '(Qt.black, 2, Qt.SolidLine)\n', (1973, 2000), False, 'from PyQt5.QtGui import QPainter, QBrush, QPen\n'), ((2025, 2058), 'PyQt5.QtGui.QBrush', 'QBrush', (['Qt.black', 'Qt.SolidPattern'], {}), '(Qt.black, Qt.SolidPattern)\n', (2031, 2058), False, 'from PyQt5.QtGui import QPainter, QBrush, QPen\n'), ((2088, 2101), 'PyQt5.QtCore.QPointF', 'QPointF', (['(0)', '(0)'], {}), '(0, 0)\n', (2095, 2101), False, 'from PyQt5.QtCore import QPointF\n'), ((2347, 2380), 'PyQt5.QtGui.QBrush', 'QBrush', (['Qt.green', 'Qt.SolidPattern'], {}), '(Qt.green, Qt.SolidPattern)\n', (2353, 2380), False, 'from PyQt5.QtGui import QPainter, QBrush, QPen\n'), ((4596, 4625), 'PyQt5.QtGui.QPen', 'QPen', (['Qt.red', '(1)', 'Qt.SolidLine'], {}), '(Qt.red, 1, Qt.SolidLine)\n', (4600, 4625), False, 'from PyQt5.QtGui import QPainter, QBrush, QPen\n'), ((4650, 4678), 'PyQt5.QtGui.QBrush', 'QBrush', (['Qt.green', 'Qt.NoBrush'], {}), '(Qt.green, Qt.NoBrush)\n', (4656, 4678), False, 'from PyQt5.QtGui import QPainter, QBrush, QPen\n'), ((4708, 4721), 'PyQt5.QtCore.QPointF', 'QPointF', (['(0)', '(0)'], {}), '(0, 0)\n', (4715, 4721), False, 'from PyQt5.QtCore import QPointF\n'), ((4797, 4810), 'PyQt5.QtCore.QPointF', 'QPointF', (['(0)', '(0)'], {}), '(0, 0)\n', (4804, 4810), False, 'from PyQt5.QtCore import QPointF\n'), ((4882, 4911), 'PyQt5.QtGui.QPen', 'QPen', (['Qt.red', '(1)', 'Qt.SolidLine'], {}), '(Qt.red, 1, Qt.SolidLine)\n', (4886, 4911), False, 'from PyQt5.QtGui import QPainter, QBrush, QPen\n'), ((4936, 4967), 'PyQt5.QtGui.QBrush', 'QBrush', (['Qt.red', 'Qt.SolidPattern'], {}), '(Qt.red, Qt.SolidPattern)\n', (4942, 4967), False, 'from PyQt5.QtGui import QPainter, QBrush, QPen\n'), ((5152, 5165), 'PyQt5.QtCore.QPointF', 'QPointF', (['(0)', '(0)'], {}), '(0, 0)\n', (5159, 5165), False, 'from PyQt5.QtCore import QPointF\n'), ((4276, 4310), 'PyQt5.QtGui.QBrush', 'QBrush', (['Qt.yellow', 'Qt.SolidPattern'], {}), '(Qt.yellow, Qt.SolidPattern)\n', (4282, 4310), False, 'from PyQt5.QtGui import QPainter, QBrush, QPen\n'), ((4534, 4567), 'PyQt5.QtGui.QBrush', 'QBrush', (['Qt.green', 'Qt.SolidPattern'], {}), '(Qt.green, Qt.SolidPattern)\n', (4540, 4567), False, 'from PyQt5.QtGui import QPainter, QBrush, QPen\n'), ((2539, 2566), 'PyQt5.QtCore.QPointF', 'QPointF', (['point[0]', 'point[1]'], {}), '(point[0], point[1])\n', (2546, 2566), False, 'from PyQt5.QtCore import QPointF\n'), ((2655, 2682), 'PyQt5.QtCore.QPointF', 'QPointF', (['point[0]', 'point[1]'], {}), '(point[0], point[1])\n', (2662, 2682), False, 'from PyQt5.QtCore import QPointF\n'), ((3047, 3075), 'PyQt5.QtGui.QPen', 'QPen', (['Qt.red', '(1)', 'Qt.DashLine'], {}), '(Qt.red, 1, Qt.DashLine)\n', (3051, 3075), False, 'from PyQt5.QtGui import QPainter, QBrush, QPen\n'), ((3148, 3175), 'PyQt5.QtGui.QBrush', 'QBrush', (['Qt.gray', 'Qt.NoBrush'], {}), '(Qt.gray, Qt.NoBrush)\n', (3154, 3175), False, 'from PyQt5.QtGui import QPainter, QBrush, QPen\n'), ((3397, 3428), 'PyQt5.QtGui.QPen', 'QPen', (['Qt.black', '(2)', 'Qt.SolidLine'], {}), '(Qt.black, 2, Qt.SolidLine)\n', (3401, 3428), False, 'from PyQt5.QtGui import QPainter, QBrush, QPen\n'), ((3519, 3552), 'PyQt5.QtGui.QBrush', 'QBrush', (['Qt.green', 'Qt.SolidPattern'], {}), '(Qt.green, Qt.SolidPattern)\n', (3525, 3552), False, 'from PyQt5.QtGui import QPainter, QBrush, QPen\n'), ((3894, 3925), 'PyQt5.QtGui.QBrush', 'QBrush', (['Qt.red', 'Qt.SolidPattern'], {}), '(Qt.red, Qt.SolidPattern)\n', (3900, 3925), False, 'from PyQt5.QtGui import QPainter, QBrush, QPen\n'), ((4149, 4182), 'PyQt5.QtGui.QBrush', 'QBrush', (['Qt.green', 'Qt.SolidPattern'], {}), '(Qt.green, Qt.SolidPattern)\n', (4155, 4182), False, 'from PyQt5.QtGui import QPainter, QBrush, QPen\n')] |
# -*- coding: utf-8 -*-
from placidity.node import Node, TreeNode
class TestNode():
def test_append_children_to_node(self):
node1, node2 = Node(), Node()
node1.children.append(node2)
assert node1.children[0] == node2
assert node2.parents[0] == node1
def test_append_parents_to_node(self):
node1, node2 = Node(), Node()
node1.parents.append(node2)
assert node1.parents[0] == node2
assert node2.children[0] == node1
def test_append_same_node_as_child_and_parent(self):
node1, node2 = Node(), Node()
node1.children.append(node2)
node1.parents.append(node2)
assert node1.children[0] == node2
assert node1.parents[0] == node2
assert node2.children[0] == node1
assert node2.parents[0] == node1
def test_append_same_node_as_child_multiple_times(self):
node1, node2 = Node(), Node()
node1.children.append(node2)
node1.children.append(node2)
node1.children.append(node2)
assert node1.children[0] == node2
assert node2.parents[0] == node1
assert len(node1.children) == 1
assert len(node2.parents) == 1
def test_append_same_node_as_parent_multiple_times(self):
node1, node2 = Node(), Node()
node1.parents.append(node2)
node1.parents.append(node2)
node1.parents.append(node2)
assert node1.parents[0] == node2
assert node2.children[0] == node1
assert len(node1.parents) == 1
assert len(node2.children) == 1
def test_multi_append(self):
node1, node2, node3 = Node(), Node(), Node()
node1.children.append(node2, node3)
assert len(node1.children) == 2
assert node2 in node1.children
assert node3 in node1.children
def test_remove_child_node(self):
node1, node2 = Node(), Node()
node1.children.append(node2)
node1.children.remove(node2)
assert len(node1.children) == 0
assert len(node2.parents) == 0
def test_remove_parent_node(self):
node1, node2 = Node(), Node()
node1.parents.append(node2)
node1.parents.remove(node2)
assert len(node1.parents) == 0
assert len(node2.children) == 0
def test_remove_same_node_multiple_times(self):
node1, node2 = Node(), Node()
node1.parents.append(node2)
node1.parents.remove(node2)
node1.parents.remove(node2)
node1.parents.remove(node2)
assert len(node1.parents) == 0
assert len(node2.children) == 0
def test_multi_remove(self):
node1, node2, node3 = Node(), Node(), Node()
node1.children.append(node2, node3)
node1.children.remove(node2, node3)
assert len(node1.children) == 0
def test_find_immediate_child_node(self):
node1, node2 = Node(), Node()
node2.name = 'node to be found'
node1.children.append(node2)
assert node1.find_child(name='node to be found') == node2
def test_find_child_node_no_results(self):
node1 = Node()
assert node1.find_child(name='just some name') == None
def test_find_child_node_from_node_tree(self):
node1 = Node()
node1a = Node()
node1a1 = Node()
node1a1.color = 'blue'
node1a2 = Node()
node1a2.value = 13
node1b = Node()
node1b1 = Node()
node1b1.find_me = True
node1b1.color = 'blue'
node1.children.append(node1a, node1b)
node1a.children.append(node1a1, node1a2)
node1b.children.append(node1b1)
assert node1.find_child(value=13) == node1a2
assert node1.find_child(find_me=True) == node1b1
assert node1.find_child(color='blue') == [node1a1, node1b1]
def test_find_immediate_parent_node(self):
node1, node2 = Node(), Node()
node2.name = 'node to be found'
node1.parents.append(node2)
assert node1.find_parent(name='node to be found') == node2
def test_find_parent_node_no_results(self):
node1 = Node()
assert node1.find_parent(name='just some name') == None
def test_find_parent_node_from_node_tree(self):
node1 = Node()
node1a = Node()
node1a1 = Node()
node1a1.color = 'blue'
node1a2 = Node()
node1a2.value = 13
node1b = Node()
node1b1 = Node()
node1b1.find_me = True
node1b1.color = 'blue'
node1.parents.append(node1a, node1b)
node1a.parents.append(node1a1, node1a2)
node1b.parents.append(node1b1)
assert node1.find_parent(value=13) == node1a2
assert node1.find_parent(find_me=True) == node1b1
assert node1.find_parent(color='blue') == [node1a1, node1b1]
assert node1.find_parent(find_me=True, color='blue') == node1b1
def test_find_root(self):
node1, node1a, node1b, node1a1 = Node(), Node(), Node(), Node()
node1.children.append(node1a, node1b)
node1a.children.append(node1a1)
assert node1.find_root() == None
assert node1a.find_root() == node1
assert node1b.find_root() == node1
assert node1a1.find_root() == node1
def test_cyclic_find(self):
node1, node2 = Node(), Node()
node1.children.append(node2)
node2.children.append(node1)
assert node1.find_root() == None
assert node2.find_root() == None
def test_find_parent_with_value_name(self):
node1, node2, node3 = Node(), Node(), Node()
node3.attribute_to_find = 'find me'
node1.parents.append(node2)
node2.parents.append(node3)
assert node1.find_parent_with_attribute('attribute_to_find') == node3
def test_walk(self):
node1, node2, node3, node4 = Node(), Node(), Node(), Node()
node5 = Node()
node1.children.append(node2)
node1.children.append(node5)
node2.children.append(node3)
node2.children.append(node4)
result = (node1, node3, node4, node2, node5 )
for i, node in enumerate(node1.walk()):
assert node == result[i], '%s %s %s' % (i, node, result[i])
class TestTreeNode():
def test_set_parent(self):
node1, node2 = TreeNode(), TreeNode()
node1.parent = node2
assert node1.parent == node2
assert node2.children == [node1, ]
def test_set_parent_twice(self):
node1, node2, node3 = TreeNode(), TreeNode(), TreeNode()
node1.parent = node2
node1.parent = node3
assert node2.children == []
assert node3.children == [node1, ]
def test_find(self):
node1, node2, node3 = TreeNode(), TreeNode(), TreeNode()
node2.parent = node1
node3.parent = node1
node2.name = 'foo'
node3.name = 'bar'
assert node1.find(name='foo') == node2
assert node1.find(name='bar') == node3
assert node1.find(name='dummy') == None
assert node2.find(name='foo') == None
| [
"placidity.node.TreeNode",
"placidity.node.Node"
] | [((3123, 3129), 'placidity.node.Node', 'Node', ([], {}), '()\n', (3127, 3129), False, 'from placidity.node import Node, TreeNode\n'), ((3262, 3268), 'placidity.node.Node', 'Node', ([], {}), '()\n', (3266, 3268), False, 'from placidity.node import Node, TreeNode\n'), ((3286, 3292), 'placidity.node.Node', 'Node', ([], {}), '()\n', (3290, 3292), False, 'from placidity.node import Node, TreeNode\n'), ((3311, 3317), 'placidity.node.Node', 'Node', ([], {}), '()\n', (3315, 3317), False, 'from placidity.node import Node, TreeNode\n'), ((3367, 3373), 'placidity.node.Node', 'Node', ([], {}), '()\n', (3371, 3373), False, 'from placidity.node import Node, TreeNode\n'), ((3418, 3424), 'placidity.node.Node', 'Node', ([], {}), '()\n', (3422, 3424), False, 'from placidity.node import Node, TreeNode\n'), ((3443, 3449), 'placidity.node.Node', 'Node', ([], {}), '()\n', (3447, 3449), False, 'from placidity.node import Node, TreeNode\n'), ((4123, 4129), 'placidity.node.Node', 'Node', ([], {}), '()\n', (4127, 4129), False, 'from placidity.node import Node, TreeNode\n'), ((4264, 4270), 'placidity.node.Node', 'Node', ([], {}), '()\n', (4268, 4270), False, 'from placidity.node import Node, TreeNode\n'), ((4288, 4294), 'placidity.node.Node', 'Node', ([], {}), '()\n', (4292, 4294), False, 'from placidity.node import Node, TreeNode\n'), ((4313, 4319), 'placidity.node.Node', 'Node', ([], {}), '()\n', (4317, 4319), False, 'from placidity.node import Node, TreeNode\n'), ((4369, 4375), 'placidity.node.Node', 'Node', ([], {}), '()\n', (4373, 4375), False, 'from placidity.node import Node, TreeNode\n'), ((4420, 4426), 'placidity.node.Node', 'Node', ([], {}), '()\n', (4424, 4426), False, 'from placidity.node import Node, TreeNode\n'), ((4445, 4451), 'placidity.node.Node', 'Node', ([], {}), '()\n', (4449, 4451), False, 'from placidity.node import Node, TreeNode\n'), ((5901, 5907), 'placidity.node.Node', 'Node', ([], {}), '()\n', (5905, 5907), False, 'from placidity.node import Node, TreeNode\n'), ((154, 160), 'placidity.node.Node', 'Node', ([], {}), '()\n', (158, 160), False, 'from placidity.node import Node, TreeNode\n'), ((162, 168), 'placidity.node.Node', 'Node', ([], {}), '()\n', (166, 168), False, 'from placidity.node import Node, TreeNode\n'), ((358, 364), 'placidity.node.Node', 'Node', ([], {}), '()\n', (362, 364), False, 'from placidity.node import Node, TreeNode\n'), ((366, 372), 'placidity.node.Node', 'Node', ([], {}), '()\n', (370, 372), False, 'from placidity.node import Node, TreeNode\n'), ((575, 581), 'placidity.node.Node', 'Node', ([], {}), '()\n', (579, 581), False, 'from placidity.node import Node, TreeNode\n'), ((583, 589), 'placidity.node.Node', 'Node', ([], {}), '()\n', (587, 589), False, 'from placidity.node import Node, TreeNode\n'), ((917, 923), 'placidity.node.Node', 'Node', ([], {}), '()\n', (921, 923), False, 'from placidity.node import Node, TreeNode\n'), ((925, 931), 'placidity.node.Node', 'Node', ([], {}), '()\n', (929, 931), False, 'from placidity.node import Node, TreeNode\n'), ((1294, 1300), 'placidity.node.Node', 'Node', ([], {}), '()\n', (1298, 1300), False, 'from placidity.node import Node, TreeNode\n'), ((1302, 1308), 'placidity.node.Node', 'Node', ([], {}), '()\n', (1306, 1308), False, 'from placidity.node import Node, TreeNode\n'), ((1646, 1652), 'placidity.node.Node', 'Node', ([], {}), '()\n', (1650, 1652), False, 'from placidity.node import Node, TreeNode\n'), ((1654, 1660), 'placidity.node.Node', 'Node', ([], {}), '()\n', (1658, 1660), False, 'from placidity.node import Node, TreeNode\n'), ((1662, 1668), 'placidity.node.Node', 'Node', ([], {}), '()\n', (1666, 1668), False, 'from placidity.node import Node, TreeNode\n'), ((1895, 1901), 'placidity.node.Node', 'Node', ([], {}), '()\n', (1899, 1901), False, 'from placidity.node import Node, TreeNode\n'), ((1903, 1909), 'placidity.node.Node', 'Node', ([], {}), '()\n', (1907, 1909), False, 'from placidity.node import Node, TreeNode\n'), ((2128, 2134), 'placidity.node.Node', 'Node', ([], {}), '()\n', (2132, 2134), False, 'from placidity.node import Node, TreeNode\n'), ((2136, 2142), 'placidity.node.Node', 'Node', ([], {}), '()\n', (2140, 2142), False, 'from placidity.node import Node, TreeNode\n'), ((2372, 2378), 'placidity.node.Node', 'Node', ([], {}), '()\n', (2376, 2378), False, 'from placidity.node import Node, TreeNode\n'), ((2380, 2386), 'placidity.node.Node', 'Node', ([], {}), '()\n', (2384, 2386), False, 'from placidity.node import Node, TreeNode\n'), ((2676, 2682), 'placidity.node.Node', 'Node', ([], {}), '()\n', (2680, 2682), False, 'from placidity.node import Node, TreeNode\n'), ((2684, 2690), 'placidity.node.Node', 'Node', ([], {}), '()\n', (2688, 2690), False, 'from placidity.node import Node, TreeNode\n'), ((2692, 2698), 'placidity.node.Node', 'Node', ([], {}), '()\n', (2696, 2698), False, 'from placidity.node import Node, TreeNode\n'), ((2899, 2905), 'placidity.node.Node', 'Node', ([], {}), '()\n', (2903, 2905), False, 'from placidity.node import Node, TreeNode\n'), ((2907, 2913), 'placidity.node.Node', 'Node', ([], {}), '()\n', (2911, 2913), False, 'from placidity.node import Node, TreeNode\n'), ((3898, 3904), 'placidity.node.Node', 'Node', ([], {}), '()\n', (3902, 3904), False, 'from placidity.node import Node, TreeNode\n'), ((3906, 3912), 'placidity.node.Node', 'Node', ([], {}), '()\n', (3910, 3912), False, 'from placidity.node import Node, TreeNode\n'), ((4974, 4980), 'placidity.node.Node', 'Node', ([], {}), '()\n', (4978, 4980), False, 'from placidity.node import Node, TreeNode\n'), ((4982, 4988), 'placidity.node.Node', 'Node', ([], {}), '()\n', (4986, 4988), False, 'from placidity.node import Node, TreeNode\n'), ((4990, 4996), 'placidity.node.Node', 'Node', ([], {}), '()\n', (4994, 4996), False, 'from placidity.node import Node, TreeNode\n'), ((4998, 5004), 'placidity.node.Node', 'Node', ([], {}), '()\n', (5002, 5004), False, 'from placidity.node import Node, TreeNode\n'), ((5320, 5326), 'placidity.node.Node', 'Node', ([], {}), '()\n', (5324, 5326), False, 'from placidity.node import Node, TreeNode\n'), ((5328, 5334), 'placidity.node.Node', 'Node', ([], {}), '()\n', (5332, 5334), False, 'from placidity.node import Node, TreeNode\n'), ((5572, 5578), 'placidity.node.Node', 'Node', ([], {}), '()\n', (5576, 5578), False, 'from placidity.node import Node, TreeNode\n'), ((5580, 5586), 'placidity.node.Node', 'Node', ([], {}), '()\n', (5584, 5586), False, 'from placidity.node import Node, TreeNode\n'), ((5588, 5594), 'placidity.node.Node', 'Node', ([], {}), '()\n', (5592, 5594), False, 'from placidity.node import Node, TreeNode\n'), ((5854, 5860), 'placidity.node.Node', 'Node', ([], {}), '()\n', (5858, 5860), False, 'from placidity.node import Node, TreeNode\n'), ((5862, 5868), 'placidity.node.Node', 'Node', ([], {}), '()\n', (5866, 5868), False, 'from placidity.node import Node, TreeNode\n'), ((5870, 5876), 'placidity.node.Node', 'Node', ([], {}), '()\n', (5874, 5876), False, 'from placidity.node import Node, TreeNode\n'), ((5878, 5884), 'placidity.node.Node', 'Node', ([], {}), '()\n', (5882, 5884), False, 'from placidity.node import Node, TreeNode\n'), ((6313, 6323), 'placidity.node.TreeNode', 'TreeNode', ([], {}), '()\n', (6321, 6323), False, 'from placidity.node import Node, TreeNode\n'), ((6325, 6335), 'placidity.node.TreeNode', 'TreeNode', ([], {}), '()\n', (6333, 6335), False, 'from placidity.node import Node, TreeNode\n'), ((6514, 6524), 'placidity.node.TreeNode', 'TreeNode', ([], {}), '()\n', (6522, 6524), False, 'from placidity.node import Node, TreeNode\n'), ((6526, 6536), 'placidity.node.TreeNode', 'TreeNode', ([], {}), '()\n', (6534, 6536), False, 'from placidity.node import Node, TreeNode\n'), ((6538, 6548), 'placidity.node.TreeNode', 'TreeNode', ([], {}), '()\n', (6546, 6548), False, 'from placidity.node import Node, TreeNode\n'), ((6743, 6753), 'placidity.node.TreeNode', 'TreeNode', ([], {}), '()\n', (6751, 6753), False, 'from placidity.node import Node, TreeNode\n'), ((6755, 6765), 'placidity.node.TreeNode', 'TreeNode', ([], {}), '()\n', (6763, 6765), False, 'from placidity.node import Node, TreeNode\n'), ((6767, 6777), 'placidity.node.TreeNode', 'TreeNode', ([], {}), '()\n', (6775, 6777), False, 'from placidity.node import Node, TreeNode\n')] |
from distutils.core import setup
setup(
name = 'quicklock',
packages = ['quicklock'],
version = '0.1.7',
description = 'A simple Python resource lock to ensure only one process at a time is operating with a particular resource.',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/NateFerrero/quicklock',
download_url = 'https://github.com/NateFerrero/quicklock/tarball/0.1.7',
keywords = ['lock', 'locking', 'singleton', 'process', 'resource', 'exclusive lock'],
classifiers = [],
platforms='any',
install_requires = [
'psutil>=2.2'
]
)
| [
"distutils.core.setup"
] | [((33, 565), 'distutils.core.setup', 'setup', ([], {'name': '"""quicklock"""', 'packages': "['quicklock']", 'version': '"""0.1.7"""', 'description': '"""A simple Python resource lock to ensure only one process at a time is operating with a particular resource."""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/NateFerrero/quicklock"""', 'download_url': '"""https://github.com/NateFerrero/quicklock/tarball/0.1.7"""', 'keywords': "['lock', 'locking', 'singleton', 'process', 'resource', 'exclusive lock']", 'classifiers': '[]', 'platforms': '"""any"""', 'install_requires': "['psutil>=2.2']"}), "(name='quicklock', packages=['quicklock'], version='0.1.7',\n description=\n 'A simple Python resource lock to ensure only one process at a time is operating with a particular resource.'\n , author='<NAME>', author_email='<EMAIL>', url=\n 'https://github.com/NateFerrero/quicklock', download_url=\n 'https://github.com/NateFerrero/quicklock/tarball/0.1.7', keywords=[\n 'lock', 'locking', 'singleton', 'process', 'resource', 'exclusive lock'\n ], classifiers=[], platforms='any', install_requires=['psutil>=2.2'])\n", (38, 565), False, 'from distutils.core import setup\n')] |
#
# Copyright (C) 2013 - 2021 <NAME> <<EMAIL>>
# License: MIT
#
# pylint: disable=missing-docstring
"""test cases for anyconfig.cli module.
"""
import contextlib
import io
import pathlib
import sys
import tempfile
import unittest
import anyconfig.api
import anyconfig.cli as TT
from .. import base
from . import collectors, datatypes
def make_args(_self, tdata):
"""Make arguments to run cli.main.
"""
return ['anyconfig_cli'] + tdata.opts + [str(tdata.inp_path)]
class BaseTestCase(unittest.TestCase):
"""Base Test case.
"""
collector = collectors.Collector()
make_args = make_args
def setUp(self):
if self.collector:
self.collector.init()
def post_checks(self, tdata, *args, **kwargs):
"""Placeholder to do more post checks.
"""
pass
def _run_main(self, tdata):
"""Wrapper for cli.main."""
args = self.make_args(tdata)
if tdata.outname: # Running cli.main will output files.
self.assertTrue(
tdata.ref is not None,
'No reference data was given, {tdata!r}'
)
with tempfile.TemporaryDirectory() as tdir:
opath = pathlib.Path(tdir) / tdata.outname
# Run anyconfig.cli.main with arguments.
TT.main(args + ['-o', str(opath)])
if tdata.exp.exit_code_matches and tdata.exp.exit_code == 0:
self.assertTrue(opath.exists(), str(opath))
try:
odata = anyconfig.api.load(opath, **tdata.oo_opts)
except anyconfig.api.UnknownFileTypeError:
odata = anyconfig.api.load(opath, ac_parser='json')
self.assertEqual(odata, tdata.ref, repr(tdata))
self.post_checks(tdata, opath)
else:
# Likewise but without -o <output_path> option.
TT.main(args)
self.post_checks(tdata)
sys.exit(0)
def run_main(self, tdata) -> None:
"""
Run anyconfig.cli.main and check if the exit code was expected one.
"""
expected: datatypes.Expected = tdata.exp
with self.assertRaises(expected.exception, msg=repr(tdata)) as ctx:
with contextlib.redirect_stdout(io.StringIO()) as stdout:
with contextlib.redirect_stderr(io.StringIO()) as stderr:
self._run_main(tdata)
exc = ctx.exception
self.assertTrue(isinstance(exc, expected.exception))
ecode = getattr(exc, 'error_code', getattr(exc, 'code', 1))
if expected.exit_code_matches:
self.assertEqual(ecode, expected.exit_code, f'{tdata!r}')
else:
self.assertNotEqual(ecode, expected.exit_code, f'{tdata!r}')
if expected.words_in_stdout:
msg = stdout.getvalue()
self.assertTrue(expected.words_in_stdout in msg, msg)
if expected.words_in_stderr:
err = stderr.getvalue()
self.assertTrue(expected.words_in_stderr in err, err)
def test_runs_for_datasets(self) -> None:
if self.collector and self.collector.initialized:
if self.collector.kind == base.TDataCollector.kind:
return
for tdata in self.collector.each_data():
self.run_main(tdata)
class NoInputTestCase(BaseTestCase):
"""Test cases which does not require inputs.
"""
def make_args(self, tdata): # pylint: disable=no-self-use
"""Make arguments to run cli.main.
"""
return ['anyconfig_cli'] + tdata.opts
# vim:sw=4:ts=4:et:
| [
"io.StringIO",
"tempfile.TemporaryDirectory",
"pathlib.Path",
"anyconfig.cli.main",
"sys.exit"
] | [((2006, 2017), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2014, 2017), False, 'import sys\n'), ((1947, 1960), 'anyconfig.cli.main', 'TT.main', (['args'], {}), '(args)\n', (1954, 1960), True, 'import anyconfig.cli as TT\n'), ((1152, 1181), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1179, 1181), False, 'import tempfile\n'), ((1215, 1233), 'pathlib.Path', 'pathlib.Path', (['tdir'], {}), '(tdir)\n', (1227, 1233), False, 'import pathlib\n'), ((2328, 2341), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (2339, 2341), False, 'import io\n'), ((2402, 2415), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (2413, 2415), False, 'import io\n')] |
import urllib
from urllib import request, parse
import re
import os, sys
import time
import argparse
# Request headers
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36',
'Referer': 'http://www.verifyemailaddress.org',
'Origin': 'http://www.verifyemailaddress.org/',
'Content-Type': 'application/x-www-form-urlencoded',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
}
class EmailVerifier:
SITE_URL = 'https://www.verifyemailaddress.org/#result'
INVALID_SEARCH_STRING = "is not valid"
CONNECTING_TO_DOMAIN = "Connecting to {0} failed"
def create_request(self, email_addr):
post_form = { "email": email_addr }
enc_data = parse.urlencode(post_form).encode()
req = request.Request(
EmailVerifier.SITE_URL,
data = enc_data,
headers = headers
)
return req
def check_domain(self, domain):
req = self.create_request("help@{0}".format(domain))
resp = request.urlopen(req)
html = resp.read().decode("utf-8")
domain_invalid = EmailVerifier.CONNECTING_TO_DOMAIN.format(
domain) in html
if domain_invalid:
print(EmailVerifier.CONNECTING_TO_DOMAIN.format(
domain))
return False
else:
return True
# Returns a boolean value
def verify(self, email_addr, super_verbose = False):
req = self.create_request(email_addr)
resp = request.urlopen(req)
html = resp.read().decode("utf-8")
if super_verbose:
print(html)
re_res = EmailVerifier.INVALID_SEARCH_STRING in html
return (False if re_res else True)
# if super_verbose:
# print(re_res)
# Possible templates for different sizes
# All the possible combinations are covered by
# this and the action of the Permutator
email_templates = {
2: [
"{f}.{l}",
"{f}{l}",
],
3: [
"{f}{m}{l}",
"{f}{m}.{l}",
"{f}.{m}{l}",
"{f}.{m}.{l}"
],
1: [ "{f}" ]
}
EMAIL_FORMAT = "{user}@{domain}"
class Permutator:
""" Generate all possible combination of two and three
words to form an email. For example, (first, last), (last, first), (f, last)
The elems is produced and Permutator is called in a way such that
the emails are always produced most to least specific
eg first.last@ before f.last@ before first@
"""
def __init__(self, elems):
self.elems = elems
# Make actual permutations of elems
def make_perms(self, elems, r):
if r == 0:
yield [elems[0]]
return
for perm in self.make_perms(elems, r - 1):
for i in range(r + 1):
j = r - i
yield perm[:j] + [elems[r]] + perm[j:]
return
# Make permuatations of size from
def make_combs(self, size, l):
if (size > l + 1):
return
if size == 0:
yield []
return
if l == 0:
for elem in self.elems[0]:
yield [elem]
return
for c in self.make_combs(size, l - 1):
yield c
for elem in self.elems[l]:
for c in self.make_combs(size - 1, l - 1):
c.append(elem)
yield c
# Generate all P(n, r) permutations of r = size
def generate(self, size):
for comb in self.make_combs(size, len(self.elems) - 1):
for perm in self.make_perms(comb, len(comb) - 1):
yield perm
return
COLOR_GREEN = "\033[0;32m"
COLOR_RED = "\033[1;31m"
COLOR_RESET = "\033[0;0m"
def verify_for_size(f, l, m, size, verbose = False):
verifier = EmailVerifier()
for template in email_templates[size]:
user = template.format(f = f, l = l, m = m)
if len(user) < 3:
continue
try_addr = EMAIL_FORMAT.format(user = user, domain = domain)
if verbose:
print("Checking `" + try_addr + "`...", end = '', flush = True)
verif = verifier.verify(try_addr)
if verif:
print(COLOR_GREEN + "." + COLOR_RESET, end = '', flush = True)
return try_addr
else:
print(COLOR_RED + "." + COLOR_RESET, end = '', flush = True)
if verbose:
print(" ")
return None
# Sufficiently random email that nobody should
# actually have this as a valid one
RANDOM_EMAIL = "prhzdge.yrtheu"
# Find the email address, given the below parameters
# Permutates over the possible combinations of first and lastname
# including .(period), eg. first.last@ and then checks
# each email.
def find_email(first, middle, last, domain, args):
if not EmailVerifier().check_domain(domain):
raise ValueError("Invalid domain name for email server.")
elif EmailVerifier().verify(EMAIL_FORMAT.format(user = RANDOM_EMAIL, domain = domain)):
raise ValueError("Domain seems to accept all email addresses.")
elif args.verbose:
print("Domain checks successful")
# Can use either from each of elems
elems = [ (first, first[0]), (last, last[0]) ]
if middle:
elems.append((middle, middle[0]))
email, email_list = None, []
p_gen = Permutator(elems)
# Order of lengths is 2, 3, 1
# to match most common observations
for leng in (2, 3, 1):
for perm in p_gen.generate(leng):
first = perm[0]
last = perm[1] if len(perm) > 1 else None
middle = perm[2] if len(perm) > 2 else None
email = verify_for_size(first, last, middle, leng, args.verbose)
if email:
email_list.append(email)
if not args.find_all:
return email_list
# Not found, probably works for Amazon :D
return email_list
# Automatically append .com if no tld is
# present in domain.
TLD = [".com", ".org", ".net"]
def correct_for_tld(domain):
if domain == "":
return domain
domain_flag = False
for tld in TLD:
if domain.endswith(tld):
domain_flag = True
break
if not domain_flag:
return domain + TLD[0]
else:
return domain
# Check internet connectivity, using Google
# the standard connection tester :)
google_url = "https://google.com/"
def check_connectivity():
print("Checking connection...")
try:
request.urlopen(google_url)
return True
except urllib.error.URLError:
return False
parser = argparse.ArgumentParser(
description='Find email address given a name and a domain.')
parser.add_argument('--batch', dest='batch', default = False,
action='store_true', help = "Batch mode, process multiple requests")
parser.add_argument('-v', dest='verbose', default = False,
action='store_true', help = "Verbose mode")
parser.add_argument('--all', dest='find_all', default = False,
action='store_true', help = "Find all possible addresses instead \
of stopping at the first successful")
if __name__ == "__main__":
if not check_connectivity():
print("Can't connect to internet, exiting.")
sys.exit(1)
else:
print("Connectivity okay.")
args = parser.parse_args()
loops = 1000 if args.batch else 1
input_list = []
for l in range(loops):
name = input("Name({first} {last}): ")
if name == "":
break
domain = correct_for_tld(input("Domain: "))
input_list.append((domain, name.split()))
prev_domain = ""
for domain, name_parts in input_list:
if len(name_parts) > 2:
first, middle, last = name_parts[0], name_parts[1].lower(), name_parts[2]
else:
first, last = name_parts; middle = None
if domain == "":
domain = prev_domain
try:
email_list = find_email(first.lower(), middle, last.lower(), domain, args)
print()
if len(email_list) > 0:
print("Valid Emails: ", email_list)
else:
print("Not Found")
prev_domain = domain
except ValueError as e:
print("Error: " + str(e))
sys.exit(1)
# Successful return
sys.exit(0)
| [
"urllib.request.Request",
"argparse.ArgumentParser",
"urllib.parse.urlencode",
"urllib.request.urlopen",
"sys.exit"
] | [((5786, 5875), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Find email address given a name and a domain."""'}), "(description=\n 'Find email address given a name and a domain.')\n", (5809, 5875), False, 'import argparse\n'), ((846, 917), 'urllib.request.Request', 'request.Request', (['EmailVerifier.SITE_URL'], {'data': 'enc_data', 'headers': 'headers'}), '(EmailVerifier.SITE_URL, data=enc_data, headers=headers)\n', (861, 917), False, 'from urllib import request, parse\n'), ((1047, 1067), 'urllib.request.urlopen', 'request.urlopen', (['req'], {}), '(req)\n', (1062, 1067), False, 'from urllib import request, parse\n'), ((1441, 1461), 'urllib.request.urlopen', 'request.urlopen', (['req'], {}), '(req)\n', (1456, 1461), False, 'from urllib import request, parse\n'), ((5688, 5715), 'urllib.request.urlopen', 'request.urlopen', (['google_url'], {}), '(google_url)\n', (5703, 5715), False, 'from urllib import request, parse\n'), ((6390, 6401), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6398, 6401), False, 'import os, sys\n'), ((7272, 7283), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (7280, 7283), False, 'import os, sys\n'), ((802, 828), 'urllib.parse.urlencode', 'parse.urlencode', (['post_form'], {}), '(post_form)\n', (817, 828), False, 'from urllib import request, parse\n'), ((7235, 7246), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7243, 7246), False, 'import os, sys\n')] |
import pygame
from random import randint
# Var de trainamento
# Lista de saida [Esquerda, Cima, Direita, Baixo]
listaOutput = [0,0,0,0]
# Lista de entrada [n da rodada ,pontos obtidos
# matriz do tabuleiro] (tabuleiro incluindo
# paredes, corpo da cobra e maca)
listaEntrada = [0]*3
# Nota: a matriz sera quase 4 vezes maior que o
# tabuleiro em si para a cebeca ser centralizada
# e poder enxergar o tabuleiro inteiro sempre
tamanho_tabuleiro_maior = 20 + 19
matriz_do_tabuleiro = [0]*tamanho_tabuleiro_maior
for i in range(20):
matriz_do_tabuleiro[i] = [0]*39
listaEntrada[2] = matriz_do_tabuleiro
# Funcoes para inserir a cabeca e o corpo
def corpoFunc(x, y):
screen.blit(imagemCorpo, (x,y))
def cabecaFunc(x, y):
screen.blit(imagemCabeca, (x,y))
# Funcao para inserir quadrado verde do fundo
def quadradoFundoFunc(x, y):
screen.blit(imagemQuadradoFundo, (x,y))
# Funcao para inserir a maca
def macaFunc(x, y):
screen.blit(imagemMaca, (x,y))
# Funcao para placar
def placarFunc(x,y):
placar = font.render("Pontos: " + str(pontos), True, (255, 255, 255))
screen.blit(placar, (x,y))
# Loop de treino
for c in range (2):
# Inicializa o pygame
pygame.init()
# Cria tela e define tamanho
screen = pygame.display.set_mode((600,600))
# Titulo e icone
pygame.display.set_caption("Jogo da Cobrenha de ThurMP")
icone = pygame.image.load("images/snake icon.png")
fim_de_jogo = False
rodada = 0
# Define fonte
pontos = 0
font = pygame.font.Font('freesansbold.ttf', 32)
# Cria e atualiza fundo
background = pygame.image.load("images/fundo_quadriculado_verde.png")
screen.blit(background,(0,0))
# Load das imagens
imagemCorpo = pygame.image.load("images/corpo.png")
imagemCabeca = pygame.image.load("images/cabeça_direita.png")
imagemQuadradoFundo = pygame.image.load("images/quadrado_do_fundo.png")
imagemMaca = pygame.image.load("images/maca1.png")
# Configuracao inicial da cabeca
cabecaX = 181
cabecaY = 271
#for i in range(39):
# matriz_do_tabuleiro[(-1)//30][(-1)//30] =
# Jogo comeca indo para a direita
cabecaXChange = 30
cabecaYChange = 0
# Listas para manter armazenadas as posicoes do corpo
# (Ja com a configuracao inicial)
listaXCorpo = [91, 121 ,151]
listaYCorpo = [271, 271, 271]
# Configuracao inicial do corpo
cabecaFunc(cabecaX, cabecaY)
corpoFunc(91, 271)
corpoFunc(121, 271)
corpoFunc(151, 271)
matriz_do_tabuleiro[(271-1)//30][(91-1)//30] = -1
matriz_do_tabuleiro[(271-1)//30][(121-1)//30] = -1
matriz_do_tabuleiro[(271-1)//30][(151-1)//30] = -1
# Cria a primeira maca e garante que nao esta na cobra
macaY = (randint(0,19)*30)+1
macaX = (randint(0,19)*30)+1
while((macaX in listaXCorpo) and (macaY in listaYCorpo)):
macaY = (randint(0,19)*30)+1
macaX = (randint(0,19)*30)+1
macaFunc(macaX, macaY)
matriz_do_tabuleiro[(macaY-1)//30][(macaX-1)//30] = 1
# Var para verificar se a cobra deve crescer ou nao
crescer = False
pygame.time.wait(1000)
# Game Loop
running = True
while running:
# Setando a Lista de entrada
listaEntrada[0] = rodada
listaEntrada[1] = pontos
#listaEntrada[2] = matriz_do_tabuleiro
#listaEntrada[2] = (macaX-1)/30
#listaEntrada[3] = (macaY-1)/30
#listaEntrada[4] = (cabecaX-1)/30
#listaEntrada[5] = (cabecaY-1)/30
# Get dos eventos
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
# Se alguma seta for apertada
if event.type == pygame.KEYDOWN:
rodada += 1
if not fim_de_jogo and rodada > 1:
# Nota: nao muda de direcao caso ja esteja indo para a desejada
if (event.key == pygame.K_LEFT) and (cabecaXChange == 0):
imagemCabeca = pygame.image.load("images/cabeça_esquerda.png")
cabecaXChange = -30
cabecaYChange = 0
if (event.key == pygame.K_RIGHT) and (cabecaXChange == 0):
imagemCabeca = pygame.image.load("images/cabeça_direita.png")
cabecaXChange = 30
cabecaYChange = 0
if (event.key == pygame.K_DOWN) and (cabecaYChange == 0):
imagemCabeca = pygame.image.load("images/cabeça_baixo.png")
cabecaXChange = 0
cabecaYChange = 30
if (event.key == pygame.K_UP) and (cabecaYChange == 0):
imagemCabeca = pygame.image.load("images/cabeça_cima.png")
cabecaXChange = 0
cabecaYChange = -30
if rodada>0:
# Se a maca for pega, add 1 ponto e cria outra
# Atuliza a posicao da da maca na matriz
if (cabecaX == macaX and cabecaY == macaY):
matriz_do_tabuleiro[(macaY-1)//30][(macaX-1)//30] = 0
pontos += 1
macaY = (randint(0,19)*30)+1
macaX = (randint(0,19)*30)+1
matriz_do_tabuleiro[(macaY-1)//30][(macaX-1)//30] = 1
# Garante que a maca nao apareca em cima da cobra
while((macaX in listaXCorpo) and (macaY in listaYCorpo)):
macaY = (randint(0,19)*30)+1
macaX = (randint(0,19)*30)+1
macaFunc(macaX, macaY)
# Guarda o valor para ela crescer
crescer = True
# Coloca o corpo logo onde a cabeca sai e
# grava na lista
listaXCorpo.append(cabecaX)
listaYCorpo.append(cabecaY)
matriz_do_tabuleiro[(cabecaY-1)//30][(cabecaX-1)//30] = -1
corpoFunc(cabecaX, cabecaY)
cabecaX += cabecaXChange
cabecaY += cabecaYChange
# Condicao de cobra bater na borda
if (cabecaX < 0) or (cabecaX > 600) or (cabecaY > 600) or (cabecaY < 0):
# Plot do placar
placarFunc(210,270)
cabecaXChange = 0
cabecaYChange = 0
fim_de_jogo = True
# Condicao de cobra bater nela mesma
for i in range(len(listaXCorpo)):
if(cabecaX == listaXCorpo[i]):
if (cabecaY == listaYCorpo[i]):
# Plot do placar
placarFunc(210,270)
cabecaXChange = 0
cabecaYChange = 0
fim_de_jogo = True
# Cobre a ponta da cauda com quadrado verde
# Caso crescer == True faz a cobra crescer 1 espaco
if not crescer:
matriz_do_tabuleiro[(listaYCorpo[0]-1)//30][(listaXCorpo[0]-1)//30] = 0
quadradoFundoFunc(listaXCorpo.pop(0), listaYCorpo.pop(0))
crescer = False
# Coloca a cabeca no espaco seguinte
cabecaFunc(cabecaX, cabecaY)
# Atualiza a tela e gera delay
pygame.display.update()
pygame.time.wait(150)
| [
"random.randint",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.init",
"pygame.time.wait",
"pygame.display.update",
"pygame.font.Font",
"pygame.image.load",
"pygame.display.set_caption"
] | [((1193, 1206), 'pygame.init', 'pygame.init', ([], {}), '()\n', (1204, 1206), False, 'import pygame\n'), ((1254, 1289), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(600, 600)'], {}), '((600, 600))\n', (1277, 1289), False, 'import pygame\n'), ((1315, 1371), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Jogo da Cobrenha de ThurMP"""'], {}), "('Jogo da Cobrenha de ThurMP')\n", (1341, 1371), False, 'import pygame\n'), ((1384, 1426), 'pygame.image.load', 'pygame.image.load', (['"""images/snake icon.png"""'], {}), "('images/snake icon.png')\n", (1401, 1426), False, 'import pygame\n'), ((1512, 1552), 'pygame.font.Font', 'pygame.font.Font', (['"""freesansbold.ttf"""', '(32)'], {}), "('freesansbold.ttf', 32)\n", (1528, 1552), False, 'import pygame\n'), ((1599, 1655), 'pygame.image.load', 'pygame.image.load', (['"""images/fundo_quadriculado_verde.png"""'], {}), "('images/fundo_quadriculado_verde.png')\n", (1616, 1655), False, 'import pygame\n'), ((1732, 1769), 'pygame.image.load', 'pygame.image.load', (['"""images/corpo.png"""'], {}), "('images/corpo.png')\n", (1749, 1769), False, 'import pygame\n'), ((1789, 1835), 'pygame.image.load', 'pygame.image.load', (['"""images/cabeça_direita.png"""'], {}), "('images/cabeça_direita.png')\n", (1806, 1835), False, 'import pygame\n'), ((1862, 1911), 'pygame.image.load', 'pygame.image.load', (['"""images/quadrado_do_fundo.png"""'], {}), "('images/quadrado_do_fundo.png')\n", (1879, 1911), False, 'import pygame\n'), ((1929, 1966), 'pygame.image.load', 'pygame.image.load', (['"""images/maca1.png"""'], {}), "('images/maca1.png')\n", (1946, 1966), False, 'import pygame\n'), ((3103, 3125), 'pygame.time.wait', 'pygame.time.wait', (['(1000)'], {}), '(1000)\n', (3119, 3125), False, 'import pygame\n'), ((3562, 3580), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (3578, 3580), False, 'import pygame\n'), ((7272, 7295), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (7293, 7295), False, 'import pygame\n'), ((7304, 7325), 'pygame.time.wait', 'pygame.time.wait', (['(150)'], {}), '(150)\n', (7320, 7325), False, 'import pygame\n'), ((2748, 2762), 'random.randint', 'randint', (['(0)', '(19)'], {}), '(0, 19)\n', (2755, 2762), False, 'from random import randint\n'), ((2781, 2795), 'random.randint', 'randint', (['(0)', '(19)'], {}), '(0, 19)\n', (2788, 2795), False, 'from random import randint\n'), ((2880, 2894), 'random.randint', 'randint', (['(0)', '(19)'], {}), '(0, 19)\n', (2887, 2894), False, 'from random import randint\n'), ((2917, 2931), 'random.randint', 'randint', (['(0)', '(19)'], {}), '(0, 19)\n', (2924, 2931), False, 'from random import randint\n'), ((4026, 4073), 'pygame.image.load', 'pygame.image.load', (['"""images/cabeça_esquerda.png"""'], {}), "('images/cabeça_esquerda.png')\n", (4043, 4073), False, 'import pygame\n'), ((4279, 4325), 'pygame.image.load', 'pygame.image.load', (['"""images/cabeça_direita.png"""'], {}), "('images/cabeça_direita.png')\n", (4296, 4325), False, 'import pygame\n'), ((4529, 4573), 'pygame.image.load', 'pygame.image.load', (['"""images/cabeça_baixo.png"""'], {}), "('images/cabeça_baixo.png')\n", (4546, 4573), False, 'import pygame\n'), ((4775, 4818), 'pygame.image.load', 'pygame.image.load', (['"""images/cabeça_cima.png"""'], {}), "('images/cabeça_cima.png')\n", (4792, 4818), False, 'import pygame\n'), ((5218, 5232), 'random.randint', 'randint', (['(0)', '(19)'], {}), '(0, 19)\n', (5225, 5232), False, 'from random import randint\n'), ((5263, 5277), 'random.randint', 'randint', (['(0)', '(19)'], {}), '(0, 19)\n', (5270, 5277), False, 'from random import randint\n'), ((5523, 5537), 'random.randint', 'randint', (['(0)', '(19)'], {}), '(0, 19)\n', (5530, 5537), False, 'from random import randint\n'), ((5572, 5586), 'random.randint', 'randint', (['(0)', '(19)'], {}), '(0, 19)\n', (5579, 5586), False, 'from random import randint\n')] |
from typing import List, Tuple
#from flare.forms.bones.base import ReadFromClientErrorSeverity
from flare.icons import SvgIcon
from flare.i18n import translate
from flare import html5
def collectBoneErrors(errorList, currentKey,boneStructure):
'''
severity:
NotSet = 0
InvalidatesOther = 1
Empty = 2
Invalid = 3
'''
boneErrors = []
for error in errorList or []:
if error["fieldPath"] and error["fieldPath"][0] == currentKey:
isError = False
if (error["severity"] == 0 or error["severity"] == 2) and boneStructure["required"]:
isError = True
elif error["severity"] ==3:
isError = True
# ToDO Field dependency!
if isError:
thisError = error.copy()
thisError["fieldPath"] = error["fieldPath"][1:]
boneErrors.append(thisError)
return boneErrors
class ToolTipError(html5.Div):
"""
Small utility class for providing tooltips
"""
def __init__(self, shortText="", longText="", *args, **kwargs):
super( ToolTipError, self ).__init__( *args, **kwargs )
self["class"] = "vi-tooltip msg msg--error is-active is-open"
self.sinkEvent("onClick")
self.prependChild( SvgIcon( "icon-arrow-right", title = shortText ) )
#language=HTMl
self.fromHTML("""
<div class="msg-content" [name]="tooltipMsg">
<h2 class="msg-headline" [name]="tooltipHeadline"></h2>
<div class="msg-descr" [name]="tooltipDescr"></div>
</div>
""")
self.tooltipHeadline.element.innerHTML = translate("vi.tooltip.error")
self.tooltipDescr.element.innerHTML = longText.replace( "\n", "<br />" )
def onClick(self, event):
self.toggleClass("is-open")
def _setDisabled(self, disabled):
return
def _getDisabled(self):
return False
#Not used
def buildBoneErrors(errorList):
boneErrors = {}
for error in errorList:
thisError = error.copy()
thisError[ "fieldPath" ] = error[ "fieldPath" ][ 1: ]
if error[ "fieldPath" ] and error[ "fieldPath" ][ 0 ] not in boneErrors:
boneErrors.update({error[ "fieldPath" ][ 1 ]:[thisError]})
else:
boneErrors[error[ "fieldPath" ][ 1 ]].append(thisError)
return boneErrors
def checkErrors(bone) -> Tuple[bool, List[str]]:
'''
first return value is a shortcut to test if bone is valid or not
second returns a list of fields which are invalid through this bone
'''
errors = bone["errors"]
#no errors for this bone
if not errors:
return False, list()
invalidatedFields = list()
isInvalid = True
for error in errors:
if (
(error["severity"] == ReadFromClientErrorSeverity.Empty and bone["required"]) or
(error["severity"] == ReadFromClientErrorSeverity.InvalidatesOther)
):
if error["invalidatedFields"]:
invalidatedFields.extend(error["invalidatedFields"])
# We found only warnings
if not invalidatedFields:
return False, list()
return isInvalid, invalidatedFields
| [
"flare.i18n.translate",
"flare.icons.SvgIcon"
] | [((1439, 1468), 'flare.i18n.translate', 'translate', (['"""vi.tooltip.error"""'], {}), "('vi.tooltip.error')\n", (1448, 1468), False, 'from flare.i18n import translate\n'), ((1124, 1168), 'flare.icons.SvgIcon', 'SvgIcon', (['"""icon-arrow-right"""'], {'title': 'shortText'}), "('icon-arrow-right', title=shortText)\n", (1131, 1168), False, 'from flare.icons import SvgIcon\n')] |
import cv2
import numpy as numpy
import tensorflow as tflow
from utils import label_map_util
#from ConeDetection import *
from cone_img_processing2 import *
import os
# Set threshold for detection of cone for object detector
threshold_cone = 0.5
#Set path to check point and label map
#PATH_TO_CKPT = './frozen_orange_net.pb'
PATH_TO_CKPT = './frozen_weights/frozen_cone_graph_modified.pb'
#PATH_TO_CKPT = './frozen_weights/mobilenet_v2_0.75_224_frozen.pb'
PATH_TO_LABELS = './test_scripts/label_map.pbtxt'
#Define no, of classes
NUM_CLASSES = 1 #only one class, i.e. cone
## Load a (frozen) Tensorflow model into memory.
detection_graph = tflow.Graph()
with detection_graph.as_default():
od_graph_def = tflow.GraphDef()
with tflow.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tflow.import_graph_def(od_graph_def, name='')
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
gpu_options = tflow.GPUOptions(per_process_gpu_memory_fraction=0.4)
#config=tflow.ConfigProto(gpu_options=gpu_options)
def mainLoop():
# Try the following videos:
# 20180619_175221224 # shade to brightness
# 20180619_180755490 # towards sun
# 20180619_180515860 # away from sun
cap = cv2.VideoCapture('./test_videos/20180619_175221224.mp4')
#cap = cv2.VideoCapture('./test_videos/Formula Student Spain 2015 Endurance- DHBW Engineering with the eSleek15.mp4')
frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
fps = int(cap.get(cv2.CAP_PROP_FPS))
count = 0
img_number = 1000
with detection_graph.as_default():
#with tflow.Session(graph=detection_graph) as sess:
with tflow.Session(graph=detection_graph, config=tflow.ConfigProto(gpu_options=gpu_options)) as sess:
while count < frameCount:
ret, image_np = cap.read()
if ret == True:
count = count + 1
# image_np = cv2.resize(processFrame.image, (0,0), fx=0.5, fy=0.5)
#image_np = processFrame.image
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
scores = detection_graph.get_tensor_by_name('detection_scores:0')
classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Actual detection.
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
# Visualization of the results of a detection.
# Definition of boxes [ymin, xmin, ymax, xmax]
boxes = np.squeeze(boxes)
scores = np.squeeze(scores)
width = image_np.shape[1]
height = image_np.shape[0]
# width, height = cv2.GetSize(image_np)
output_img = image_np.copy()
for i in range(boxes.shape[0]):
if np.all(boxes[i] == 0) or scores[i] < threshold_cone:
continue
b = boxes[i]
box_width = np.abs(float(b[3])-float(b[1]))
box_height = np.abs(float(b[2])-float(b[0]))
x = int(b[1] * width)
y = int(b[0] * height)
h = int(box_height * height)
w = int(box_width * width)
candidate = image_np[y:y+h, x:x+w]
# if count % (2*fps) == 0:
# # Save the image (optional)
# cv2.imwrite('./test_videos/cone_samples/' + str(img_number) + '.jpg', candidate)
# img_number = img_number + 1
y = y + 1
z = 0
result = detectCone1(candidate)
# print(result)
if result == 0:
print("Yellow Cone")
cv2.rectangle(output_img, (int(b[1] * width),int(b[0] * height)), (x+w,y+h), (0, 255, 255), 7)
cv2.putText(output_img, 'yellow cone', (int(b[1] * width),int(b[0] * height)-30), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0), 2, cv2.LINE_AA)
cv2.putText(output_img, str(round(z,1))+" m", (int(b[1] * width),int(b[0] * height)-5), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0), 2, cv2.LINE_AA)
if result == 1:
print("Blue Cone")
cv2.rectangle(output_img, (int(b[1] * width),int(b[0] * height)), (x+w,y+h), (255, 0, 0), 7)
cv2.putText(output_img, 'blue cone', (int(b[1] * width),int(b[0] * height)-30), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0), 2, cv2.LINE_AA)
cv2.putText(output_img, str(round(z,1))+" m", (int(b[1] * width),int(b[0] * height)-5), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0), 2, cv2.LINE_AA)
if result == 2:
print("Orange Cone")
cv2.rectangle(output_img, (int(b[1] * width),int(b[0] * height)), (x+w,y+h), (0,165,255), 7)
cv2.putText(output_img, 'orange cone', (int(b[1] * width),int(b[0] * height)-30), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0), 2, cv2.LINE_AA)
cv2.putText(output_img, str(round(z,1))+" m", (int(b[1] * width),int(b[0] * height)-5), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0), 2, cv2.LINE_AA)
cv2.imshow('object detection', cv2.resize(output_img, (image_np.shape[1],image_np.shape[0])))
cv2.waitKey(1)
cv2.destroyAllWindows()
if __name__ == '__main__':
mainLoop()
| [
"utils.label_map_util.create_category_index",
"cv2.destroyAllWindows",
"cv2.waitKey",
"utils.label_map_util.load_labelmap",
"utils.label_map_util.convert_label_map_to_categories",
"cv2.VideoCapture",
"tensorflow.ConfigProto",
"tensorflow.gfile.GFile",
"tensorflow.Graph",
"tensorflow.import_graph_def",
"tensorflow.GraphDef",
"tensorflow.GPUOptions",
"cv2.resize"
] | [((655, 668), 'tensorflow.Graph', 'tflow.Graph', ([], {}), '()\n', (666, 668), True, 'import tensorflow as tflow\n'), ((939, 983), 'utils.label_map_util.load_labelmap', 'label_map_util.load_labelmap', (['PATH_TO_LABELS'], {}), '(PATH_TO_LABELS)\n', (967, 983), False, 'from utils import label_map_util\n'), ((997, 1111), 'utils.label_map_util.convert_label_map_to_categories', 'label_map_util.convert_label_map_to_categories', (['label_map'], {'max_num_classes': 'NUM_CLASSES', 'use_display_name': '(True)'}), '(label_map, max_num_classes=\n NUM_CLASSES, use_display_name=True)\n', (1043, 1111), False, 'from utils import label_map_util\n'), ((1124, 1172), 'utils.label_map_util.create_category_index', 'label_map_util.create_category_index', (['categories'], {}), '(categories)\n', (1160, 1172), False, 'from utils import label_map_util\n'), ((1188, 1241), 'tensorflow.GPUOptions', 'tflow.GPUOptions', ([], {'per_process_gpu_memory_fraction': '(0.4)'}), '(per_process_gpu_memory_fraction=0.4)\n', (1204, 1241), True, 'import tensorflow as tflow\n'), ((721, 737), 'tensorflow.GraphDef', 'tflow.GraphDef', ([], {}), '()\n', (735, 737), True, 'import tensorflow as tflow\n'), ((1489, 1545), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""./test_videos/20180619_175221224.mp4"""'], {}), "('./test_videos/20180619_175221224.mp4')\n", (1505, 1545), False, 'import cv2\n'), ((6792, 6815), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (6813, 6815), False, 'import cv2\n'), ((745, 782), 'tensorflow.gfile.GFile', 'tflow.gfile.GFile', (['PATH_TO_CKPT', '"""rb"""'], {}), "(PATH_TO_CKPT, 'rb')\n", (762, 782), True, 'import tensorflow as tflow\n'), ((880, 925), 'tensorflow.import_graph_def', 'tflow.import_graph_def', (['od_graph_def'], {'name': '""""""'}), "(od_graph_def, name='')\n", (902, 925), True, 'import tensorflow as tflow\n'), ((1958, 2000), 'tensorflow.ConfigProto', 'tflow.ConfigProto', ([], {'gpu_options': 'gpu_options'}), '(gpu_options=gpu_options)\n', (1975, 2000), True, 'import tensorflow as tflow\n'), ((6752, 6766), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (6763, 6766), False, 'import cv2\n'), ((6669, 6731), 'cv2.resize', 'cv2.resize', (['output_img', '(image_np.shape[1], image_np.shape[0])'], {}), '(output_img, (image_np.shape[1], image_np.shape[0]))\n', (6679, 6731), False, 'import cv2\n')] |
from rest_framework.test import APITestCase
from django.shortcuts import reverse
from rest_framework import status
from core.models import ProductsTbl, PurchaseTransactionTbl
# Create your tests here.
class TestProductViews(APITestCase):
def setUp(self):
self.product_url = reverse('products-list')
self.product_data = {
'product_id': 10215,
'name': '<NAME>',
'quantity': 105,
'unit_price': 100.00
}
def test_cannot_create_products(self):
res = self.client.post(self.product_url)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_product(self):
res = self.client.post(self.product_url, self.product_data)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
self.assertEqual(res.data['product_id'],
self.product_data['product_id'])
self.assertEqual(res.data['name'], self.product_data['name'])
self.assertEqual(res.data['quantity'], self.product_data['quantity'])
class TestPurchaseViews(APITestCase):
def setUp(self):
self.purchase_url = reverse('purchases-list')
self.purchase_data = {
'product_id': 10215,
'purchase_id': '8d7qdouiabnsdodAY9DQJp09',
'purchased_quantity': 90,
}
self.product = ProductsTbl.objects.create(
product_id=10216,
name='<NAME>',
quantity=100,
unit_price=100.00
)
self.purchase = PurchaseTransactionTbl.objects.create(
product=self.product,
purchase_id='d6asd65asd654as5d4',
purchased_quantity=75
)
self.purchase_detail_url = reverse(
'purchases-detail', kwargs={'pk': self.purchase.pk})
def test_cannot_create_purchase(self):
res = self.client.post(self.purchase_url)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_cannot_create_purchase_wrong_data(self):
res = self.client.post(self.purchase_url, self.purchase_data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_purchase_with_data(self):
product = ProductsTbl.objects.create(
product_id=10215,
name='<NAME>',
quantity=105,
unit_price=100.00
)
qty = product.quantity
res = self.client.post(self.purchase_url, self.purchase_data)
new_qty = ProductsTbl.objects.get(
product_id=res.data['product_id']).quantity
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
self.assertEqual(res.data['product_id'],
self.purchase_data['product_id'])
self.assertEqual(res.data['purchase_id'],
self.purchase_data['purchase_id'])
self.assertEqual(res.data['purchased_quantity'],
self.purchase_data['purchased_quantity'])
self.assertEqual(
new_qty, (qty - self.purchase_data['purchased_quantity']))
def test_cannot_update_purchase(self):
res = self.client.put(self.purchase_detail_url)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_update_purchase(self):
data = {'purchased_quantity': 50}
qty = self.product.quantity
old_qty = self.purchase.purchased_quantity
new_qty = data['purchased_quantity']
qty = qty - (new_qty - old_qty)
res = self.client.put(self.purchase_detail_url, data)
check_qty = ProductsTbl.objects.get(id=self.product.pk).quantity
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data['purchased_quantity'],
data['purchased_quantity'])
self.assertEqual(qty, check_qty)
def test_cannot_delete_purchase(self):
res = self.client.delete(
reverse('purchases-detail', kwargs={'pk': 100}))
self.assertEqual(res.status_code, status.HTTP_404_NOT_FOUND)
def test_delete_purchase(self):
qty = self.product.quantity
purchase_quantity = self.purchase.purchased_quantity
res = self.client.delete(self.purchase_detail_url)
new_qty = ProductsTbl.objects.get(id=self.product.id).quantity
self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(new_qty, (qty + purchase_quantity))
class TestRefillView(APITestCase):
def setUp(self):
self.product = ProductsTbl.objects.create(
product_id=10001,
name='<NAME>',
quantity=100,
unit_price=2500
)
self.url = reverse('refill-list')
def test_cannot_refill(self):
res = self.client.post(self.url)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_cannot_refill_wrong_data(self):
data = {
'product_id':1000,
'refill_count':100,
}
res = self.client.post(self.url, data)
self.assertEqual(res.status_code, status.HTTP_404_NOT_FOUND)
def test_refill(self):
data ={
'product_id':self.product.product_id,
'refill_count':1000
}
quantity = self.product.quantity
new_qty = quantity+data['refill_count']
res = self.client.post(self.url,data)
check_qty = ProductsTbl.objects.get(id=self.product.pk).quantity
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(check_qty,new_qty)
| [
"core.models.PurchaseTransactionTbl.objects.create",
"django.shortcuts.reverse",
"core.models.ProductsTbl.objects.create",
"core.models.ProductsTbl.objects.get"
] | [((288, 312), 'django.shortcuts.reverse', 'reverse', (['"""products-list"""'], {}), "('products-list')\n", (295, 312), False, 'from django.shortcuts import reverse\n'), ((1157, 1182), 'django.shortcuts.reverse', 'reverse', (['"""purchases-list"""'], {}), "('purchases-list')\n", (1164, 1182), False, 'from django.shortcuts import reverse\n'), ((1373, 1468), 'core.models.ProductsTbl.objects.create', 'ProductsTbl.objects.create', ([], {'product_id': '(10216)', 'name': '"""<NAME>"""', 'quantity': '(100)', 'unit_price': '(100.0)'}), "(product_id=10216, name='<NAME>', quantity=100,\n unit_price=100.0)\n", (1399, 1468), False, 'from core.models import ProductsTbl, PurchaseTransactionTbl\n'), ((1548, 1669), 'core.models.PurchaseTransactionTbl.objects.create', 'PurchaseTransactionTbl.objects.create', ([], {'product': 'self.product', 'purchase_id': '"""d6asd65asd654as5d4"""', 'purchased_quantity': '(75)'}), "(product=self.product, purchase_id=\n 'd6asd65asd654as5d4', purchased_quantity=75)\n", (1585, 1669), False, 'from core.models import ProductsTbl, PurchaseTransactionTbl\n'), ((1746, 1806), 'django.shortcuts.reverse', 'reverse', (['"""purchases-detail"""'], {'kwargs': "{'pk': self.purchase.pk}"}), "('purchases-detail', kwargs={'pk': self.purchase.pk})\n", (1753, 1806), False, 'from django.shortcuts import reverse\n'), ((2246, 2341), 'core.models.ProductsTbl.objects.create', 'ProductsTbl.objects.create', ([], {'product_id': '(10215)', 'name': '"""<NAME>"""', 'quantity': '(105)', 'unit_price': '(100.0)'}), "(product_id=10215, name='<NAME>', quantity=105,\n unit_price=100.0)\n", (2272, 2341), False, 'from core.models import ProductsTbl, PurchaseTransactionTbl\n'), ((4562, 4656), 'core.models.ProductsTbl.objects.create', 'ProductsTbl.objects.create', ([], {'product_id': '(10001)', 'name': '"""<NAME>"""', 'quantity': '(100)', 'unit_price': '(2500)'}), "(product_id=10001, name='<NAME>', quantity=100,\n unit_price=2500)\n", (4588, 4656), False, 'from core.models import ProductsTbl, PurchaseTransactionTbl\n'), ((4714, 4736), 'django.shortcuts.reverse', 'reverse', (['"""refill-list"""'], {}), "('refill-list')\n", (4721, 4736), False, 'from django.shortcuts import reverse\n'), ((2516, 2574), 'core.models.ProductsTbl.objects.get', 'ProductsTbl.objects.get', ([], {'product_id': "res.data['product_id']"}), "(product_id=res.data['product_id'])\n", (2539, 2574), False, 'from core.models import ProductsTbl, PurchaseTransactionTbl\n'), ((3610, 3653), 'core.models.ProductsTbl.objects.get', 'ProductsTbl.objects.get', ([], {'id': 'self.product.pk'}), '(id=self.product.pk)\n', (3633, 3653), False, 'from core.models import ProductsTbl, PurchaseTransactionTbl\n'), ((3966, 4013), 'django.shortcuts.reverse', 'reverse', (['"""purchases-detail"""'], {'kwargs': "{'pk': 100}"}), "('purchases-detail', kwargs={'pk': 100})\n", (3973, 4013), False, 'from django.shortcuts import reverse\n'), ((4296, 4339), 'core.models.ProductsTbl.objects.get', 'ProductsTbl.objects.get', ([], {'id': 'self.product.id'}), '(id=self.product.id)\n', (4319, 4339), False, 'from core.models import ProductsTbl, PurchaseTransactionTbl\n'), ((5411, 5454), 'core.models.ProductsTbl.objects.get', 'ProductsTbl.objects.get', ([], {'id': 'self.product.pk'}), '(id=self.product.pk)\n', (5434, 5454), False, 'from core.models import ProductsTbl, PurchaseTransactionTbl\n')] |
import cv2
from PIL import Image
import numpy as np
import constants
import os
import math
import matplotlib.pyplot as plt
import time
def hammingDistance(v1, v2):
t = 0
for i in range(len(v1)):
if v1[i] != v2[i]:
t += 1
return t
# read thresholds from thresholds.txt and then store them into thresholds list
thresholds = []
with open('./thresholds.txt', 'r') as f:
threshold = f.readline()
while threshold:
threshold = threshold.rstrip("\n")
thresholds.append(float(threshold))
threshold = f.readline()
f.close()
# read barcode and image location from barcodes.txt file
imageLocations = []
barcodes = []
with open("barcodes.txt", 'r') as f:
line = f.readline()
while line:
line = line.rstrip("\n")
line = line.split(",")
imageLocation = line.pop()
barcode = []
for bit in line:
barcode.append(int(bit))
imageLocations.append(imageLocation)
barcodes.append(barcode)
line = f.readline()
f.close()
def create_barcode(imagePath):
barcode = []
opcv = cv2.imread(imagePath, 0) # read image file as cv2 image
# ret2, th2 = cv2.threshold(opcv, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) # apply threshold it just makes pixel values either black or white
img = Image.fromarray(opcv) # create image from thresholded 2d image array
barcode = []
degree = constants.MIN_DEGREE
while degree < constants.MAX_DEGREE: # loop through MIN_DEGREE to MAX_DEGREE by STEP_DEGREE
currentProjectionThreshold = int(degree / constants.STEP_DEGREE) # find the appropriate threshold index
rotated_image = img.rotate(degree) # rotate the image
image2d = np.array(rotated_image) # get 2d representation of the rotated image
for row in image2d: # loop through each row in thresholded image
row_sum = 0 # initialize row pixel counter
for pixel in row: # loop through each pixel in the row
pixel = pixel / 255 # since we have either 0 or 255 as a pixel value divide this number by 255 to get 0 or 1 which is there is pixel or there is not
row_sum+=pixel # sum of pixels across a single row
# thresholds the sum of the row to 1 or 0 based on calculated threshold
if row_sum >= thresholds[currentProjectionThreshold]:
barcode.append(1)
else:
barcode.append(0)
degree += constants.STEP_DEGREE
return barcode
class CalculateAccuracyHitRatio:
def __init__(self, barcodes, imageLocations):
self.barcodes = barcodes
self.imageLocations = imageLocations
def calculateAccuracy(self):
accuracy = lambda x : x / 100
successCount = 0
for currDigit in range(constants.NUMBER_OF_DIGITS): # loop through 0 to NUMBER_OF_DIGITS-1
directory = r'./MNIST_DS/{}'.format(currDigit) # digit folder path
for imageName in os.listdir(directory): # loop thorugh every file in the directory
print("Checking image {}".format(os.path.join(directory, imageName)))
searchBarcode = create_barcode(os.path.join(directory, imageName))
s, hd, resultImgLoc, resultImgBarcode = self.checkSuccess(searchBarcode, currDigit)
print("\tHamming Distance: {}\n\tResult Image: {}".format(hd, resultImgLoc))
# time.sleep(0.5/4)
if s:
successCount += 1
hitRatio = accuracy(successCount)
return hitRatio
def checkSuccess(self, searchBarcode, searchDigitGroup):
success = False # variable for holding the success information
minHMD = (constants.IMAGE_SIZE*constants.NUM_PROJECTIONS)+1 # Minimum Hamming Distance. It is (maxiumum hamming distance + 1) by default
minBarcode = None # barcode that corresponds to the minimum hamming distance
imageLoc = None # result image location
for i, barcode in enumerate(self.barcodes): # loop through every barcode in the barcodes list
currentHMD = hammingDistance( barcode, searchBarcode) # check each bit in both barcodes and calculate how many of these not same
if currentHMD == 0: # hamming distance 0 means the barcodes are identical which means they are the same image
continue # skip
elif currentHMD < minHMD: # if the current calculated hamming distance is less than the minimum hamming distance
minHMD = currentHMD # then set minimum hamming distance to current calculated hamming distance
minBarcode = barcode # set the current barcode as
imageLoc = self.imageLocations[i]
resultDigitGroup = imageLoc.split("_", 1)[0]
if int(resultDigitGroup) == int(searchDigitGroup):
success = True
return success, minHMD, imageLoc, minBarcode
class SearchSimilar:
def __init__(self):
self.digitSelectMenu()
def findSimilar(self, inputBarcode):
minHMD = (constants.IMAGE_SIZE*constants.NUM_PROJECTIONS)+1
print(minHMD)
minBarcode = None
imageLoc = None
for i, barcode in enumerate(barcodes):
print(imageLocations[i])
currentHMD = hammingDistance( barcode, inputBarcode)
print(currentHMD)
if currentHMD == 0:
continue
elif currentHMD < minHMD:
minHMD = currentHMD
minBarcode = barcode
imageLoc = imageLocations[i]
return minHMD, minBarcode, imageLoc
def digitSelectMenu(self):
digitFolder = int(input("enter a digit (0 - 9): "))
while digitFolder >= 0 and digitFolder <= 9:
directory = r'.\MNIST_DS\{}'.format(digitFolder)
for c, imageName in enumerate(os.listdir(directory)):
print(c , " - ", imageName)
selectImage = int(input("select image from above list: "))
selectedImagePath = os.path.join(directory, os.listdir(directory)[selectImage])
print(selectedImagePath)
selectedImageBarcode = create_barcode(selectedImagePath)
minHMD = (constants.IMAGE_SIZE*constants.NUM_PROJECTIONS)+1
print(minHMD)
minBarcode = None
imageLoc = None
for i, barcode in enumerate(barcodes):
print(imageLocations[i])
currentHMD = hammingDistance( barcode,selectedImageBarcode)
print(currentHMD)
if currentHMD == 0:
continue
elif currentHMD < minHMD:
minHMD = currentHMD
minBarcode = barcode
imageLoc = imageLocations[i]
print("Result:")
print("\tHD: {}".format(minHMD))
print("\tImage Location: {}".format(imageLoc))
print("\tBarcode: {}".format(minBarcode))
fig = plt.figure(figsize=(10, 7))
fig.suptitle("Hamming Distance: {}".format(minHMD))
rows, columns = 2, 2
selectedImage = cv2.imread(selectedImagePath)
resultImageRelativePath = imageLoc.split("_", 1)
resultImagePath = os.path.join(r".\MNIST_DS", r"{}\{}".format(resultImageRelativePath[0], resultImageRelativePath[1]))
resultImage = cv2.imread(resultImagePath)
from create_barcode_image import BarcodeImageGenerator as big
big.generate_barcode_image(selectedImageBarcode, r".\temp\searchImage.png")
big.generate_barcode_image(minBarcode, r".\temp\resultImage.png")
searchBarcodeImage = cv2.imread(r".\temp\searchImage.png")
resultBarcodeImage = cv2.imread(r".\temp\resultImage.png")
fig.add_subplot(rows, columns, 1)
plt.imshow(selectedImage)
plt.axis("off")
plt.title("Search Image")
fig.add_subplot(rows, columns, 2)
plt.imshow(resultImage)
plt.axis("off")
plt.title("Result Image")
fig.add_subplot(rows, columns, 3)
plt.imshow(searchBarcodeImage)
plt.axis("off")
plt.title("Search Barcode")
fig.add_subplot(rows, columns, 4)
plt.imshow(resultBarcodeImage)
plt.axis("off")
plt.title("Result Barcode")
plt.show()
digitFolder = int(input("enter a digit (0 - 9): "))
def showAllResults(self):
fig = plt.figure(figsize=(16,100), dpi=100)
rows, cols = constants.NUMBER_OF_DIGITS*constants.NUMBER_IMAGES, 2
for currDigit in range(constants.NUMBER_OF_DIGITS): # loop through 0 to NUMBER_OF_DIGITS-1
directory = r'./MNIST_DS/{}'.format(currDigit) # digit folder path
for i, imageName in zip((i for i in range(1, 20, 2)), os.listdir(directory)): # loop thorugh every file in the directory
selectedImagePath = os.path.join(directory, imageName)
print("Checking image {}".format(os.path.join(directory, imageName)))
searchBarcode = create_barcode(os.path.join(directory, imageName))
hmd, resultBarcode, resultImgLoc = self.findSimilar(searchBarcode)
selectedImage = cv2.imread(selectedImagePath)
resultImageRelativePath = resultImgLoc.split("_", 1)
resultImagePath = os.path.join(r".\MNIST_DS", r"{}\{}".format(resultImageRelativePath[0], resultImageRelativePath[1]))
resultImage = cv2.imread(resultImagePath)
sii = currDigit*20+i
fig.add_subplot(rows, cols, sii)
plt.imshow(selectedImage)
plt.axis("off")
plt.title(selectedImagePath, fontsize=9, y=0.90)
fig.add_subplot(rows, cols, sii+1)
plt.imshow(resultImage)
plt.axis("off")
plt.title(resultImagePath, fontsize=9, y=0.90)
return fig
import matplotlib
# Make sure that we are using QT5
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
from PyQt5 import QtWidgets
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
class ScrollableWindow(QtWidgets.QMainWindow):
def __init__(self, fig):
self.qapp = QtWidgets.QApplication([])
QtWidgets.QMainWindow.__init__(self)
self.widget = QtWidgets.QWidget()
self.setCentralWidget(self.widget)
self.widget.setLayout(QtWidgets.QVBoxLayout())
self.widget.layout().setContentsMargins(0,0,0,0)
self.widget.layout().setSpacing(0)
self.fig = fig
self.canvas = FigureCanvas(self.fig)
self.canvas.draw()
self.scroll = QtWidgets.QScrollArea(self.widget)
self.scroll.setWidget(self.canvas)
self.nav = NavigationToolbar(self.canvas, self.widget)
self.widget.layout().addWidget(self.nav)
self.widget.layout().addWidget(self.scroll)
self.show()
exit(self.qapp.exec_())
if __name__ == "__main__":
print("Search Menu")
print("Calculate Accuracy Hit Ratio")
print("Show All Results at Once")
input("Yes I have read the above notes. Press Enter to continue...")
print("\n\n\nEnter a number between 0 and 9 to search image")
print("Enter a number smaller than 0 or greater than 9 to exit the search menu")
print("Once you exit Search Menu you will get Calculate Accuracy Hit Ratio ")
input("Yes I have read the above notes. Press Enter to continue...")
si = SearchSimilar() # search menu
print("\n\n\nCalculating accuracy hit ratio...")
cahr = CalculateAccuracyHitRatio(barcodes, imageLocations) # accuracy calculator
print("Accuracy is {}".format(cahr.calculateAccuracy())) # calculate and display the accuracy
input("Yes I have read the above notes. Press Enter to DISPLAY ALL THE RESULTS at Once...")
print("\n\n\nSearching all the images in the dataset and finding results...")
print("Once you get the window maximize the window and scrolldown to see the results")
input("Yes I have read the above notes. Press Enter to continue...")
fig = si.showAllResults()
a = ScrollableWindow(fig) | [
"matplotlib.pyplot.title",
"PyQt5.QtWidgets.QMainWindow.__init__",
"PyQt5.QtWidgets.QVBoxLayout",
"matplotlib.pyplot.figure",
"matplotlib.backends.backend_qt5agg.NavigationToolbar2QT",
"PyQt5.QtWidgets.QApplication",
"os.path.join",
"PyQt5.QtWidgets.QWidget",
"matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show",
"create_barcode_image.BarcodeImageGenerator.generate_barcode_image",
"matplotlib.use",
"os.listdir",
"matplotlib.pyplot.axis",
"PyQt5.QtWidgets.QScrollArea",
"cv2.imread",
"numpy.array",
"PIL.Image.fromarray"
] | [((10253, 10277), 'matplotlib.use', 'matplotlib.use', (['"""Qt5Agg"""'], {}), "('Qt5Agg')\n", (10267, 10277), False, 'import matplotlib\n'), ((1118, 1142), 'cv2.imread', 'cv2.imread', (['imagePath', '(0)'], {}), '(imagePath, 0)\n', (1128, 1142), False, 'import cv2\n'), ((1343, 1364), 'PIL.Image.fromarray', 'Image.fromarray', (['opcv'], {}), '(opcv)\n', (1358, 1364), False, 'from PIL import Image\n'), ((1754, 1777), 'numpy.array', 'np.array', (['rotated_image'], {}), '(rotated_image)\n', (1762, 1777), True, 'import numpy as np\n'), ((8683, 8721), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 100)', 'dpi': '(100)'}), '(figsize=(16, 100), dpi=100)\n', (8693, 8721), True, 'import matplotlib.pyplot as plt\n'), ((10606, 10632), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['[]'], {}), '([])\n', (10628, 10632), False, 'from PyQt5 import QtWidgets\n'), ((10642, 10678), 'PyQt5.QtWidgets.QMainWindow.__init__', 'QtWidgets.QMainWindow.__init__', (['self'], {}), '(self)\n', (10672, 10678), False, 'from PyQt5 import QtWidgets\n'), ((10701, 10720), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (10718, 10720), False, 'from PyQt5 import QtWidgets\n'), ((10965, 10987), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg', 'FigureCanvas', (['self.fig'], {}), '(self.fig)\n', (10977, 10987), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n'), ((11037, 11071), 'PyQt5.QtWidgets.QScrollArea', 'QtWidgets.QScrollArea', (['self.widget'], {}), '(self.widget)\n', (11058, 11071), False, 'from PyQt5 import QtWidgets\n'), ((11135, 11178), 'matplotlib.backends.backend_qt5agg.NavigationToolbar2QT', 'NavigationToolbar', (['self.canvas', 'self.widget'], {}), '(self.canvas, self.widget)\n', (11152, 11178), True, 'from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\n'), ((3046, 3067), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (3056, 3067), False, 'import os\n'), ((7107, 7134), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (7117, 7134), True, 'import matplotlib.pyplot as plt\n'), ((7261, 7290), 'cv2.imread', 'cv2.imread', (['selectedImagePath'], {}), '(selectedImagePath)\n', (7271, 7290), False, 'import cv2\n'), ((7509, 7536), 'cv2.imread', 'cv2.imread', (['resultImagePath'], {}), '(resultImagePath)\n', (7519, 7536), False, 'import cv2\n'), ((7625, 7701), 'create_barcode_image.BarcodeImageGenerator.generate_barcode_image', 'big.generate_barcode_image', (['selectedImageBarcode', '""".\\\\temp\\\\searchImage.png"""'], {}), "(selectedImageBarcode, '.\\\\temp\\\\searchImage.png')\n", (7651, 7701), True, 'from create_barcode_image import BarcodeImageGenerator as big\n'), ((7713, 7779), 'create_barcode_image.BarcodeImageGenerator.generate_barcode_image', 'big.generate_barcode_image', (['minBarcode', '""".\\\\temp\\\\resultImage.png"""'], {}), "(minBarcode, '.\\\\temp\\\\resultImage.png')\n", (7739, 7779), True, 'from create_barcode_image import BarcodeImageGenerator as big\n'), ((7813, 7851), 'cv2.imread', 'cv2.imread', (['""".\\\\temp\\\\searchImage.png"""'], {}), "('.\\\\temp\\\\searchImage.png')\n", (7823, 7851), False, 'import cv2\n'), ((7884, 7922), 'cv2.imread', 'cv2.imread', (['""".\\\\temp\\\\resultImage.png"""'], {}), "('.\\\\temp\\\\resultImage.png')\n", (7894, 7922), False, 'import cv2\n'), ((7982, 8007), 'matplotlib.pyplot.imshow', 'plt.imshow', (['selectedImage'], {}), '(selectedImage)\n', (7992, 8007), True, 'import matplotlib.pyplot as plt\n'), ((8020, 8035), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (8028, 8035), True, 'import matplotlib.pyplot as plt\n'), ((8048, 8073), 'matplotlib.pyplot.title', 'plt.title', (['"""Search Image"""'], {}), "('Search Image')\n", (8057, 8073), True, 'import matplotlib.pyplot as plt\n'), ((8134, 8157), 'matplotlib.pyplot.imshow', 'plt.imshow', (['resultImage'], {}), '(resultImage)\n', (8144, 8157), True, 'import matplotlib.pyplot as plt\n'), ((8170, 8185), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (8178, 8185), True, 'import matplotlib.pyplot as plt\n'), ((8198, 8223), 'matplotlib.pyplot.title', 'plt.title', (['"""Result Image"""'], {}), "('Result Image')\n", (8207, 8223), True, 'import matplotlib.pyplot as plt\n'), ((8284, 8314), 'matplotlib.pyplot.imshow', 'plt.imshow', (['searchBarcodeImage'], {}), '(searchBarcodeImage)\n', (8294, 8314), True, 'import matplotlib.pyplot as plt\n'), ((8327, 8342), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (8335, 8342), True, 'import matplotlib.pyplot as plt\n'), ((8355, 8382), 'matplotlib.pyplot.title', 'plt.title', (['"""Search Barcode"""'], {}), "('Search Barcode')\n", (8364, 8382), True, 'import matplotlib.pyplot as plt\n'), ((8443, 8473), 'matplotlib.pyplot.imshow', 'plt.imshow', (['resultBarcodeImage'], {}), '(resultBarcodeImage)\n', (8453, 8473), True, 'import matplotlib.pyplot as plt\n'), ((8486, 8501), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (8494, 8501), True, 'import matplotlib.pyplot as plt\n'), ((8514, 8541), 'matplotlib.pyplot.title', 'plt.title', (['"""Result Barcode"""'], {}), "('Result Barcode')\n", (8523, 8541), True, 'import matplotlib.pyplot as plt\n'), ((8555, 8565), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8563, 8565), True, 'import matplotlib.pyplot as plt\n'), ((10794, 10817), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', ([], {}), '()\n', (10815, 10817), False, 'from PyQt5 import QtWidgets\n'), ((5949, 5970), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (5959, 5970), False, 'import os\n'), ((9042, 9063), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (9052, 9063), False, 'import os\n'), ((9145, 9179), 'os.path.join', 'os.path.join', (['directory', 'imageName'], {}), '(directory, imageName)\n', (9157, 9179), False, 'import os\n'), ((9464, 9493), 'cv2.imread', 'cv2.imread', (['selectedImagePath'], {}), '(selectedImagePath)\n', (9474, 9493), False, 'import cv2\n'), ((9728, 9755), 'cv2.imread', 'cv2.imread', (['resultImagePath'], {}), '(resultImagePath)\n', (9738, 9755), False, 'import cv2\n'), ((9859, 9884), 'matplotlib.pyplot.imshow', 'plt.imshow', (['selectedImage'], {}), '(selectedImage)\n', (9869, 9884), True, 'import matplotlib.pyplot as plt\n'), ((9901, 9916), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (9909, 9916), True, 'import matplotlib.pyplot as plt\n'), ((9933, 9980), 'matplotlib.pyplot.title', 'plt.title', (['selectedImagePath'], {'fontsize': '(9)', 'y': '(0.9)'}), '(selectedImagePath, fontsize=9, y=0.9)\n', (9942, 9980), True, 'import matplotlib.pyplot as plt\n'), ((10049, 10072), 'matplotlib.pyplot.imshow', 'plt.imshow', (['resultImage'], {}), '(resultImage)\n', (10059, 10072), True, 'import matplotlib.pyplot as plt\n'), ((10089, 10104), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (10097, 10104), True, 'import matplotlib.pyplot as plt\n'), ((10121, 10166), 'matplotlib.pyplot.title', 'plt.title', (['resultImagePath'], {'fontsize': '(9)', 'y': '(0.9)'}), '(resultImagePath, fontsize=9, y=0.9)\n', (10130, 10166), True, 'import matplotlib.pyplot as plt\n'), ((3245, 3279), 'os.path.join', 'os.path.join', (['directory', 'imageName'], {}), '(directory, imageName)\n', (3257, 3279), False, 'import os\n'), ((6146, 6167), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (6156, 6167), False, 'import os\n'), ((9313, 9347), 'os.path.join', 'os.path.join', (['directory', 'imageName'], {}), '(directory, imageName)\n', (9325, 9347), False, 'import os\n'), ((3161, 3195), 'os.path.join', 'os.path.join', (['directory', 'imageName'], {}), '(directory, imageName)\n', (3173, 3195), False, 'import os\n'), ((9229, 9263), 'os.path.join', 'os.path.join', (['directory', 'imageName'], {}), '(directory, imageName)\n', (9241, 9263), False, 'import os\n')] |
# Written by <NAME>
import os
def file_feature_extraction(dir, filename):
'''
Navigates through tokenized words,
reconstructs original form by following few rules
Then, extracts features for that token and adds them to the corresponding list
All features of the dataset is written in the same txt file.
Args:
dir: directory of the given file
filename: name of the input file
Returns:
'''
punc = '\',.\"?!-;:()' # % left out for now
quote_count = 0
x = [] # features
y = [] # labels
tokens = []
with open(dir+filename, 'r', encoding='utf8') as f:
for line in f.readlines(): # each token is kept on a different line
tokens.append(line.rstrip('\n'))
for token, i in zip(tokens, range(len(tokens))):
found = False
if token in punc: # for only punctuation tokens
if (token == '\'' or token == '\"') and quote_count % 2 == 0:
quote_count += 1
punc_type = punc.index(token)
try:
original_form = token + tokens[i + 1] #add try statements
label = 1 # punctuation is another token
pos = 0
found = True
except IndexError:
break
# send for feature extraction
elif (token == '\'' or token == '\"') and quote_count % 2 == 1:
quote_count += 1
punc_type = punc.index(token)
original_form = tokens[i - 1] + token
label = 1 # punctuation is another token
pos = len(original_form) - 1
found = True
# send for feature extraction
elif token == '.' or token == ',' or token == '?' or token == '!' or token == ';' or token == ':':
punc_type = punc.index(token)
original_form = tokens[i - 1] + token
label = 1
pos = len(original_form) - 1
found = True
#send for feature extraction
elif token == '(':
punc_type = punc.index(token)
try:
original_form = token + tokens[i + 1]
label = 1
pos = 0
found = True
except IndexError:
break
elif token == ')':
punc_type = punc.index(token)
original_form = tokens[i - 1] + token
label = 1
pos = 0
found = True
else: # for not only punctuation tokens
if token == '...':
punc_type = punc.index(token[0])
original_form = tokens[i - 1] + token
label = 1
pos = len(original_form) - 1
found = True
else:
for ch, j in zip(token, range(len(token))): # iterate through string to detect punctuations
punc_type = punc.find(ch)
if punc_type != -1: # punctuation is found
pos = j
original_form = token
label = 0
found = True
break
if found:
only_punc = True
for j in original_form:
if j not in punc:
case = int(j.isupper())
only_punc = False
break
if not only_punc:
x.append([punc_type, pos, len(original_form), case])
y.append(label)
return x, y
def token_feature_extraction(token):
'''
Args:
token: token whose features are going to be extracted
Returns:
features for the token
used during inference
'''
x = None
punc = '\',.\"?!-;:()' # % left out for now
for ch, j in zip(token, range(len(token))): # iterate through string to detect punctuations
punc_type = punc.find(ch)
if punc_type != -1: # punctuation is found
pos = j
original_form = token
break
only_punc = True
for j in original_form:
if j not in punc:
case = int(j.isupper())
only_punc = False
break
if not only_punc:
x = [punc_type, pos, len(original_form), case]
return x
if __name__ == '__main__':
x = []
y = []
dir = 'D:/Mansur/Boun/CMPE 561/assignments/assignment 1/42bin_haber/news/'
categories = os.listdir(dir)
for i in categories:
category_dir = dir + i + '/'
category_files = os.listdir(category_dir)
for j in category_files:
if '_tokenized' in j: # take only tokenized files
x_temp, y_temp = file_feature_extraction(category_dir, j)
x.extend(x_temp)
y.extend(y_temp)
with open('../features/tokenization_features_and_labels.txt', 'r+', encoding='utf8') as f:
for feature, i in zip(x, range(len(x))):
for j in feature:
f.write('%d\t' % j)
f.write('%d\n' % y[i])
| [
"os.listdir"
] | [((4735, 4750), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (4745, 4750), False, 'import os\n'), ((4841, 4865), 'os.listdir', 'os.listdir', (['category_dir'], {}), '(category_dir)\n', (4851, 4865), False, 'import os\n')] |
""" Clinical snapshot encoders for use with CANTRIP Model.
CANTRIPModel expects a clinical snapshot encoder function which takes as input the CANTRIPModel and adds
clinical snapshot encoding ops to the graph returning the final clinical snapshot encoding as
[batch x max_seq_len x embedding_size]
"""
import tensorflow.compat.v1 as tf
import layers
import rnn_cell
def rnn_encoder(num_hidden, cell_fn=rnn_cell.RANCell):
"""
Creates an RNN encoder with the given number of hidden layers. If
:param num_hidden: number of hidden (memory) units use; num_hidden is iterable, a multi-layer
rnn cell will be creating using each number of hidden units
:param cell_fn: rnn_cell constructor to use
:return: rnn_encoder function
"""
def _rnn_encoder(model):
"""
:type model: modeling.BERTModel
"""
with tf.variable_scope('rnn_encoder'):
# Embed clinical observations
embedded_observations = layers.embedding_layer(model.observations, model.vocabulary_size,
model.embedding_size,
model.vocab_dropout,
training=model.training)
# Reshape to (batch * seq_len) x doc_len x embedding
flattened_embedded_obs = tf.reshape(embedded_observations,
[model.batch_size * model.max_seq_len,
model.max_snapshot_size,
model.embedding_size],
name='flat_emb_obs')
flattened_snapshot_sizes = tf.reshape(model.snapshot_sizes, [model.batch_size * model.max_seq_len],
name='flat_snapshot_sizes')
# Apply RNN to all documents in all batches
flattened_snapshot_encodings = layers.rnn_layer(cell_fn=cell_fn,
num_hidden=num_hidden,
inputs=flattened_embedded_obs,
lengths=flattened_snapshot_sizes,
return_interpretable_weights=False)
# Reshape back to (batch x seq_len x encoding_size)
return tf.reshape(flattened_snapshot_encodings,
[model.batch_size, model.max_seq_len, flattened_snapshot_encodings.shape[-1]],
name='rnn_snapshot_encoding')
return _rnn_encoder
def cnn_encoder(windows=None, kernels=1000, dropout=0.):
"""
Creates a CNN encoder with the given number of windows, kernels, and dropout
:param windows: number of consecutive observations to consider; defaults to [3, 4, 5]
:param kernels: number of convolutional kernels; defaults to 1,000
:param dropout: dropout probability; defaults to 0.0 (no dropout)
:return: cnn_encoder function
"""
if windows is None:
windows = [3, 4, 5]
def _cnn_encoder(model):
"""
:type model: BERTModel
"""
with tf.variable_scope('cnn_encoder'):
# Embed observations
embedded_observations = layers.embedding_layer(model.observations, model.vocabulary_size,
model.embedding_size,
model.vocab_dropout,
training=model.training)
# Reshape to (batch * seq_len) x snapshot_size x embedding
flattened_embedded_obs = tf.reshape(embedded_observations,
[model.batch_size * model.max_seq_len,
model.max_snapshot_size,
model.embedding_size])
# Apply parallel convolutional and pooling layers
outputs = []
for n in windows:
if dropout > 0:
flattened_embedded_obs = \
tf.keras.layers.Dropout(rate=model.dropout)(flattened_embedded_obs, training=model.training)
conv_layer = tf.keras.layers.Convolution1D(filters=kernels,
kernel_size=n,
activation=tf.nn.leaky_relu,
name="conv_%dgram" % n)(flattened_embedded_obs)
pool_layer = tf.keras.layers.MaxPooling1D(pool_size=1,
strides=model.max_snapshot_size - n + 1,
name="maxpool_%dgram" % n)(conv_layer)
outputs.append(pool_layer)
# Concatenate pooled outputs
output = tf.concat(outputs, axis=-1)
# Embed concat output with leaky ReLU
embeddings = tf.keras.layers.Dense(units=model.embedding_size, activation=tf.nn.relu)(output)
# Reshape back to [batch_size x max_seq_len x encoding_size]
return tf.reshape(embeddings, [model.batch_size, model.max_seq_len, model.embedding_size])
return _cnn_encoder
def get_bag_vectors(model):
"""
Represents snapshots as a bag of clinical observations. Specifically, returns a V-length
binary vector such that the v-th index is 1 iff the v-th observation occurs in the given snapshot
:param model: CANTRIP model
:type model: modeling.CANTRIPModel
:return: clinical snapshot encoding
"""
# 1. Evaluate which entries in model.observations are non-zero
mask = tf.not_equal(model.observations, 0)
where = tf.where(mask)
# 2. Get the vocabulary indices for non-zero observations
vocab_indices = tf.boolean_mask(model.observations, mask)
vocab_indices = tf.expand_dims(vocab_indices[:], axis=-1)
vocab_indices = tf.cast(vocab_indices, dtype=tf.int64)
# 3. Get batch and sequence indices for non-zero observations
tensor_indices = where[:, :-1]
# Concat batch, sequence, and vocabulary indices
indices = tf.concat([tensor_indices, vocab_indices], axis=-1)
# Our sparse tensor will be 1 for observed observations, 0, otherwise
ones = tf.ones_like(indices[:, 0], dtype=tf.float32)
# The dense shape will be the same as model.observations, but using the entire vocabulary as the final dimension
dense_shape = model.observations.get_shape().as_list()
dense_shape[2] = model.vocabulary_size
# Store as a sparse tensor because they're neat
st = tf.SparseTensor(indices=indices, values=ones, dense_shape=dense_shape)
return tf.sparse.reorder(st)
def dense_encoder(model):
"""
Represents documents as an embedded bag of clinical observations. Specifically, returns an embedded of the V-length
binary vector encoding all clinical observations included in a snapshot
:param model: CANTRIP model
:type model: modeling.CANTRIPModel
:return: clinical snapshot encoding
"""
with tf.variable_scope('dense_encoder'):
# Use the CPU cause things are about to weird (i.e., too big to fit in GPU memory)
with tf.device("/cpu:0"):
# Add bag-of-observation vector transformations to the model
bags = get_bag_vectors(model)
# Embed bag-of-observation vectors
embedded_observations = layers.create_embeddings(model.vocabulary_size, model.embedding_size,
model.vocab_dropout,
training=model.training)
# Reshape them so we use the same projection weights for every bag
flat_emb_bags = tf.sparse.reshape(bags, [model.batch_size * model.max_seq_len,
model.vocabulary_size],
name='flat_emb_obs')
# Dropout for fun
# if model.dropout > 0:
# flat_emb_bags = tf.layers.dropout(flat_emb_bags, rate=model.dropout, training=model.training)
# Sparse to dense projection
flat_doc_embeddings = tf.sparse_tensor_dense_matmul(flat_emb_bags, embedded_observations,
name='flat_doc_embeddings')
# More dropout for fun
flat_doc_embeddings = tf.keras.layers.Dropout(rate=model.dropout)(flat_doc_embeddings,
training=model.training)
# Reshape back to [batch_size x max_seq_len x encoding_size]
return tf.reshape(flat_doc_embeddings, [model.batch_size, model.max_seq_len, model.embedding_size],
name='doc_embeddings')
def bag_encoder(model):
"""
Represents snapshots as a bag of clinical observations. Specifically, returns a V-length
binary vector such that the v-th index is 1 iff the v-th observation occurs in the given snapshot
:param model: CANTRIP model
:type model: modeling.CANTRIPModel
:return: clinical snapshot encoding
"""
with tf.variable_scope('bow_encoder'):
# Use the CPU cause everything will be vocabulary-length
with tf.device("/cpu:0"):
return tf.sparse.to_dense(get_bag_vectors(model))
class SparseDenseLayer(tf.keras.layers.Dense):
def __init__(self,
units,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(SparseDenseLayer, self).__init__(units=units,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs)
def call(self, inputs):
if not isinstance(inputs, tf.SparseTensor):
return super(SparseDenseLayer, self).call(inputs)
outputs = tf.sparse.sparse_dense_matmul(inputs, self.kernel)
outputs = tf.debugging.check_numerics(outputs, "SparseDenseLayer had NaN product")
if self.use_bias:
outputs = tf.nn.bias_add(outputs, self.bias)
outputs = tf.debugging.check_numerics(outputs, "SparseDenseLayer had NaN bias sum")
if self.activation is not None:
outputs = self.activation(outputs)
outputs = tf.debugging.check_numerics(outputs, "SparseDenseLayer had NaN activation")
outputs = tf.debugging.check_numerics(outputs, "SparseDenseLayer output had NaNs")
return outputs
def dan_encoder(obs_hidden_units, avg_hidden_units, activation='gelu'):
"""Represents snapshots as a modified element-wise averages of embedded clinical observations.
:param obs_hidden_units: number of hidden units in dense layers between observation embeddings and average;
if iterable multiple dense layers will be added using the respective hidden units
:param avg_hidden_units: number of hidden units in dense layers between average embeddings and snapshot encoding;
if iterable multiple dense layers will be added using the respective hidden units
:param activation: type of activation function to use between layers
:return: clinical snapshot encoding
"""
activation_fn = None
if activation == 'gelu':
activation_fn = layers.gelu
elif activation == 'relu':
activation_fn = tf.nn.relu
elif activation == 'tanh':
activation_fn = tf.nn.tanh
elif activation == 'sigmoid':
activation_fn = tf.nn.sigmoid
else:
raise KeyError('Unsupported activation function: %s' % activation)
def _dan_encoder(model):
"""
:param model:
:type model: modeling.CANTRIPModel
:return:
"""
with tf.variable_scope('dan_encoder'):
embedded_observations = layers.embedding_layer(model.observations, model.vocabulary_size,
model.embedding_size, model.vocab_dropout,
training=model.training)
# Reshape to (batch * seq_len * doc_len) x embedding
flattened_embedded_observations = tf.reshape(
embedded_observations,
[model.batch_size * model.max_seq_len * model.max_snapshot_size,
model.embedding_size]
)
# Add dense observation layers
obs_layer = flattened_embedded_observations
for num_hidden in obs_hidden_units:
obs_layer = tf.keras.layers.Dense(units=num_hidden, activation=activation_fn)(obs_layer)
# Reshape final output by grouping observations in the same snapshot together
obs_layer = tf.reshape(obs_layer, [model.batch_size * model.max_seq_len,
model.max_snapshot_size,
obs_hidden_units[-1]])
# Divide by active number of observations rather than the padded snapshot size; requires reshaping to
# (batch x seq_len) x 1 so we can divide by this
flattened_snapshot_sizes = tf.reshape(model.snapshot_sizes, [model.batch_size * model.max_seq_len, 1])
mask = tf.sequence_mask(model.snapshot_sizes, maxlen=model.max_snapshot_size, dtype=tf.float32)
mask = tf.reshape(mask, [model.batch_size * model.max_seq_len, model.max_snapshot_size, 1])
# Compute dynamic-size element-wise average
avg_layer = tf.reduce_sum(obs_layer * mask, axis=1)
avg_layer = avg_layer / tf.cast(tf.maximum(flattened_snapshot_sizes, 1), dtype=tf.float32)
# More fun dense layers
for num_hidden in avg_hidden_units:
avg_layer = tf.keras.layers.Dense(num_hidden, activation_fn)(avg_layer)
# Final output of the model
output = tf.keras.layers.Dense(model.embedding_size, activation_fn)(avg_layer)
# Reshape to [batch_size x seq_len x encoding_size]
return tf.reshape(output, [model.batch_size, model.max_seq_len, model.embedding_size])
return _dan_encoder
def rmlp_encoder(activation='gelu', num_layers=10, num_hidden=2048):
activation_fn = None
if activation == 'gelu':
activation_fn = layers.gelu
elif activation == 'relu':
activation_fn = tf.nn.relu
elif activation == 'tanh':
activation_fn = tf.nn.tanh
elif activation == 'sigmoid':
activation_fn = tf.nn.sigmoid
else:
raise KeyError('Unsupported activation function: %s' % activation)
def residual_unit(inputs, i, units):
with tf.variable_scope("residual_unit%d" % i):
x = tf.keras.layers.Dense(units=units, activation=activation_fn)(inputs)
x = tf.keras.layers.BatchNormalization()(x)
x = activation_fn(x)
return x + inputs
def _rmlp_encoder(model):
# Convert batch x seq_len x doc_len tensor of obs IDs to batch x seq_len x vocab_size bag-of-observation vectors
with tf.variable_scope("RMLP"):
bags = get_bag_vectors(model)
flat_bags = tf.sparse.reshape(bags, [model.batch_size * model.max_seq_len, model.vocabulary_size])
x = SparseDenseLayer(units=num_hidden, activation=None)(flat_bags)
# Convert to Dense to debug NaNs
# flat_bags = tf.sparse.to_dense(flat_bags)
# flat_bags = tf.debugging.assert_all_finite(flat_bags, 'flat bags had nans')
# x = tf.keras.layers.Dense(units=num_hidden, activation=None)(flat_bags)
for i in range(num_layers):
x = residual_unit(x, i, num_hidden)
x = tf.keras.layers.Dense(units=model.embedding_size, activation=activation_fn)(x)
x = tf.debugging.assert_all_finite(x, 'dense had nans')
x = tf.reshape(x, [model.batch_size, model.max_seq_len, model.embedding_size])
x = tf.debugging.assert_all_finite(x, 'reshape had nans')
return x
return _rmlp_encoder
def vhn_encoder(activation='gelu', noise_weight=0.75, num_layers=10, depth=6, num_hidden=2048):
activation_fn = None
if activation == 'gelu':
activation_fn = layers.gelu
elif activation == 'relu':
activation_fn = tf.nn.relu
elif activation == 'tanh':
activation_fn = tf.nn.tanh
elif activation == 'sigmoid':
activation_fn = tf.nn.sigmoid
else:
raise KeyError('Unsupported activation function: %s' % activation)
def vhn_layer(inputs, units, residuals):
noise = tf.random.uniform(shape=inputs.shape, dtype=tf.float32) / noise_weight
out = tf.keras.layers.Dense(units=units, activation=activation_fn)(inputs + noise)
return tf.math.add_n([out, inputs] + residuals)
def _vhn_encoder(model):
# Convert batch x seq_len x doc_len tensor of obs IDs to batch x seq_len x vocab_size bag-of-observation vectors
bags = get_bag_vectors(model)
flat_bags = tf.sparse.reshape(bags, [model.batch_size * model.max_seq_len, model.vocabulary_size])
x = SparseDenseLayer(units=num_hidden, activation=None)(flat_bags)
residuals = []
for i in range(num_layers):
slice_ = min(i + 1, depth)
x = vhn_layer(x, units=num_hidden, residuals=residuals[-slice_:])
residuals.append(x)
x = tf.keras.layers.Dense(units=model.embedding_size, activation=activation_fn)(x)
x = tf.reshape(x, [model.batch_size, model.max_seq_len, model.embedding_size])
return x
return _vhn_encoder
| [
"tensorflow.compat.v1.sparse.reorder",
"tensorflow.compat.v1.keras.layers.Dropout",
"tensorflow.compat.v1.keras.layers.BatchNormalization",
"layers.rnn_layer",
"tensorflow.compat.v1.sparse_tensor_dense_matmul",
"layers.create_embeddings",
"layers.embedding_layer",
"tensorflow.compat.v1.debugging.assert_all_finite",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.keras.layers.Convolution1D",
"tensorflow.compat.v1.expand_dims",
"tensorflow.compat.v1.nn.bias_add",
"tensorflow.compat.v1.sparse.sparse_dense_matmul",
"tensorflow.compat.v1.reduce_sum",
"tensorflow.compat.v1.SparseTensor",
"tensorflow.compat.v1.debugging.check_numerics",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.where",
"tensorflow.compat.v1.sequence_mask",
"tensorflow.compat.v1.keras.layers.MaxPooling1D",
"tensorflow.compat.v1.boolean_mask",
"tensorflow.compat.v1.sparse.reshape",
"tensorflow.compat.v1.reshape",
"tensorflow.compat.v1.device",
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.ones_like",
"tensorflow.compat.v1.random.uniform",
"tensorflow.compat.v1.keras.layers.Dense",
"tensorflow.compat.v1.maximum",
"tensorflow.compat.v1.not_equal",
"tensorflow.compat.v1.math.add_n"
] | [((5944, 5979), 'tensorflow.compat.v1.not_equal', 'tf.not_equal', (['model.observations', '(0)'], {}), '(model.observations, 0)\n', (5956, 5979), True, 'import tensorflow.compat.v1 as tf\n'), ((5992, 6006), 'tensorflow.compat.v1.where', 'tf.where', (['mask'], {}), '(mask)\n', (6000, 6006), True, 'import tensorflow.compat.v1 as tf\n'), ((6090, 6131), 'tensorflow.compat.v1.boolean_mask', 'tf.boolean_mask', (['model.observations', 'mask'], {}), '(model.observations, mask)\n', (6105, 6131), True, 'import tensorflow.compat.v1 as tf\n'), ((6152, 6193), 'tensorflow.compat.v1.expand_dims', 'tf.expand_dims', (['vocab_indices[:]'], {'axis': '(-1)'}), '(vocab_indices[:], axis=-1)\n', (6166, 6193), True, 'import tensorflow.compat.v1 as tf\n'), ((6214, 6252), 'tensorflow.compat.v1.cast', 'tf.cast', (['vocab_indices'], {'dtype': 'tf.int64'}), '(vocab_indices, dtype=tf.int64)\n', (6221, 6252), True, 'import tensorflow.compat.v1 as tf\n'), ((6423, 6474), 'tensorflow.compat.v1.concat', 'tf.concat', (['[tensor_indices, vocab_indices]'], {'axis': '(-1)'}), '([tensor_indices, vocab_indices], axis=-1)\n', (6432, 6474), True, 'import tensorflow.compat.v1 as tf\n'), ((6561, 6606), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['indices[:, 0]'], {'dtype': 'tf.float32'}), '(indices[:, 0], dtype=tf.float32)\n', (6573, 6606), True, 'import tensorflow.compat.v1 as tf\n'), ((6889, 6959), 'tensorflow.compat.v1.SparseTensor', 'tf.SparseTensor', ([], {'indices': 'indices', 'values': 'ones', 'dense_shape': 'dense_shape'}), '(indices=indices, values=ones, dense_shape=dense_shape)\n', (6904, 6959), True, 'import tensorflow.compat.v1 as tf\n'), ((6971, 6992), 'tensorflow.compat.v1.sparse.reorder', 'tf.sparse.reorder', (['st'], {}), '(st)\n', (6988, 6992), True, 'import tensorflow.compat.v1 as tf\n'), ((9004, 9124), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['flat_doc_embeddings', '[model.batch_size, model.max_seq_len, model.embedding_size]'], {'name': '"""doc_embeddings"""'}), "(flat_doc_embeddings, [model.batch_size, model.max_seq_len, model\n .embedding_size], name='doc_embeddings')\n", (9014, 9124), True, 'import tensorflow.compat.v1 as tf\n'), ((7353, 7387), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['"""dense_encoder"""'], {}), "('dense_encoder')\n", (7370, 7387), True, 'import tensorflow.compat.v1 as tf\n'), ((9499, 9531), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['"""bow_encoder"""'], {}), "('bow_encoder')\n", (9516, 9531), True, 'import tensorflow.compat.v1 as tf\n'), ((11193, 11243), 'tensorflow.compat.v1.sparse.sparse_dense_matmul', 'tf.sparse.sparse_dense_matmul', (['inputs', 'self.kernel'], {}), '(inputs, self.kernel)\n', (11222, 11243), True, 'import tensorflow.compat.v1 as tf\n'), ((11262, 11334), 'tensorflow.compat.v1.debugging.check_numerics', 'tf.debugging.check_numerics', (['outputs', '"""SparseDenseLayer had NaN product"""'], {}), "(outputs, 'SparseDenseLayer had NaN product')\n", (11289, 11334), True, 'import tensorflow.compat.v1 as tf\n'), ((11720, 11792), 'tensorflow.compat.v1.debugging.check_numerics', 'tf.debugging.check_numerics', (['outputs', '"""SparseDenseLayer output had NaNs"""'], {}), "(outputs, 'SparseDenseLayer output had NaNs')\n", (11747, 11792), True, 'import tensorflow.compat.v1 as tf\n'), ((18091, 18131), 'tensorflow.compat.v1.math.add_n', 'tf.math.add_n', (['([out, inputs] + residuals)'], {}), '([out, inputs] + residuals)\n', (18104, 18131), True, 'import tensorflow.compat.v1 as tf\n'), ((18341, 18432), 'tensorflow.compat.v1.sparse.reshape', 'tf.sparse.reshape', (['bags', '[model.batch_size * model.max_seq_len, model.vocabulary_size]'], {}), '(bags, [model.batch_size * model.max_seq_len, model.\n vocabulary_size])\n', (18358, 18432), True, 'import tensorflow.compat.v1 as tf\n'), ((18816, 18890), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['x', '[model.batch_size, model.max_seq_len, model.embedding_size]'], {}), '(x, [model.batch_size, model.max_seq_len, model.embedding_size])\n', (18826, 18890), True, 'import tensorflow.compat.v1 as tf\n'), ((864, 896), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['"""rnn_encoder"""'], {}), "('rnn_encoder')\n", (881, 896), True, 'import tensorflow.compat.v1 as tf\n'), ((976, 1114), 'layers.embedding_layer', 'layers.embedding_layer', (['model.observations', 'model.vocabulary_size', 'model.embedding_size', 'model.vocab_dropout'], {'training': 'model.training'}), '(model.observations, model.vocabulary_size, model.\n embedding_size, model.vocab_dropout, training=model.training)\n', (998, 1114), False, 'import layers\n'), ((1390, 1535), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['embedded_observations', '[model.batch_size * model.max_seq_len, model.max_snapshot_size, model.\n embedding_size]'], {'name': '"""flat_emb_obs"""'}), "(embedded_observations, [model.batch_size * model.max_seq_len,\n model.max_snapshot_size, model.embedding_size], name='flat_emb_obs')\n", (1400, 1535), True, 'import tensorflow.compat.v1 as tf\n'), ((1765, 1869), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['model.snapshot_sizes', '[model.batch_size * model.max_seq_len]'], {'name': '"""flat_snapshot_sizes"""'}), "(model.snapshot_sizes, [model.batch_size * model.max_seq_len],\n name='flat_snapshot_sizes')\n", (1775, 1869), True, 'import tensorflow.compat.v1 as tf\n'), ((2016, 2182), 'layers.rnn_layer', 'layers.rnn_layer', ([], {'cell_fn': 'cell_fn', 'num_hidden': 'num_hidden', 'inputs': 'flattened_embedded_obs', 'lengths': 'flattened_snapshot_sizes', 'return_interpretable_weights': '(False)'}), '(cell_fn=cell_fn, num_hidden=num_hidden, inputs=\n flattened_embedded_obs, lengths=flattened_snapshot_sizes,\n return_interpretable_weights=False)\n', (2032, 2182), False, 'import layers\n'), ((2498, 2657), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['flattened_snapshot_encodings', '[model.batch_size, model.max_seq_len, flattened_snapshot_encodings.shape[-1]]'], {'name': '"""rnn_snapshot_encoding"""'}), "(flattened_snapshot_encodings, [model.batch_size, model.\n max_seq_len, flattened_snapshot_encodings.shape[-1]], name=\n 'rnn_snapshot_encoding')\n", (2508, 2657), True, 'import tensorflow.compat.v1 as tf\n'), ((3305, 3337), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['"""cnn_encoder"""'], {}), "('cnn_encoder')\n", (3322, 3337), True, 'import tensorflow.compat.v1 as tf\n'), ((3408, 3546), 'layers.embedding_layer', 'layers.embedding_layer', (['model.observations', 'model.vocabulary_size', 'model.embedding_size', 'model.vocab_dropout'], {'training': 'model.training'}), '(model.observations, model.vocabulary_size, model.\n embedding_size, model.vocab_dropout, training=model.training)\n', (3430, 3546), False, 'import layers\n'), ((3828, 3952), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['embedded_observations', '[model.batch_size * model.max_seq_len, model.max_snapshot_size, model.\n embedding_size]'], {}), '(embedded_observations, [model.batch_size * model.max_seq_len,\n model.max_snapshot_size, model.embedding_size])\n', (3838, 3952), True, 'import tensorflow.compat.v1 as tf\n'), ((5127, 5154), 'tensorflow.compat.v1.concat', 'tf.concat', (['outputs'], {'axis': '(-1)'}), '(outputs, axis=-1)\n', (5136, 5154), True, 'import tensorflow.compat.v1 as tf\n'), ((5405, 5493), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['embeddings', '[model.batch_size, model.max_seq_len, model.embedding_size]'], {}), '(embeddings, [model.batch_size, model.max_seq_len, model.\n embedding_size])\n', (5415, 5493), True, 'import tensorflow.compat.v1 as tf\n'), ((7493, 7512), 'tensorflow.compat.v1.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (7502, 7512), True, 'import tensorflow.compat.v1 as tf\n'), ((7713, 7833), 'layers.create_embeddings', 'layers.create_embeddings', (['model.vocabulary_size', 'model.embedding_size', 'model.vocab_dropout'], {'training': 'model.training'}), '(model.vocabulary_size, model.embedding_size, model\n .vocab_dropout, training=model.training)\n', (7737, 7833), False, 'import layers\n'), ((8059, 8171), 'tensorflow.compat.v1.sparse.reshape', 'tf.sparse.reshape', (['bags', '[model.batch_size * model.max_seq_len, model.vocabulary_size]'], {'name': '"""flat_emb_obs"""'}), "(bags, [model.batch_size * model.max_seq_len, model.\n vocabulary_size], name='flat_emb_obs')\n", (8076, 8171), True, 'import tensorflow.compat.v1 as tf\n'), ((8520, 8620), 'tensorflow.compat.v1.sparse_tensor_dense_matmul', 'tf.sparse_tensor_dense_matmul', (['flat_emb_bags', 'embedded_observations'], {'name': '"""flat_doc_embeddings"""'}), "(flat_emb_bags, embedded_observations, name=\n 'flat_doc_embeddings')\n", (8549, 8620), True, 'import tensorflow.compat.v1 as tf\n'), ((9611, 9630), 'tensorflow.compat.v1.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (9620, 9630), True, 'import tensorflow.compat.v1 as tf\n'), ((11384, 11418), 'tensorflow.compat.v1.nn.bias_add', 'tf.nn.bias_add', (['outputs', 'self.bias'], {}), '(outputs, self.bias)\n', (11398, 11418), True, 'import tensorflow.compat.v1 as tf\n'), ((11441, 11514), 'tensorflow.compat.v1.debugging.check_numerics', 'tf.debugging.check_numerics', (['outputs', '"""SparseDenseLayer had NaN bias sum"""'], {}), "(outputs, 'SparseDenseLayer had NaN bias sum')\n", (11468, 11514), True, 'import tensorflow.compat.v1 as tf\n'), ((11625, 11700), 'tensorflow.compat.v1.debugging.check_numerics', 'tf.debugging.check_numerics', (['outputs', '"""SparseDenseLayer had NaN activation"""'], {}), "(outputs, 'SparseDenseLayer had NaN activation')\n", (11652, 11700), True, 'import tensorflow.compat.v1 as tf\n'), ((13050, 13082), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['"""dan_encoder"""'], {}), "('dan_encoder')\n", (13067, 13082), True, 'import tensorflow.compat.v1 as tf\n'), ((13120, 13258), 'layers.embedding_layer', 'layers.embedding_layer', (['model.observations', 'model.vocabulary_size', 'model.embedding_size', 'model.vocab_dropout'], {'training': 'model.training'}), '(model.observations, model.vocabulary_size, model.\n embedding_size, model.vocab_dropout, training=model.training)\n', (13142, 13258), False, 'import layers\n'), ((13484, 13609), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['embedded_observations', '[model.batch_size * model.max_seq_len * model.max_snapshot_size, model.\n embedding_size]'], {}), '(embedded_observations, [model.batch_size * model.max_seq_len *\n model.max_snapshot_size, model.embedding_size])\n', (13494, 13609), True, 'import tensorflow.compat.v1 as tf\n'), ((14036, 14149), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['obs_layer', '[model.batch_size * model.max_seq_len, model.max_snapshot_size,\n obs_hidden_units[-1]]'], {}), '(obs_layer, [model.batch_size * model.max_seq_len, model.\n max_snapshot_size, obs_hidden_units[-1]])\n', (14046, 14149), True, 'import tensorflow.compat.v1 as tf\n'), ((14454, 14529), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['model.snapshot_sizes', '[model.batch_size * model.max_seq_len, 1]'], {}), '(model.snapshot_sizes, [model.batch_size * model.max_seq_len, 1])\n', (14464, 14529), True, 'import tensorflow.compat.v1 as tf\n'), ((14550, 14642), 'tensorflow.compat.v1.sequence_mask', 'tf.sequence_mask', (['model.snapshot_sizes'], {'maxlen': 'model.max_snapshot_size', 'dtype': 'tf.float32'}), '(model.snapshot_sizes, maxlen=model.max_snapshot_size,\n dtype=tf.float32)\n', (14566, 14642), True, 'import tensorflow.compat.v1 as tf\n'), ((14658, 14747), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['mask', '[model.batch_size * model.max_seq_len, model.max_snapshot_size, 1]'], {}), '(mask, [model.batch_size * model.max_seq_len, model.\n max_snapshot_size, 1])\n', (14668, 14747), True, 'import tensorflow.compat.v1 as tf\n'), ((14824, 14863), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['(obs_layer * mask)'], {'axis': '(1)'}), '(obs_layer * mask, axis=1)\n', (14837, 14863), True, 'import tensorflow.compat.v1 as tf\n'), ((15356, 15435), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['output', '[model.batch_size, model.max_seq_len, model.embedding_size]'], {}), '(output, [model.batch_size, model.max_seq_len, model.embedding_size])\n', (15366, 15435), True, 'import tensorflow.compat.v1 as tf\n'), ((15966, 16006), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (["('residual_unit%d' % i)"], {}), "('residual_unit%d' % i)\n", (15983, 16006), True, 'import tensorflow.compat.v1 as tf\n'), ((16377, 16402), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['"""RMLP"""'], {}), "('RMLP')\n", (16394, 16402), True, 'import tensorflow.compat.v1 as tf\n'), ((16470, 16561), 'tensorflow.compat.v1.sparse.reshape', 'tf.sparse.reshape', (['bags', '[model.batch_size * model.max_seq_len, model.vocabulary_size]'], {}), '(bags, [model.batch_size * model.max_seq_len, model.\n vocabulary_size])\n', (16487, 16561), True, 'import tensorflow.compat.v1 as tf\n'), ((17119, 17170), 'tensorflow.compat.v1.debugging.assert_all_finite', 'tf.debugging.assert_all_finite', (['x', '"""dense had nans"""'], {}), "(x, 'dense had nans')\n", (17149, 17170), True, 'import tensorflow.compat.v1 as tf\n'), ((17187, 17261), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['x', '[model.batch_size, model.max_seq_len, model.embedding_size]'], {}), '(x, [model.batch_size, model.max_seq_len, model.embedding_size])\n', (17197, 17261), True, 'import tensorflow.compat.v1 as tf\n'), ((17278, 17331), 'tensorflow.compat.v1.debugging.assert_all_finite', 'tf.debugging.assert_all_finite', (['x', '"""reshape had nans"""'], {}), "(x, 'reshape had nans')\n", (17308, 17331), True, 'import tensorflow.compat.v1 as tf\n'), ((17914, 17969), 'tensorflow.compat.v1.random.uniform', 'tf.random.uniform', ([], {'shape': 'inputs.shape', 'dtype': 'tf.float32'}), '(shape=inputs.shape, dtype=tf.float32)\n', (17931, 17969), True, 'import tensorflow.compat.v1 as tf\n'), ((17999, 18059), 'tensorflow.compat.v1.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'units', 'activation': 'activation_fn'}), '(units=units, activation=activation_fn)\n', (18020, 18059), True, 'import tensorflow.compat.v1 as tf\n'), ((18725, 18800), 'tensorflow.compat.v1.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'model.embedding_size', 'activation': 'activation_fn'}), '(units=model.embedding_size, activation=activation_fn)\n', (18746, 18800), True, 'import tensorflow.compat.v1 as tf\n'), ((5231, 5303), 'tensorflow.compat.v1.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'model.embedding_size', 'activation': 'tf.nn.relu'}), '(units=model.embedding_size, activation=tf.nn.relu)\n', (5252, 5303), True, 'import tensorflow.compat.v1 as tf\n'), ((8750, 8793), 'tensorflow.compat.v1.keras.layers.Dropout', 'tf.keras.layers.Dropout', ([], {'rate': 'model.dropout'}), '(rate=model.dropout)\n', (8773, 8793), True, 'import tensorflow.compat.v1 as tf\n'), ((15202, 15260), 'tensorflow.compat.v1.keras.layers.Dense', 'tf.keras.layers.Dense', (['model.embedding_size', 'activation_fn'], {}), '(model.embedding_size, activation_fn)\n', (15223, 15260), True, 'import tensorflow.compat.v1 as tf\n'), ((16024, 16084), 'tensorflow.compat.v1.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'units', 'activation': 'activation_fn'}), '(units=units, activation=activation_fn)\n', (16045, 16084), True, 'import tensorflow.compat.v1 as tf\n'), ((16109, 16145), 'tensorflow.compat.v1.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (16143, 16145), True, 'import tensorflow.compat.v1 as tf\n'), ((17024, 17099), 'tensorflow.compat.v1.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'model.embedding_size', 'activation': 'activation_fn'}), '(units=model.embedding_size, activation=activation_fn)\n', (17045, 17099), True, 'import tensorflow.compat.v1 as tf\n'), ((4438, 4557), 'tensorflow.compat.v1.keras.layers.Convolution1D', 'tf.keras.layers.Convolution1D', ([], {'filters': 'kernels', 'kernel_size': 'n', 'activation': 'tf.nn.leaky_relu', 'name': "('conv_%dgram' % n)"}), "(filters=kernels, kernel_size=n, activation=tf\n .nn.leaky_relu, name='conv_%dgram' % n)\n", (4467, 4557), True, 'import tensorflow.compat.v1 as tf\n'), ((4783, 4896), 'tensorflow.compat.v1.keras.layers.MaxPooling1D', 'tf.keras.layers.MaxPooling1D', ([], {'pool_size': '(1)', 'strides': '(model.max_snapshot_size - n + 1)', 'name': "('maxpool_%dgram' % n)"}), "(pool_size=1, strides=model.max_snapshot_size -\n n + 1, name='maxpool_%dgram' % n)\n", (4811, 4896), True, 'import tensorflow.compat.v1 as tf\n'), ((13844, 13909), 'tensorflow.compat.v1.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'num_hidden', 'activation': 'activation_fn'}), '(units=num_hidden, activation=activation_fn)\n', (13865, 13909), True, 'import tensorflow.compat.v1 as tf\n'), ((14908, 14947), 'tensorflow.compat.v1.maximum', 'tf.maximum', (['flattened_snapshot_sizes', '(1)'], {}), '(flattened_snapshot_sizes, 1)\n', (14918, 14947), True, 'import tensorflow.compat.v1 as tf\n'), ((15080, 15128), 'tensorflow.compat.v1.keras.layers.Dense', 'tf.keras.layers.Dense', (['num_hidden', 'activation_fn'], {}), '(num_hidden, activation_fn)\n', (15101, 15128), True, 'import tensorflow.compat.v1 as tf\n'), ((4316, 4359), 'tensorflow.compat.v1.keras.layers.Dropout', 'tf.keras.layers.Dropout', ([], {'rate': 'model.dropout'}), '(rate=model.dropout)\n', (4339, 4359), True, 'import tensorflow.compat.v1 as tf\n')] |
import gzip
import unittest
import subprocess
import os
class TestPreProcessing(unittest.TestCase):
def setUp(self):
# params for the test
self.curr_path = os.path.dirname(os.path.abspath(__file__))
self.output_path = self.curr_path + "/data"
self.file_one = self.output_path + "/all_reads_R1.fq"
self.file_two = self.output_path + "/all_reads_R2.fq"
self.database_path = self.curr_path
self.db_one_name = "human-GCA-phix-db"
self.db_two_name = "human-GRC-db"
self.db_three_name = "kraken2-human-db"
pass
def test_command(self):
"""Test command completes."""
# run command
res_cmnd = subprocess.run(["./test-command.sh",
self.file_one,
self.file_two,
self.database_path,
self.db_one_name,
self.db_two_name,
self.db_three_name,
self.output_path])
self.assertTrue(res_cmnd.returncode == 0)
def test_filter_results(self):
"""Test command results match expectation."""
# base truth
with open(self.output_path + '/host_read_ids.txt') as file:
exp_lines = [line.replace('\n','') for line in file.readlines()]
# check file size (should be much greater than 100 bytes)
out_size = os.path.getsize(self.output_path + '/test_res_R1.trimmed.fastq.gz')
self.assertTrue(out_size > 100)
# results
with gzip.open(self.output_path + '/test_res_R1.trimmed.fastq.gz','r') as fin:
res_lines = [line.decode("utf-8").replace('\n','') for line in fin if '@' in str(line)]
# check there are no host reads passing filter
rel_tol = len(set(res_lines) & set(exp_lines)) / len(set(exp_lines)) * 100
self.assertTrue(rel_tol < 0.1)
# finally remove files
os.remove(self.curr_path + "/fastp.html")
os.remove(self.curr_path + "/fastp.json")
os.remove(self.output_path + "/test_res_R1.trimmed.fastq.gz")
os.remove(self.output_path + "/test_res_R2.trimmed.fastq.gz")
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"subprocess.run",
"os.remove",
"os.path.abspath",
"gzip.open",
"os.path.getsize"
] | [((2276, 2291), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2289, 2291), False, 'import unittest\n'), ((691, 861), 'subprocess.run', 'subprocess.run', (["['./test-command.sh', self.file_one, self.file_two, self.database_path,\n self.db_one_name, self.db_two_name, self.db_three_name, self.output_path]"], {}), "(['./test-command.sh', self.file_one, self.file_two, self.\n database_path, self.db_one_name, self.db_two_name, self.db_three_name,\n self.output_path])\n", (705, 861), False, 'import subprocess\n'), ((1489, 1556), 'os.path.getsize', 'os.path.getsize', (["(self.output_path + '/test_res_R1.trimmed.fastq.gz')"], {}), "(self.output_path + '/test_res_R1.trimmed.fastq.gz')\n", (1504, 1556), False, 'import os\n'), ((2011, 2052), 'os.remove', 'os.remove', (["(self.curr_path + '/fastp.html')"], {}), "(self.curr_path + '/fastp.html')\n", (2020, 2052), False, 'import os\n'), ((2061, 2102), 'os.remove', 'os.remove', (["(self.curr_path + '/fastp.json')"], {}), "(self.curr_path + '/fastp.json')\n", (2070, 2102), False, 'import os\n'), ((2111, 2172), 'os.remove', 'os.remove', (["(self.output_path + '/test_res_R1.trimmed.fastq.gz')"], {}), "(self.output_path + '/test_res_R1.trimmed.fastq.gz')\n", (2120, 2172), False, 'import os\n'), ((2181, 2242), 'os.remove', 'os.remove', (["(self.output_path + '/test_res_R2.trimmed.fastq.gz')"], {}), "(self.output_path + '/test_res_R2.trimmed.fastq.gz')\n", (2190, 2242), False, 'import os\n'), ((186, 211), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (201, 211), False, 'import os\n'), ((1628, 1694), 'gzip.open', 'gzip.open', (["(self.output_path + '/test_res_R1.trimmed.fastq.gz')", '"""r"""'], {}), "(self.output_path + '/test_res_R1.trimmed.fastq.gz', 'r')\n", (1637, 1694), False, 'import gzip\n')] |
######################################################################
## Copyright (c) 2019 <NAME>, Norway
## ###################################################################
## Created : wulff at 2019-3-25
## ###################################################################
## The MIT License (MIT)
##
## Permission is hereby granted, free of charge, to any person obtaining a copy
## of this software and associated documentation files (the "Software"), to deal
## in the Software without restriction, including without limitation the rights
## to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
## copies of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in all
## copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
## SOFTWARE.
##
######################################################################
import os
import json
import sys
import collections
import datetime
import click
from collections import OrderedDict
import matplotlib.pyplot as plt
import pandas as pd
class rema:
def __init__(self,file):
with open(file,"r") as f:
jsonobj = json.load(f)
cat_fn = "categories.json"
categories = None
if(os.path.exists(cat_fn)):
with open(cat_fn,"r") as f:
categories = json.load(f)
self.categories = categories
self.obj = jsonobj
self.oformat = "str"
def printGroups(self):
groups = dict()
transactions = self.obj["TransactionsInfo"]["Transactions"]
for t in transactions:
for item in t["Receipt"]:
groups[item["ProductGroupDescription"]] = " "
self.printList(groups)
def printOrderByGroupOrCategory(self,maxcount=10,month = False,category=False,keyName=None,plot=False,quarter=False):
summary = dict()
if self.categories is None and category:
print("Could not find categories.json. Can't run this command")
transactions = self.obj["TransactionsInfo"]["Transactions"]
for t in transactions:
datestr= str(t["PurchaseDate"])
d = datetime.datetime.utcfromtimestamp(int(datestr[:-3]))
if(month):
header_key = str(d.year) + "-" + str(d.month)
elif(quarter):
header_key = str(d.year) + "-Q" + str(pd.Timestamp(d).quarter)
else:
header_key = str(d.year)
if(header_key not in summary):
summary[header_key] = dict()
for item in t["Receipt"]:
key = item["ProductGroupDescription"]
if(category and key in self.categories ):
key = self.categories[key]
# print(json.dumps(item,indent=4))
if(keyName and key == keyName):
key = item['ProductDescription']
elif(keyName):
continue
if(key in summary[header_key]):
summary[header_key][key] += item["Amount"]
else:
summary[header_key][key] = item["Amount"]
self.printTransactionSummary(summary,maxcount,plot)
def printTransactionSummary(self,summary,maxcount,plot):
data = OrderedDict()
for header_key in summary:
transactions = summary[header_key]
data[header_key] = list()
listofTuples = sorted(transactions.items() ,reverse = True, key=lambda x: x[1])
count = 0
for s in listofTuples:
if(count >= maxcount):
continue
else:
count += 1
data[header_key].append((s[1],s[0]))
if(plot):
self.plotDictWithTouple(data)
pass
else:
self.printDictWithTouple(data)
def printList(self,data):
"""Print a list of items"""
if(self.oformat == "json"):
print(json.dumps(data,indent=4))
else:
for el in data:
print(el)
def plotDictWithTouple(self,data):
"""Print ordered dictionary where each item is a (number,description) touple"""
pdata = dict()
#- Reorganize data
for key in data:
for el in data[key]:
val = el[0]
name = el[1]
if name not in pdata:
pdata[name] = dict()
pdata[name]['yval'] = list()
pdata[name]['xval'] = list()
pdata[name]['yval'].append(val)
pdata[name]['xval'].append(key)
#with plt.xkcd():
for key in pdata:
plt.plot(pdata[key]['xval'],pdata[key]['yval'],label=key)
plt.xlabel('Date [n]')
plt.ylabel("Kostnad [kr]")
plt.legend()
plt.xticks(rotation=90)
plt.savefig("plot.jpg")
#plt.xlim([datetime.date(2016, 1, 1), datetime.datetime.now()])
#plt.autoscale()
plt.show()
def printDictWithTouple(self,data):
"""Print ordered dictionary where each item is a (number,description) touple"""
if(self.oformat == "json"):
print(json.dumps(data,indent=4))
else:
for key in data:
print(str(key) + ":")
for el in data[key]:
print("\t%.1f\t%s" %(el[0],el[1]))
#----------------------------------------------------------
#- Command line interface
#----------------------------------------------------------
@click.group()
@click.argument('data', type=click.Path(exists=True))
@click.option('--json',is_flag=True,help="Set JSON as output format")
@click.pass_context
def cli(ctx,data,json):
ctx.ensure_object(dict)
#Load the file
r = rema(data)
if(json):
r.oformat = "json"
else:
r.oformat = "str"
ctx.obj['rema'] = r
@cli.command('list',help="Sum and list items")
@click.pass_context
@click.option('--maxcount',default=10,help="Number of items to list")
@click.option('--month',is_flag=True,help="Sort per month")
@click.option("--category",is_flag=True,help="Sort by categories.json file")
@click.option("--item",help="Specify a certain group or category")
@click.option("--plot",is_flag=True,help="Plot items")
@click.option('--quarter',is_flag=True,help="Sort per quarter")
def group(ctx,maxcount,month,category,item,plot,quarter):
ctx.obj['rema'].printOrderByGroupOrCategory(maxcount,month,category,item,plot,quarter)
@cli.command('listgroups',help="List all groups")
@click.pass_context
def listgroups(ctx):
ctx.obj['rema'].printGroups()
if __name__ == "__main__":
cli(obj = {}) | [
"json.load",
"matplotlib.pyplot.show",
"pandas.Timestamp",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"click.option",
"os.path.exists",
"json.dumps",
"matplotlib.pyplot.xticks",
"click.Path",
"collections.OrderedDict",
"matplotlib.pyplot.ylabel",
"click.group",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlabel"
] | [((6261, 6274), 'click.group', 'click.group', ([], {}), '()\n', (6272, 6274), False, 'import click\n'), ((6330, 6400), 'click.option', 'click.option', (['"""--json"""'], {'is_flag': '(True)', 'help': '"""Set JSON as output format"""'}), "('--json', is_flag=True, help='Set JSON as output format')\n", (6342, 6400), False, 'import click\n'), ((6689, 6759), 'click.option', 'click.option', (['"""--maxcount"""'], {'default': '(10)', 'help': '"""Number of items to list"""'}), "('--maxcount', default=10, help='Number of items to list')\n", (6701, 6759), False, 'import click\n'), ((6759, 6819), 'click.option', 'click.option', (['"""--month"""'], {'is_flag': '(True)', 'help': '"""Sort per month"""'}), "('--month', is_flag=True, help='Sort per month')\n", (6771, 6819), False, 'import click\n'), ((6819, 6896), 'click.option', 'click.option', (['"""--category"""'], {'is_flag': '(True)', 'help': '"""Sort by categories.json file"""'}), "('--category', is_flag=True, help='Sort by categories.json file')\n", (6831, 6896), False, 'import click\n'), ((6896, 6962), 'click.option', 'click.option', (['"""--item"""'], {'help': '"""Specify a certain group or category"""'}), "('--item', help='Specify a certain group or category')\n", (6908, 6962), False, 'import click\n'), ((6963, 7018), 'click.option', 'click.option', (['"""--plot"""'], {'is_flag': '(True)', 'help': '"""Plot items"""'}), "('--plot', is_flag=True, help='Plot items')\n", (6975, 7018), False, 'import click\n'), ((7018, 7082), 'click.option', 'click.option', (['"""--quarter"""'], {'is_flag': '(True)', 'help': '"""Sort per quarter"""'}), "('--quarter', is_flag=True, help='Sort per quarter')\n", (7030, 7082), False, 'import click\n'), ((1847, 1869), 'os.path.exists', 'os.path.exists', (['cat_fn'], {}), '(cat_fn)\n', (1861, 1869), False, 'import os\n'), ((3927, 3940), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3938, 3940), False, 'from collections import OrderedDict\n'), ((5449, 5471), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Date [n]"""'], {}), "('Date [n]')\n", (5459, 5471), True, 'import matplotlib.pyplot as plt\n'), ((5480, 5506), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Kostnad [kr]"""'], {}), "('Kostnad [kr]')\n", (5490, 5506), True, 'import matplotlib.pyplot as plt\n'), ((5515, 5527), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5525, 5527), True, 'import matplotlib.pyplot as plt\n'), ((5536, 5559), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (5546, 5559), True, 'import matplotlib.pyplot as plt\n'), ((5568, 5591), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plot.jpg"""'], {}), "('plot.jpg')\n", (5579, 5591), True, 'import matplotlib.pyplot as plt\n'), ((5715, 5725), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5723, 5725), True, 'import matplotlib.pyplot as plt\n'), ((6304, 6327), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (6314, 6327), False, 'import click\n'), ((1759, 1771), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1768, 1771), False, 'import json\n'), ((5383, 5442), 'matplotlib.pyplot.plot', 'plt.plot', (["pdata[key]['xval']", "pdata[key]['yval']"], {'label': 'key'}), "(pdata[key]['xval'], pdata[key]['yval'], label=key)\n", (5391, 5442), True, 'import matplotlib.pyplot as plt\n'), ((1941, 1953), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1950, 1953), False, 'import json\n'), ((4640, 4666), 'json.dumps', 'json.dumps', (['data'], {'indent': '(4)'}), '(data, indent=4)\n', (4650, 4666), False, 'import json\n'), ((5910, 5936), 'json.dumps', 'json.dumps', (['data'], {'indent': '(4)'}), '(data, indent=4)\n', (5920, 5936), False, 'import json\n'), ((3000, 3015), 'pandas.Timestamp', 'pd.Timestamp', (['d'], {}), '(d)\n', (3012, 3015), True, 'import pandas as pd\n')] |
# Generated by Django 2.0.7 on 2018-08-12 22:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('UserProfiles', '0005_auto_20180808_2150'),
]
operations = [
migrations.AlterField(
model_name='userprofilemodel',
name='UserProfileBio',
field=models.TextField(blank=True, max_length=300, null=True),
),
migrations.AlterField(
model_name='userprofilemodel',
name='UserProfileHeader',
field=models.ImageField(default='UserProfiles/Defaults/BlankWhite.png', upload_to='UserProfiles/'),
),
migrations.AlterField(
model_name='userprofilemodel',
name='UserProfileImage',
field=models.ImageField(default='UserProfiles/Defaults/Blank.png', upload_to='UserProfiles/'),
),
]
| [
"django.db.models.ImageField",
"django.db.models.TextField"
] | [((359, 414), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'max_length': '(300)', 'null': '(True)'}), '(blank=True, max_length=300, null=True)\n', (375, 414), False, 'from django.db import migrations, models\n'), ((557, 654), 'django.db.models.ImageField', 'models.ImageField', ([], {'default': '"""UserProfiles/Defaults/BlankWhite.png"""', 'upload_to': '"""UserProfiles/"""'}), "(default='UserProfiles/Defaults/BlankWhite.png', upload_to\n ='UserProfiles/')\n", (574, 654), False, 'from django.db import migrations, models\n'), ((791, 883), 'django.db.models.ImageField', 'models.ImageField', ([], {'default': '"""UserProfiles/Defaults/Blank.png"""', 'upload_to': '"""UserProfiles/"""'}), "(default='UserProfiles/Defaults/Blank.png', upload_to=\n 'UserProfiles/')\n", (808, 883), False, 'from django.db import migrations, models\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 21 15:39:43 2019
@author: Manu
"""
import mne
from mne import io
import sys
sys.path.append('C:/_MANU/_U821/Python_Dev/')
import scipy
from util import tools,asr,raw_asrcalibration
import numpy as np
import matplotlib.pyplot as plt
from mne.viz import plot_evoked_topo
fname = 'C:/_MANU/_U821/_wip/ContextOdd/raw/ANDNI_0001.vhdr'
raw = io.read_raw_brainvision(fname, preload = False)
picks_eeg = mne.pick_types(raw.info, meg=False, eeg=True, eog=False,stim=False, exclude='bads')
ListChannels = np.array(raw.info['ch_names'])
montage = mne.channels.read_montage(kind='standard_1020',ch_names=ListChannels[picks_eeg])
raw = io.read_raw_brainvision(fname, montage=montage, preload = True)
picks_eeg = mne.pick_types(raw.info, meg=False, eeg=True, eog=False,stim=False, exclude='bads')
raw =raw.pick_types( meg=False, eeg=True, eog=False,stim=True, exclude='bads')
# ASR Calibration
raworig_Data= raw._data
l_freq = 2
h_freq = 20
Wn = [l_freq/(raw.info['sfreq']/2.), h_freq/(raw.info['sfreq']/2.) ]
b, a = scipy.signal.iirfilter(N=2, Wn=Wn, btype = 'bandpass', analog = False, ftype = 'butter', output = 'ba')
raw._data[picks_eeg,:]=scipy.signal.lfilter(b, a, raworig_Data[picks_eeg,:], axis = 1, zi = None)
rawCalibAsr=raw.copy()
tmin = 30
tmax = 60 #s
rawCalibAsr = rawCalibAsr.crop(tmin=tmin,tmax=tmax)
ChanName4VEOG = ['Fp1','Fp2'] # 2 VEOG
cutoff = 5 # Makoto preprocessing says best between 10 and 20 https://sccn.ucsd.edu/wiki/Makoto%27s_preprocessing_pipeline#Alternatively.2C_cleaning_continuous_data_using_ASR_.2803.2F26.2F2019_updated.29
Yule_Walker_filtering = True
state = raw_asrcalibration.raw_asrcalibration(rawCalibAsr,ChanName4VEOG, cutoff,Yule_Walker_filtering)
# ASR process on epoch
event_id = {'Std': 1, 'Dev': 2}
events_orig,_ = mne.events_from_annotations(raw)
ixdev = np.array(np.where(events_orig[:,2]==2))
ixstd= ixdev-1
events = events_orig[np.sort(np.array(np.hstack((ixstd , ixdev)))),:]
events = np.squeeze(events, axis=0)
tmin, tmax = -0.2, 0.5
raw4detect = raw.copy()
raw4detect._data,iirstate = asr.YW_filter(raw._data,raw.info['sfreq'],None) ## HERE
epochs4Detect = mne.Epochs(raw4detect, events=events, event_id=event_id, tmin=tmin,tmax=tmax, proj=True,baseline=None, reject=None, picks=picks_eeg)
epochs_filt = mne.Epochs(raw, events=events, event_id=event_id, tmin=tmin,tmax=tmax, proj=None,baseline=None, reject=None, picks=picks_eeg)
Data4detect = epochs4Detect.get_data()
Data2Correct = epochs_filt.get_data()
DataClean = np.zeros((Data2Correct.shape))
for i_epoch in range(Data4detect.shape[0]):
EpochYR = Data4detect[i_epoch,:,:]
Epoch2Corr = Data2Correct[i_epoch,:,:]
DataClean[i_epoch,:,:] = asr.asr_process_on_epoch(EpochYR,Epoch2Corr,state)
epochs_clean = mne.EpochsArray(DataClean,info=epochs_filt.info,events=events,event_id=event_id)
srate = raw.info['sfreq']
evoked_std = epochs_filt['Std'].average(picks=picks_eeg)
evoked_dev = epochs_filt['Dev'].average(picks=picks_eeg)
evoked_clean_std = epochs_clean['Std'].average(picks=picks_eeg)
evoked_clean_dev = epochs_clean['Dev'].average(picks=picks_eeg)
evoked_clean_std.first=-200
evoked_clean_std.last= tmax*srate
evoked_clean_dev.first=-200
evoked_clean_dev.last= tmax*srate
evoked_clean_std.times= np.around(np.linspace(-0.2, tmax, num=DataClean.shape[2]),decimals=3)
evoked_clean_dev.times= np.around(np.linspace(-0.2, tmax, num=DataClean.shape[2]),decimals=3)
evokeds = [evoked_std, evoked_dev, evoked_clean_std, evoked_clean_dev]
colors = 'blue', 'red','steelblue','magenta'
plot_evoked_topo(evokeds, color=colors, title='Std Dev', background_color='w')
plt.show()
evoked_clean_MMN=evoked_clean_std.copy()
evoked_clean_MMN.data = (evoked_clean_dev.data - evoked_clean_std.data)
evoked_MMN =evoked_clean_MMN.copy()
evoked_MMN.data = (evoked_dev.data-evoked_std.data)
evokeds_MMN= [evoked_clean_MMN,evoked_MMN]
colors = 'red', 'black'
plot_evoked_topo(evokeds_MMN, color=colors, title='MMN', background_color='w')
plt.show()
kwargs = dict(times=np.arange(-0.1, 0.40, 0.025), vmin=-1.5, vmax=1.5, layout='auto',
head_pos=dict(center=(0., 0.), scale=(1., 1.)))
evoked_MMN.plot_topomap(**kwargs)
evoked_clean_MMN.plot_topomap(**kwargs)
| [
"mne.pick_types",
"util.asr.YW_filter",
"mne.io.read_raw_brainvision",
"util.raw_asrcalibration.raw_asrcalibration",
"numpy.arange",
"sys.path.append",
"mne.events_from_annotations",
"scipy.signal.lfilter",
"mne.channels.read_montage",
"scipy.signal.iirfilter",
"numpy.linspace",
"matplotlib.pyplot.show",
"mne.viz.plot_evoked_topo",
"numpy.hstack",
"numpy.squeeze",
"util.asr.asr_process_on_epoch",
"numpy.zeros",
"mne.Epochs",
"numpy.where",
"numpy.array",
"mne.EpochsArray"
] | [((126, 171), 'sys.path.append', 'sys.path.append', (['"""C:/_MANU/_U821/Python_Dev/"""'], {}), "('C:/_MANU/_U821/Python_Dev/')\n", (141, 171), False, 'import sys\n'), ((391, 436), 'mne.io.read_raw_brainvision', 'io.read_raw_brainvision', (['fname'], {'preload': '(False)'}), '(fname, preload=False)\n', (414, 436), False, 'from mne import io\n'), ((451, 539), 'mne.pick_types', 'mne.pick_types', (['raw.info'], {'meg': '(False)', 'eeg': '(True)', 'eog': '(False)', 'stim': '(False)', 'exclude': '"""bads"""'}), "(raw.info, meg=False, eeg=True, eog=False, stim=False,\n exclude='bads')\n", (465, 539), False, 'import mne\n'), ((551, 581), 'numpy.array', 'np.array', (["raw.info['ch_names']"], {}), "(raw.info['ch_names'])\n", (559, 581), True, 'import numpy as np\n'), ((592, 678), 'mne.channels.read_montage', 'mne.channels.read_montage', ([], {'kind': '"""standard_1020"""', 'ch_names': 'ListChannels[picks_eeg]'}), "(kind='standard_1020', ch_names=ListChannels[\n picks_eeg])\n", (617, 678), False, 'import mne\n'), ((679, 740), 'mne.io.read_raw_brainvision', 'io.read_raw_brainvision', (['fname'], {'montage': 'montage', 'preload': '(True)'}), '(fname, montage=montage, preload=True)\n', (702, 740), False, 'from mne import io\n'), ((756, 844), 'mne.pick_types', 'mne.pick_types', (['raw.info'], {'meg': '(False)', 'eeg': '(True)', 'eog': '(False)', 'stim': '(False)', 'exclude': '"""bads"""'}), "(raw.info, meg=False, eeg=True, eog=False, stim=False,\n exclude='bads')\n", (770, 844), False, 'import mne\n'), ((1069, 1169), 'scipy.signal.iirfilter', 'scipy.signal.iirfilter', ([], {'N': '(2)', 'Wn': 'Wn', 'btype': '"""bandpass"""', 'analog': '(False)', 'ftype': '"""butter"""', 'output': '"""ba"""'}), "(N=2, Wn=Wn, btype='bandpass', analog=False, ftype=\n 'butter', output='ba')\n", (1091, 1169), False, 'import scipy\n'), ((1196, 1267), 'scipy.signal.lfilter', 'scipy.signal.lfilter', (['b', 'a', 'raworig_Data[picks_eeg, :]'], {'axis': '(1)', 'zi': 'None'}), '(b, a, raworig_Data[picks_eeg, :], axis=1, zi=None)\n', (1216, 1267), False, 'import scipy\n'), ((1656, 1756), 'util.raw_asrcalibration.raw_asrcalibration', 'raw_asrcalibration.raw_asrcalibration', (['rawCalibAsr', 'ChanName4VEOG', 'cutoff', 'Yule_Walker_filtering'], {}), '(rawCalibAsr, ChanName4VEOG, cutoff,\n Yule_Walker_filtering)\n', (1693, 1756), False, 'from util import tools, asr, raw_asrcalibration\n'), ((1836, 1868), 'mne.events_from_annotations', 'mne.events_from_annotations', (['raw'], {}), '(raw)\n', (1863, 1868), False, 'import mne\n'), ((2012, 2038), 'numpy.squeeze', 'np.squeeze', (['events'], {'axis': '(0)'}), '(events, axis=0)\n', (2022, 2038), True, 'import numpy as np\n'), ((2114, 2163), 'util.asr.YW_filter', 'asr.YW_filter', (['raw._data', "raw.info['sfreq']", 'None'], {}), "(raw._data, raw.info['sfreq'], None)\n", (2127, 2163), False, 'from util import tools, asr, raw_asrcalibration\n'), ((2186, 2325), 'mne.Epochs', 'mne.Epochs', (['raw4detect'], {'events': 'events', 'event_id': 'event_id', 'tmin': 'tmin', 'tmax': 'tmax', 'proj': '(True)', 'baseline': 'None', 'reject': 'None', 'picks': 'picks_eeg'}), '(raw4detect, events=events, event_id=event_id, tmin=tmin, tmax=\n tmax, proj=True, baseline=None, reject=None, picks=picks_eeg)\n', (2196, 2325), False, 'import mne\n'), ((2333, 2464), 'mne.Epochs', 'mne.Epochs', (['raw'], {'events': 'events', 'event_id': 'event_id', 'tmin': 'tmin', 'tmax': 'tmax', 'proj': 'None', 'baseline': 'None', 'reject': 'None', 'picks': 'picks_eeg'}), '(raw, events=events, event_id=event_id, tmin=tmin, tmax=tmax,\n proj=None, baseline=None, reject=None, picks=picks_eeg)\n', (2343, 2464), False, 'import mne\n'), ((2550, 2578), 'numpy.zeros', 'np.zeros', (['Data2Correct.shape'], {}), '(Data2Correct.shape)\n', (2558, 2578), True, 'import numpy as np\n'), ((2822, 2910), 'mne.EpochsArray', 'mne.EpochsArray', (['DataClean'], {'info': 'epochs_filt.info', 'events': 'events', 'event_id': 'event_id'}), '(DataClean, info=epochs_filt.info, events=events, event_id=\n event_id)\n', (2837, 2910), False, 'import mne\n'), ((3611, 3689), 'mne.viz.plot_evoked_topo', 'plot_evoked_topo', (['evokeds'], {'color': 'colors', 'title': '"""Std Dev"""', 'background_color': '"""w"""'}), "(evokeds, color=colors, title='Std Dev', background_color='w')\n", (3627, 3689), False, 'from mne.viz import plot_evoked_topo\n'), ((3690, 3700), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3698, 3700), True, 'import matplotlib.pyplot as plt\n'), ((3972, 4050), 'mne.viz.plot_evoked_topo', 'plot_evoked_topo', (['evokeds_MMN'], {'color': 'colors', 'title': '"""MMN"""', 'background_color': '"""w"""'}), "(evokeds_MMN, color=colors, title='MMN', background_color='w')\n", (3988, 4050), False, 'from mne.viz import plot_evoked_topo\n'), ((4051, 4061), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4059, 4061), True, 'import matplotlib.pyplot as plt\n'), ((1886, 1918), 'numpy.where', 'np.where', (['(events_orig[:, 2] == 2)'], {}), '(events_orig[:, 2] == 2)\n', (1894, 1918), True, 'import numpy as np\n'), ((2740, 2792), 'util.asr.asr_process_on_epoch', 'asr.asr_process_on_epoch', (['EpochYR', 'Epoch2Corr', 'state'], {}), '(EpochYR, Epoch2Corr, state)\n', (2764, 2792), False, 'from util import tools, asr, raw_asrcalibration\n'), ((3337, 3384), 'numpy.linspace', 'np.linspace', (['(-0.2)', 'tmax'], {'num': 'DataClean.shape[2]'}), '(-0.2, tmax, num=DataClean.shape[2])\n', (3348, 3384), True, 'import numpy as np\n'), ((3431, 3478), 'numpy.linspace', 'np.linspace', (['(-0.2)', 'tmax'], {'num': 'DataClean.shape[2]'}), '(-0.2, tmax, num=DataClean.shape[2])\n', (3442, 3478), True, 'import numpy as np\n'), ((4086, 4113), 'numpy.arange', 'np.arange', (['(-0.1)', '(0.4)', '(0.025)'], {}), '(-0.1, 0.4, 0.025)\n', (4095, 4113), True, 'import numpy as np\n'), ((1971, 1996), 'numpy.hstack', 'np.hstack', (['(ixstd, ixdev)'], {}), '((ixstd, ixdev))\n', (1980, 1996), True, 'import numpy as np\n')] |
from abc import abstractmethod
from typing import Any, Dict, Iterable, List, Optional, Type, TypeVar
from pyjackson import dumps, loads
from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from ebonite.core.objects import DatasetType
from ebonite.core.objects.artifacts import ArtifactCollection
from ebonite.core.objects.core import (Buildable, EvaluationResults, EvaluationSet, Image, Model, Pipeline,
PipelineStep, Project, RuntimeEnvironment, RuntimeInstance, Task)
from ebonite.core.objects.dataset_source import DatasetSource
from ebonite.core.objects.metric import Metric
from ebonite.core.objects.requirements import Requirements
SQL_OBJECT_FIELD = '_sqlalchemy_object'
def json_column():
return Column(Text)
def safe_loads(payload, as_class):
return loads(payload, Optional[as_class])
def sqlobject(obj):
return getattr(obj, SQL_OBJECT_FIELD, None)
def update_attrs(obj, **attrs):
for name, value in attrs.items():
setattr(obj, name, value)
T = TypeVar('T')
S = TypeVar('S', bound='Attaching')
class Attaching:
id = ...
name = ...
def attach(self, obj):
setattr(obj, SQL_OBJECT_FIELD, self)
return obj
@classmethod
def from_obj(cls: Type[S], obj: T, new=False) -> S:
kwargs = cls.get_kwargs(obj)
existing = sqlobject(obj)
if not new and existing is not None:
update_attrs(existing, **kwargs)
return existing
return cls(**kwargs)
@classmethod
@abstractmethod
def get_kwargs(cls, obj: T) -> dict:
pass # pragma: no cover
@abstractmethod
def to_obj(self) -> T:
pass # pragma: no cover
Base = declarative_base()
class SProject(Base, Attaching):
__tablename__ = 'projects'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String, unique=True, nullable=False)
author = Column(String, unique=False, nullable=False)
creation_date = Column(DateTime, unique=False, nullable=False)
tasks: Iterable['STask'] = relationship("STask", back_populates="project")
def to_obj(self) -> Project:
p = Project(self.name, id=self.id, author=self.author, creation_date=self.creation_date)
for task in self.tasks:
p._tasks.add(task.to_obj())
return self.attach(p)
@classmethod
def get_kwargs(cls, project: Project) -> dict:
return dict(id=project.id,
name=project.name,
author=project.author,
creation_date=project.creation_date,
tasks=[STask.from_obj(t) for t in project.tasks.values()])
class STask(Base, Attaching):
__tablename__ = 'tasks'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String, unique=False, nullable=False)
author = Column(String, unique=False, nullable=False)
creation_date = Column(DateTime, unique=False, nullable=False)
project_id = Column(Integer, ForeignKey('projects.id'), nullable=False)
project = relationship("SProject", back_populates="tasks")
models: Iterable['SModel'] = relationship("SModel", back_populates="task")
pipelines: Iterable['SPipeline'] = relationship("SPipeline", back_populates='task')
images: Iterable['SImage'] = relationship("SImage", back_populates='task')
datasets = Column(Text)
metrics = Column(Text)
evaluation_sets = Column(Text)
__table_args__ = (UniqueConstraint('name', 'project_id', name='tasks_name_and_ref'),)
def to_obj(self) -> Task:
task = Task(id=self.id,
name=self.name,
author=self.author,
creation_date=self.creation_date,
project_id=self.project_id,
datasets=safe_loads(self.datasets, Dict[str, DatasetSource]),
metrics=safe_loads(self.metrics, Dict[str, Metric]),
evaluation_sets=safe_loads(self.evaluation_sets, Dict[str, EvaluationSet]))
for model in self.models:
task._models.add(model.to_obj())
for pipeline in self.pipelines:
task._pipelines.add(pipeline.to_obj())
for image in self.images:
task._images.add(image.to_obj())
return self.attach(task)
@classmethod
def get_kwargs(cls, task: Task) -> dict:
return dict(id=task.id,
name=task.name,
author=task.author,
creation_date=task.creation_date,
project_id=task.project_id,
models=[SModel.from_obj(m) for m in task.models.values()],
images=[SImage.from_obj(i) for i in task.images.values()],
pipelines=[SPipeline.from_obj(p) for p in task.pipelines.values()],
datasets=dumps(task.datasets),
metrics=dumps(task.metrics),
evaluation_sets=dumps(task.evaluation_sets))
class SModel(Base, Attaching):
__tablename__ = 'models'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String, unique=False, nullable=False)
author = Column(String, unique=False, nullable=False)
creation_date = Column(DateTime, unique=False, nullable=False)
wrapper = Column(Text)
artifact = Column(Text)
requirements = Column(Text)
description = Column(Text)
params = Column(Text)
task_id = Column(Integer, ForeignKey('tasks.id'), nullable=False)
task = relationship("STask", back_populates="models")
evaluations = Column(Text)
__table_args__ = (UniqueConstraint('name', 'task_id', name='models_name_and_ref'),)
def to_obj(self) -> Model:
model = Model(name=self.name,
wrapper_meta=safe_loads(self.wrapper, dict),
author=self.author,
creation_date=self.creation_date,
artifact=safe_loads(self.artifact, ArtifactCollection),
requirements=safe_loads(self.requirements, Requirements),
description=self.description,
params=safe_loads(self.params, Dict[str, Any]),
id=self.id,
task_id=self.task_id,
evaluations=safe_loads(self.evaluations, Dict[str, EvaluationResults]))
return self.attach(model)
@classmethod
def get_kwargs(cls, model: Model) -> dict:
return dict(id=model.id,
name=model.name,
author=model.author,
creation_date=model.creation_date,
wrapper=dumps(model.wrapper_meta),
artifact=dumps(model.artifact),
requirements=dumps(model.requirements),
description=model.description,
params=dumps(model.params),
task_id=model.task_id,
evaluations=dumps(model.evaluations))
class SPipeline(Base, Attaching):
__tablename__ = 'pipelines'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String, unique=False, nullable=False)
author = Column(String, unique=False, nullable=False)
creation_date = Column(DateTime, unique=False, nullable=False)
steps = Column(Text)
input_data = Column(Text)
output_data = Column(Text)
task_id = Column(Integer, ForeignKey('tasks.id'), nullable=False)
task = relationship("STask", back_populates="pipelines")
evaluations = Column(Text)
__table_args__ = (UniqueConstraint('name', 'task_id', name='pipelines_name_and_ref'),)
def to_obj(self) -> Pipeline:
pipeline = Pipeline(name=self.name,
steps=safe_loads(self.steps, List[PipelineStep]),
input_data=safe_loads(self.input_data, DatasetType),
output_data=safe_loads(self.output_data, DatasetType),
author=self.author,
creation_date=self.creation_date,
id=self.id,
task_id=self.task_id,
evaluations=safe_loads(self.evaluations, EvaluationResults))
return self.attach(pipeline)
@classmethod
def get_kwargs(cls, pipeline: Pipeline) -> dict:
return dict(id=pipeline.id,
name=pipeline.name,
author=pipeline.author,
creation_date=pipeline.creation_date,
steps=dumps(pipeline.steps),
input_data=dumps(pipeline.input_data),
output_data=dumps(pipeline.output_data),
task_id=pipeline.task_id,
evaluations=dumps(pipeline.evaluations))
class SImage(Base, Attaching):
__tablename__ = 'images'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String, unique=False, nullable=False)
author = Column(String, unique=False, nullable=False)
creation_date = Column(DateTime, unique=False, nullable=False)
task_id = Column(Integer, ForeignKey('tasks.id'), nullable=False)
task = relationship("STask", back_populates="images")
environment_id = Column(Integer, ForeignKey('environments.id'), nullable=False)
params = Column(Text)
source = Column(Text)
__table_args__ = (UniqueConstraint('name', 'task_id', name='image_name_and_ref'),)
def to_obj(self) -> Image:
image = Image(name=self.name,
author=self.author,
creation_date=self.creation_date,
id=self.id,
task_id=self.task_id,
params=safe_loads(self.params, Image.Params),
source=safe_loads(self.source, Buildable),
environment_id=self.environment_id)
return self.attach(image)
@classmethod
def get_kwargs(cls, image: Image) -> dict:
return dict(id=image.id,
name=image.name,
author=image.author,
creation_date=image.creation_date,
task_id=image.task_id,
params=dumps(image.params),
source=dumps(image.source),
environment_id=image.environment_id)
class SRuntimeEnvironment(Base, Attaching):
__tablename__ = 'environments'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String, unique=True, nullable=False)
author = Column(String, unique=False, nullable=False)
creation_date = Column(DateTime, unique=False, nullable=False)
params = Column(Text)
def to_obj(self) -> RuntimeEnvironment:
environment = RuntimeEnvironment(
name=self.name,
author=self.author,
creation_date=self.creation_date,
id=self.id,
params=safe_loads(self.params, RuntimeEnvironment.Params))
return self.attach(environment)
@classmethod
def get_kwargs(cls, environment: RuntimeEnvironment) -> dict:
return dict(id=environment.id,
name=environment.name,
author=environment.author,
creation_date=environment.creation_date,
params=dumps(environment.params))
class SRuntimeInstance(Base, Attaching):
__tablename__ = 'instances'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String, unique=False, nullable=False)
author = Column(String, unique=False, nullable=False)
creation_date = Column(DateTime, unique=False, nullable=False)
image_id = Column(Integer, ForeignKey('images.id'), nullable=False)
environment_id = Column(Integer, ForeignKey('environments.id'), nullable=False)
params = Column(Text)
__table_args__ = (UniqueConstraint('name', 'image_id', 'environment_id', name='instance_name_and_ref'),)
def to_obj(self) -> RuntimeInstance:
instance = RuntimeInstance(
name=self.name,
author=self.author,
creation_date=self.creation_date,
id=self.id,
image_id=self.image_id,
environment_id=self.environment_id,
params=safe_loads(self.params, RuntimeInstance.Params))
return self.attach(instance)
@classmethod
def get_kwargs(cls, instance: RuntimeInstance) -> dict:
return dict(id=instance.id,
name=instance.name,
author=instance.author,
creation_date=instance.creation_date,
image_id=instance.image_id,
environment_id=instance.environment_id,
params=dumps(instance.params))
| [
"pyjackson.loads",
"sqlalchemy.ForeignKey",
"sqlalchemy.ext.declarative.declarative_base",
"sqlalchemy.orm.relationship",
"ebonite.core.objects.core.Project",
"sqlalchemy.UniqueConstraint",
"sqlalchemy.Column",
"typing.TypeVar",
"pyjackson.dumps"
] | [((1166, 1178), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (1173, 1178), False, 'from typing import Any, Dict, Iterable, List, Optional, Type, TypeVar\n'), ((1183, 1214), 'typing.TypeVar', 'TypeVar', (['"""S"""'], {'bound': '"""Attaching"""'}), "('S', bound='Attaching')\n", (1190, 1214), False, 'from typing import Any, Dict, Iterable, List, Optional, Type, TypeVar\n'), ((1848, 1866), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (1864, 1866), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((888, 900), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (894, 900), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((949, 983), 'pyjackson.loads', 'loads', (['payload', 'Optional[as_class]'], {}), '(payload, Optional[as_class])\n', (954, 983), False, 'from pyjackson import dumps, loads\n'), ((1942, 1995), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)', 'autoincrement': '(True)'}), '(Integer, primary_key=True, autoincrement=True)\n', (1948, 1995), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((2007, 2050), 'sqlalchemy.Column', 'Column', (['String'], {'unique': '(True)', 'nullable': '(False)'}), '(String, unique=True, nullable=False)\n', (2013, 2050), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((2064, 2108), 'sqlalchemy.Column', 'Column', (['String'], {'unique': '(False)', 'nullable': '(False)'}), '(String, unique=False, nullable=False)\n', (2070, 2108), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((2129, 2175), 'sqlalchemy.Column', 'Column', (['DateTime'], {'unique': '(False)', 'nullable': '(False)'}), '(DateTime, unique=False, nullable=False)\n', (2135, 2175), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((2208, 2255), 'sqlalchemy.orm.relationship', 'relationship', (['"""STask"""'], {'back_populates': '"""project"""'}), "('STask', back_populates='project')\n", (2220, 2255), False, 'from sqlalchemy.orm import relationship\n'), ((2881, 2934), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)', 'autoincrement': '(True)'}), '(Integer, primary_key=True, autoincrement=True)\n', (2887, 2934), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((2946, 2990), 'sqlalchemy.Column', 'Column', (['String'], {'unique': '(False)', 'nullable': '(False)'}), '(String, unique=False, nullable=False)\n', (2952, 2990), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((3004, 3048), 'sqlalchemy.Column', 'Column', (['String'], {'unique': '(False)', 'nullable': '(False)'}), '(String, unique=False, nullable=False)\n', (3010, 3048), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((3069, 3115), 'sqlalchemy.Column', 'Column', (['DateTime'], {'unique': '(False)', 'nullable': '(False)'}), '(DateTime, unique=False, nullable=False)\n', (3075, 3115), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((3207, 3255), 'sqlalchemy.orm.relationship', 'relationship', (['"""SProject"""'], {'back_populates': '"""tasks"""'}), "('SProject', back_populates='tasks')\n", (3219, 3255), False, 'from sqlalchemy.orm import relationship\n'), ((3289, 3334), 'sqlalchemy.orm.relationship', 'relationship', (['"""SModel"""'], {'back_populates': '"""task"""'}), "('SModel', back_populates='task')\n", (3301, 3334), False, 'from sqlalchemy.orm import relationship\n'), ((3374, 3422), 'sqlalchemy.orm.relationship', 'relationship', (['"""SPipeline"""'], {'back_populates': '"""task"""'}), "('SPipeline', back_populates='task')\n", (3386, 3422), False, 'from sqlalchemy.orm import relationship\n'), ((3456, 3501), 'sqlalchemy.orm.relationship', 'relationship', (['"""SImage"""'], {'back_populates': '"""task"""'}), "('SImage', back_populates='task')\n", (3468, 3501), False, 'from sqlalchemy.orm import relationship\n'), ((3518, 3530), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (3524, 3530), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((3545, 3557), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (3551, 3557), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((3580, 3592), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (3586, 3592), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((5216, 5269), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)', 'autoincrement': '(True)'}), '(Integer, primary_key=True, autoincrement=True)\n', (5222, 5269), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((5282, 5326), 'sqlalchemy.Column', 'Column', (['String'], {'unique': '(False)', 'nullable': '(False)'}), '(String, unique=False, nullable=False)\n', (5288, 5326), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((5340, 5384), 'sqlalchemy.Column', 'Column', (['String'], {'unique': '(False)', 'nullable': '(False)'}), '(String, unique=False, nullable=False)\n', (5346, 5384), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((5405, 5451), 'sqlalchemy.Column', 'Column', (['DateTime'], {'unique': '(False)', 'nullable': '(False)'}), '(DateTime, unique=False, nullable=False)\n', (5411, 5451), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((5466, 5478), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (5472, 5478), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((5495, 5507), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (5501, 5507), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((5527, 5539), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (5533, 5539), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((5558, 5570), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (5564, 5570), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((5584, 5596), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (5590, 5596), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((5678, 5724), 'sqlalchemy.orm.relationship', 'relationship', (['"""STask"""'], {'back_populates': '"""models"""'}), "('STask', back_populates='models')\n", (5690, 5724), False, 'from sqlalchemy.orm import relationship\n'), ((5744, 5756), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (5750, 5756), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((7242, 7295), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)', 'autoincrement': '(True)'}), '(Integer, primary_key=True, autoincrement=True)\n', (7248, 7295), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((7308, 7352), 'sqlalchemy.Column', 'Column', (['String'], {'unique': '(False)', 'nullable': '(False)'}), '(String, unique=False, nullable=False)\n', (7314, 7352), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((7366, 7410), 'sqlalchemy.Column', 'Column', (['String'], {'unique': '(False)', 'nullable': '(False)'}), '(String, unique=False, nullable=False)\n', (7372, 7410), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((7431, 7477), 'sqlalchemy.Column', 'Column', (['DateTime'], {'unique': '(False)', 'nullable': '(False)'}), '(DateTime, unique=False, nullable=False)\n', (7437, 7477), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((7490, 7502), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (7496, 7502), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((7521, 7533), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (7527, 7533), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((7552, 7564), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (7558, 7564), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((7647, 7696), 'sqlalchemy.orm.relationship', 'relationship', (['"""STask"""'], {'back_populates': '"""pipelines"""'}), "('STask', back_populates='pipelines')\n", (7659, 7696), False, 'from sqlalchemy.orm import relationship\n'), ((7716, 7728), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (7722, 7728), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((9064, 9117), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)', 'autoincrement': '(True)'}), '(Integer, primary_key=True, autoincrement=True)\n', (9070, 9117), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((9130, 9174), 'sqlalchemy.Column', 'Column', (['String'], {'unique': '(False)', 'nullable': '(False)'}), '(String, unique=False, nullable=False)\n', (9136, 9174), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((9188, 9232), 'sqlalchemy.Column', 'Column', (['String'], {'unique': '(False)', 'nullable': '(False)'}), '(String, unique=False, nullable=False)\n', (9194, 9232), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((9253, 9299), 'sqlalchemy.Column', 'Column', (['DateTime'], {'unique': '(False)', 'nullable': '(False)'}), '(DateTime, unique=False, nullable=False)\n', (9259, 9299), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((9382, 9428), 'sqlalchemy.orm.relationship', 'relationship', (['"""STask"""'], {'back_populates': '"""images"""'}), "('STask', back_populates='images')\n", (9394, 9428), False, 'from sqlalchemy.orm import relationship\n'), ((9528, 9540), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (9534, 9540), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((9554, 9566), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (9560, 9566), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((10644, 10697), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)', 'autoincrement': '(True)'}), '(Integer, primary_key=True, autoincrement=True)\n', (10650, 10697), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((10710, 10753), 'sqlalchemy.Column', 'Column', (['String'], {'unique': '(True)', 'nullable': '(False)'}), '(String, unique=True, nullable=False)\n', (10716, 10753), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((10767, 10811), 'sqlalchemy.Column', 'Column', (['String'], {'unique': '(False)', 'nullable': '(False)'}), '(String, unique=False, nullable=False)\n', (10773, 10811), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((10832, 10878), 'sqlalchemy.Column', 'Column', (['DateTime'], {'unique': '(False)', 'nullable': '(False)'}), '(DateTime, unique=False, nullable=False)\n', (10838, 10878), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((10893, 10905), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (10899, 10905), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((11647, 11700), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)', 'autoincrement': '(True)'}), '(Integer, primary_key=True, autoincrement=True)\n', (11653, 11700), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((11713, 11757), 'sqlalchemy.Column', 'Column', (['String'], {'unique': '(False)', 'nullable': '(False)'}), '(String, unique=False, nullable=False)\n', (11719, 11757), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((11771, 11815), 'sqlalchemy.Column', 'Column', (['String'], {'unique': '(False)', 'nullable': '(False)'}), '(String, unique=False, nullable=False)\n', (11777, 11815), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((11836, 11882), 'sqlalchemy.Column', 'Column', (['DateTime'], {'unique': '(False)', 'nullable': '(False)'}), '(DateTime, unique=False, nullable=False)\n', (11842, 11882), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((12054, 12066), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (12060, 12066), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((2302, 2391), 'ebonite.core.objects.core.Project', 'Project', (['self.name'], {'id': 'self.id', 'author': 'self.author', 'creation_date': 'self.creation_date'}), '(self.name, id=self.id, author=self.author, creation_date=self.\n creation_date)\n', (2309, 2391), False, 'from ebonite.core.objects.core import Buildable, EvaluationResults, EvaluationSet, Image, Model, Pipeline, PipelineStep, Project, RuntimeEnvironment, RuntimeInstance, Task\n'), ((3149, 3174), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""projects.id"""'], {}), "('projects.id')\n", (3159, 3174), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((3616, 3681), 'sqlalchemy.UniqueConstraint', 'UniqueConstraint', (['"""name"""', '"""project_id"""'], {'name': '"""tasks_name_and_ref"""'}), "('name', 'project_id', name='tasks_name_and_ref')\n", (3632, 3681), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((5627, 5649), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""tasks.id"""'], {}), "('tasks.id')\n", (5637, 5649), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((5779, 5842), 'sqlalchemy.UniqueConstraint', 'UniqueConstraint', (['"""name"""', '"""task_id"""'], {'name': '"""models_name_and_ref"""'}), "('name', 'task_id', name='models_name_and_ref')\n", (5795, 5842), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((7596, 7618), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""tasks.id"""'], {}), "('tasks.id')\n", (7606, 7618), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((7751, 7817), 'sqlalchemy.UniqueConstraint', 'UniqueConstraint', (['"""name"""', '"""task_id"""'], {'name': '"""pipelines_name_and_ref"""'}), "('name', 'task_id', name='pipelines_name_and_ref')\n", (7767, 7817), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((9331, 9353), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""tasks.id"""'], {}), "('tasks.id')\n", (9341, 9353), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((9467, 9496), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""environments.id"""'], {}), "('environments.id')\n", (9477, 9496), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((9590, 9652), 'sqlalchemy.UniqueConstraint', 'UniqueConstraint', (['"""name"""', '"""task_id"""'], {'name': '"""image_name_and_ref"""'}), "('name', 'task_id', name='image_name_and_ref')\n", (9606, 9652), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((11915, 11938), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""images.id"""'], {}), "('images.id')\n", (11925, 11938), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((11993, 12022), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""environments.id"""'], {}), "('environments.id')\n", (12003, 12022), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((12090, 12179), 'sqlalchemy.UniqueConstraint', 'UniqueConstraint', (['"""name"""', '"""image_id"""', '"""environment_id"""'], {'name': '"""instance_name_and_ref"""'}), "('name', 'image_id', 'environment_id', name=\n 'instance_name_and_ref')\n", (12106, 12179), False, 'from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint\n'), ((5008, 5028), 'pyjackson.dumps', 'dumps', (['task.datasets'], {}), '(task.datasets)\n', (5013, 5028), False, 'from pyjackson import dumps, loads\n'), ((5058, 5077), 'pyjackson.dumps', 'dumps', (['task.metrics'], {}), '(task.metrics)\n', (5063, 5077), False, 'from pyjackson import dumps, loads\n'), ((5115, 5142), 'pyjackson.dumps', 'dumps', (['task.evaluation_sets'], {}), '(task.evaluation_sets)\n', (5120, 5142), False, 'from pyjackson import dumps, loads\n'), ((6825, 6850), 'pyjackson.dumps', 'dumps', (['model.wrapper_meta'], {}), '(model.wrapper_meta)\n', (6830, 6850), False, 'from pyjackson import dumps, loads\n'), ((6881, 6902), 'pyjackson.dumps', 'dumps', (['model.artifact'], {}), '(model.artifact)\n', (6886, 6902), False, 'from pyjackson import dumps, loads\n'), ((6937, 6962), 'pyjackson.dumps', 'dumps', (['model.requirements'], {}), '(model.requirements)\n', (6942, 6962), False, 'from pyjackson import dumps, loads\n'), ((7042, 7061), 'pyjackson.dumps', 'dumps', (['model.params'], {}), '(model.params)\n', (7047, 7061), False, 'from pyjackson import dumps, loads\n'), ((7138, 7162), 'pyjackson.dumps', 'dumps', (['model.evaluations'], {}), '(model.evaluations)\n', (7143, 7162), False, 'from pyjackson import dumps, loads\n'), ((8742, 8763), 'pyjackson.dumps', 'dumps', (['pipeline.steps'], {}), '(pipeline.steps)\n', (8747, 8763), False, 'from pyjackson import dumps, loads\n'), ((8796, 8822), 'pyjackson.dumps', 'dumps', (['pipeline.input_data'], {}), '(pipeline.input_data)\n', (8801, 8822), False, 'from pyjackson import dumps, loads\n'), ((8856, 8883), 'pyjackson.dumps', 'dumps', (['pipeline.output_data'], {}), '(pipeline.output_data)\n', (8861, 8883), False, 'from pyjackson import dumps, loads\n'), ((8963, 8990), 'pyjackson.dumps', 'dumps', (['pipeline.evaluations'], {}), '(pipeline.evaluations)\n', (8968, 8990), False, 'from pyjackson import dumps, loads\n'), ((10427, 10446), 'pyjackson.dumps', 'dumps', (['image.params'], {}), '(image.params)\n', (10432, 10446), False, 'from pyjackson import dumps, loads\n'), ((10475, 10494), 'pyjackson.dumps', 'dumps', (['image.source'], {}), '(image.source)\n', (10480, 10494), False, 'from pyjackson import dumps, loads\n'), ((11535, 11560), 'pyjackson.dumps', 'dumps', (['environment.params'], {}), '(environment.params)\n', (11540, 11560), False, 'from pyjackson import dumps, loads\n'), ((12965, 12987), 'pyjackson.dumps', 'dumps', (['instance.params'], {}), '(instance.params)\n', (12970, 12987), False, 'from pyjackson import dumps, loads\n')] |
import numpy as np
import string
import re
import nltk
nltk.download('stopwords')
stop_words = nltk.corpus.stopwords.words('english')
class word_inform():
def __init__(self):
self.inform = {}
def wordinput(self):
WI = input('문장을 입력해주세요 : ') # 문장 받아오기. WI = word input.
WI = WI.replace('\n',' ') # 문단에 줄 내림이 있다면, 스페이스바로 바꿔주기
#be = {'am', 'is', 'are', 'be' , 'was', 'were'} # be 동사 저장.
WI = WI.lower()
#WI = WI.replace("i'm",'i am') # be동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("he's",'he is') # be동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("she's",'she is') # be동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("that's",'that is') # be동사를 찾아내기 위해, 변환을 해준다
#WI = WI.replace("what's",'what is') # be동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("it's",'it is') # be동사를 찾아내기 위해, 변환을 해준다. (is 줄임말 풀어주기.)
#WI = WI.replace("you're",'you are') # be동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("they're",'they are') # be동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("we're",'we are') # be동사를 찾아내기 위해, 변환을 해준다.
#Auxiliary_verb = {'will','would','can','could','shall','should','may','might','must'}
#WI = WI.replace("i'll",'i will') # 조동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("you'll",'you will') # 조동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("they'll",'they will') # 조동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("we'll",'we will') # 조동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("he'll",'he will') # 조동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("she'll",'she will') # 조동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("it'll",'it will') # 조동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("that'll",'that will') # 조동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("i'd",'i would') # 조동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("you'd",'you would') # 조동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("they'd",'they would') # 조동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("we'd",'we would') # 조동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("he'd",'he would') # 조동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("she'd",'she would') # 조동사를 찾아내기 위해, 변환을 해준다.
#WI = re.sub("[.]{2,}",'',WI) # 마침표 두개이상 없애기
WI = re.sub('[\\w.]+@[\\w.]+',' ',WI)
WI = re.sub("[?!'.]{1,}",'.',WI)
WI = re.sub("[^\w\s'.]+",'',WI) # 특수문자 제거하기 따옴표는 제거하지 않음... >> stop words에 포함된 단어 you'll 같은 거 때문.
WI = re.sub("[.]{1,}",'.',WI)
sentence = WI.strip(string.punctuation).split('.') # 문단에 마침표가 있다면, 문장을 분리해주기. 마지막에 있는 구두점 떼어주기.
sentence_words = [s.split() for s in sentence] # 각각의 문장속에 있는 단어 분리 해주기.
self.inform['sentence_words'] = sentence_words
def word_voc(self,voc):
before_voc_length = len(voc)
sentence_words = self.inform['sentence_words'] # 입력받은 문장 그대로.
for length in range(len(sentence_words)):
for vocab in sentence_words[length]:
if vocab.isdigit() == False: # 숫자가 계속 학습하는 문장에 들어가서 학습 효율이 떨어지는 듯 하다. ( 따라서 숫자는 제외한다.)
if vocab not in stop_words:
if vocab not in voc:
voc.append(vocab)
self.inform['voc'] = voc
after_voc_length = len(voc)
self.inform['voc_length_diff'] = (after_voc_length - before_voc_length)
self.inform['voc_length'] = after_voc_length
word_vector = [[] for i in sentence_words]
word_sentence = [[] for i in sentence_words]
voc_vectors = []
for word in voc:
voc_vector = np.zeros_like(voc, dtype = int)# 단어장 크기의 새로운 벡터를 만든다.
index_of_input_word = voc.index(word)
voc_vector[index_of_input_word] += 1 # 한단어가 단어장의 몇번 index에 있는지를 확인.
voc_vectors.append(voc_vector)
self.inform['voc_vectors'] = voc_vectors
# word_vector >> 입력받은 문장들을 단어별로 구분해 놓은 리스트.
for length in range(len(sentence_words)):
for word in sentence_words[length]:
if word.isdigit() == False: # 숫자가 계속 학습하는 문장에 들어가서 학습 효율이 떨어지는 듯 하다. ( 따라서 숫자는 제외한다.)
if word not in stop_words:
voc_vector = np.zeros_like(voc, dtype = int)# 단어장 크기의 새로운 벡터를 만든다.
index_of_input_word = voc.index(word)
voc_vector[index_of_input_word] += 1 # 한단어가 단어장의 몇번 index에 있는지를 확인.
word_vector[length].append(voc_vector)
word_sentence[length].append(word)
self.inform['sentence_words'] = word_sentence
self.inform['word_vector'] = word_vector
| [
"nltk.download",
"numpy.zeros_like",
"re.sub",
"nltk.corpus.stopwords.words"
] | [((55, 81), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (68, 81), False, 'import nltk\n'), ((95, 133), 'nltk.corpus.stopwords.words', 'nltk.corpus.stopwords.words', (['"""english"""'], {}), "('english')\n", (122, 133), False, 'import nltk\n'), ((2335, 2369), 're.sub', 're.sub', (['"""[\\\\w.]+@[\\\\w.]+"""', '""" """', 'WI'], {}), "('[\\\\w.]+@[\\\\w.]+', ' ', WI)\n", (2341, 2369), False, 'import re\n'), ((2381, 2410), 're.sub', 're.sub', (['"""[?!\'.]{1,}"""', '"""."""', 'WI'], {}), '("[?!\'.]{1,}", \'.\', WI)\n', (2387, 2410), False, 'import re\n'), ((2422, 2452), 're.sub', 're.sub', (['"""[^\\\\w\\\\s\'.]+"""', '""""""', 'WI'], {}), '("[^\\\\w\\\\s\'.]+", \'\', WI)\n', (2428, 2452), False, 'import re\n'), ((2530, 2556), 're.sub', 're.sub', (['"""[.]{1,}"""', '"""."""', 'WI'], {}), "('[.]{1,}', '.', WI)\n", (2536, 2556), False, 'import re\n'), ((3745, 3774), 'numpy.zeros_like', 'np.zeros_like', (['voc'], {'dtype': 'int'}), '(voc, dtype=int)\n', (3758, 3774), True, 'import numpy as np\n'), ((4394, 4423), 'numpy.zeros_like', 'np.zeros_like', (['voc'], {'dtype': 'int'}), '(voc, dtype=int)\n', (4407, 4423), True, 'import numpy as np\n')] |
from base import BaseTestCase
from django.contrib.auth.models import User
from redistricting.models import *
class StatisticsSetTestCase(BaseTestCase):
fixtures = [
'redistricting_testdata.json',
'redistricting_testdata_geolevel2.json',
'redistricting_statisticssets.json',
]
def setUp(self):
super(StatisticsSetTestCase, self).setUp()
display = ScoreDisplay.objects.get(title='Demographics')
summary = ScorePanel.objects.get(title='Plan Summary')
demographics = ScorePanel.objects.get(title='Demographics')
display.scorepanel_set.add(summary)
display.scorepanel_set.add(demographics)
functions = ScoreFunction.objects.filter(
name__in=('Voting Age Population',
'Hispanic voting-age population', 'Total Population'))
demographics.score_functions = functions.all()
demographics.save()
self.functions = functions.all()
self.demographics = demographics
self.summary = summary
self.display = display
def tearDown(self):
self.display.delete()
super(StatisticsSetTestCase, self).tearDown()
def test_copy_scoredisplay(self):
user = User(username="Stats User")
user.save()
# We'll set the owner but it's overwritten
copy = ScoreDisplay(owner=user)
copy = copy.copy_from(display=self.display)
self.assertEqual(
"%s copy" % self.display.__unicode__(), copy.__unicode__(),
"ScoreDisplay title copied, allowing same name for user more than once"
)
self.assertEqual(
len(copy.scorepanel_set.all()),
len(self.display.scorepanel_set.all()),
"Copied scoredisplay has wrong number of panels attached")
self.assertNotEqual(
user, copy.owner,
"ScoreDisplay copied owner rather than copying owner from ScoreDisplay"
)
copy = ScoreDisplay(owner=user)
copy = copy.copy_from(display=self.display, owner=user)
self.assertEqual(self.display.__unicode__(), copy.__unicode__(),
"Title of scoredisplay not copied")
self.assertEqual(
len(copy.scorepanel_set.all()),
len(self.display.scorepanel_set.all()),
"Copied scoredisplay has wrong number of panels attached")
vap = ScoreFunction.objects.get(name="Voting Age Population")
copy = copy.copy_from(
display=self.display,
functions=[unicode(str(vap.id))],
title="Copied from")
self.assertEqual(
len(copy.scorepanel_set.all()),
len(self.display.scorepanel_set.all()),
"Copied scoredisplay has wrong number of panels attached")
new_demo = ScoreDisplay.objects.get(title="Copied from")
panels_tested = 0
for panel in new_demo.scorepanel_set.all():
if panel.title == "Plan Summary":
self.assertEqual(
len(self.summary.score_functions.all()),
len(panel.score_functions.all()),
"Copied plan summary panel didn't have correct number of functions"
)
panels_tested += 1
elif panel.title == "Demographics":
self.assertEqual(1, len(
panel.score_functions.all()
), "Copied demographics panel didn't have correct number of functions"
)
panels_tested += 1
self.assertEqual(2, panels_tested,
"Copied scoredisplay didn't have both panels needed")
# Let's try just updating those score functions
new_copy = ScoreDisplay(owner=user)
new_copy = copy.copy_from(display=copy, functions=self.functions)
self.assertEqual(copy.title, new_copy.title,
"Title of scoredisplay not copied")
self.assertEqual(copy.id, new_copy.id,
"Scorefunctions not added to current display")
self.assertEqual(
len(copy.scorepanel_set.all()), len(new_copy.scorepanel_set.all()),
"Copied scoredisplay has wrong number of panels attached")
panels_tested = 0
for panel in new_copy.scorepanel_set.all():
if panel.title == "Plan Summary":
self.assertEqual(
len(self.summary.score_functions.all()),
len(panel.score_functions.all()),
"Copied plan summary panel didn't have correct number of functions"
)
panels_tested += 1
elif panel.title == "Demographics":
self.assertEqual(
len(self.functions), len(panel.score_functions.all()),
"Copied demographics panel didn't have correct number of functions; e:%d,a:%d"
% (3, len(panel.score_functions.all())))
panels_tested += 1
self.assertEqual(2, panels_tested,
"Copied scoredisplay didn't have both panels needed")
| [
"django.contrib.auth.models.User"
] | [((1242, 1269), 'django.contrib.auth.models.User', 'User', ([], {'username': '"""Stats User"""'}), "(username='Stats User')\n", (1246, 1269), False, 'from django.contrib.auth.models import User\n')] |
import argparse
import serial
import os
parser = argparse.ArgumentParser()
parser.add_argument('image', default='kernel8.img', type=str)
parser.add_argument('device',default='/dev/ttyUSB0', type=str)
args = parser.parse_args()
try:
ser = serial.Serial(args.device,115200)
except:
print("Serial init failed!")
exit(1)
file_path = args.image
file_size = os.stat(file_path).st_size
ser.write(file_size.to_bytes(4, byteorder="big"))
print("Sending kernel...")
with open(file_path, 'rb', buffering = 0) as f:
for i in range(file_size):
ser.write(f.read(1))
print(ser.readline())
print("done") | [
"serial.Serial",
"argparse.ArgumentParser",
"os.stat"
] | [((50, 75), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (73, 75), False, 'import argparse\n'), ((244, 278), 'serial.Serial', 'serial.Serial', (['args.device', '(115200)'], {}), '(args.device, 115200)\n', (257, 278), False, 'import serial\n'), ((367, 385), 'os.stat', 'os.stat', (['file_path'], {}), '(file_path)\n', (374, 385), False, 'import os\n')] |
#!/usr/bin/env python3.4
import webSocketClient
import motorInterface
import json
axis = ['xLeft', 'yLeft', 'triggerLeft', 'xRight', 'yRight', 'triggerRight']
buttons = ['A', 'B', 'X', 'Y', 'LB', 'RB']
trimUp = {
'center': 0.0
}
# these are in a row
# this motor is IN3/4 on the edge of the motor controller
m1 = motorInterface.motor(16, 26) # vertical
m2 = motorInterface.motor(12, 13) # unused
# 2nd chip
m3 = motorInterface.motor(27, 23) # side (maybe left)
m4 = motorInterface.motor(4, 18) # side (maybe right)
def move1(pow):
m1.set(pow)
def move2(pow):
m2.set(pow)
def move3(pow):
m3.set(pow)
def move4(pow):
m4.set(pow)
justPressed = [
{
'A': False,
'B': False,
'X': False,
'Y': False,
'LB': False,
'RB': False
},
{
'A': False,
'B': False,
'X': False,
'Y': False,
'LB': False,
'RB': False
}
]
def buttonPressed(button, num):
global trimUp
# num is 0 or 1
if num == 1: # controller number 2
if button == 'LB':
trimUp['center'] += 1
elif button == 'RB':
trimUp['center'] -= 1
def process(data):
joysticks = json.loads(data)
assert len(joysticks) == 24
joystick1 = dict(zip(axis + buttons, joysticks[:12]))
joystick2 = dict(zip(axis + buttons, joysticks[12:]))
old = [] # for debugging
#print('msg:', joysticks)
del data
global justPressed
stickNum = 0
for stick, jPressed in zip((joystick1, joystick2), justPressed):
for k in stick:
if k not in buttons:
continue
v = stick[k]
if v == 1 and not jPressed[k]:
# just pressed
buttonPressed(k, stickNum)
jPressed[k] = True
elif v == 0 and jPressed[k]:
# just released
jPressed[k] = False
elif v not in [1, 0]:
raise ValueError('Got {0}, expected 0 or 1'.format(v))
else:
pass
stickNum += 1
del stickNum
yLeft = 50 * joystick2['yLeft']
#xLeft = 50 * joystick2['xLeft']
yRight = 50 * joystick2['yRight']
#xRight = 50 * joystick2['xRight']
joystick2['triggerRight'] = (joystick2['triggerRight'] + 1) / 2
joystick2['triggerLeft'] = (joystick2['triggerLeft'] + 1) / 2
vertical = 0
if joystick2['triggerRight'] >= 0.1 and joystick2['triggerLeft'] >= 0.1:
pass # do nothing cause both are pressed
else:
if joystick2['triggerRight'] > 0.1:
# spin right
vertical = joystick2['triggerRight'] * 50
if joystick2['triggerLeft'] > 0.1:
# spin left
vertical = -joystick2['triggerLeft'] * 50
# Mini-ROV motor setup
# top view
# ____
# | |
# /a\| |/b\
# |____|
# (up)
#
motor_a = yLeft
motor_b = yRight
global trimUp
motor_up = trimUp['center'] + vertical
def bounds(x):
# max power is -100 to 100
if x < -50:
return -50
if x > 50:
return 50
return round(x, 2)
motor_a = bounds(motor_a)
motor_b = bounds(motor_b)
motor_up = bounds(motor_up)
# right
move1(motor_up)
move4(motor_a)
move3(motor_b)
# print datalist
for i in range(30):
print('\r\033[A\033[K', end='')
print('Trim: {0}'.format(trimUp['center']))
print(joystick1)
print(joystick2)
print(motor_a, motor_b)
print(motor_up)
print()
index = 0
for i in old:
print(index, i)
index += 1
webSocketClient.start('miniROV', process, ip="192.168.1.2")
| [
"motorInterface.motor",
"webSocketClient.start",
"json.loads"
] | [((321, 349), 'motorInterface.motor', 'motorInterface.motor', (['(16)', '(26)'], {}), '(16, 26)\n', (341, 349), False, 'import motorInterface\n'), ((366, 394), 'motorInterface.motor', 'motorInterface.motor', (['(12)', '(13)'], {}), '(12, 13)\n', (386, 394), False, 'import motorInterface\n'), ((421, 449), 'motorInterface.motor', 'motorInterface.motor', (['(27)', '(23)'], {}), '(27, 23)\n', (441, 449), False, 'import motorInterface\n'), ((475, 502), 'motorInterface.motor', 'motorInterface.motor', (['(4)', '(18)'], {}), '(4, 18)\n', (495, 502), False, 'import motorInterface\n'), ((3695, 3754), 'webSocketClient.start', 'webSocketClient.start', (['"""miniROV"""', 'process'], {'ip': '"""192.168.1.2"""'}), "('miniROV', process, ip='192.168.1.2')\n", (3716, 3754), False, 'import webSocketClient\n'), ((1213, 1229), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (1223, 1229), False, 'import json\n')] |
from apminsight import constants
from apminsight.util import is_non_empty_string
from apminsight.agentfactory import get_agent
from .wrapper import default_wrapper
class CursorProxy():
def __init__(self, cursor, conn):
self._apm_wrap_cursor = cursor
self._apm_wrap_conn = conn
self._apm_check_and_wrap('execute')
self._apm_check_and_wrap('executemany')
def __getattr__(self, key):
if key in self.__dict__:
return getattr(self, key)
return getattr(self._apm_wrap_cursor, key)
def __setattr__(self, key, value):
if( key in ['_apm_wrap_cursor', '_apm_wrap_conn', 'execute', 'executemany']):
self.__dict__[key] = value
else:
return setattr(self._apm_wrap_conn, key, value)
def _apm_check_and_wrap(self, attr):
if hasattr(self._apm_wrap_cursor, attr):
actual = getattr(self._apm_wrap_cursor, attr)
attr_info = {
constants.method_str : attr,
constants.component_str : self._apm_wrap_conn._apm_comp_name,
constants.extract_info : self._apm_extract_query,
constants.is_db_tracker : True
}
wrapper = default_wrapper(actual, 'Cursor', attr_info)
setattr(self, attr, wrapper)
def _apm_extract_query(self, tracker, args=(), kwargs={}, return_value=None):
tracker.set_info(self._apm_wrap_conn._apm_host_info)
threshold = get_agent().get_threshold()
if threshold.is_sql_capture_enabled() is not True:
return
if isinstance(args, (list, tuple)) and len(args)>0:
if is_non_empty_string(args[0]):
tracker.set_info({'query' : args[0]})
class ConnectionProxy():
def __init__(self, conn, comp, host_info):
self._apm_wrap_conn = conn
self._apm_comp_name = comp
self._apm_host_info = host_info
def cursor(self, *args, **kwargs):
real_cursor = self._apm_wrap_conn.cursor(*args, **kwargs)
cur = CursorProxy(real_cursor, self)
return cur
def __getattr__(self, key):
if key in self.__dict__:
return getattr(self, key)
return getattr(self._apm_wrap_conn, key)
def __setattr__(self, key, value):
if( key in ['_apm_wrap_conn', '_apm_comp_name', '_apm_host_info']):
self.__dict__[key] = value
else:
return setattr(self._apm_wrap_conn, key, value)
@staticmethod
def get_host_info(method_info, conn_kwargs):
host_info = {}
if constants.host in conn_kwargs:
host_info[constants.host] = conn_kwargs[constants.host]
elif constants.default_host in method_info:
host_info[constants.host] = conn_kwargs[constants.host]
if constants.port in conn_kwargs:
host_info[constants.port] = str(conn_kwargs[constants.port])
elif constants.default_port in method_info:
host_info[constants.port] = method_info[constants.default_port]
return host_info
@staticmethod
def instrument_conn(original, module, method_info):
def conn_wrapper(*args, **kwargs):
conn = original(*args, **kwargs)
if conn is not None:
comp = method_info.get(constants.component_str, '')
host_info = ConnectionProxy.get_host_info(method_info, kwargs)
new_conn = ConnectionProxy(conn, comp, host_info)
return new_conn
return conn
return conn_wrapper
| [
"apminsight.agentfactory.get_agent",
"apminsight.util.is_non_empty_string"
] | [((1679, 1707), 'apminsight.util.is_non_empty_string', 'is_non_empty_string', (['args[0]'], {}), '(args[0])\n', (1698, 1707), False, 'from apminsight.util import is_non_empty_string\n'), ((1497, 1508), 'apminsight.agentfactory.get_agent', 'get_agent', ([], {}), '()\n', (1506, 1508), False, 'from apminsight.agentfactory import get_agent\n')] |
from weatherman import weather_butler
import pytest
import datetime # noqa
import sqlite3 # noqa
import yaml
import json # noqa
import os
import unittest
mock = unittest.mock.Mock()
master_copnfig = 'etc/weatherman.yml'
with open(master_copnfig) as ycf:
config = yaml.load(ycf, Loader=yaml.FullLoader)
environment = os.environ.get('ENVIRONMENT')
@pytest.fixture(scope="function")
def setup_wb():
wb = weather_butler.WeatherButler('db/weatherman_unit')
return wb
# def test_get_response():
# def test_format_request_city_id_list():
# def test_format_response():
# def test_poll():
| [
"yaml.load",
"unittest.mock.Mock",
"pytest.fixture",
"os.environ.get",
"weatherman.weather_butler.WeatherButler"
] | [((161, 181), 'unittest.mock.Mock', 'unittest.mock.Mock', ([], {}), '()\n', (179, 181), False, 'import unittest\n'), ((321, 350), 'os.environ.get', 'os.environ.get', (['"""ENVIRONMENT"""'], {}), "('ENVIRONMENT')\n", (335, 350), False, 'import os\n'), ((353, 385), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (367, 385), False, 'import pytest\n'), ((268, 306), 'yaml.load', 'yaml.load', (['ycf'], {'Loader': 'yaml.FullLoader'}), '(ycf, Loader=yaml.FullLoader)\n', (277, 306), False, 'import yaml\n'), ((411, 461), 'weatherman.weather_butler.WeatherButler', 'weather_butler.WeatherButler', (['"""db/weatherman_unit"""'], {}), "('db/weatherman_unit')\n", (439, 461), False, 'from weatherman import weather_butler\n')] |
from mean import mean
import pytest
def test_ints():
num_list = [1, 2, 3, 4, 5]
obs = mean(num_list)
assert obs == 3
def test_not_numbers():
values = [2, "lolcats"]
with pytest.raises(TypeError):
out = mean(values)
def test_zero():
num_list = [0, 2, 4, 6]
assert mean(num_list) == 3
def test_empty():
assert mean([]) == 0
def test_single_int():
with pytest.raises(TypeError):
mean(1)
| [
"pytest.raises",
"mean.mean"
] | [((96, 110), 'mean.mean', 'mean', (['num_list'], {}), '(num_list)\n', (100, 110), False, 'from mean import mean\n'), ((194, 218), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (207, 218), False, 'import pytest\n'), ((234, 246), 'mean.mean', 'mean', (['values'], {}), '(values)\n', (238, 246), False, 'from mean import mean\n'), ((304, 318), 'mean.mean', 'mean', (['num_list'], {}), '(num_list)\n', (308, 318), False, 'from mean import mean\n'), ((354, 362), 'mean.mean', 'mean', (['[]'], {}), '([])\n', (358, 362), False, 'from mean import mean\n'), ((401, 425), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (414, 425), False, 'import pytest\n'), ((435, 442), 'mean.mean', 'mean', (['(1)'], {}), '(1)\n', (439, 442), False, 'from mean import mean\n')] |
# -*- coding: utf-8 -*-
"""Console script for gitlab_migration."""
import os
import sys
import click
from gitlab_migration import gitlab_migration as glm
@click.group()
def cli():
pass
@cli.group()
def projects():
"""Commands for migrating projects."""
return 0
@projects.command()
@click.argument('csv', type=click.File('r'))
@click.argument('target_base_url', type=click.STRING)
@click.argument('target_token', type=click.STRING)
def from_csv(csv, target_base_url, target_token):
'''
read in repos to action from a csv, migrate to target_base_url
csv must contain two columns: source url in the first, and target base url in the second.
target base url MUST be fleshed out. eg: `https://gitlab.example.com/` or `<EMAIL>:`
target_token must be an API-level private token valid on the target server
'''
for line in csv.readlines():
old_url, target_group = [string.strip() for string in line.split(',')]
click.echo(f"working on {old_url}...")
glm.migrate_repo(old_url, target_base_url, target_group, target_token)
@projects.command()
@click.argument('csv', type=click.File('w'))
@click.argument('gitlab_url', type=click.STRING)
@click.argument('token', type=click.STRING)
def to_csv(csv, gitlab_url, token):
'''
get the SSH url for all projects (except archived projects) and write them to a (single-column) csv.
WARNING: this will silently overwrite the specified file if it already exists
'''
click.echo(f"Fetching all project SSH URLs from {gitlab_url}...")
csv.writelines([f"{url},\n" for url in glm.get_project_urls(gitlab_url, token)])
click.echo("Done.")
@projects.command()
@click.argument('path', type=click.STRING)
@click.argument('new_base_url', type=click.STRING)
@click.argument('old_base_url', type=click.STRING)
@click.argument('target_group', type=click.STRING)
@click.option('set_as_origin', '--set-as-origin/--set-as-new', default=True)
def update_local(path, new_base_url, old_base_url, target_group, set_as_origin):
for child_path in os.listdir(path):
if os.path.isdir(child_path) and os.path.isdir(f"{child_path}/.git"):
glm.update_local_repo(child_path, old_base_url, new_base_url, target_group, set_as_origin)
@cli.group()
def variables():
"""Commands for migrating group variables."""
return 0
@variables.command()
@click.option('src_group', '--source-group', default=None, type=click.STRING, help="Leave blank to migrate vars from all groups")
@click.argument('target_group', type=click.STRING)
@click.argument('src_gitlab_url', type=click.STRING)
@click.argument('target_gitlab_url', type=click.STRING)
@click.argument('src_token', type=click.STRING)
@click.argument('target_token', type=click.STRING)
def migrate(src_group, target_group, src_gitlab_url, target_gitlab_url, src_token, target_token):
'''
migrate group variables from 1+ groups on one host to a single group on another host
'''
if src_group:
src_group_id = glm._get_namespace_id(src_gitlab_url, src_group, src_token)
else:
src_group_id = None
target_group_id = glm._get_namespace_id(target_gitlab_url, target_group, target_token)
for var in glm.get_group_vars(src_gitlab_url, src_token, src_group_id):
glm.create_group_var(target_gitlab_url, target_token, var, target_group_id)
if __name__ == "__main__":
sys.exit(cli()) # pragma: no cover | [
"gitlab_migration.gitlab_migration.get_project_urls",
"click.argument",
"os.path.isdir",
"gitlab_migration.gitlab_migration.update_local_repo",
"click.option",
"click.echo",
"click.File",
"gitlab_migration.gitlab_migration._get_namespace_id",
"gitlab_migration.gitlab_migration.migrate_repo",
"gitlab_migration.gitlab_migration.create_group_var",
"click.group",
"gitlab_migration.gitlab_migration.get_group_vars",
"os.listdir"
] | [((159, 172), 'click.group', 'click.group', ([], {}), '()\n', (170, 172), False, 'import click\n'), ((346, 398), 'click.argument', 'click.argument', (['"""target_base_url"""'], {'type': 'click.STRING'}), "('target_base_url', type=click.STRING)\n", (360, 398), False, 'import click\n'), ((400, 449), 'click.argument', 'click.argument', (['"""target_token"""'], {'type': 'click.STRING'}), "('target_token', type=click.STRING)\n", (414, 449), False, 'import click\n'), ((1151, 1198), 'click.argument', 'click.argument', (['"""gitlab_url"""'], {'type': 'click.STRING'}), "('gitlab_url', type=click.STRING)\n", (1165, 1198), False, 'import click\n'), ((1200, 1242), 'click.argument', 'click.argument', (['"""token"""'], {'type': 'click.STRING'}), "('token', type=click.STRING)\n", (1214, 1242), False, 'import click\n'), ((1684, 1725), 'click.argument', 'click.argument', (['"""path"""'], {'type': 'click.STRING'}), "('path', type=click.STRING)\n", (1698, 1725), False, 'import click\n'), ((1727, 1776), 'click.argument', 'click.argument', (['"""new_base_url"""'], {'type': 'click.STRING'}), "('new_base_url', type=click.STRING)\n", (1741, 1776), False, 'import click\n'), ((1778, 1827), 'click.argument', 'click.argument', (['"""old_base_url"""'], {'type': 'click.STRING'}), "('old_base_url', type=click.STRING)\n", (1792, 1827), False, 'import click\n'), ((1829, 1878), 'click.argument', 'click.argument', (['"""target_group"""'], {'type': 'click.STRING'}), "('target_group', type=click.STRING)\n", (1843, 1878), False, 'import click\n'), ((1880, 1955), 'click.option', 'click.option', (['"""set_as_origin"""', '"""--set-as-origin/--set-as-new"""'], {'default': '(True)'}), "('set_as_origin', '--set-as-origin/--set-as-new', default=True)\n", (1892, 1955), False, 'import click\n'), ((2377, 2509), 'click.option', 'click.option', (['"""src_group"""', '"""--source-group"""'], {'default': 'None', 'type': 'click.STRING', 'help': '"""Leave blank to migrate vars from all groups"""'}), "('src_group', '--source-group', default=None, type=click.STRING,\n help='Leave blank to migrate vars from all groups')\n", (2389, 2509), False, 'import click\n'), ((2507, 2556), 'click.argument', 'click.argument', (['"""target_group"""'], {'type': 'click.STRING'}), "('target_group', type=click.STRING)\n", (2521, 2556), False, 'import click\n'), ((2558, 2609), 'click.argument', 'click.argument', (['"""src_gitlab_url"""'], {'type': 'click.STRING'}), "('src_gitlab_url', type=click.STRING)\n", (2572, 2609), False, 'import click\n'), ((2611, 2665), 'click.argument', 'click.argument', (['"""target_gitlab_url"""'], {'type': 'click.STRING'}), "('target_gitlab_url', type=click.STRING)\n", (2625, 2665), False, 'import click\n'), ((2667, 2713), 'click.argument', 'click.argument', (['"""src_token"""'], {'type': 'click.STRING'}), "('src_token', type=click.STRING)\n", (2681, 2713), False, 'import click\n'), ((2715, 2764), 'click.argument', 'click.argument', (['"""target_token"""'], {'type': 'click.STRING'}), "('target_token', type=click.STRING)\n", (2729, 2764), False, 'import click\n'), ((1487, 1552), 'click.echo', 'click.echo', (['f"""Fetching all project SSH URLs from {gitlab_url}..."""'], {}), "(f'Fetching all project SSH URLs from {gitlab_url}...')\n", (1497, 1552), False, 'import click\n'), ((1642, 1661), 'click.echo', 'click.echo', (['"""Done."""'], {}), "('Done.')\n", (1652, 1661), False, 'import click\n'), ((2059, 2075), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2069, 2075), False, 'import os\n'), ((3131, 3199), 'gitlab_migration.gitlab_migration._get_namespace_id', 'glm._get_namespace_id', (['target_gitlab_url', 'target_group', 'target_token'], {}), '(target_gitlab_url, target_group, target_token)\n', (3152, 3199), True, 'from gitlab_migration import gitlab_migration as glm\n'), ((3216, 3275), 'gitlab_migration.gitlab_migration.get_group_vars', 'glm.get_group_vars', (['src_gitlab_url', 'src_token', 'src_group_id'], {}), '(src_gitlab_url, src_token, src_group_id)\n', (3234, 3275), True, 'from gitlab_migration import gitlab_migration as glm\n'), ((966, 1004), 'click.echo', 'click.echo', (['f"""working on {old_url}..."""'], {}), "(f'working on {old_url}...')\n", (976, 1004), False, 'import click\n'), ((1013, 1083), 'gitlab_migration.gitlab_migration.migrate_repo', 'glm.migrate_repo', (['old_url', 'target_base_url', 'target_group', 'target_token'], {}), '(old_url, target_base_url, target_group, target_token)\n', (1029, 1083), True, 'from gitlab_migration import gitlab_migration as glm\n'), ((328, 343), 'click.File', 'click.File', (['"""r"""'], {}), "('r')\n", (338, 343), False, 'import click\n'), ((1133, 1148), 'click.File', 'click.File', (['"""w"""'], {}), "('w')\n", (1143, 1148), False, 'import click\n'), ((3009, 3068), 'gitlab_migration.gitlab_migration._get_namespace_id', 'glm._get_namespace_id', (['src_gitlab_url', 'src_group', 'src_token'], {}), '(src_gitlab_url, src_group, src_token)\n', (3030, 3068), True, 'from gitlab_migration import gitlab_migration as glm\n'), ((3285, 3360), 'gitlab_migration.gitlab_migration.create_group_var', 'glm.create_group_var', (['target_gitlab_url', 'target_token', 'var', 'target_group_id'], {}), '(target_gitlab_url, target_token, var, target_group_id)\n', (3305, 3360), True, 'from gitlab_migration import gitlab_migration as glm\n'), ((2088, 2113), 'os.path.isdir', 'os.path.isdir', (['child_path'], {}), '(child_path)\n', (2101, 2113), False, 'import os\n'), ((2118, 2153), 'os.path.isdir', 'os.path.isdir', (['f"""{child_path}/.git"""'], {}), "(f'{child_path}/.git')\n", (2131, 2153), False, 'import os\n'), ((2167, 2261), 'gitlab_migration.gitlab_migration.update_local_repo', 'glm.update_local_repo', (['child_path', 'old_base_url', 'new_base_url', 'target_group', 'set_as_origin'], {}), '(child_path, old_base_url, new_base_url, target_group,\n set_as_origin)\n', (2188, 2261), True, 'from gitlab_migration import gitlab_migration as glm\n'), ((1596, 1635), 'gitlab_migration.gitlab_migration.get_project_urls', 'glm.get_project_urls', (['gitlab_url', 'token'], {}), '(gitlab_url, token)\n', (1616, 1635), True, 'from gitlab_migration import gitlab_migration as glm\n')] |
"""
Please note, this code is only for python 3+. If you are using python 2+, please modify the code accordingly.
"""
import tensorflow as tf
def add_layer(inputs, in_size, out_size, activation_function=None, ):
# add one more layer and return the output of this layer
Weights = tf.Variable(tf.random_normal([in_size, out_size]))
biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, )
Wx_plus_b = tf.matmul(inputs, Weights) + biases
if activation_function is None:
outputs = Wx_plus_b
else:
outputs = activation_function(Wx_plus_b, )
return outputs
# define placeholder for inputs to network
# add output layer
# the error between prediction and real data
sess = tf.Session()
# important step
sess.run(tf.initialize_all_variables())
for i in range(1000):
pass
if i % 50 == 0:
pass
| [
"tensorflow.Session",
"tensorflow.matmul",
"tensorflow.zeros",
"tensorflow.random_normal",
"tensorflow.initialize_all_variables"
] | [((719, 731), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (729, 731), True, 'import tensorflow as tf\n'), ((758, 787), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (785, 787), True, 'import tensorflow as tf\n'), ((304, 341), 'tensorflow.random_normal', 'tf.random_normal', (['[in_size, out_size]'], {}), '([in_size, out_size])\n', (320, 341), True, 'import tensorflow as tf\n'), ((417, 443), 'tensorflow.matmul', 'tf.matmul', (['inputs', 'Weights'], {}), '(inputs, Weights)\n', (426, 443), True, 'import tensorflow as tf\n'), ((368, 391), 'tensorflow.zeros', 'tf.zeros', (['[1, out_size]'], {}), '([1, out_size])\n', (376, 391), True, 'import tensorflow as tf\n')] |
import unittest
from ClassCollection import ClassCollection
# Todo
# Check if the classes exist in the classCollection (helper?)
# Check if relationship already exists (helper?)
# if it does, error
# if not, add parameter pair to the relationshipCollection
class RelationshipTest(unittest.TestCase):
def testAddRelationshipNoFirstClass(self):
collection = ClassCollection()
collection.addClass("foo")
self.assertRaises(KeyError, collection.addRelationship, "bar", "foo", "aggregation")
def testAddRelationshipNoSecondClass(self):
collection = ClassCollection()
collection.addClass("bar")
self.assertRaises(KeyError, collection.addRelationship, "bar", "foo", "aggregation")
def testAddRelationshipNeitherClassExist(self):
collection = ClassCollection()
self.assertRaises(KeyError, collection.addRelationship, "bar", "foo", "aggregation")
# Adding a relationship that already exists
def testAddRelationshipAlreadyExists(self):
collection = ClassCollection()
collection.addClass("foo")
collection.addClass("bar")
collection.addRelationship("bar", "foo", "aggregation")
self.assertRaises(KeyError, collection.addRelationship, "bar", "foo", "aggregation")
def testRelationshipAddedSuccesfully(self):
collection = ClassCollection()
collection.addClass("foo")
collection.addClass("bar")
collection.addRelationship("foo", "bar", "realization")
self.assertIsNotNone(collection.getRelationship("foo", "bar"))
def testDeleteRelationshipNoFirstClass(self):
collection = ClassCollection()
collection.addClass("foo")
self.assertRaises(KeyError, collection.deleteRelationship, "bar", "foo")
def testDeleteRelationshipNoSecondClass(self):
collection = ClassCollection()
collection.addClass("bar")
self.assertRaises(KeyError, collection.deleteRelationship, "bar", "foo")
def testDeleteRelationshipNeitherClassExist(self):
collection = ClassCollection()
self.assertRaises(KeyError, collection.deleteRelationship, "bar", "foo")
def testRelationshipDeletedSuccesfully(self):
collection = ClassCollection()
collection.addClass("foo")
collection.addClass("bar")
collection.addRelationship("foo", "bar", "inheritance")
collection.deleteRelationship("foo", "bar")
self.assertNotIn(("foo", "bar"), collection.relationshipDict)
self.assertRaises(KeyError, collection.deleteRelationship, "foo", "bar")
def testRenameRelationship(self):
collection = ClassCollection()
collection.addClass("foo")
collection.addClass("bar")
collection.addRelationship("foo", "bar", "inheritance")
collection.renameRelationship("foo", "bar", "composition")
self.assertEquals("composition",collection.relationshipDict[("foo", "bar")].typ)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"ClassCollection.ClassCollection"
] | [((3005, 3020), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3018, 3020), False, 'import unittest\n'), ((370, 387), 'ClassCollection.ClassCollection', 'ClassCollection', ([], {}), '()\n', (385, 387), False, 'from ClassCollection import ClassCollection\n'), ((586, 603), 'ClassCollection.ClassCollection', 'ClassCollection', ([], {}), '()\n', (601, 603), False, 'from ClassCollection import ClassCollection\n'), ((810, 827), 'ClassCollection.ClassCollection', 'ClassCollection', ([], {}), '()\n', (825, 827), False, 'from ClassCollection import ClassCollection\n'), ((1039, 1056), 'ClassCollection.ClassCollection', 'ClassCollection', ([], {}), '()\n', (1054, 1056), False, 'from ClassCollection import ClassCollection\n'), ((1354, 1371), 'ClassCollection.ClassCollection', 'ClassCollection', ([], {}), '()\n', (1369, 1371), False, 'from ClassCollection import ClassCollection\n'), ((1649, 1666), 'ClassCollection.ClassCollection', 'ClassCollection', ([], {}), '()\n', (1664, 1666), False, 'from ClassCollection import ClassCollection\n'), ((1856, 1873), 'ClassCollection.ClassCollection', 'ClassCollection', ([], {}), '()\n', (1871, 1873), False, 'from ClassCollection import ClassCollection\n'), ((2071, 2088), 'ClassCollection.ClassCollection', 'ClassCollection', ([], {}), '()\n', (2086, 2088), False, 'from ClassCollection import ClassCollection\n'), ((2242, 2259), 'ClassCollection.ClassCollection', 'ClassCollection', ([], {}), '()\n', (2257, 2259), False, 'from ClassCollection import ClassCollection\n'), ((2657, 2674), 'ClassCollection.ClassCollection', 'ClassCollection', ([], {}), '()\n', (2672, 2674), False, 'from ClassCollection import ClassCollection\n')] |
from mmgroup.mm_space import MMSpace, MMV, MMVector
from mmgroup.mm_space import characteristics
from mmgroup.structures.mm0_group import MM0Group
from mmgroup.tests.spaces.sparse_mm_space import SparseMmSpace
from mmgroup.tests.spaces.sparse_mm_space import SparseMmV
from mmgroup.tests.groups.mgroup_n import MGroupN
#print("module mmgroup.tests.spaces.spaces is deprecated!!")
spaces_dict = {}
g = MM0Group()
ref_g = MGroupN()
class TestSpace:
def __init__(self, p):
self.p = p
self.space = MMV(p)
self.ref_space = SparseMmV(p)
self.group = g
self.ref_group = ref_g
def __call__(self, *args):
return self.space(*args)
def MMTestSpace(p):
global spaces_dict
try:
return spaces_dict[p]
except KeyError:
spaces_dict[p] = TestSpace(p)
return spaces_dict[p]
| [
"mmgroup.mm_space.MMV",
"mmgroup.tests.groups.mgroup_n.MGroupN",
"mmgroup.structures.mm0_group.MM0Group",
"mmgroup.tests.spaces.sparse_mm_space.SparseMmV"
] | [((407, 417), 'mmgroup.structures.mm0_group.MM0Group', 'MM0Group', ([], {}), '()\n', (415, 417), False, 'from mmgroup.structures.mm0_group import MM0Group\n'), ((426, 435), 'mmgroup.tests.groups.mgroup_n.MGroupN', 'MGroupN', ([], {}), '()\n', (433, 435), False, 'from mmgroup.tests.groups.mgroup_n import MGroupN\n'), ((522, 528), 'mmgroup.mm_space.MMV', 'MMV', (['p'], {}), '(p)\n', (525, 528), False, 'from mmgroup.mm_space import MMSpace, MMV, MMVector\n'), ((555, 567), 'mmgroup.tests.spaces.sparse_mm_space.SparseMmV', 'SparseMmV', (['p'], {}), '(p)\n', (564, 567), False, 'from mmgroup.tests.spaces.sparse_mm_space import SparseMmV\n')] |
"""Advanced Collection of Data: Collects all the walls of height 10"""
__author__ = '<NAME>'
import Autodesk.Revit.DB as DB
doc = __revit__.ActiveUIDocument.Document
uidoc = __revit__.ActiveUIDocument
height_param_id = DB.ElementId(DB.BuiltInParameter.WALL_USER_HEIGHT_PARAM)
height_param_prov = DB.ParameterValueProvider(height_param_id)
param_equality = DB.FilterNumericEquals()
heigh_value_rule = DB.FilterDoubleRule(height_param_prov,
param_equality,
10.0,
1E-6)
param_filter = DB.ElementParameterFilter(heigh_value_rule)
walls = DB.FilteredElementCollector(doc) \
.WherePasses(param_filter) \
.ToElementIds()
uidoc.Selection.SetElementIds(walls) | [
"Autodesk.Revit.DB.FilterNumericEquals",
"Autodesk.Revit.DB.FilteredElementCollector",
"Autodesk.Revit.DB.ElementId",
"Autodesk.Revit.DB.FilterDoubleRule",
"Autodesk.Revit.DB.ElementParameterFilter",
"Autodesk.Revit.DB.ParameterValueProvider"
] | [((223, 279), 'Autodesk.Revit.DB.ElementId', 'DB.ElementId', (['DB.BuiltInParameter.WALL_USER_HEIGHT_PARAM'], {}), '(DB.BuiltInParameter.WALL_USER_HEIGHT_PARAM)\n', (235, 279), True, 'import Autodesk.Revit.DB as DB\n'), ((301, 343), 'Autodesk.Revit.DB.ParameterValueProvider', 'DB.ParameterValueProvider', (['height_param_id'], {}), '(height_param_id)\n', (326, 343), True, 'import Autodesk.Revit.DB as DB\n'), ((362, 386), 'Autodesk.Revit.DB.FilterNumericEquals', 'DB.FilterNumericEquals', ([], {}), '()\n', (384, 386), True, 'import Autodesk.Revit.DB as DB\n'), ((407, 474), 'Autodesk.Revit.DB.FilterDoubleRule', 'DB.FilterDoubleRule', (['height_param_prov', 'param_equality', '(10.0)', '(1e-06)'], {}), '(height_param_prov, param_equality, 10.0, 1e-06)\n', (426, 474), True, 'import Autodesk.Revit.DB as DB\n'), ((607, 650), 'Autodesk.Revit.DB.ElementParameterFilter', 'DB.ElementParameterFilter', (['heigh_value_rule'], {}), '(heigh_value_rule)\n', (632, 650), True, 'import Autodesk.Revit.DB as DB\n'), ((661, 693), 'Autodesk.Revit.DB.FilteredElementCollector', 'DB.FilteredElementCollector', (['doc'], {}), '(doc)\n', (688, 693), True, 'import Autodesk.Revit.DB as DB\n')] |
from big_fiubrother_core import QueueTask
from big_fiubrother_core.db import Database, VideoChunk
from big_fiubrother_core.storage import raw_storage
from big_fiubrother_core.synchronization import ProcessSynchronizer
from os import path
import logging
class StoreVideoChunk(QueueTask):
def __init__(self, configuration, input_queue, output_queue):
super().__init__(input_queue)
self.output_queue = output_queue
self.configuration = configuration
def init(self):
self.db = Database(self.configuration['db'])
self.storage = raw_storage(self.configuration['storage'])
self.process_synchronizer = ProcessSynchronizer(
self.configuration['synchronization'])
def execute_with(self, message):
video_chunk = VideoChunk(camera_id=message.camera_id,
timestamp=message.timestamp)
self.db.add(video_chunk)
logging.info(f"{video_chunk.id} created in DB. Sampling starting!")
self.process_synchronizer.register_video_task(str(video_chunk.id))
filepath = path.join('tmp', '{}.h264'.format(video_chunk.id))
with open(filepath, 'wb') as file:
file.write(message.payload)
self.storage.store_file(str(video_chunk.id), filepath)
self.output_queue.put({
'video_chunk_id': video_chunk.id,
'path': filepath
})
def close(self):
self.db.close()
self.process_synchronizer.close()
| [
"big_fiubrother_core.synchronization.ProcessSynchronizer",
"big_fiubrother_core.db.VideoChunk",
"logging.info",
"big_fiubrother_core.storage.raw_storage",
"big_fiubrother_core.db.Database"
] | [((517, 551), 'big_fiubrother_core.db.Database', 'Database', (["self.configuration['db']"], {}), "(self.configuration['db'])\n", (525, 551), False, 'from big_fiubrother_core.db import Database, VideoChunk\n'), ((575, 617), 'big_fiubrother_core.storage.raw_storage', 'raw_storage', (["self.configuration['storage']"], {}), "(self.configuration['storage'])\n", (586, 617), False, 'from big_fiubrother_core.storage import raw_storage\n'), ((654, 712), 'big_fiubrother_core.synchronization.ProcessSynchronizer', 'ProcessSynchronizer', (["self.configuration['synchronization']"], {}), "(self.configuration['synchronization'])\n", (673, 712), False, 'from big_fiubrother_core.synchronization import ProcessSynchronizer\n'), ((786, 854), 'big_fiubrother_core.db.VideoChunk', 'VideoChunk', ([], {'camera_id': 'message.camera_id', 'timestamp': 'message.timestamp'}), '(camera_id=message.camera_id, timestamp=message.timestamp)\n', (796, 854), False, 'from big_fiubrother_core.db import Database, VideoChunk\n'), ((931, 998), 'logging.info', 'logging.info', (['f"""{video_chunk.id} created in DB. Sampling starting!"""'], {}), "(f'{video_chunk.id} created in DB. Sampling starting!')\n", (943, 998), False, 'import logging\n')] |
from random import randint
for i in range(5):
n = (randint(0,10))
if(i == 0):
m = n
M = n
if (n > M):
M = n
elif (n < m):
m = n
print(n, end=' ')
print(f'\nO maior número foi {M}\nE o Menor foi {m}') | [
"random.randint"
] | [((55, 69), 'random.randint', 'randint', (['(0)', '(10)'], {}), '(0, 10)\n', (62, 69), False, 'from random import randint\n')] |
# apispec-swaggerinherit - Plugin for apispec adding support for Swagger-style
# inheritance using `allOf`
# Copyright (C) 2018 <NAME>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from apispec.ext.marshmallow import swagger
from marshmallow import Schema
std_bases = [Schema]
try:
from marshmallow_oneofschema import OneOfSchema
std_bases.append(OneOfSchema)
except ImportError:
pass
def swaggerinherit_definition_helper(spec, name, schema, definition, **kwargs):
"""Definition helper that modifies the schema definition to make use of
swagger-style inheritance using `allOf`. Uses the `schema` parameter.
"""
parents = [b for b in schema.__bases__ if b not in std_bases]
if not parents:
return
ref_path = swagger.get_ref_path(spec.openapi_version.version[0])
try:
refs = ['#/{}/{}'.format(ref_path,
spec.plugins['apispec.ext.marshmallow']['refs'][schema_cls])
for schema_cls in parents]
except KeyError:
raise ValueError("Parent schemas must be added to the spec before the "
"child schema")
child_def = definition.copy()
for parent in parents:
for name in parent._declared_fields.keys():
del child_def['properties'][name]
try:
child_def['required'].remove(name)
except ValueError:
pass
if not child_def['required']:
del child_def['required']
definition.clear()
return {
'allOf': [{'$ref': ref} for ref in refs] + [child_def]
}
def setup(spec):
spec.register_definition_helper(swaggerinherit_definition_helper)
| [
"apispec.ext.marshmallow.swagger.get_ref_path"
] | [((1390, 1443), 'apispec.ext.marshmallow.swagger.get_ref_path', 'swagger.get_ref_path', (['spec.openapi_version.version[0]'], {}), '(spec.openapi_version.version[0])\n', (1410, 1443), False, 'from apispec.ext.marshmallow import swagger\n')] |
import pytest
from aao.spiders import spiders
pytestmark = pytest.mark.sports
class TestSport():
"""Nothing to test. """
pass
class TestSoccer():
"""Test the Soccer ABC across all bookmakers. """
@pytest.fixture(scope='class', params=spiders.values())
def spider(self, request):
s = request.param()
yield s
s.quit()
competitions = {
'england': 'premier_league',
'italy': 'serie_a',
'spain': 'la_liga',
}
def test_countries(self, spider):
countries = spider.soccer.countries()
assert set(self.competitions.keys()) <= set(countries)
assert isinstance(countries, list)
def test_countries_full(self, spider):
countries = spider.soccer.countries(full=True)
assert set(self.competitions.keys()) <= set(countries.keys())
assert isinstance(countries, dict)
@pytest.mark.parametrize('country', competitions.keys())
def test_leagues(self, spider, country):
leagues = spider.soccer.leagues(country)
assert self.competitions[country] in leagues
assert isinstance(leagues, list)
@pytest.mark.parametrize('country', competitions.keys())
def test_leagues_full(self, spider, country):
leagues = spider.soccer.leagues(country, full=True)
assert self.competitions[country] in leagues.keys()
assert isinstance(leagues, dict)
def test_league_not_supported(self, spider):
country = 'foo_country'
with pytest.raises(KeyError, match=f'{country} is not supported *'):
spider.soccer.leagues(country)
@pytest.mark.parametrize('country,league', competitions.items())
def test_teams(self, spider, country, league):
teams = spider.soccer.teams(country, league)
assert isinstance(teams, list)
@pytest.mark.parametrize('country,league', competitions.items())
def test_teams_full(self, spider, country, league):
teams = spider.soccer.teams(country, league, full=True)
assert isinstance(teams, dict)
def test_teams_not_supported(self, spider):
country, league = 'serie_a', 'foo_league'
with pytest.raises(KeyError, match=f'{league} is not supported *'):
spider.soccer.teams(country, league)
@pytest.mark.parametrize('country,league', competitions.items())
def test_setattr_competiton(self, spider, country, league):
spider.soccer._setattr_competiton(country, league)
assert spider.soccer._country
assert spider.soccer.country
assert spider.soccer._league
assert spider.soccer.league
@pytest.mark.parametrize(
'country,league', [next(iter(competitions.items()))])
def test_events(self, spider, country, league):
events = spider.soccer.events(country, league)
assert isinstance(events, list)
assert events
@pytest.mark.parametrize(
'country,league', [next(iter(competitions.items()))])
def test_odds(self, spider, country, league):
events, odds = spider.soccer.odds(country, league)
assert isinstance(events, list)
assert events
assert isinstance(odds, list)
assert odds
| [
"pytest.raises",
"aao.spiders.spiders.values"
] | [((258, 274), 'aao.spiders.spiders.values', 'spiders.values', ([], {}), '()\n', (272, 274), False, 'from aao.spiders import spiders\n'), ((1510, 1572), 'pytest.raises', 'pytest.raises', (['KeyError'], {'match': 'f"""{country} is not supported *"""'}), "(KeyError, match=f'{country} is not supported *')\n", (1523, 1572), False, 'import pytest\n'), ((2171, 2232), 'pytest.raises', 'pytest.raises', (['KeyError'], {'match': 'f"""{league} is not supported *"""'}), "(KeyError, match=f'{league} is not supported *')\n", (2184, 2232), False, 'import pytest\n')] |
from bs_dateutil.easter import easter
from bs_dateutil.easter import EASTER_WESTERN, EASTER_ORTHODOX, EASTER_JULIAN
from datetime import date
import pytest
# List of easters between 1990 and 2050
western_easter_dates = [
date(1990, 4, 15),
date(1991, 3, 31),
date(1992, 4, 19),
date(1993, 4, 11),
date(1994, 4, 3),
date(1995, 4, 16),
date(1996, 4, 7),
date(1997, 3, 30),
date(1998, 4, 12),
date(1999, 4, 4),
date(2000, 4, 23),
date(2001, 4, 15),
date(2002, 3, 31),
date(2003, 4, 20),
date(2004, 4, 11),
date(2005, 3, 27),
date(2006, 4, 16),
date(2007, 4, 8),
date(2008, 3, 23),
date(2009, 4, 12),
date(2010, 4, 4),
date(2011, 4, 24),
date(2012, 4, 8),
date(2013, 3, 31),
date(2014, 4, 20),
date(2015, 4, 5),
date(2016, 3, 27),
date(2017, 4, 16),
date(2018, 4, 1),
date(2019, 4, 21),
date(2020, 4, 12),
date(2021, 4, 4),
date(2022, 4, 17),
date(2023, 4, 9),
date(2024, 3, 31),
date(2025, 4, 20),
date(2026, 4, 5),
date(2027, 3, 28),
date(2028, 4, 16),
date(2029, 4, 1),
date(2030, 4, 21),
date(2031, 4, 13),
date(2032, 3, 28),
date(2033, 4, 17),
date(2034, 4, 9),
date(2035, 3, 25),
date(2036, 4, 13),
date(2037, 4, 5),
date(2038, 4, 25),
date(2039, 4, 10),
date(2040, 4, 1),
date(2041, 4, 21),
date(2042, 4, 6),
date(2043, 3, 29),
date(2044, 4, 17),
date(2045, 4, 9),
date(2046, 3, 25),
date(2047, 4, 14),
date(2048, 4, 5),
date(2049, 4, 18),
date(2050, 4, 10),
]
orthodox_easter_dates = [
date(1990, 4, 15),
date(1991, 4, 7),
date(1992, 4, 26),
date(1993, 4, 18),
date(1994, 5, 1),
date(1995, 4, 23),
date(1996, 4, 14),
date(1997, 4, 27),
date(1998, 4, 19),
date(1999, 4, 11),
date(2000, 4, 30),
date(2001, 4, 15),
date(2002, 5, 5),
date(2003, 4, 27),
date(2004, 4, 11),
date(2005, 5, 1),
date(2006, 4, 23),
date(2007, 4, 8),
date(2008, 4, 27),
date(2009, 4, 19),
date(2010, 4, 4),
date(2011, 4, 24),
date(2012, 4, 15),
date(2013, 5, 5),
date(2014, 4, 20),
date(2015, 4, 12),
date(2016, 5, 1),
date(2017, 4, 16),
date(2018, 4, 8),
date(2019, 4, 28),
date(2020, 4, 19),
date(2021, 5, 2),
date(2022, 4, 24),
date(2023, 4, 16),
date(2024, 5, 5),
date(2025, 4, 20),
date(2026, 4, 12),
date(2027, 5, 2),
date(2028, 4, 16),
date(2029, 4, 8),
date(2030, 4, 28),
date(2031, 4, 13),
date(2032, 5, 2),
date(2033, 4, 24),
date(2034, 4, 9),
date(2035, 4, 29),
date(2036, 4, 20),
date(2037, 4, 5),
date(2038, 4, 25),
date(2039, 4, 17),
date(2040, 5, 6),
date(2041, 4, 21),
date(2042, 4, 13),
date(2043, 5, 3),
date(2044, 4, 24),
date(2045, 4, 9),
date(2046, 4, 29),
date(2047, 4, 21),
date(2048, 4, 5),
date(2049, 4, 25),
date(2050, 4, 17),
]
# A random smattering of Julian dates.
# Pulled values from http://www.kevinlaughery.com/east4099.html
julian_easter_dates = [
date(326, 4, 3),
date(375, 4, 5),
date(492, 4, 5),
date(552, 3, 31),
date(562, 4, 9),
date(569, 4, 21),
date(597, 4, 14),
date(621, 4, 19),
date(636, 3, 31),
date(655, 3, 29),
date(700, 4, 11),
date(725, 4, 8),
date(750, 3, 29),
date(782, 4, 7),
date(835, 4, 18),
date(849, 4, 14),
date(867, 3, 30),
date(890, 4, 12),
date(922, 4, 21),
date(934, 4, 6),
date(1049, 3, 26),
date(1058, 4, 19),
date(1113, 4, 6),
date(1119, 3, 30),
date(1242, 4, 20),
date(1255, 3, 28),
date(1257, 4, 8),
date(1258, 3, 24),
date(1261, 4, 24),
date(1278, 4, 17),
date(1333, 4, 4),
date(1351, 4, 17),
date(1371, 4, 6),
date(1391, 3, 26),
date(1402, 3, 26),
date(1412, 4, 3),
date(1439, 4, 5),
date(1445, 3, 28),
date(1531, 4, 9),
date(1555, 4, 14),
]
@pytest.mark.parametrize("easter_date", western_easter_dates)
def test_easter_western(easter_date):
assert easter_date == easter(easter_date.year, EASTER_WESTERN)
@pytest.mark.parametrize("easter_date", orthodox_easter_dates)
def test_easter_orthodox(easter_date):
assert easter_date == easter(easter_date.year, EASTER_ORTHODOX)
@pytest.mark.parametrize("easter_date", julian_easter_dates)
def test_easter_julian(easter_date):
assert easter_date == easter(easter_date.year, EASTER_JULIAN)
def test_easter_bad_method():
with pytest.raises(ValueError):
easter(1975, 4)
| [
"pytest.mark.parametrize",
"pytest.raises",
"datetime.date",
"bs_dateutil.easter.easter"
] | [((4041, 4101), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""easter_date"""', 'western_easter_dates'], {}), "('easter_date', western_easter_dates)\n", (4064, 4101), False, 'import pytest\n'), ((4210, 4271), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""easter_date"""', 'orthodox_easter_dates'], {}), "('easter_date', orthodox_easter_dates)\n", (4233, 4271), False, 'import pytest\n'), ((4382, 4441), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""easter_date"""', 'julian_easter_dates'], {}), "('easter_date', julian_easter_dates)\n", (4405, 4441), False, 'import pytest\n'), ((227, 244), 'datetime.date', 'date', (['(1990)', '(4)', '(15)'], {}), '(1990, 4, 15)\n', (231, 244), False, 'from datetime import date\n'), ((250, 267), 'datetime.date', 'date', (['(1991)', '(3)', '(31)'], {}), '(1991, 3, 31)\n', (254, 267), False, 'from datetime import date\n'), ((273, 290), 'datetime.date', 'date', (['(1992)', '(4)', '(19)'], {}), '(1992, 4, 19)\n', (277, 290), False, 'from datetime import date\n'), ((296, 313), 'datetime.date', 'date', (['(1993)', '(4)', '(11)'], {}), '(1993, 4, 11)\n', (300, 313), False, 'from datetime import date\n'), ((319, 335), 'datetime.date', 'date', (['(1994)', '(4)', '(3)'], {}), '(1994, 4, 3)\n', (323, 335), False, 'from datetime import date\n'), ((341, 358), 'datetime.date', 'date', (['(1995)', '(4)', '(16)'], {}), '(1995, 4, 16)\n', (345, 358), False, 'from datetime import date\n'), ((364, 380), 'datetime.date', 'date', (['(1996)', '(4)', '(7)'], {}), '(1996, 4, 7)\n', (368, 380), False, 'from datetime import date\n'), ((386, 403), 'datetime.date', 'date', (['(1997)', '(3)', '(30)'], {}), '(1997, 3, 30)\n', (390, 403), False, 'from datetime import date\n'), ((409, 426), 'datetime.date', 'date', (['(1998)', '(4)', '(12)'], {}), '(1998, 4, 12)\n', (413, 426), False, 'from datetime import date\n'), ((432, 448), 'datetime.date', 'date', (['(1999)', '(4)', '(4)'], {}), '(1999, 4, 4)\n', (436, 448), False, 'from datetime import date\n'), ((454, 471), 'datetime.date', 'date', (['(2000)', '(4)', '(23)'], {}), '(2000, 4, 23)\n', (458, 471), False, 'from datetime import date\n'), ((477, 494), 'datetime.date', 'date', (['(2001)', '(4)', '(15)'], {}), '(2001, 4, 15)\n', (481, 494), False, 'from datetime import date\n'), ((500, 517), 'datetime.date', 'date', (['(2002)', '(3)', '(31)'], {}), '(2002, 3, 31)\n', (504, 517), False, 'from datetime import date\n'), ((523, 540), 'datetime.date', 'date', (['(2003)', '(4)', '(20)'], {}), '(2003, 4, 20)\n', (527, 540), False, 'from datetime import date\n'), ((546, 563), 'datetime.date', 'date', (['(2004)', '(4)', '(11)'], {}), '(2004, 4, 11)\n', (550, 563), False, 'from datetime import date\n'), ((569, 586), 'datetime.date', 'date', (['(2005)', '(3)', '(27)'], {}), '(2005, 3, 27)\n', (573, 586), False, 'from datetime import date\n'), ((592, 609), 'datetime.date', 'date', (['(2006)', '(4)', '(16)'], {}), '(2006, 4, 16)\n', (596, 609), False, 'from datetime import date\n'), ((615, 631), 'datetime.date', 'date', (['(2007)', '(4)', '(8)'], {}), '(2007, 4, 8)\n', (619, 631), False, 'from datetime import date\n'), ((637, 654), 'datetime.date', 'date', (['(2008)', '(3)', '(23)'], {}), '(2008, 3, 23)\n', (641, 654), False, 'from datetime import date\n'), ((660, 677), 'datetime.date', 'date', (['(2009)', '(4)', '(12)'], {}), '(2009, 4, 12)\n', (664, 677), False, 'from datetime import date\n'), ((683, 699), 'datetime.date', 'date', (['(2010)', '(4)', '(4)'], {}), '(2010, 4, 4)\n', (687, 699), False, 'from datetime import date\n'), ((705, 722), 'datetime.date', 'date', (['(2011)', '(4)', '(24)'], {}), '(2011, 4, 24)\n', (709, 722), False, 'from datetime import date\n'), ((728, 744), 'datetime.date', 'date', (['(2012)', '(4)', '(8)'], {}), '(2012, 4, 8)\n', (732, 744), False, 'from datetime import date\n'), ((750, 767), 'datetime.date', 'date', (['(2013)', '(3)', '(31)'], {}), '(2013, 3, 31)\n', (754, 767), False, 'from datetime import date\n'), ((773, 790), 'datetime.date', 'date', (['(2014)', '(4)', '(20)'], {}), '(2014, 4, 20)\n', (777, 790), False, 'from datetime import date\n'), ((796, 812), 'datetime.date', 'date', (['(2015)', '(4)', '(5)'], {}), '(2015, 4, 5)\n', (800, 812), False, 'from datetime import date\n'), ((818, 835), 'datetime.date', 'date', (['(2016)', '(3)', '(27)'], {}), '(2016, 3, 27)\n', (822, 835), False, 'from datetime import date\n'), ((841, 858), 'datetime.date', 'date', (['(2017)', '(4)', '(16)'], {}), '(2017, 4, 16)\n', (845, 858), False, 'from datetime import date\n'), ((864, 880), 'datetime.date', 'date', (['(2018)', '(4)', '(1)'], {}), '(2018, 4, 1)\n', (868, 880), False, 'from datetime import date\n'), ((886, 903), 'datetime.date', 'date', (['(2019)', '(4)', '(21)'], {}), '(2019, 4, 21)\n', (890, 903), False, 'from datetime import date\n'), ((909, 926), 'datetime.date', 'date', (['(2020)', '(4)', '(12)'], {}), '(2020, 4, 12)\n', (913, 926), False, 'from datetime import date\n'), ((932, 948), 'datetime.date', 'date', (['(2021)', '(4)', '(4)'], {}), '(2021, 4, 4)\n', (936, 948), False, 'from datetime import date\n'), ((954, 971), 'datetime.date', 'date', (['(2022)', '(4)', '(17)'], {}), '(2022, 4, 17)\n', (958, 971), False, 'from datetime import date\n'), ((977, 993), 'datetime.date', 'date', (['(2023)', '(4)', '(9)'], {}), '(2023, 4, 9)\n', (981, 993), False, 'from datetime import date\n'), ((999, 1016), 'datetime.date', 'date', (['(2024)', '(3)', '(31)'], {}), '(2024, 3, 31)\n', (1003, 1016), False, 'from datetime import date\n'), ((1022, 1039), 'datetime.date', 'date', (['(2025)', '(4)', '(20)'], {}), '(2025, 4, 20)\n', (1026, 1039), False, 'from datetime import date\n'), ((1045, 1061), 'datetime.date', 'date', (['(2026)', '(4)', '(5)'], {}), '(2026, 4, 5)\n', (1049, 1061), False, 'from datetime import date\n'), ((1067, 1084), 'datetime.date', 'date', (['(2027)', '(3)', '(28)'], {}), '(2027, 3, 28)\n', (1071, 1084), False, 'from datetime import date\n'), ((1090, 1107), 'datetime.date', 'date', (['(2028)', '(4)', '(16)'], {}), '(2028, 4, 16)\n', (1094, 1107), False, 'from datetime import date\n'), ((1113, 1129), 'datetime.date', 'date', (['(2029)', '(4)', '(1)'], {}), '(2029, 4, 1)\n', (1117, 1129), False, 'from datetime import date\n'), ((1135, 1152), 'datetime.date', 'date', (['(2030)', '(4)', '(21)'], {}), '(2030, 4, 21)\n', (1139, 1152), False, 'from datetime import date\n'), ((1158, 1175), 'datetime.date', 'date', (['(2031)', '(4)', '(13)'], {}), '(2031, 4, 13)\n', (1162, 1175), False, 'from datetime import date\n'), ((1181, 1198), 'datetime.date', 'date', (['(2032)', '(3)', '(28)'], {}), '(2032, 3, 28)\n', (1185, 1198), False, 'from datetime import date\n'), ((1204, 1221), 'datetime.date', 'date', (['(2033)', '(4)', '(17)'], {}), '(2033, 4, 17)\n', (1208, 1221), False, 'from datetime import date\n'), ((1227, 1243), 'datetime.date', 'date', (['(2034)', '(4)', '(9)'], {}), '(2034, 4, 9)\n', (1231, 1243), False, 'from datetime import date\n'), ((1249, 1266), 'datetime.date', 'date', (['(2035)', '(3)', '(25)'], {}), '(2035, 3, 25)\n', (1253, 1266), False, 'from datetime import date\n'), ((1272, 1289), 'datetime.date', 'date', (['(2036)', '(4)', '(13)'], {}), '(2036, 4, 13)\n', (1276, 1289), False, 'from datetime import date\n'), ((1295, 1311), 'datetime.date', 'date', (['(2037)', '(4)', '(5)'], {}), '(2037, 4, 5)\n', (1299, 1311), False, 'from datetime import date\n'), ((1317, 1334), 'datetime.date', 'date', (['(2038)', '(4)', '(25)'], {}), '(2038, 4, 25)\n', (1321, 1334), False, 'from datetime import date\n'), ((1340, 1357), 'datetime.date', 'date', (['(2039)', '(4)', '(10)'], {}), '(2039, 4, 10)\n', (1344, 1357), False, 'from datetime import date\n'), ((1363, 1379), 'datetime.date', 'date', (['(2040)', '(4)', '(1)'], {}), '(2040, 4, 1)\n', (1367, 1379), False, 'from datetime import date\n'), ((1385, 1402), 'datetime.date', 'date', (['(2041)', '(4)', '(21)'], {}), '(2041, 4, 21)\n', (1389, 1402), False, 'from datetime import date\n'), ((1408, 1424), 'datetime.date', 'date', (['(2042)', '(4)', '(6)'], {}), '(2042, 4, 6)\n', (1412, 1424), False, 'from datetime import date\n'), ((1430, 1447), 'datetime.date', 'date', (['(2043)', '(3)', '(29)'], {}), '(2043, 3, 29)\n', (1434, 1447), False, 'from datetime import date\n'), ((1453, 1470), 'datetime.date', 'date', (['(2044)', '(4)', '(17)'], {}), '(2044, 4, 17)\n', (1457, 1470), False, 'from datetime import date\n'), ((1476, 1492), 'datetime.date', 'date', (['(2045)', '(4)', '(9)'], {}), '(2045, 4, 9)\n', (1480, 1492), False, 'from datetime import date\n'), ((1498, 1515), 'datetime.date', 'date', (['(2046)', '(3)', '(25)'], {}), '(2046, 3, 25)\n', (1502, 1515), False, 'from datetime import date\n'), ((1521, 1538), 'datetime.date', 'date', (['(2047)', '(4)', '(14)'], {}), '(2047, 4, 14)\n', (1525, 1538), False, 'from datetime import date\n'), ((1544, 1560), 'datetime.date', 'date', (['(2048)', '(4)', '(5)'], {}), '(2048, 4, 5)\n', (1548, 1560), False, 'from datetime import date\n'), ((1566, 1583), 'datetime.date', 'date', (['(2049)', '(4)', '(18)'], {}), '(2049, 4, 18)\n', (1570, 1583), False, 'from datetime import date\n'), ((1589, 1606), 'datetime.date', 'date', (['(2050)', '(4)', '(10)'], {}), '(2050, 4, 10)\n', (1593, 1606), False, 'from datetime import date\n'), ((1641, 1658), 'datetime.date', 'date', (['(1990)', '(4)', '(15)'], {}), '(1990, 4, 15)\n', (1645, 1658), False, 'from datetime import date\n'), ((1664, 1680), 'datetime.date', 'date', (['(1991)', '(4)', '(7)'], {}), '(1991, 4, 7)\n', (1668, 1680), False, 'from datetime import date\n'), ((1686, 1703), 'datetime.date', 'date', (['(1992)', '(4)', '(26)'], {}), '(1992, 4, 26)\n', (1690, 1703), False, 'from datetime import date\n'), ((1709, 1726), 'datetime.date', 'date', (['(1993)', '(4)', '(18)'], {}), '(1993, 4, 18)\n', (1713, 1726), False, 'from datetime import date\n'), ((1732, 1748), 'datetime.date', 'date', (['(1994)', '(5)', '(1)'], {}), '(1994, 5, 1)\n', (1736, 1748), False, 'from datetime import date\n'), ((1754, 1771), 'datetime.date', 'date', (['(1995)', '(4)', '(23)'], {}), '(1995, 4, 23)\n', (1758, 1771), False, 'from datetime import date\n'), ((1777, 1794), 'datetime.date', 'date', (['(1996)', '(4)', '(14)'], {}), '(1996, 4, 14)\n', (1781, 1794), False, 'from datetime import date\n'), ((1800, 1817), 'datetime.date', 'date', (['(1997)', '(4)', '(27)'], {}), '(1997, 4, 27)\n', (1804, 1817), False, 'from datetime import date\n'), ((1823, 1840), 'datetime.date', 'date', (['(1998)', '(4)', '(19)'], {}), '(1998, 4, 19)\n', (1827, 1840), False, 'from datetime import date\n'), ((1846, 1863), 'datetime.date', 'date', (['(1999)', '(4)', '(11)'], {}), '(1999, 4, 11)\n', (1850, 1863), False, 'from datetime import date\n'), ((1869, 1886), 'datetime.date', 'date', (['(2000)', '(4)', '(30)'], {}), '(2000, 4, 30)\n', (1873, 1886), False, 'from datetime import date\n'), ((1892, 1909), 'datetime.date', 'date', (['(2001)', '(4)', '(15)'], {}), '(2001, 4, 15)\n', (1896, 1909), False, 'from datetime import date\n'), ((1915, 1931), 'datetime.date', 'date', (['(2002)', '(5)', '(5)'], {}), '(2002, 5, 5)\n', (1919, 1931), False, 'from datetime import date\n'), ((1937, 1954), 'datetime.date', 'date', (['(2003)', '(4)', '(27)'], {}), '(2003, 4, 27)\n', (1941, 1954), False, 'from datetime import date\n'), ((1960, 1977), 'datetime.date', 'date', (['(2004)', '(4)', '(11)'], {}), '(2004, 4, 11)\n', (1964, 1977), False, 'from datetime import date\n'), ((1983, 1999), 'datetime.date', 'date', (['(2005)', '(5)', '(1)'], {}), '(2005, 5, 1)\n', (1987, 1999), False, 'from datetime import date\n'), ((2005, 2022), 'datetime.date', 'date', (['(2006)', '(4)', '(23)'], {}), '(2006, 4, 23)\n', (2009, 2022), False, 'from datetime import date\n'), ((2028, 2044), 'datetime.date', 'date', (['(2007)', '(4)', '(8)'], {}), '(2007, 4, 8)\n', (2032, 2044), False, 'from datetime import date\n'), ((2050, 2067), 'datetime.date', 'date', (['(2008)', '(4)', '(27)'], {}), '(2008, 4, 27)\n', (2054, 2067), False, 'from datetime import date\n'), ((2073, 2090), 'datetime.date', 'date', (['(2009)', '(4)', '(19)'], {}), '(2009, 4, 19)\n', (2077, 2090), False, 'from datetime import date\n'), ((2096, 2112), 'datetime.date', 'date', (['(2010)', '(4)', '(4)'], {}), '(2010, 4, 4)\n', (2100, 2112), False, 'from datetime import date\n'), ((2118, 2135), 'datetime.date', 'date', (['(2011)', '(4)', '(24)'], {}), '(2011, 4, 24)\n', (2122, 2135), False, 'from datetime import date\n'), ((2141, 2158), 'datetime.date', 'date', (['(2012)', '(4)', '(15)'], {}), '(2012, 4, 15)\n', (2145, 2158), False, 'from datetime import date\n'), ((2164, 2180), 'datetime.date', 'date', (['(2013)', '(5)', '(5)'], {}), '(2013, 5, 5)\n', (2168, 2180), False, 'from datetime import date\n'), ((2186, 2203), 'datetime.date', 'date', (['(2014)', '(4)', '(20)'], {}), '(2014, 4, 20)\n', (2190, 2203), False, 'from datetime import date\n'), ((2209, 2226), 'datetime.date', 'date', (['(2015)', '(4)', '(12)'], {}), '(2015, 4, 12)\n', (2213, 2226), False, 'from datetime import date\n'), ((2232, 2248), 'datetime.date', 'date', (['(2016)', '(5)', '(1)'], {}), '(2016, 5, 1)\n', (2236, 2248), False, 'from datetime import date\n'), ((2254, 2271), 'datetime.date', 'date', (['(2017)', '(4)', '(16)'], {}), '(2017, 4, 16)\n', (2258, 2271), False, 'from datetime import date\n'), ((2277, 2293), 'datetime.date', 'date', (['(2018)', '(4)', '(8)'], {}), '(2018, 4, 8)\n', (2281, 2293), False, 'from datetime import date\n'), ((2299, 2316), 'datetime.date', 'date', (['(2019)', '(4)', '(28)'], {}), '(2019, 4, 28)\n', (2303, 2316), False, 'from datetime import date\n'), ((2322, 2339), 'datetime.date', 'date', (['(2020)', '(4)', '(19)'], {}), '(2020, 4, 19)\n', (2326, 2339), False, 'from datetime import date\n'), ((2345, 2361), 'datetime.date', 'date', (['(2021)', '(5)', '(2)'], {}), '(2021, 5, 2)\n', (2349, 2361), False, 'from datetime import date\n'), ((2367, 2384), 'datetime.date', 'date', (['(2022)', '(4)', '(24)'], {}), '(2022, 4, 24)\n', (2371, 2384), False, 'from datetime import date\n'), ((2390, 2407), 'datetime.date', 'date', (['(2023)', '(4)', '(16)'], {}), '(2023, 4, 16)\n', (2394, 2407), False, 'from datetime import date\n'), ((2413, 2429), 'datetime.date', 'date', (['(2024)', '(5)', '(5)'], {}), '(2024, 5, 5)\n', (2417, 2429), False, 'from datetime import date\n'), ((2435, 2452), 'datetime.date', 'date', (['(2025)', '(4)', '(20)'], {}), '(2025, 4, 20)\n', (2439, 2452), False, 'from datetime import date\n'), ((2458, 2475), 'datetime.date', 'date', (['(2026)', '(4)', '(12)'], {}), '(2026, 4, 12)\n', (2462, 2475), False, 'from datetime import date\n'), ((2481, 2497), 'datetime.date', 'date', (['(2027)', '(5)', '(2)'], {}), '(2027, 5, 2)\n', (2485, 2497), False, 'from datetime import date\n'), ((2503, 2520), 'datetime.date', 'date', (['(2028)', '(4)', '(16)'], {}), '(2028, 4, 16)\n', (2507, 2520), False, 'from datetime import date\n'), ((2526, 2542), 'datetime.date', 'date', (['(2029)', '(4)', '(8)'], {}), '(2029, 4, 8)\n', (2530, 2542), False, 'from datetime import date\n'), ((2548, 2565), 'datetime.date', 'date', (['(2030)', '(4)', '(28)'], {}), '(2030, 4, 28)\n', (2552, 2565), False, 'from datetime import date\n'), ((2571, 2588), 'datetime.date', 'date', (['(2031)', '(4)', '(13)'], {}), '(2031, 4, 13)\n', (2575, 2588), False, 'from datetime import date\n'), ((2594, 2610), 'datetime.date', 'date', (['(2032)', '(5)', '(2)'], {}), '(2032, 5, 2)\n', (2598, 2610), False, 'from datetime import date\n'), ((2616, 2633), 'datetime.date', 'date', (['(2033)', '(4)', '(24)'], {}), '(2033, 4, 24)\n', (2620, 2633), False, 'from datetime import date\n'), ((2639, 2655), 'datetime.date', 'date', (['(2034)', '(4)', '(9)'], {}), '(2034, 4, 9)\n', (2643, 2655), False, 'from datetime import date\n'), ((2661, 2678), 'datetime.date', 'date', (['(2035)', '(4)', '(29)'], {}), '(2035, 4, 29)\n', (2665, 2678), False, 'from datetime import date\n'), ((2684, 2701), 'datetime.date', 'date', (['(2036)', '(4)', '(20)'], {}), '(2036, 4, 20)\n', (2688, 2701), False, 'from datetime import date\n'), ((2707, 2723), 'datetime.date', 'date', (['(2037)', '(4)', '(5)'], {}), '(2037, 4, 5)\n', (2711, 2723), False, 'from datetime import date\n'), ((2729, 2746), 'datetime.date', 'date', (['(2038)', '(4)', '(25)'], {}), '(2038, 4, 25)\n', (2733, 2746), False, 'from datetime import date\n'), ((2752, 2769), 'datetime.date', 'date', (['(2039)', '(4)', '(17)'], {}), '(2039, 4, 17)\n', (2756, 2769), False, 'from datetime import date\n'), ((2775, 2791), 'datetime.date', 'date', (['(2040)', '(5)', '(6)'], {}), '(2040, 5, 6)\n', (2779, 2791), False, 'from datetime import date\n'), ((2797, 2814), 'datetime.date', 'date', (['(2041)', '(4)', '(21)'], {}), '(2041, 4, 21)\n', (2801, 2814), False, 'from datetime import date\n'), ((2820, 2837), 'datetime.date', 'date', (['(2042)', '(4)', '(13)'], {}), '(2042, 4, 13)\n', (2824, 2837), False, 'from datetime import date\n'), ((2843, 2859), 'datetime.date', 'date', (['(2043)', '(5)', '(3)'], {}), '(2043, 5, 3)\n', (2847, 2859), False, 'from datetime import date\n'), ((2865, 2882), 'datetime.date', 'date', (['(2044)', '(4)', '(24)'], {}), '(2044, 4, 24)\n', (2869, 2882), False, 'from datetime import date\n'), ((2888, 2904), 'datetime.date', 'date', (['(2045)', '(4)', '(9)'], {}), '(2045, 4, 9)\n', (2892, 2904), False, 'from datetime import date\n'), ((2910, 2927), 'datetime.date', 'date', (['(2046)', '(4)', '(29)'], {}), '(2046, 4, 29)\n', (2914, 2927), False, 'from datetime import date\n'), ((2933, 2950), 'datetime.date', 'date', (['(2047)', '(4)', '(21)'], {}), '(2047, 4, 21)\n', (2937, 2950), False, 'from datetime import date\n'), ((2956, 2972), 'datetime.date', 'date', (['(2048)', '(4)', '(5)'], {}), '(2048, 4, 5)\n', (2960, 2972), False, 'from datetime import date\n'), ((2978, 2995), 'datetime.date', 'date', (['(2049)', '(4)', '(25)'], {}), '(2049, 4, 25)\n', (2982, 2995), False, 'from datetime import date\n'), ((3001, 3018), 'datetime.date', 'date', (['(2050)', '(4)', '(17)'], {}), '(2050, 4, 17)\n', (3005, 3018), False, 'from datetime import date\n'), ((3154, 3169), 'datetime.date', 'date', (['(326)', '(4)', '(3)'], {}), '(326, 4, 3)\n', (3158, 3169), False, 'from datetime import date\n'), ((3175, 3190), 'datetime.date', 'date', (['(375)', '(4)', '(5)'], {}), '(375, 4, 5)\n', (3179, 3190), False, 'from datetime import date\n'), ((3196, 3211), 'datetime.date', 'date', (['(492)', '(4)', '(5)'], {}), '(492, 4, 5)\n', (3200, 3211), False, 'from datetime import date\n'), ((3217, 3233), 'datetime.date', 'date', (['(552)', '(3)', '(31)'], {}), '(552, 3, 31)\n', (3221, 3233), False, 'from datetime import date\n'), ((3239, 3254), 'datetime.date', 'date', (['(562)', '(4)', '(9)'], {}), '(562, 4, 9)\n', (3243, 3254), False, 'from datetime import date\n'), ((3260, 3276), 'datetime.date', 'date', (['(569)', '(4)', '(21)'], {}), '(569, 4, 21)\n', (3264, 3276), False, 'from datetime import date\n'), ((3282, 3298), 'datetime.date', 'date', (['(597)', '(4)', '(14)'], {}), '(597, 4, 14)\n', (3286, 3298), False, 'from datetime import date\n'), ((3304, 3320), 'datetime.date', 'date', (['(621)', '(4)', '(19)'], {}), '(621, 4, 19)\n', (3308, 3320), False, 'from datetime import date\n'), ((3326, 3342), 'datetime.date', 'date', (['(636)', '(3)', '(31)'], {}), '(636, 3, 31)\n', (3330, 3342), False, 'from datetime import date\n'), ((3348, 3364), 'datetime.date', 'date', (['(655)', '(3)', '(29)'], {}), '(655, 3, 29)\n', (3352, 3364), False, 'from datetime import date\n'), ((3370, 3386), 'datetime.date', 'date', (['(700)', '(4)', '(11)'], {}), '(700, 4, 11)\n', (3374, 3386), False, 'from datetime import date\n'), ((3392, 3407), 'datetime.date', 'date', (['(725)', '(4)', '(8)'], {}), '(725, 4, 8)\n', (3396, 3407), False, 'from datetime import date\n'), ((3413, 3429), 'datetime.date', 'date', (['(750)', '(3)', '(29)'], {}), '(750, 3, 29)\n', (3417, 3429), False, 'from datetime import date\n'), ((3435, 3450), 'datetime.date', 'date', (['(782)', '(4)', '(7)'], {}), '(782, 4, 7)\n', (3439, 3450), False, 'from datetime import date\n'), ((3456, 3472), 'datetime.date', 'date', (['(835)', '(4)', '(18)'], {}), '(835, 4, 18)\n', (3460, 3472), False, 'from datetime import date\n'), ((3478, 3494), 'datetime.date', 'date', (['(849)', '(4)', '(14)'], {}), '(849, 4, 14)\n', (3482, 3494), False, 'from datetime import date\n'), ((3500, 3516), 'datetime.date', 'date', (['(867)', '(3)', '(30)'], {}), '(867, 3, 30)\n', (3504, 3516), False, 'from datetime import date\n'), ((3522, 3538), 'datetime.date', 'date', (['(890)', '(4)', '(12)'], {}), '(890, 4, 12)\n', (3526, 3538), False, 'from datetime import date\n'), ((3544, 3560), 'datetime.date', 'date', (['(922)', '(4)', '(21)'], {}), '(922, 4, 21)\n', (3548, 3560), False, 'from datetime import date\n'), ((3566, 3581), 'datetime.date', 'date', (['(934)', '(4)', '(6)'], {}), '(934, 4, 6)\n', (3570, 3581), False, 'from datetime import date\n'), ((3587, 3604), 'datetime.date', 'date', (['(1049)', '(3)', '(26)'], {}), '(1049, 3, 26)\n', (3591, 3604), False, 'from datetime import date\n'), ((3610, 3627), 'datetime.date', 'date', (['(1058)', '(4)', '(19)'], {}), '(1058, 4, 19)\n', (3614, 3627), False, 'from datetime import date\n'), ((3633, 3649), 'datetime.date', 'date', (['(1113)', '(4)', '(6)'], {}), '(1113, 4, 6)\n', (3637, 3649), False, 'from datetime import date\n'), ((3655, 3672), 'datetime.date', 'date', (['(1119)', '(3)', '(30)'], {}), '(1119, 3, 30)\n', (3659, 3672), False, 'from datetime import date\n'), ((3678, 3695), 'datetime.date', 'date', (['(1242)', '(4)', '(20)'], {}), '(1242, 4, 20)\n', (3682, 3695), False, 'from datetime import date\n'), ((3701, 3718), 'datetime.date', 'date', (['(1255)', '(3)', '(28)'], {}), '(1255, 3, 28)\n', (3705, 3718), False, 'from datetime import date\n'), ((3724, 3740), 'datetime.date', 'date', (['(1257)', '(4)', '(8)'], {}), '(1257, 4, 8)\n', (3728, 3740), False, 'from datetime import date\n'), ((3746, 3763), 'datetime.date', 'date', (['(1258)', '(3)', '(24)'], {}), '(1258, 3, 24)\n', (3750, 3763), False, 'from datetime import date\n'), ((3769, 3786), 'datetime.date', 'date', (['(1261)', '(4)', '(24)'], {}), '(1261, 4, 24)\n', (3773, 3786), False, 'from datetime import date\n'), ((3792, 3809), 'datetime.date', 'date', (['(1278)', '(4)', '(17)'], {}), '(1278, 4, 17)\n', (3796, 3809), False, 'from datetime import date\n'), ((3815, 3831), 'datetime.date', 'date', (['(1333)', '(4)', '(4)'], {}), '(1333, 4, 4)\n', (3819, 3831), False, 'from datetime import date\n'), ((3837, 3854), 'datetime.date', 'date', (['(1351)', '(4)', '(17)'], {}), '(1351, 4, 17)\n', (3841, 3854), False, 'from datetime import date\n'), ((3860, 3876), 'datetime.date', 'date', (['(1371)', '(4)', '(6)'], {}), '(1371, 4, 6)\n', (3864, 3876), False, 'from datetime import date\n'), ((3882, 3899), 'datetime.date', 'date', (['(1391)', '(3)', '(26)'], {}), '(1391, 3, 26)\n', (3886, 3899), False, 'from datetime import date\n'), ((3905, 3922), 'datetime.date', 'date', (['(1402)', '(3)', '(26)'], {}), '(1402, 3, 26)\n', (3909, 3922), False, 'from datetime import date\n'), ((3928, 3944), 'datetime.date', 'date', (['(1412)', '(4)', '(3)'], {}), '(1412, 4, 3)\n', (3932, 3944), False, 'from datetime import date\n'), ((3950, 3966), 'datetime.date', 'date', (['(1439)', '(4)', '(5)'], {}), '(1439, 4, 5)\n', (3954, 3966), False, 'from datetime import date\n'), ((3972, 3989), 'datetime.date', 'date', (['(1445)', '(3)', '(28)'], {}), '(1445, 3, 28)\n', (3976, 3989), False, 'from datetime import date\n'), ((3995, 4011), 'datetime.date', 'date', (['(1531)', '(4)', '(9)'], {}), '(1531, 4, 9)\n', (3999, 4011), False, 'from datetime import date\n'), ((4017, 4034), 'datetime.date', 'date', (['(1555)', '(4)', '(14)'], {}), '(1555, 4, 14)\n', (4021, 4034), False, 'from datetime import date\n'), ((4166, 4206), 'bs_dateutil.easter.easter', 'easter', (['easter_date.year', 'EASTER_WESTERN'], {}), '(easter_date.year, EASTER_WESTERN)\n', (4172, 4206), False, 'from bs_dateutil.easter import easter\n'), ((4337, 4378), 'bs_dateutil.easter.easter', 'easter', (['easter_date.year', 'EASTER_ORTHODOX'], {}), '(easter_date.year, EASTER_ORTHODOX)\n', (4343, 4378), False, 'from bs_dateutil.easter import easter\n'), ((4505, 4544), 'bs_dateutil.easter.easter', 'easter', (['easter_date.year', 'EASTER_JULIAN'], {}), '(easter_date.year, EASTER_JULIAN)\n', (4511, 4544), False, 'from bs_dateutil.easter import easter\n'), ((4586, 4611), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4599, 4611), False, 'import pytest\n'), ((4621, 4636), 'bs_dateutil.easter.easter', 'easter', (['(1975)', '(4)'], {}), '(1975, 4)\n', (4627, 4636), False, 'from bs_dateutil.easter import easter\n')] |
"""Tests the GHDL backend."""
from unittest import TestCase
import os
import re
from plumbum import local
from .common import run_vhdeps
DIR = os.path.realpath(os.path.dirname(__file__))
class TestPatterns(TestCase):
"""Tests the test case pattern matching logic (also used by the vsim
backend)."""
def test_no_patterns(self):
"""Test the default test case pattern (`*.tc`)"""
with local.env(PATH=DIR+'/ghdl/fake-ghdl:' + local.env['PATH']):
code, out, _ = run_vhdeps('ghdl', '-i', DIR+'/simple/multiple-ok')
self.assertEqual(code, 0)
self.assertTrue(bool(re.search(r'ghdl -a [^\n]*foo_tc.vhd', out)))
self.assertTrue(bool(re.search(r'ghdl -a [^\n]*bar_tc.vhd', out)))
self.assertTrue(bool(re.search(r'ghdl -a [^\n]*baz.vhd', out)))
self.assertTrue(bool(re.search(r'ghdl -e [^\n]*foo_tc', out)))
self.assertTrue(bool(re.search(r'ghdl -e [^\n]*bar_tc', out)))
self.assertFalse(bool(re.search(r'ghdl -e [^\n]*baz', out)))
self.assertTrue(bool(re.search(r'ghdl -r [^\n]*foo_tc', out)))
self.assertTrue(bool(re.search(r'ghdl -r [^\n]*bar_tc', out)))
self.assertFalse(bool(re.search(r'ghdl -r [^\n]*baz', out)))
def test_positive_name(self):
"""Test positive entity name test case patterns"""
with local.env(PATH=DIR+'/ghdl/fake-ghdl:' + local.env['PATH']):
code, out, _ = run_vhdeps(
'ghdl', '-i', DIR+'/simple/multiple-ok', '-pfoo_tc', '-pbaz')
self.assertEqual(code, 0)
self.assertTrue(bool(re.search(r'ghdl -a [^\n]*foo_tc.vhd', out)))
self.assertTrue(bool(re.search(r'ghdl -a [^\n]*bar_tc.vhd', out)))
self.assertTrue(bool(re.search(r'ghdl -a [^\n]*baz.vhd', out)))
self.assertTrue(bool(re.search(r'ghdl -e [^\n]*foo_tc', out)))
self.assertFalse(bool(re.search(r'ghdl -e [^\n]*bar_tc', out)))
self.assertTrue(bool(re.search(r'ghdl -e [^\n]*baz', out)))
self.assertTrue(bool(re.search(r'ghdl -r [^\n]*foo_tc', out)))
self.assertFalse(bool(re.search(r'ghdl -r [^\n]*bar_tc', out)))
self.assertTrue(bool(re.search(r'ghdl -r [^\n]*baz', out)))
def test_negative_name(self):
"""Test negative entity name test case patterns"""
with local.env(PATH=DIR+'/ghdl/fake-ghdl:' + local.env['PATH']):
code, out, _ = run_vhdeps(
'ghdl', '-i', DIR+'/simple/multiple-ok', '-p*_tc', '-p!foo*')
self.assertEqual(code, 0)
self.assertTrue(bool(re.search(r'ghdl -a [^\n]*foo_tc.vhd', out)))
self.assertTrue(bool(re.search(r'ghdl -a [^\n]*bar_tc.vhd', out)))
self.assertTrue(bool(re.search(r'ghdl -a [^\n]*baz.vhd', out)))
self.assertFalse(bool(re.search(r'ghdl -e [^\n]*foo_tc', out)))
self.assertTrue(bool(re.search(r'ghdl -e [^\n]*bar_tc', out)))
self.assertFalse(bool(re.search(r'ghdl -e [^\n]*baz', out)))
self.assertFalse(bool(re.search(r'ghdl -r [^\n]*foo_tc', out)))
self.assertTrue(bool(re.search(r'ghdl -r [^\n]*bar_tc', out)))
self.assertFalse(bool(re.search(r'ghdl -r [^\n]*baz', out)))
def test_positive_filename(self):
"""Test positive filename test case patterns"""
with local.env(PATH=DIR+'/ghdl/fake-ghdl:' + local.env['PATH']):
code, out, _ = run_vhdeps(
'ghdl', '-i', DIR+'/simple/multiple-ok', '-p:*_tc.vhd', '-pbaz')
self.assertEqual(code, 0)
self.assertTrue(bool(re.search(r'ghdl -a [^\n]*foo_tc.vhd', out)))
self.assertTrue(bool(re.search(r'ghdl -a [^\n]*bar_tc.vhd', out)))
self.assertTrue(bool(re.search(r'ghdl -a [^\n]*baz.vhd', out)))
self.assertTrue(bool(re.search(r'ghdl -e [^\n]*foo_tc', out)))
self.assertTrue(bool(re.search(r'ghdl -e [^\n]*bar_tc', out)))
self.assertTrue(bool(re.search(r'ghdl -e [^\n]*baz', out)))
self.assertTrue(bool(re.search(r'ghdl -r [^\n]*foo_tc', out)))
self.assertTrue(bool(re.search(r'ghdl -r [^\n]*bar_tc', out)))
self.assertTrue(bool(re.search(r'ghdl -r [^\n]*baz', out)))
def test_negative_filename(self):
"""Test negative filename test case patterns"""
with local.env(PATH=DIR+'/ghdl/fake-ghdl:' + local.env['PATH']):
code, out, _ = run_vhdeps(
'ghdl', '-i', DIR+'/simple/multiple-ok', '-p:*.vhd', '-p:!*baz.vhd')
self.assertEqual(code, 0)
self.assertTrue(bool(re.search(r'ghdl -a [^\n]*foo_tc.vhd', out)))
self.assertTrue(bool(re.search(r'ghdl -a [^\n]*bar_tc.vhd', out)))
self.assertTrue(bool(re.search(r'ghdl -a [^\n]*baz.vhd', out)))
self.assertTrue(bool(re.search(r'ghdl -e [^\n]*foo_tc', out)))
self.assertTrue(bool(re.search(r'ghdl -e [^\n]*bar_tc', out)))
self.assertFalse(bool(re.search(r'ghdl -e [^\n]*baz', out)))
self.assertTrue(bool(re.search(r'ghdl -r [^\n]*foo_tc', out)))
self.assertTrue(bool(re.search(r'ghdl -r [^\n]*bar_tc', out)))
self.assertFalse(bool(re.search(r'ghdl -r [^\n]*baz', out)))
def test_multi_tc_per_file(self):
"""Test multiple test cases per file"""
with local.env(PATH=DIR+'/ghdl/fake-ghdl:' + local.env['PATH']):
code, out, _ = run_vhdeps('ghdl', '-i', DIR+'/complex/multi-tc-per-file')
self.assertEqual(code, 0)
self.assertTrue(bool(re.search(r'ghdl -a [^\n]*test_tc.vhd', out)))
self.assertTrue(bool(re.search(r'ghdl -e [^\n]*foo_tc', out)))
self.assertTrue(bool(re.search(r'ghdl -e [^\n]*bar_tc', out)))
self.assertFalse(bool(re.search(r'ghdl -e [^\n]*baz', out)))
self.assertTrue(bool(re.search(r'ghdl -r [^\n]*foo_tc', out)))
self.assertTrue(bool(re.search(r'ghdl -r [^\n]*bar_tc', out)))
self.assertFalse(bool(re.search(r'ghdl -r [^\n]*baz', out)))
| [
"plumbum.local.env",
"os.path.dirname",
"re.search"
] | [((162, 187), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (177, 187), False, 'import os\n'), ((414, 474), 'plumbum.local.env', 'local.env', ([], {'PATH': "(DIR + '/ghdl/fake-ghdl:' + local.env['PATH'])"}), "(PATH=DIR + '/ghdl/fake-ghdl:' + local.env['PATH'])\n", (423, 474), False, 'from plumbum import local\n'), ((1338, 1398), 'plumbum.local.env', 'local.env', ([], {'PATH': "(DIR + '/ghdl/fake-ghdl:' + local.env['PATH'])"}), "(PATH=DIR + '/ghdl/fake-ghdl:' + local.env['PATH'])\n", (1347, 1398), False, 'from plumbum import local\n'), ((2300, 2360), 'plumbum.local.env', 'local.env', ([], {'PATH': "(DIR + '/ghdl/fake-ghdl:' + local.env['PATH'])"}), "(PATH=DIR + '/ghdl/fake-ghdl:' + local.env['PATH'])\n", (2309, 2360), False, 'from plumbum import local\n'), ((3265, 3325), 'plumbum.local.env', 'local.env', ([], {'PATH': "(DIR + '/ghdl/fake-ghdl:' + local.env['PATH'])"}), "(PATH=DIR + '/ghdl/fake-ghdl:' + local.env['PATH'])\n", (3274, 3325), False, 'from plumbum import local\n'), ((4229, 4289), 'plumbum.local.env', 'local.env', ([], {'PATH': "(DIR + '/ghdl/fake-ghdl:' + local.env['PATH'])"}), "(PATH=DIR + '/ghdl/fake-ghdl:' + local.env['PATH'])\n", (4238, 4289), False, 'from plumbum import local\n'), ((5191, 5251), 'plumbum.local.env', 'local.env', ([], {'PATH': "(DIR + '/ghdl/fake-ghdl:' + local.env['PATH'])"}), "(PATH=DIR + '/ghdl/fake-ghdl:' + local.env['PATH'])\n", (5200, 5251), False, 'from plumbum import local\n'), ((616, 659), 're.search', 're.search', (['"""ghdl -a [^\\\\n]*foo_tc.vhd"""', 'out'], {}), "('ghdl -a [^\\\\n]*foo_tc.vhd', out)\n", (625, 659), False, 'import re\n'), ((691, 734), 're.search', 're.search', (['"""ghdl -a [^\\\\n]*bar_tc.vhd"""', 'out'], {}), "('ghdl -a [^\\\\n]*bar_tc.vhd', out)\n", (700, 734), False, 'import re\n'), ((766, 806), 're.search', 're.search', (['"""ghdl -a [^\\\\n]*baz.vhd"""', 'out'], {}), "('ghdl -a [^\\\\n]*baz.vhd', out)\n", (775, 806), False, 'import re\n'), ((838, 877), 're.search', 're.search', (['"""ghdl -e [^\\\\n]*foo_tc"""', 'out'], {}), "('ghdl -e [^\\\\n]*foo_tc', out)\n", (847, 877), False, 'import re\n'), ((909, 948), 're.search', 're.search', (['"""ghdl -e [^\\\\n]*bar_tc"""', 'out'], {}), "('ghdl -e [^\\\\n]*bar_tc', out)\n", (918, 948), False, 'import re\n'), ((981, 1017), 're.search', 're.search', (['"""ghdl -e [^\\\\n]*baz"""', 'out'], {}), "('ghdl -e [^\\\\n]*baz', out)\n", (990, 1017), False, 'import re\n'), ((1049, 1088), 're.search', 're.search', (['"""ghdl -r [^\\\\n]*foo_tc"""', 'out'], {}), "('ghdl -r [^\\\\n]*foo_tc', out)\n", (1058, 1088), False, 'import re\n'), ((1120, 1159), 're.search', 're.search', (['"""ghdl -r [^\\\\n]*bar_tc"""', 'out'], {}), "('ghdl -r [^\\\\n]*bar_tc', out)\n", (1129, 1159), False, 'import re\n'), ((1192, 1228), 're.search', 're.search', (['"""ghdl -r [^\\\\n]*baz"""', 'out'], {}), "('ghdl -r [^\\\\n]*baz', out)\n", (1201, 1228), False, 'import re\n'), ((1578, 1621), 're.search', 're.search', (['"""ghdl -a [^\\\\n]*foo_tc.vhd"""', 'out'], {}), "('ghdl -a [^\\\\n]*foo_tc.vhd', out)\n", (1587, 1621), False, 'import re\n'), ((1653, 1696), 're.search', 're.search', (['"""ghdl -a [^\\\\n]*bar_tc.vhd"""', 'out'], {}), "('ghdl -a [^\\\\n]*bar_tc.vhd', out)\n", (1662, 1696), False, 'import re\n'), ((1728, 1768), 're.search', 're.search', (['"""ghdl -a [^\\\\n]*baz.vhd"""', 'out'], {}), "('ghdl -a [^\\\\n]*baz.vhd', out)\n", (1737, 1768), False, 'import re\n'), ((1800, 1839), 're.search', 're.search', (['"""ghdl -e [^\\\\n]*foo_tc"""', 'out'], {}), "('ghdl -e [^\\\\n]*foo_tc', out)\n", (1809, 1839), False, 'import re\n'), ((1872, 1911), 're.search', 're.search', (['"""ghdl -e [^\\\\n]*bar_tc"""', 'out'], {}), "('ghdl -e [^\\\\n]*bar_tc', out)\n", (1881, 1911), False, 'import re\n'), ((1943, 1979), 're.search', 're.search', (['"""ghdl -e [^\\\\n]*baz"""', 'out'], {}), "('ghdl -e [^\\\\n]*baz', out)\n", (1952, 1979), False, 'import re\n'), ((2011, 2050), 're.search', 're.search', (['"""ghdl -r [^\\\\n]*foo_tc"""', 'out'], {}), "('ghdl -r [^\\\\n]*foo_tc', out)\n", (2020, 2050), False, 'import re\n'), ((2083, 2122), 're.search', 're.search', (['"""ghdl -r [^\\\\n]*bar_tc"""', 'out'], {}), "('ghdl -r [^\\\\n]*bar_tc', out)\n", (2092, 2122), False, 'import re\n'), ((2154, 2190), 're.search', 're.search', (['"""ghdl -r [^\\\\n]*baz"""', 'out'], {}), "('ghdl -r [^\\\\n]*baz', out)\n", (2163, 2190), False, 'import re\n'), ((2540, 2583), 're.search', 're.search', (['"""ghdl -a [^\\\\n]*foo_tc.vhd"""', 'out'], {}), "('ghdl -a [^\\\\n]*foo_tc.vhd', out)\n", (2549, 2583), False, 'import re\n'), ((2615, 2658), 're.search', 're.search', (['"""ghdl -a [^\\\\n]*bar_tc.vhd"""', 'out'], {}), "('ghdl -a [^\\\\n]*bar_tc.vhd', out)\n", (2624, 2658), False, 'import re\n'), ((2690, 2730), 're.search', 're.search', (['"""ghdl -a [^\\\\n]*baz.vhd"""', 'out'], {}), "('ghdl -a [^\\\\n]*baz.vhd', out)\n", (2699, 2730), False, 'import re\n'), ((2763, 2802), 're.search', 're.search', (['"""ghdl -e [^\\\\n]*foo_tc"""', 'out'], {}), "('ghdl -e [^\\\\n]*foo_tc', out)\n", (2772, 2802), False, 'import re\n'), ((2834, 2873), 're.search', 're.search', (['"""ghdl -e [^\\\\n]*bar_tc"""', 'out'], {}), "('ghdl -e [^\\\\n]*bar_tc', out)\n", (2843, 2873), False, 'import re\n'), ((2906, 2942), 're.search', 're.search', (['"""ghdl -e [^\\\\n]*baz"""', 'out'], {}), "('ghdl -e [^\\\\n]*baz', out)\n", (2915, 2942), False, 'import re\n'), ((2975, 3014), 're.search', 're.search', (['"""ghdl -r [^\\\\n]*foo_tc"""', 'out'], {}), "('ghdl -r [^\\\\n]*foo_tc', out)\n", (2984, 3014), False, 'import re\n'), ((3046, 3085), 're.search', 're.search', (['"""ghdl -r [^\\\\n]*bar_tc"""', 'out'], {}), "('ghdl -r [^\\\\n]*bar_tc', out)\n", (3055, 3085), False, 'import re\n'), ((3118, 3154), 're.search', 're.search', (['"""ghdl -r [^\\\\n]*baz"""', 'out'], {}), "('ghdl -r [^\\\\n]*baz', out)\n", (3127, 3154), False, 'import re\n'), ((3508, 3551), 're.search', 're.search', (['"""ghdl -a [^\\\\n]*foo_tc.vhd"""', 'out'], {}), "('ghdl -a [^\\\\n]*foo_tc.vhd', out)\n", (3517, 3551), False, 'import re\n'), ((3583, 3626), 're.search', 're.search', (['"""ghdl -a [^\\\\n]*bar_tc.vhd"""', 'out'], {}), "('ghdl -a [^\\\\n]*bar_tc.vhd', out)\n", (3592, 3626), False, 'import re\n'), ((3658, 3698), 're.search', 're.search', (['"""ghdl -a [^\\\\n]*baz.vhd"""', 'out'], {}), "('ghdl -a [^\\\\n]*baz.vhd', out)\n", (3667, 3698), False, 'import re\n'), ((3730, 3769), 're.search', 're.search', (['"""ghdl -e [^\\\\n]*foo_tc"""', 'out'], {}), "('ghdl -e [^\\\\n]*foo_tc', out)\n", (3739, 3769), False, 'import re\n'), ((3801, 3840), 're.search', 're.search', (['"""ghdl -e [^\\\\n]*bar_tc"""', 'out'], {}), "('ghdl -e [^\\\\n]*bar_tc', out)\n", (3810, 3840), False, 'import re\n'), ((3872, 3908), 're.search', 're.search', (['"""ghdl -e [^\\\\n]*baz"""', 'out'], {}), "('ghdl -e [^\\\\n]*baz', out)\n", (3881, 3908), False, 'import re\n'), ((3940, 3979), 're.search', 're.search', (['"""ghdl -r [^\\\\n]*foo_tc"""', 'out'], {}), "('ghdl -r [^\\\\n]*foo_tc', out)\n", (3949, 3979), False, 'import re\n'), ((4011, 4050), 're.search', 're.search', (['"""ghdl -r [^\\\\n]*bar_tc"""', 'out'], {}), "('ghdl -r [^\\\\n]*bar_tc', out)\n", (4020, 4050), False, 'import re\n'), ((4082, 4118), 're.search', 're.search', (['"""ghdl -r [^\\\\n]*baz"""', 'out'], {}), "('ghdl -r [^\\\\n]*baz', out)\n", (4091, 4118), False, 'import re\n'), ((4476, 4519), 're.search', 're.search', (['"""ghdl -a [^\\\\n]*foo_tc.vhd"""', 'out'], {}), "('ghdl -a [^\\\\n]*foo_tc.vhd', out)\n", (4485, 4519), False, 'import re\n'), ((4551, 4594), 're.search', 're.search', (['"""ghdl -a [^\\\\n]*bar_tc.vhd"""', 'out'], {}), "('ghdl -a [^\\\\n]*bar_tc.vhd', out)\n", (4560, 4594), False, 'import re\n'), ((4626, 4666), 're.search', 're.search', (['"""ghdl -a [^\\\\n]*baz.vhd"""', 'out'], {}), "('ghdl -a [^\\\\n]*baz.vhd', out)\n", (4635, 4666), False, 'import re\n'), ((4698, 4737), 're.search', 're.search', (['"""ghdl -e [^\\\\n]*foo_tc"""', 'out'], {}), "('ghdl -e [^\\\\n]*foo_tc', out)\n", (4707, 4737), False, 'import re\n'), ((4769, 4808), 're.search', 're.search', (['"""ghdl -e [^\\\\n]*bar_tc"""', 'out'], {}), "('ghdl -e [^\\\\n]*bar_tc', out)\n", (4778, 4808), False, 'import re\n'), ((4841, 4877), 're.search', 're.search', (['"""ghdl -e [^\\\\n]*baz"""', 'out'], {}), "('ghdl -e [^\\\\n]*baz', out)\n", (4850, 4877), False, 'import re\n'), ((4909, 4948), 're.search', 're.search', (['"""ghdl -r [^\\\\n]*foo_tc"""', 'out'], {}), "('ghdl -r [^\\\\n]*foo_tc', out)\n", (4918, 4948), False, 'import re\n'), ((4980, 5019), 're.search', 're.search', (['"""ghdl -r [^\\\\n]*bar_tc"""', 'out'], {}), "('ghdl -r [^\\\\n]*bar_tc', out)\n", (4989, 5019), False, 'import re\n'), ((5052, 5088), 're.search', 're.search', (['"""ghdl -r [^\\\\n]*baz"""', 'out'], {}), "('ghdl -r [^\\\\n]*baz', out)\n", (5061, 5088), False, 'import re\n'), ((5400, 5444), 're.search', 're.search', (['"""ghdl -a [^\\\\n]*test_tc.vhd"""', 'out'], {}), "('ghdl -a [^\\\\n]*test_tc.vhd', out)\n", (5409, 5444), False, 'import re\n'), ((5476, 5515), 're.search', 're.search', (['"""ghdl -e [^\\\\n]*foo_tc"""', 'out'], {}), "('ghdl -e [^\\\\n]*foo_tc', out)\n", (5485, 5515), False, 'import re\n'), ((5547, 5586), 're.search', 're.search', (['"""ghdl -e [^\\\\n]*bar_tc"""', 'out'], {}), "('ghdl -e [^\\\\n]*bar_tc', out)\n", (5556, 5586), False, 'import re\n'), ((5619, 5655), 're.search', 're.search', (['"""ghdl -e [^\\\\n]*baz"""', 'out'], {}), "('ghdl -e [^\\\\n]*baz', out)\n", (5628, 5655), False, 'import re\n'), ((5687, 5726), 're.search', 're.search', (['"""ghdl -r [^\\\\n]*foo_tc"""', 'out'], {}), "('ghdl -r [^\\\\n]*foo_tc', out)\n", (5696, 5726), False, 'import re\n'), ((5758, 5797), 're.search', 're.search', (['"""ghdl -r [^\\\\n]*bar_tc"""', 'out'], {}), "('ghdl -r [^\\\\n]*bar_tc', out)\n", (5767, 5797), False, 'import re\n'), ((5830, 5866), 're.search', 're.search', (['"""ghdl -r [^\\\\n]*baz"""', 'out'], {}), "('ghdl -r [^\\\\n]*baz', out)\n", (5839, 5866), False, 'import re\n')] |
"""
==========================================================
Sample pipeline for text feature extraction and evaluation
==========================================================
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached and reused for the document
classification example.
You can adjust the number of categories by giving their names to the dataset
loader or setting them to None to get the 20 of them.
Here is a sample output of a run on a quad-core machine::
Loading 20 newsgroups dataset for categories:
['alt.atheism', 'talk.religion.misc']
1427 documents
2 categories
Performing grid search...
pipeline: ['vect', 'tfidf', 'clf']
parameters:
{'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),
'clf__n_iter': (10, 50, 80),
'clf__penalty': ('l2', 'elasticnet'),
'tfidf__use_idf': (True, False),
'vect__max_n': (1, 2),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000)}
done in 1737.030s
Best score: 0.940
Best parameters set:
clf__alpha: 9.9999999999999995e-07
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__use_idf: True
vect__max_n: 2
vect__max_df: 0.75
vect__max_features: 50000
"""
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: BSD 3 clause
from __future__ import print_function
from pprint import pprint
from time import time
import logging
import sys
import numpy as np
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import ParameterGrid, GridSearchCV
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
#print(__doc__)
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=Warning) #DeprecationWarning)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
class ItemSelector(BaseEstimator, TransformerMixin):
def __init__(self, key):
self.key = key
def fit(self, x, y=None):
return self
def transform(self, data_dict):
return data_dict[self.key]
class CustomFeatures(BaseEstimator):
def __init__(self):
pass
def get_feature_names(self):
return np.array(['sent_len']) #, 'lang_prob'])
def fit(self, documents, y=None):
return self
def transform(self, x_dataset):
X_num_token = list()
#X_count_nouns = list()
for sentence in x_dataset:
# takes raw text and calculates type token ratio
X_num_token.append(len(sentence))
# takes pos tag text and counts number of noun pos tags (NN, NNS etc.)
# X_count_nouns.append(count_nouns(sentence))
X = np.array([X_num_token]).T #, X_count_nouns]).T
if not hasattr(self, 'scalar'):
self.scalar = StandardScaler().fit(X)
return self.scalar.transform(X)
class FeatureExtractor(BaseEstimator, TransformerMixin):
"""Extract the subject & body from a usenet post in a single pass.
Takes a sequence of strings and produces a dict of sequences. Keys are
`subject` and `body`.
"""
def fit(self, x, y=None):
return self
def transform(self, posts):
features = np.recarray(shape=(len(posts),),
dtype=[('words', object), ('meta', object)]) #('length', object), ('condscore', object), ('score', object), ('normscore', object), ('langpred', bool)])
for i, text in enumerate(posts):
elems = text.split('\t')
words, cs, s, ns, lp = elems[:5]
#print(elems)
features['words'][i] = words
features['meta'][i] = {'length': len(words.split()),
'condscore': float(cs), 'score': float(s),
'normscore': float(ns), 'langpred': bool(lp)}
if len(elems) > 5:
ecs, es, ens, ep = elems[5:]
features['meta'][i].update({'event_condscore': float(ecs),
'event_score': float(es), 'event_normscore': float(ens), 'event_pred': bool(ep)})
return features
# #############################################################################
# Load data test
def load_data(filename, suffix):
contents, labels = [], []
#data = StoryData()
with open(filename+'.true.'+suffix) as tinf, open(filename+'.false.'+suffix) as finf:
for line in tinf:
elems = line.strip()#.split('\t')
contents.append(elems)
labels.append(1)
for line in finf:
elems = line.strip()#.split('\t')
contents.append(elems)
labels.append(0)
print("data size:", len(contents))
return [contents, labels]
def event_orig_mapping(orig_idx_file, event_idx_file):
orig_idx_array = []
event_idx_dict = {}
with open(orig_idx_file) as oinf, open(event_idx_file) as einf:
oinf.readline()
einf.readline()
for line in oinf:
elems = line.strip().split()
orig_idx_array.append(elems[0])
counter = 0
for line in einf:
elems = line.strip().split()
event_idx_dict[elems[0]] = counter
counter += 1
origin_to_event = {}
for i, oidx in enumerate(orig_idx_array):
if oidx in event_idx_dict:
origin_to_event[i] = event_idx_dict[oidx]
print ('map dictionary size:', len(origin_to_event))
return origin_to_event
def add_e2e_scores(original_data_array, event_data_array, origin_to_event):
assert len(event_data_array) == 2 * len(origin_to_event), (len(event_data_array), len(origin_to_event))
assert len(original_data_array) >= len(event_data_array)
half_len = len(original_data_array) / 2
for i, elems in enumerate(original_data_array):
if i in origin_to_event:
original_data_array[i] = elems + '\t' + event_data_array[origin_to_event[i]]
if i - half_len in origin_to_event:
#print(i, origin_to_event[i-half_len], len(origin_to_event))
original_data_array[i] = elems + '\t' + event_data_array[origin_to_event[i-half_len] + len(origin_to_event)]
return original_data_array
def pairwise_eval(probs):
mid = int(len(probs) / 2)
print('middle point: %d' % mid)
pos = probs[:mid]
neg = probs[mid:]
assert len(pos) == len(neg)
count = 0.0
for p, n in zip(pos, neg):
if p[1] > n[1]:
count += 1.0
# print('True')
# else:
# print('False')
acc = count/mid
print('Test result: %.3f' % acc)
return acc
train_data = load_data(sys.argv[1], sys.argv[3])
test_data = load_data(sys.argv[2], sys.argv[3])
#train_event = load_data(sys.argv[4], sys.argv[6])
#test_event = load_data(sys.argv[5], sys.argv[6])
#train_e2o = event_orig_mapping(sys.argv[7], sys.argv[8])
#test_e2o = event_orig_mapping(sys.argv[9], sys.argv[10])
# add event-to-event info
#train_data[0] = add_e2e_scores(train_data[0], train_event[0], train_e2o)
#test_data[0] = add_e2e_scores(test_data[0], test_event[0], test_e2o)
print('Finished data loading!!')
for elem in train_data[0][:10]:
print (elem)
# #############################################################################
# Define a pipeline combining a text feature extractor with a simple
# classifier
pipeline = Pipeline([
('featextract', FeatureExtractor()),
('union', FeatureUnion(
transformer_list=[
('meta', Pipeline([
('selector', ItemSelector(key='meta')),
('vect', DictVectorizer()),
('scale', StandardScaler(with_mean=False)),
])),
('word', Pipeline([
('selector', ItemSelector(key='words')),
('vect', CountVectorizer(ngram_range=(1,5), max_df=0.9)),
('tfidf', TfidfTransformer()),
])),
('char', Pipeline([
('selector', ItemSelector(key='words')),
('vect', CountVectorizer(ngram_range=(1,5), analyzer='char', max_df=0.8)),
('tfidf', TfidfTransformer()),
])),
],
transformer_weights={
'meta': 0.3,
'word': 1.0,
'char': 1.0,
},
)),
('clf', SGDClassifier(loss='log', alpha=0.0005, tol=0.005, random_state=0)),
])
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
parameters = {
'union__transformer_weights': ({'meta': 0.6, 'word': 1.0, 'char': 1.0},
# {'meta': 1.0, 'word': 1.0, 'char': 0.75},
# {'meta': 1.0, 'word': 1.0, 'char': 0.5},
# {'meta': 1.0, 'word': 0.75, 'char': 1.0},
# {'meta': 1.0, 'word': 0.75, 'char': 0.75},
# {'meta': 1.0, 'word': 0.75, 'char': 0.5},
# {'meta': 1.0, 'word': 0.5, 'char': 1.0},
# {'meta': 1.0, 'word': 0.5, 'char': 0.75},
# {'meta': 1.0, 'word': 0.5, 'char': 0.5},
{'meta': 0.7, 'word': 1.0, 'char': 1.0},
{'meta': 0.5, 'word': 1.0, 'char': 1.0},
{'meta': 0.4, 'word': 1.0, 'char': 1.0},
{'meta': 0.3, 'word': 1.0, 'char': 1.0},
# {'meta': 0.75, 'word': 1.0, 'char': 0.75},
# {'meta': 0.75, 'word': 1.0, 'char': 0.5},
# {'meta': 0.75, 'word': 0.75, 'char': 1.0},
# {'meta': 0.75, 'word': 0.75, 'char': 0.75},
# {'meta': 0.75, 'word': 0.75, 'char': 0.5},
# {'meta': 0.75, 'word': 0.5, 'char': 1.0},
# {'meta': 0.75, 'word': 0.5, 'char': 0.75},
# {'meta': 0.75, 'word': 0.5, 'char': 0.5},
# {'meta': 0.5, 'word': 1.0, 'char': 1.0},
# {'meta': 0.5, 'word': 1.0, 'char': 0.75},
# {'meta': 0.5, 'word': 1.0, 'char': 0.5},
# {'meta': 0.5, 'word': 0.75, 'char': 1.0},
# {'meta': 0.5, 'word': 0.75, 'char': 0.75},
# {'meta': 0.5, 'word': 0.75, 'char': 0.5},
# {'meta': 0.5, 'word': 0.5, 'char': 1.0},
# {'meta': 0.5, 'word': 0.5, 'char': 0.75},
# {'meta': 0.5, 'word': 0.5, 'char': 0.5},
),
'union__word__vect__max_df': (0.7, 0.8, 0.9, 1.0), #0.5,
'union__char__vect__max_df': (0.7, 0.8, 0.9, 1.0), #0.5,
#'vect__max_features': (None, 5000, 10000, 50000),
#'union__word__vect__ngram_range': ((1, 4), (1, 5)), # trigram or 5-grams (1, 4),
#'union__char__vect__ngram_range': ((1, 4), (1, 5)), # trigram or 5-grams
#'tfidf__use_idf': (True, False),
#'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (0.001, 0.0005, 0.0001),
#'clf__penalty': ('l2', 'l1'),
'clf__tol': (5e-3, 1e-3, 5e-4),
#'clf__n_iter': (10, 50, 80),
}
if __name__ == "__main__":
# multiprocessing requires the fork to happen in a __main__ protected
# block
# find the best parameters for both the feature extraction and the
# classifier
# pipeline.fit(train_data[0], train_data[1])
# probs = pipeline.predict_proba(test_data[0])
# acc = pairwise_eval(probs)
# exit(0)
#grid_params = list(ParameterGrid(parameters))
grid_search = GridSearchCV(pipeline, parameters, cv=5, n_jobs=-1, verbose=1)
print("Performing grid search...")
print("pipeline:", [name for name, _ in pipeline.steps])
print("parameters:")
pprint(parameters)
t0 = time()
#pipeline.fit(train_data[0], train_data[1]) #.contents, train_data.labels)
'''for params in grid_params:
print('Current parameters:', params)
pipeline.set_params(**params)
pipeline.fit(train_data[0], train_data[1])
probs = pipeline.predict_proba(test_data[0])
acc = pairwise_eval(probs)
exit(0)
'''
grid_search.fit(train_data[0], train_data[1])
print("done in %0.3fs" % (time() - t0))
print()
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
print('predicting on the test data...')
score = grid_search.score(test_data[0], test_data[1])
print('Test score: %.3f' % score)
probs = grid_search.predict_proba(test_data[0])
pairwise_eval(probs)
| [
"sklearn.model_selection.GridSearchCV",
"sklearn.feature_extraction.text.CountVectorizer",
"sklearn.preprocessing.StandardScaler",
"logging.basicConfig",
"warnings.filterwarnings",
"sklearn.linear_model.SGDClassifier",
"time.time",
"warnings.catch_warnings",
"pprint.pprint",
"numpy.array",
"sklearn.feature_extraction.DictVectorizer",
"sklearn.feature_extraction.text.TfidfTransformer"
] | [((2196, 2288), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s %(levelname)s %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s %(levelname)s %(message)s')\n", (2215, 2288), False, 'import logging\n'), ((2058, 2083), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (2081, 2083), False, 'import warnings\n'), ((2089, 2140), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'Warning'}), "('ignore', category=Warning)\n", (2112, 2140), False, 'import warnings\n'), ((11551, 11613), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['pipeline', 'parameters'], {'cv': '(5)', 'n_jobs': '(-1)', 'verbose': '(1)'}), '(pipeline, parameters, cv=5, n_jobs=-1, verbose=1)\n', (11563, 11613), False, 'from sklearn.model_selection import ParameterGrid, GridSearchCV\n'), ((11744, 11762), 'pprint.pprint', 'pprint', (['parameters'], {}), '(parameters)\n', (11750, 11762), False, 'from pprint import pprint\n'), ((11772, 11778), 'time.time', 'time', ([], {}), '()\n', (11776, 11778), False, 'from time import time\n'), ((2660, 2682), 'numpy.array', 'np.array', (["['sent_len']"], {}), "(['sent_len'])\n", (2668, 2682), True, 'import numpy as np\n'), ((3155, 3178), 'numpy.array', 'np.array', (['[X_num_token]'], {}), '([X_num_token])\n', (3163, 3178), True, 'import numpy as np\n'), ((8663, 8729), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'loss': '"""log"""', 'alpha': '(0.0005)', 'tol': '(0.005)', 'random_state': '(0)'}), "(loss='log', alpha=0.0005, tol=0.005, random_state=0)\n", (8676, 8729), False, 'from sklearn.linear_model import SGDClassifier\n'), ((12214, 12220), 'time.time', 'time', ([], {}), '()\n', (12218, 12220), False, 'from time import time\n'), ((3270, 3286), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3284, 3286), False, 'from sklearn.preprocessing import StandardScaler\n'), ((8009, 8025), 'sklearn.feature_extraction.DictVectorizer', 'DictVectorizer', ([], {}), '()\n', (8023, 8025), False, 'from sklearn.feature_extraction import DictVectorizer\n'), ((8050, 8081), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {'with_mean': '(False)'}), '(with_mean=False)\n', (8064, 8081), False, 'from sklearn.preprocessing import StandardScaler\n'), ((8199, 8246), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'ngram_range': '(1, 5)', 'max_df': '(0.9)'}), '(ngram_range=(1, 5), max_df=0.9)\n', (8214, 8246), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((8270, 8288), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {}), '()\n', (8286, 8288), False, 'from sklearn.feature_extraction.text import TfidfTransformer\n'), ((8406, 8470), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'ngram_range': '(1, 5)', 'analyzer': '"""char"""', 'max_df': '(0.8)'}), "(ngram_range=(1, 5), analyzer='char', max_df=0.8)\n", (8421, 8470), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((8494, 8512), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {}), '()\n', (8510, 8512), False, 'from sklearn.feature_extraction.text import TfidfTransformer\n')] |
"""Summary
Attributes:
Configer (TYPE): Description
"""
import logging
import argparse
from src.preprocess import *
from src.item_preprocessor import *
from src.configer import *
from src import tfidf
Configer = Configer('setting.ini')
logging.basicConfig(
# filename='log.log',
format='%(asctime)s : %(levelname)s : %(message)s',
level=logging.INFO)
parser = argparse.ArgumentParser()
print(Configer.ip_address, Configer.port,
Configer.ssh_username, Configer.ssh_password)
print(Configer.db_name, Configer.db_username, Configer.db_password)
if __name__ == '__main__':
parser.add_argument(
'action',
choices=['define_concepts', 'update_chartevents',
'create_train_dataset', 'crawl_webpages',
'tfidf_medical_webpages', 'cluster', 'backup', 'restore',
'create_los_dataset'],
help='define action for preprocess'
)
parser.add_argument('-p', '--process', default=2, type=int,
help='number of process')
parser.add_argument(
'-cd', '--concept_dir', default='../data',
help='directory to store concept definition')
# options for create train data
parser.add_argument(
'-ed', '--export_dir',
help='directory to store train data (options for create train data)')
args = parser.parse_args()
if args.action == 'define_concepts':
define_concepts(output_dir=args.concept_dir,
processes=args.process)
elif args.action == 'update_chartevents':
update_chartevents_value(concept_dir=args.concept_dir)
elif args.action == 'create_train_dataset':
create_train_feature_dataset(export_dir=args.export_dir,
processes=args.process,
concept_dir=args.concept_dir)
elif args.action == 'create_los_dataset':
create_cvd_los_dataset(export_dir=args.export_dir,
concept_dir=args.concept_dir)
elif args.action == 'crawl_webpages':
# TODO: parameters
export_dir = '../data/webpages'
concept_dir = '../data'
crawl_webpages(concept_dir, export_dir)
elif args.action == 'tfidf_medical_webpages':
tfidf.train_tfidf(min_count=5, chunksize=5000, ngrams=(1, 1),
model_dir='../models')
elif args.action == 'cluster':
cluster()
elif args.action == 'backup':
backup_merge_data()
elif args.action == 'restore':
restore_merge_data()
| [
"src.tfidf.train_tfidf",
"argparse.ArgumentParser",
"logging.basicConfig"
] | [((244, 339), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s : %(levelname)s : %(message)s"""', 'level': 'logging.INFO'}), "(format='%(asctime)s : %(levelname)s : %(message)s',\n level=logging.INFO)\n", (263, 339), False, 'import logging\n'), ((381, 406), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (404, 406), False, 'import argparse\n'), ((2278, 2367), 'src.tfidf.train_tfidf', 'tfidf.train_tfidf', ([], {'min_count': '(5)', 'chunksize': '(5000)', 'ngrams': '(1, 1)', 'model_dir': '"""../models"""'}), "(min_count=5, chunksize=5000, ngrams=(1, 1), model_dir=\n '../models')\n", (2295, 2367), False, 'from src import tfidf\n')] |
"""!
All functions providing plotting functionalities.
"""
import matplotlib.pylab as plt
import matplotlib.dates as mdates
import matplotlib.image as image
import pandas as pd
import re
import argparse
import datetime as dt
import numpy as np
from pandas.plotting import register_matplotlib_converters
from datetime import datetime
register_matplotlib_converters()
plt.rcParams.update({'font.size': 22})
environment_sensor_pattern = re.compile(r"([0-9-]+)\s([0-9:.]+):\stemperature:\s([0-9.]+),\sgas:\s([0-9]+),\shumidity:\s([0-9.]+),\spressure:\s([0-9.]+),\saltitude:\s([0-9.]+)", re.MULTILINE)
soil_moisture_pattern = re.compile(r"([0-9-]+)\s([0-9.:]+):\s\[([0-9]+),\s([0-9.]+),\s([0-9.]+)\]", re.MULTILINE)
def plot_soil_moisture(dict, past24):
"""!
Plots soil moisture data in simple line chart
@param dict: Dicitonary containing timestamps and associated readings.
"""
lists = sorted(dict.items())
x, y = zip(*lists)
fig, ax = plt.subplots()
ax.plot(x, y, 'k', linewidth=2)
fig.autofmt_xdate()
hours6 = mdates.HourLocator(interval=6)
hours3 = mdates.HourLocator(interval=3)
# im = image.imread('./icons/Grow_Space_Logo.png')
# fig.figimage(im, 300, 0, zorder=3, alpha=0.2)
ax.xaxis.set_minor_locator(hours3)
ax.tick_params(which='major', length=7, width=2, color='black')
ax.tick_params(which='minor', length=4, width=2, color='black')
ax.xaxis.set_major_locator(hours6)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d - %H'))
ax.grid()
plt.xlabel("Day - Hour")
plt.ylabel("Moisture Percentage (%)")
plt.title("Soil Moisture % vs Time")
DPI = fig.get_dpi()
fig.set_size_inches(2400.0/float(DPI),1220.0/float(DPI))
if past24:
datemin = np.datetime64(x[-1], 'h') - np.timedelta64(24, 'h')
datemax = np.datetime64(x[-1], 'h')
ax.set_xlim(datemin, datemax)
plt.xlabel("Hour")
plt.title("Soil Moisture % Past 24 Hrs")
ax.xaxis.set_major_locator(hours3)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
plt.savefig('Moisture_vs_Time_24H.png', dpi=500)
plt.savefig('Moisture_vs_Time.png', dpi=500)
# plt.show()
def plot_temperature(dict, past24):
"""!
Plots temperature data in simple line chart
@param dict: Dicitonary containing timestamps and associated readings.
"""
lists = sorted(dict.items())
x, y = zip(*lists)
fig, ax = plt.subplots()
ax.plot(x, y, 'k', linewidth=2)
fig.autofmt_xdate()
hours6 = mdates.HourLocator(interval=6)
hours3 = mdates.HourLocator(interval=3)
# im = image.imread('./icons/Grow_Space_Logo.png')
# fig.figimage(im, 650, 0, zorder=3, alpha=0.2)
ax.xaxis.set_major_locator(hours6)
ax.xaxis.set_minor_locator(hours3)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d - %H'))
ax.tick_params(which='major', length=7, width=2, color='black')
ax.tick_params(which='minor', length=4, width=2, color='black')
ax.grid()
plt.title("Temperature Over Time")
plt.xlabel("Time (Month-Day Hour)")
plt.ylabel("Temperature (°C)")
DPI = fig.get_dpi()
fig.set_size_inches(2400.0/float(DPI),1220.0/float(DPI))
if past24:
datemin = np.datetime64(x[-1], 'h') - np.timedelta64(24, 'h')
datemax = np.datetime64(x[-1], 'h')
ax.set_xlim(datemin, datemax)
plt.xlabel("Hour")
plt.title('Temperature Past 24 Hrs')
ax.xaxis.set_major_locator(hours3)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
plt.savefig('Temperature_vs_Time_24H.png', dpi=500)
plt.savefig('Temperature_vs_Time.png', dpi=500)
# plt.show()
def boxplot_environment(df):
"""!
Creates a boxplot of all the relevant environment sensor data.
What is a boxplot?
Text from https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.boxplot.html:
The box extends from the Q1 to Q3 quartile values of the data, with a line at the median (Q2).
The whiskers extend from the edges of box to show the range of the data.
The position of the whiskers is set by default to 1.5 * IQR (IQR = Q3 - Q1) from the edges of the box.
Outlier points are those past the end of the whiskers.
@param df: dataframe object from which we generate a boxplot.
"""
df['VOC'] = df['VOC'].div(1000)
# with plt.style.context("seaborn"):
fig, ax = plt.subplots(1, 3)
fig.suptitle('Environment Sensor Data')
df.boxplot('Temperature', ax=ax[0])
df.boxplot('VOC', ax=ax[1], fontsize=12)
df.boxplot('Humidity', ax=ax[2])
ax[0].set_ylabel("Temperature (°C)")
ax[1].set_ylabel("VOC (kΩ)")
ax[2].set_ylabel("Humidity (%)")
plt.subplots_adjust(top=0.95)
DPI = fig.get_dpi()
fig.set_size_inches(2400.0/float(DPI),1220.0/float(DPI))
plt.savefig('Environment_Boxplot.png', dpi=500)
# plt.show()
def extract_data_from_log(data, pattern):
"""!
Function for extracting data out of a log file using regex matching.
Returns all regex match objects.
@param data: Raw data from the log file.
@param pattern: Regex pattern to use for matching.
"""
matches = list()
for line in data:
matches.append(re.match(pattern, line))
return matches
def generate_plots(root="./logs/", soil_sensor_log="soil_moisture_sensor_1.txt", environment_sensor_log="environment_sensor.txt"):
# Plot soil moisture data
with open(root+soil_sensor_log, "r") as myfile:
data = myfile.readlines()
matches = extract_data_from_log(data, soil_moisture_pattern)
data_dict = dict()
for match in matches:
# current_val = float(match.group(4)) # Raw voltage reading
current_val = float(match.group(5)) # Percentage reading
index_time = match.group(1) + " " + match.group(2)
index_dt = dt.datetime.strptime(index_time, "%Y-%m-%d %H:%M:%S.%f")
data_dict[index_dt] = current_val
plot_soil_moisture(data_dict, True)
plot_soil_moisture(data_dict, False)
# Plot temperature data
with open(root+environment_sensor_log, "r") as myfile:
data = myfile.readlines()
matches = extract_data_from_log(data, environment_sensor_pattern)
data_dict = dict()
temperature_dict = dict()
data_dict['Temperature'] = {}
data_dict['VOC'] = {}
data_dict['Humidity'] = {}
for match in matches:
index_time = match.group(1) + " " + match.group(2)
index_dt = dt.datetime.strptime(index_time, "%Y-%m-%d %H:%M:%S.%f")
data_dict['Temperature'][index_dt] = float(match.group(3))
data_dict['VOC'][index_dt] = float(match.group(4))
data_dict['Humidity'][index_dt] = float(match.group(5))
plot_temperature(data_dict['Temperature'], True)
plot_temperature(data_dict['Temperature'], False)
# Plot environment sensor data
df = pd.DataFrame.from_dict(data_dict, orient='columns')
df.reset_index(inplace=True)
boxplot_environment(df)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-r', '--root', type=str, default="", help='Root filepath of the log data')
parser.add_argument('-s', '--soil', type=str, default="soil_moisture_sensor_1.txt", help='Name of soil moisture sensor log file')
parser.add_argument('-e', '--environment', type=str, default="environment_sensor.txt", help='Name of the envrionment sensor log file')
args = parser.parse_args()
if args.root:
root_folder = "./logs/"+args.root+"/"
else:
root_folder = "./logs/"
generate_plots(root_folder, args.soil, args.environment)
| [
"matplotlib.pylab.savefig",
"pandas.DataFrame.from_dict",
"argparse.ArgumentParser",
"numpy.datetime64",
"matplotlib.pylab.title",
"pandas.plotting.register_matplotlib_converters",
"matplotlib.pylab.ylabel",
"re.match",
"matplotlib.pylab.rcParams.update",
"matplotlib.dates.HourLocator",
"matplotlib.pylab.subplots_adjust",
"matplotlib.dates.DateFormatter",
"matplotlib.pylab.xlabel",
"datetime.datetime.strptime",
"numpy.timedelta64",
"matplotlib.pylab.subplots",
"re.compile"
] | [((336, 368), 'pandas.plotting.register_matplotlib_converters', 'register_matplotlib_converters', ([], {}), '()\n', (366, 368), False, 'from pandas.plotting import register_matplotlib_converters\n'), ((369, 407), 'matplotlib.pylab.rcParams.update', 'plt.rcParams.update', (["{'font.size': 22}"], {}), "({'font.size': 22})\n", (388, 407), True, 'import matplotlib.pylab as plt\n'), ((437, 619), 're.compile', 're.compile', (['"""([0-9-]+)\\\\s([0-9:.]+):\\\\stemperature:\\\\s([0-9.]+),\\\\sgas:\\\\s([0-9]+),\\\\shumidity:\\\\s([0-9.]+),\\\\spressure:\\\\s([0-9.]+),\\\\saltitude:\\\\s([0-9.]+)"""', 're.MULTILINE'], {}), "(\n '([0-9-]+)\\\\s([0-9:.]+):\\\\stemperature:\\\\s([0-9.]+),\\\\sgas:\\\\s([0-9]+),\\\\shumidity:\\\\s([0-9.]+),\\\\spressure:\\\\s([0-9.]+),\\\\saltitude:\\\\s([0-9.]+)'\n , re.MULTILINE)\n", (447, 619), False, 'import re\n'), ((624, 723), 're.compile', 're.compile', (['"""([0-9-]+)\\\\s([0-9.:]+):\\\\s\\\\[([0-9]+),\\\\s([0-9.]+),\\\\s([0-9.]+)\\\\]"""', 're.MULTILINE'], {}), "('([0-9-]+)\\\\s([0-9.:]+):\\\\s\\\\[([0-9]+),\\\\s([0-9.]+),\\\\s([0-9.]+)\\\\]'\n , re.MULTILINE)\n", (634, 723), False, 'import re\n'), ((971, 985), 'matplotlib.pylab.subplots', 'plt.subplots', ([], {}), '()\n', (983, 985), True, 'import matplotlib.pylab as plt\n'), ((1059, 1089), 'matplotlib.dates.HourLocator', 'mdates.HourLocator', ([], {'interval': '(6)'}), '(interval=6)\n', (1077, 1089), True, 'import matplotlib.dates as mdates\n'), ((1103, 1133), 'matplotlib.dates.HourLocator', 'mdates.HourLocator', ([], {'interval': '(3)'}), '(interval=3)\n', (1121, 1133), True, 'import matplotlib.dates as mdates\n'), ((1539, 1563), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""Day - Hour"""'], {}), "('Day - Hour')\n", (1549, 1563), True, 'import matplotlib.pylab as plt\n'), ((1568, 1605), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""Moisture Percentage (%)"""'], {}), "('Moisture Percentage (%)')\n", (1578, 1605), True, 'import matplotlib.pylab as plt\n'), ((1610, 1646), 'matplotlib.pylab.title', 'plt.title', (['"""Soil Moisture % vs Time"""'], {}), "('Soil Moisture % vs Time')\n", (1619, 1646), True, 'import matplotlib.pylab as plt\n'), ((2147, 2191), 'matplotlib.pylab.savefig', 'plt.savefig', (['"""Moisture_vs_Time.png"""'], {'dpi': '(500)'}), "('Moisture_vs_Time.png', dpi=500)\n", (2158, 2191), True, 'import matplotlib.pylab as plt\n'), ((2458, 2472), 'matplotlib.pylab.subplots', 'plt.subplots', ([], {}), '()\n', (2470, 2472), True, 'import matplotlib.pylab as plt\n'), ((2546, 2576), 'matplotlib.dates.HourLocator', 'mdates.HourLocator', ([], {'interval': '(6)'}), '(interval=6)\n', (2564, 2576), True, 'import matplotlib.dates as mdates\n'), ((2590, 2620), 'matplotlib.dates.HourLocator', 'mdates.HourLocator', ([], {'interval': '(3)'}), '(interval=3)\n', (2608, 2620), True, 'import matplotlib.dates as mdates\n'), ((3026, 3060), 'matplotlib.pylab.title', 'plt.title', (['"""Temperature Over Time"""'], {}), "('Temperature Over Time')\n", (3035, 3060), True, 'import matplotlib.pylab as plt\n'), ((3065, 3100), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""Time (Month-Day Hour)"""'], {}), "('Time (Month-Day Hour)')\n", (3075, 3100), True, 'import matplotlib.pylab as plt\n'), ((3105, 3135), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""Temperature (°C)"""'], {}), "('Temperature (°C)')\n", (3115, 3135), True, 'import matplotlib.pylab as plt\n'), ((3635, 3682), 'matplotlib.pylab.savefig', 'plt.savefig', (['"""Temperature_vs_Time.png"""'], {'dpi': '(500)'}), "('Temperature_vs_Time.png', dpi=500)\n", (3646, 3682), True, 'import matplotlib.pylab as plt\n'), ((4450, 4468), 'matplotlib.pylab.subplots', 'plt.subplots', (['(1)', '(3)'], {}), '(1, 3)\n', (4462, 4468), True, 'import matplotlib.pylab as plt\n'), ((4750, 4779), 'matplotlib.pylab.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.95)'}), '(top=0.95)\n', (4769, 4779), True, 'import matplotlib.pylab as plt\n'), ((4869, 4916), 'matplotlib.pylab.savefig', 'plt.savefig', (['"""Environment_Boxplot.png"""'], {'dpi': '(500)'}), "('Environment_Boxplot.png', dpi=500)\n", (4880, 4916), True, 'import matplotlib.pylab as plt\n'), ((6907, 6958), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['data_dict'], {'orient': '"""columns"""'}), "(data_dict, orient='columns')\n", (6929, 6958), True, 'import pandas as pd\n'), ((7063, 7107), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (7086, 7107), False, 'import argparse\n'), ((1488, 1519), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%d - %H"""'], {}), "('%d - %H')\n", (1508, 1519), True, 'import matplotlib.dates as mdates\n'), ((1835, 1860), 'numpy.datetime64', 'np.datetime64', (['x[-1]', '"""h"""'], {}), "(x[-1], 'h')\n", (1848, 1860), True, 'import numpy as np\n'), ((1907, 1925), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""Hour"""'], {}), "('Hour')\n", (1917, 1925), True, 'import matplotlib.pylab as plt\n'), ((1934, 1974), 'matplotlib.pylab.title', 'plt.title', (['"""Soil Moisture % Past 24 Hrs"""'], {}), "('Soil Moisture % Past 24 Hrs')\n", (1943, 1974), True, 'import matplotlib.pylab as plt\n'), ((2094, 2142), 'matplotlib.pylab.savefig', 'plt.savefig', (['"""Moisture_vs_Time_24H.png"""'], {'dpi': '(500)'}), "('Moisture_vs_Time_24H.png', dpi=500)\n", (2105, 2142), True, 'import matplotlib.pylab as plt\n'), ((2839, 2870), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%d - %H"""'], {}), "('%d - %H')\n", (2859, 2870), True, 'import matplotlib.dates as mdates\n'), ((3324, 3349), 'numpy.datetime64', 'np.datetime64', (['x[-1]', '"""h"""'], {}), "(x[-1], 'h')\n", (3337, 3349), True, 'import numpy as np\n'), ((3396, 3414), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""Hour"""'], {}), "('Hour')\n", (3406, 3414), True, 'import matplotlib.pylab as plt\n'), ((3423, 3459), 'matplotlib.pylab.title', 'plt.title', (['"""Temperature Past 24 Hrs"""'], {}), "('Temperature Past 24 Hrs')\n", (3432, 3459), True, 'import matplotlib.pylab as plt\n'), ((3579, 3630), 'matplotlib.pylab.savefig', 'plt.savefig', (['"""Temperature_vs_Time_24H.png"""'], {'dpi': '(500)'}), "('Temperature_vs_Time_24H.png', dpi=500)\n", (3590, 3630), True, 'import matplotlib.pylab as plt\n'), ((5888, 5944), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['index_time', '"""%Y-%m-%d %H:%M:%S.%f"""'], {}), "(index_time, '%Y-%m-%d %H:%M:%S.%f')\n", (5908, 5944), True, 'import datetime as dt\n'), ((6508, 6564), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['index_time', '"""%Y-%m-%d %H:%M:%S.%f"""'], {}), "(index_time, '%Y-%m-%d %H:%M:%S.%f')\n", (6528, 6564), True, 'import datetime as dt\n'), ((1765, 1790), 'numpy.datetime64', 'np.datetime64', (['x[-1]', '"""h"""'], {}), "(x[-1], 'h')\n", (1778, 1790), True, 'import numpy as np\n'), ((1793, 1816), 'numpy.timedelta64', 'np.timedelta64', (['(24)', '"""h"""'], {}), "(24, 'h')\n", (1807, 1816), True, 'import numpy as np\n'), ((2055, 2084), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%H:%M"""'], {}), "('%H:%M')\n", (2075, 2084), True, 'import matplotlib.dates as mdates\n'), ((3254, 3279), 'numpy.datetime64', 'np.datetime64', (['x[-1]', '"""h"""'], {}), "(x[-1], 'h')\n", (3267, 3279), True, 'import numpy as np\n'), ((3282, 3305), 'numpy.timedelta64', 'np.timedelta64', (['(24)', '"""h"""'], {}), "(24, 'h')\n", (3296, 3305), True, 'import numpy as np\n'), ((3540, 3569), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%H:%M"""'], {}), "('%H:%M')\n", (3560, 3569), True, 'import matplotlib.dates as mdates\n'), ((5271, 5294), 're.match', 're.match', (['pattern', 'line'], {}), '(pattern, line)\n', (5279, 5294), False, 'import re\n')] |
from sklearn.preprocessing import PolynomialFeatures
poly = PolynomialFeatures(6)
def map(x):
return poly.fit_transform(x)
| [
"sklearn.preprocessing.PolynomialFeatures"
] | [((61, 82), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', (['(6)'], {}), '(6)\n', (79, 82), False, 'from sklearn.preprocessing import PolynomialFeatures\n')] |
from django.utils.translation import gettext as _
DAY_CHOICES = (
(1, _('Monday')),
(2, _('Tuesday')),
(3, _('Wednesday')),
(4, _('Thursday')),
(5, _('Friday')),
(6, _('Saturday')),
(7, _('Sunday')),
) | [
"django.utils.translation.gettext"
] | [((75, 86), 'django.utils.translation.gettext', '_', (['"""Monday"""'], {}), "('Monday')\n", (76, 86), True, 'from django.utils.translation import gettext as _\n'), ((97, 109), 'django.utils.translation.gettext', '_', (['"""Tuesday"""'], {}), "('Tuesday')\n", (98, 109), True, 'from django.utils.translation import gettext as _\n'), ((120, 134), 'django.utils.translation.gettext', '_', (['"""Wednesday"""'], {}), "('Wednesday')\n", (121, 134), True, 'from django.utils.translation import gettext as _\n'), ((145, 158), 'django.utils.translation.gettext', '_', (['"""Thursday"""'], {}), "('Thursday')\n", (146, 158), True, 'from django.utils.translation import gettext as _\n'), ((169, 180), 'django.utils.translation.gettext', '_', (['"""Friday"""'], {}), "('Friday')\n", (170, 180), True, 'from django.utils.translation import gettext as _\n'), ((191, 204), 'django.utils.translation.gettext', '_', (['"""Saturday"""'], {}), "('Saturday')\n", (192, 204), True, 'from django.utils.translation import gettext as _\n'), ((215, 226), 'django.utils.translation.gettext', '_', (['"""Sunday"""'], {}), "('Sunday')\n", (216, 226), True, 'from django.utils.translation import gettext as _\n')] |
# Generated by Django 3.1.12 on 2021-07-12 09:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("polio", "0016_config"),
]
operations = [
migrations.AddField(
model_name="campaign",
name="gpei_email",
field=models.EmailField(blank=True, max_length=254, null=True),
),
]
| [
"django.db.models.EmailField"
] | [((327, 383), 'django.db.models.EmailField', 'models.EmailField', ([], {'blank': '(True)', 'max_length': '(254)', 'null': '(True)'}), '(blank=True, max_length=254, null=True)\n', (344, 383), False, 'from django.db import migrations, models\n')] |
import os
import sqlite3
import random
import string
import time
import datetime
from datetime import timedelta
import operator
from tabulate import tabulate
import config
TS_GROUP_BY = dict(
timestamp=0b10000,
project=0b1000,
task=0b0100,
track=0b0010,
date=0b0001
)
class Database:
def init_db(self, db_path):
self.conn = sqlite3.connect(
db_path,
detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES
)
self.conn.row_factory = sqlite3.Row
self.conn.text_factory = lambda x: x.decode('utf8')
#try:
#except sqlite3.OperationalError:
self.cursor = self.conn.cursor()
def close_db(self):
self.conn.close()
def create_db(self):
self.cursor.execute("PRAGMA foreign_keys = ON")
# Create Tables if do the not exist
# PROJECTS
self.cursor.execute(
'CREATE TABLE IF NOT EXISTS Projects('
' id INTEGER PRIMARY KEY, '
' customer_id INTEGER, '
' name VARCHAR UNIQUE COLLATE NOCASE, '
' description TEXT DEFAULT "", '
' created TIMESTAMP'
')')
# TASKS
self.cursor.execute(
'CREATE TABLE IF NOT EXISTS Tasks('
' id INTEGER PRIMARY KEY, '
' project_id INTEGER REFERENCES Projects(id) ON DELETE CASCADE, '
' name VARCHAR COLLATE NOCASE, '
' description TEXT DEFAULT ""'
')')
# TRACKS
self.cursor.execute(
'CREATE TABLE IF NOT EXISTS Tracks('
' id INTEGER PRIMARY KEY, '
' task_id INTEGER REFERENCES Tasks(id) ON DELETE CASCADE, '
' started TIMESTAMP, '
' finished TIMESTAMP, '
' is_billed INTEGER DEFAULT 1'
')')
# CUSTOMERS
self.cursor.execute(
'CREATE TABLE IF NOT EXISTS Customers('
'id INTEGER PRIMARY KEY, '
'name VARCHAR UNIQUE COLLATE NOCASE, '
'description TEXT, '
'created TIMESTAMP'
')')
self.conn.commit()
def __init__(self, db_name):
# create DB
self.init_db(db_name)
self.create_db()
def insert_test_task(self, project_id):
name = ''.join(random.choice(
string.ascii_uppercase + string.digits) for _ in range(3))
self.cursor.execute(
"insert into Tasks ('name', 'project_id') "
"values('%s', '%s')" % (name, project_id)
)
self.conn.commit()
return self.cursor.lastrowid
def fill(self):
"""Fill with the test tasks"""
self.cursor.execute('DELETE FROM Customers')
self.cursor.execute('DELETE FROM Projects')
self.cursor.execute('DELETE FROM Tasks')
self.cursor.execute('DELETE FROM Tracks')
# Add a Customer
self.cursor.execute(
"insert into Customers ('name', 'description') "
"VALUES ('Andrey', 'Customer Numer One')")
self.cursor.execute("SELECT * FROM Customers ORDER BY id LIMIT 1")
customers = self.cursor.fetchone()
#print('filled customers', customers)
# Add a Project
self.create_project('p1', 'Test Project #1')
self.cursor.execute("SELECT * FROM Projects ORDER BY id LIMIT 1")
project = self.cursor.fetchone()
#print('filled projects', project)
# Add the Task
last_task = self.insert_test_task(project_id=1)
# Add the Tracks
started = datetime.datetime.now() - timedelta(days=4)
self.create_track(last_task, started=started,
finished=started + timedelta(seconds=3601))
self.create_track(last_task, started=started+timedelta(seconds=13600),
finished=started+timedelta(seconds=14600))
self.create_track(last_task, started=started+timedelta(seconds=15600),
finished=started+timedelta(seconds=16600))
last_task = self.insert_test_task(project_id=1)
self.create_track(last_task, started=started+timedelta(seconds=17600),
finished=started+timedelta(seconds=18600))
self.create_track(last_task, started=started+timedelta(seconds=19600),
finished=started+timedelta(seconds=20600))
# Add a Project #2
self.create_project('p2', 'Test Project #1')
self.cursor.execute("SELECT * FROM Projects ORDER BY id LIMIT 1")
project = self.cursor.fetchone()
#print('filled projects', project)
# Add the Task
tasks = []
last_task = self.insert_test_task(project_id=2)
self.create_track(last_task, started=started+timedelta(seconds=21600),
finished=started+timedelta(seconds=22600))
self.create_track(last_task, started=started+timedelta(seconds=23600),
finished=started+timedelta(seconds=24600))
self.create_track(last_task, started=started+timedelta(seconds=25600),
finished=started+timedelta(seconds=26600))
started = datetime.datetime.now() - timedelta(days=3)
self.create_track(last_task, started=started,
finished=started + timedelta(seconds=3600))
started = datetime.datetime.now() - timedelta(days=2)
self.create_track(last_task, started=started,
finished=started + timedelta(seconds=3600))
started = datetime.datetime.now() - timedelta(days=1)
self.create_track(last_task, started=started,
finished=started + timedelta(seconds=3600))
started = datetime.datetime.now() - timedelta(seconds=3300)
self.create_track(last_task, started=started,
finished=started + timedelta(seconds=600))
last_track = self.create_track(last_task)
self.cursor.execute("SELECT * FROM Tracks ")
tracks = self.cursor.fetchall()
#print('filled tracks', tracks)
print(tabulate(tracks, ['Track id', 'Task id', 'started', 'finished', 'billed'],
tablefmt='simple'))
return
# CUSTOMERS
def get_customer(self, customer):
self.cursor.execute(
"SELECT id, name FROM Customers "
"WHERE name == '{name:s}'".format(name=customer)
)
customer = self.cursor.fetchone()
return customer
def get_customer_or_create(self, customer):
self.cursor.execute(
"SELECT id, name FROM Customers "
"WHERE name == '{name:s}'".format(name=customer)
)
customer = self.cursor.fetchone()
if customer:
return customer
self.cursor.execute(
"INSERT INTO Customers ('name')"
"VALUES ('{name:s}')"
.format(name=customer)
)
self.conn.commit()
# PROJECTS
def get_project_by_name(self, pname):
self.cursor.execute(
"SELECT "
" id as pid, name as pname, created as created, "
" description as description "
"FROM Projects "
"WHERE "
" Projects.name == ?", (pname.encode('utf8'),)
)
return self.cursor.fetchone()
def update_project(self, pid, name, description):
"""Updates a project"""
self.cursor.execute(
"UPDATE Projects "
"SET name=?, description=?"
"WHERE id=?", (name.encode('utf8'), description.encode('utf8'),
pid)
)
self.conn.commit()
def is_project_existent(self, pname, pid):
"""Checks if project already exists """
self.cursor.execute(
"SELECT "
" id as pid, name as name, created as created, "
" description as description "
"FROM Projects "
"WHERE "
" pid == '{pid}'"
" name == '{name}'".format(name=pname.encode('utf8'), pid=pid)
)
return self.cursor.fetchone()
def get_projects_with_activity_field(self, from_date='', to_date='', limit=0):
"""Get list of project including a field is a project is finished"""
where_clause = first_limit_clause = last_limit_clause = ''
if limit:
first_limit_clause = "SELECT * FROM ("
last_limit_clause = " DESC LIMIT %d) ORDER BY pid ASC" % limit
if from_date and to_date:
where_clause = " AND DATE(Projects.created) BETWEEN '{from_date}' " \
"AND '{to_date}' ".format(from_date=from_date,
to_date=to_date)
self.cursor.execute(
"{first_limit_clause}"
"SELECT "
" Projects.id as pid, Projects.name, Projects.created, "
" Projects.description, "
" SUM(CASE WHEN Tracks.finished == '' THEN 1 ELSE 0 end) AS active "
"FROM Projects, Tracks, Tasks "
"WHERE "
" Tasks.project_id == Projects.id AND "
" Tracks.task_id == Tasks.id {where_clause}"
"GROUP BY Projects.id "
"UNION SELECT "
" Projects.id as pid, Projects.name, Projects.created,"
" Projects.description, '' as active "
"FROM Projects "
"WHERE NOT EXISTS ("
" SELECT id FROM Tasks WHERE "
" Tasks.project_id == Projects.id "
") {where_clause}"
"ORDER BY Projects.id {last_limit_clause}".format(
where_clause=where_clause, first_limit_clause=first_limit_clause,
last_limit_clause=last_limit_clause)
)
return self.cursor.fetchall()
def create_project(self, pname, description=''):
"""Create a project"""
self.cursor.execute(
"INSERT INTO Projects ('name', 'description', created)"
"VALUES (?, ?, ?)", (
pname.encode('utf8'),
description.encode('utf8'),
str(datetime.datetime.now())
)
)
self.conn.commit()
return self.cursor.lastrowid
def get_project_or_create(self, pname):
self.cursor.execute(
"SELECT id, name FROM Projects "
"WHERE name == '{name:s}'".format(name=pname.encode('utf8'))
)
project = self.cursor.fetchone()
if project:
return project
return self.create_project(name)
def delete_project_by_name(self, pname):
self.cursor.execute(
"DELETE FROM Projects WHERE name == '{name}'"
"".format(name=pname.encode('utf8')))
self.conn.commit()
# TASKS
def get_tasks(self, limit=10, add_activity=False):
"""Lists of last tasks"""
activity_field = ''
if add_activity:
activity_field = ", SUM(CASE WHEN Tracks.finished == '' THEN 1 ELSE 0 END) "
self.cursor.execute(
"SELECT "
" Tasks.id, Tasks.name, Projects.id, Projects.name, "
" Tasks.description {activity_field}"
"FROM Tasks, Projects, Tracks "
"WHERE "
" Tasks.project_id == Projects.id AND "
" Tracks.task_id == Tasks.id "
"GROUP BY Tasks.id "
"ORDER BY Tasks.id DESC LIMIT {limit:d}".format(
limit=limit, activity_field=activity_field)
)
tasks = self.cursor.fetchall()
return tasks
def get_profiled_tasks(self, started='', finished='', limit=0):
"""The list of last tasks between dates including unfinished"""
where_clause = first_limit_clause = last_limit_clause = ''
if started and finished:
where_clause = str(
"WHERE DATE(Tracks.started) BETWEEN '{started}' AND '{finished}'"
"".format(started=started, finished=finished))
if limit:
first_limit_clause = "SELECT * FROM ("
last_limit_clause = " DESC LIMIT %d) ORDER BY tid ASC" % limit
self.cursor.execute(
"{first_limit_clause}"
"SELECT "
" Tasks.id as tid, Tasks.name as tname, Projects.id as pid, "
" Projects.name as pname, Tasks.description as description, "
" Tracks.started as started, Tracks.finished as finished "
"FROM Tasks, Projects, Tracks "
"WHERE "
" Tasks.project_id == Projects.id AND "
" Tracks.task_id == Tasks.id AND "
" Tracks.id IN ("
" SELECT MAX(Tracks.id) FROM Tracks "
" {where_clause} "
" GROUP BY Tracks.task_id "
" ) ORDER BY tid {last_limit_clause}"
"".format(
where_clause=where_clause,
first_limit_clause=first_limit_clause,
last_limit_clause=last_limit_clause)
)
tasks = self.cursor.fetchall()
return tasks
def get_task_by_alias(self, tname, pname):
"""Get task by name"""
self.cursor.execute(
"SELECT "
" Tasks.id as tid, Tasks.name as tname, Projects.id as pid, "
" Projects.name as pname, Tasks.description as description "
"FROM Tasks, Projects "
"WHERE "
" Tasks.project_id == pid AND "
" tname == '{task:s}' AND "
" pname == '{project:s}'"
"".format(task=tname.encode('utf8'), project=pname.encode('utf8'))
)
return self.cursor.fetchone()
def create_task(self, name, pid):
self.cursor.execute(
"INSERT INTO Tasks ('name', 'project_id') "
"VALUES "
" (?, ?)", (
name.encode('utf8'),
pid
)
)
self.conn.commit()
return self.cursor.lastrowid
def get_task_or_create(self, name, project_id):
"""Get a task or create one"""
self.cursor.execute(
"SELECT "
" Tasks.id as tid, Tasks.name as tname, Projects.id as pid, "
" Projects.name as pname, Tasks.description as description "
"FROM Tasks, Projects "
"WHERE "
" tname == '{task}' AND "
" Tasks.project_id == pid AND "
" pid == '{project!s}'"
"".format(task=name.encode('utf8'), project=project_id)
)
last = self.cursor.fetchone()
if last:
return last['tid']
return self.create_task(name, project_id)
def _get_active_tasks(self):
"""Get active tasks"""
self.cursor.execute(
"SELECT "
" Tasks.id as tid, Tasks.name as tname, Projects.name as pname, "
" Tracks.id as track_id, Tracks.started as started, "
" Tracks.finished as finished, "
" Tasks.description as description "
"FROM Tracks, Tasks, Projects "
"WHERE "
" Tracks.task_id == Tasks.id AND "
" Tasks.project_id == Projects.id AND "
" finished == ''")
return self.cursor.fetchall()
def get_active_task(self, started='', finished='', tname='', pname=''):
"""Get an active task"""
params = []
where_date_clause = where_project_clause = where_task_clause = ''
if tname:
tname = tname.encode('utf8')
where_task_clause = "tname == ? AND "
params.append(tname)
if pname:
pname = pname.encode('utf8')
where_project_clause = "pname == ? AND "
params.append(pname)
if started and finished:
where_date_clause = "AND DATE(Tracks.started) " \
" BETWEEN ? " \
" AND ? "
params.extend([started, finished])
self.cursor.execute(
"SELECT "
" Tasks.id as tid, Tasks.name as tname, Projects.name as pname, "
" Tracks.id as track_id, Tracks.started as started, "
" Tracks.finished as finished, "
" Tasks.description as description "
"FROM Tracks, Tasks, Projects "
"WHERE "
" {where_task_clause}"
" {where_project_clause}"
" Tracks.task_id == Tasks.id AND "
" Tasks.project_id == Projects.id AND "
" finished == '' "
" {where_date_clause}".format(
where_date_clause=where_date_clause,
where_project_clause=where_project_clause,
where_task_clause=where_task_clause
), params
)
return self.cursor.fetchone()
def update_task(self, tid, name, description=''):
"""Updates the task info"""
self.cursor.execute(
"UPDATE Tasks "
"SET name=?, description=?"
"WHERE id=?", (
name.encode('utf8'),
description.encode('utf8'),
tid
)
)
self.conn.commit()
def delete_task(self, tid):
""""""
self.cursor.execute(
"DELETE FROM Tasks WHERE id == '{tid}'".format(tid=tid))
self.conn.commit()
# TRACKS
def get_tracks_by_date(self, started='', finished='', also_unfinished=False):
"""Get tracks"""
where_clause = ''
between_clause = ''
params = []
if not also_unfinished:
where_clause = "AND NOT finished == '' "
if started and finished:
between_clause = "AND DATE(started) BETWEEN ? AND ?"
params.extend([started, finished])
self.cursor.execute(
"SELECT "
" Tasks.id as tid, Tasks.name as tname, "
" Projects.id as pid, Projects.name as pname, "
" Tracks.id as trid, Tracks.started as started, "
" Tracks.finished as finished, "
" Tracks.is_billed as is_billed "
"FROM Tracks, Tasks, Projects "
"WHERE "
" Tracks.task_id == tid AND "
" Tasks.project_id == pid"
" {where_clause} "
" {between_clause} "
"ORDER BY Tracks.id".format(started=started,
finished=finished,
where_clause=where_clause,
between_clause=between_clause),
params
)
return self.cursor.fetchall()
def get_track_by_id(self, tid):
self.cursor.execute(
"SELECT "
" Tasks.id as tid, Tasks.name as tname, Projects.name as pname, "
" Tracks.id as trid, Tracks.started as started, "
" Tracks.finished as finished, "
" Tracks.is_billed as is_billed "
"FROM Tracks, Tasks, Projects "
"WHERE "
" Tracks.task_id == tid AND "
" Tasks.project_id == Projects.id AND "
" trid == %d" % tid
)
return self.cursor.fetchone()
def create_track(self, task_id, started='', finished='', is_billed=True):
# started, finished - 9-item sequence, not float
if not started:
started = datetime.datetime.now()
self.cursor.execute(
"INSERT INTO Tracks "
" ('task_id', 'started', 'finished', 'is_billed') "
"VALUES (?, ?, ?, ?)", (task_id, started, finished, int(is_billed))
)
self.conn.commit()
return self.cursor.lastrowid
def finish_track(self, track_id, started=None):
finished = datetime.datetime.now()
if started and config.BT_TIMESHEET_ROUNDING and config.BT_ROUNDING_INCREMENT:
delta = finished - started
round_to = config.BT_ROUNDING_INCREMENT * 60
seconds = round_to - delta.seconds % round_to
finished = finished + datetime.timedelta(seconds=seconds)
self.cursor.execute(
"UPDATE Tracks SET finished=? WHERE id=?", (finished, track_id)
)
self.conn.commit()
return finished
def update_track(self, track_id, started, finished, is_billed):
"""Updates the time was spend and is billed flag of the track record"""
self.cursor.execute(
"UPDATE Tracks "
"SET started=?, finished=?, is_billed=? "
"WHERE id=?", (started, finished, is_billed, track_id)
)
self.conn.commit()
def delete_tracks_by_date(self, started, finished, also_unfinished=False):
"""Deletes tracks by the date"""
if not also_unfinished:
where_clause = "AND NOT finished == '' "
self.cursor.execute(
"DELETE "
" FROM Tracks "
"WHERE "
" DATE(started) BETWEEN ? AND ?"
" {where_clause}"
"".format(where_clause=where_clause),
(started, finished)
)
self.conn.commit()
# TIMESHEET
def get_group_by_clause(self, mask):
"""Makes a GROUP BY clause by bit mask"""
def set_group_by_clause(bits, value, group_by):
"""Add a field to group_by clause"""
if mask & bits:
if group_by:
group_by = "%s," % group_by
group_by = '{group_by} {value}'.format(group_by=group_by,
value=value)
return group_by
group_by = set_group_by_clause(TS_GROUP_BY['date'], 'DATE(started)', '')
group_by = set_group_by_clause(TS_GROUP_BY['project'], 'Tasks.project_id',
group_by)
group_by = set_group_by_clause(TS_GROUP_BY['task'], 'Tracks.task_id',
group_by)
group_by = set_group_by_clause(TS_GROUP_BY['track'], 'Tracks.id', group_by)
if group_by:
group_by = "GROUP BY %s " % group_by
return group_by
def get_timesheet_fields(self, mask, get_headers=False):
"""Makes a list of ordered fields"""
# Priority:
# datetime - 0
# date - 1
# task - 2
# project - 3
# spent - 4
# date, tname, pname, started, finished, spent
date_field = (0, 'DATE(started) as "date [date]"', 'Date')
task_field = (1, 'tname', 'Task')
project_field = (2, 'pname', 'Project')
started_field = (3, 'DATETIME(started) as "started [timestamp]"', 'From')
finished_field = (4, 'DATETIME(finished) as "finished [timestamp]"', 'To')
spent_field = (5, 'spent', 'Time Spent')
clause = set()
if mask & TS_GROUP_BY['date']:
clause.add(date_field)
if mask & TS_GROUP_BY['task']:
clause.update([task_field, project_field])
if mask & TS_GROUP_BY['project']:
clause.add(project_field)
if mask & TS_GROUP_BY['track']:
clause.update([task_field, project_field, started_field,
finished_field])
clause.add(spent_field)
to_get = 2 if get_headers else 1
return map(operator.itemgetter(to_get),
sorted(clause, key=operator.itemgetter(0)))
def get_timesheet_select_clause(self, mask):
"""Get prepared select's clause list of fields"""
fields = self.get_timesheet_fields(mask)
return ', '.join(fields)
def get_minimal_started_track(self, tname='', pname=''):
"""Get a minimal tracked date"""
params = []
where_project_clause = where_task_clause = ''
if tname:
tname = tname.encode('utf8')
where_task_clause = "tname == ? AND "
params.append(tname)
if pname:
pname = pname.encode('utf8')
where_project_clause = "pname == ? AND "
params.append(pname)
self.cursor.execute(
"SELECT "
" Tasks.id as tid, Tasks.name as tname, Projects.name as pname, "
" DATE(started) as 'started [date]'"
"FROM Tracks, Tasks, Projects "
"WHERE "
" {where_task_clause}"
" {where_project_clause}"
" Tracks.task_id == tid AND "
" Tasks.project_id == Projects.id"
"".format(where_task_clause=where_task_clause,
where_project_clause=where_project_clause), params)
return self.cursor.fetchone()
def get_timesheet(self, started, finished, group_by_mask, only_billed=True,
tname='', pname=''):
""" Gets the time was spent for a task/project"""
params = []
only_billed_clause = where_project_clause = where_task_clause = ''
if tname:
params.append(tname.encode('utf8'))
where_task_clause = "tname == ? AND "
if pname:
params.append(pname.encode('utf8'))
where_project_clause = "pname == ? AND "
if only_billed:
only_billed_clause = " AND Tracks.is_billed == 1 "
params.extend([started, finished])
group_by_clause = self.get_group_by_clause(group_by_mask)
query = str(
"SELECT "
" Tasks.id as tid, Tasks.name as tname, Projects.name as pname, "
" SUM(STRFTIME('%s', finished)-STRFTIME('%s', started)) as spent,"
" Tracks.started as started, "
" Tracks.finished as finished "
"FROM Tracks, Tasks, Projects "
"WHERE "
" {where_task_clause}"
" {where_project_clause}"
" Tracks.task_id == tid AND "
" Tasks.project_id == Projects.id AND "
" ("
" DATE(started) BETWEEN ? AND ?"
" AND NOT Tracks.finished == ''"
" {only_billed_clause}"
" ) "
"{group_by_clause} "
"ORDER BY started, Tasks.id"
"".format(started=started, finished=finished,
where_task_clause=where_task_clause,
where_project_clause=where_project_clause,
group_by_clause=group_by_clause,
only_billed_clause=only_billed_clause)
)
#print(query)
if group_by_mask:
select_clause = self.get_timesheet_select_clause(group_by_mask)
query = "SELECT {clause} FROM ({query})".format(
query=query, clause=select_clause)
self.cursor.execute(query, params)
return self.cursor.fetchall()
| [
"random.choice",
"tabulate.tabulate",
"sqlite3.connect",
"datetime.timedelta",
"operator.itemgetter",
"datetime.datetime.now"
] | [((362, 454), 'sqlite3.connect', 'sqlite3.connect', (['db_path'], {'detect_types': '(sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)'}), '(db_path, detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.\n PARSE_COLNAMES)\n', (377, 454), False, 'import sqlite3\n'), ((19979, 20002), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (20000, 20002), False, 'import datetime\n'), ((3620, 3643), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3641, 3643), False, 'import datetime\n'), ((3646, 3663), 'datetime.timedelta', 'timedelta', ([], {'days': '(4)'}), '(days=4)\n', (3655, 3663), False, 'from datetime import timedelta\n'), ((5236, 5259), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5257, 5259), False, 'import datetime\n'), ((5262, 5279), 'datetime.timedelta', 'timedelta', ([], {'days': '(3)'}), '(days=3)\n', (5271, 5279), False, 'from datetime import timedelta\n'), ((5422, 5445), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5443, 5445), False, 'import datetime\n'), ((5448, 5465), 'datetime.timedelta', 'timedelta', ([], {'days': '(2)'}), '(days=2)\n', (5457, 5465), False, 'from datetime import timedelta\n'), ((5608, 5631), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5629, 5631), False, 'import datetime\n'), ((5634, 5651), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (5643, 5651), False, 'from datetime import timedelta\n'), ((5794, 5817), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5815, 5817), False, 'import datetime\n'), ((5820, 5843), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(3300)'}), '(seconds=3300)\n', (5829, 5843), False, 'from datetime import timedelta\n'), ((6164, 6261), 'tabulate.tabulate', 'tabulate', (['tracks', "['Track id', 'Task id', 'started', 'finished', 'billed']"], {'tablefmt': '"""simple"""'}), "(tracks, ['Track id', 'Task id', 'started', 'finished', 'billed'],\n tablefmt='simple')\n", (6172, 6261), False, 'from tabulate import tabulate\n'), ((19598, 19621), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (19619, 19621), False, 'import datetime\n'), ((23538, 23565), 'operator.itemgetter', 'operator.itemgetter', (['to_get'], {}), '(to_get)\n', (23557, 23565), False, 'import operator\n'), ((2362, 2415), 'random.choice', 'random.choice', (['(string.ascii_uppercase + string.digits)'], {}), '(string.ascii_uppercase + string.digits)\n', (2375, 2415), False, 'import random\n'), ((20277, 20312), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'seconds'}), '(seconds=seconds)\n', (20295, 20312), False, 'import datetime\n'), ((3763, 3786), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(3601)'}), '(seconds=3601)\n', (3772, 3786), False, 'from datetime import timedelta\n'), ((3841, 3865), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(13600)'}), '(seconds=13600)\n', (3850, 3865), False, 'from datetime import timedelta\n'), ((3910, 3934), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(14600)'}), '(seconds=14600)\n', (3919, 3934), False, 'from datetime import timedelta\n'), ((3989, 4013), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(15600)'}), '(seconds=15600)\n', (3998, 4013), False, 'from datetime import timedelta\n'), ((4058, 4082), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(16600)'}), '(seconds=16600)\n', (4067, 4082), False, 'from datetime import timedelta\n'), ((4194, 4218), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(17600)'}), '(seconds=17600)\n', (4203, 4218), False, 'from datetime import timedelta\n'), ((4263, 4287), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(18600)'}), '(seconds=18600)\n', (4272, 4287), False, 'from datetime import timedelta\n'), ((4342, 4366), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(19600)'}), '(seconds=19600)\n', (4351, 4366), False, 'from datetime import timedelta\n'), ((4411, 4435), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(20600)'}), '(seconds=20600)\n', (4420, 4435), False, 'from datetime import timedelta\n'), ((4826, 4850), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(21600)'}), '(seconds=21600)\n', (4835, 4850), False, 'from datetime import timedelta\n'), ((4895, 4919), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(22600)'}), '(seconds=22600)\n', (4904, 4919), False, 'from datetime import timedelta\n'), ((4974, 4998), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(23600)'}), '(seconds=23600)\n', (4983, 4998), False, 'from datetime import timedelta\n'), ((5043, 5067), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(24600)'}), '(seconds=24600)\n', (5052, 5067), False, 'from datetime import timedelta\n'), ((5122, 5146), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(25600)'}), '(seconds=25600)\n', (5131, 5146), False, 'from datetime import timedelta\n'), ((5191, 5215), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(26600)'}), '(seconds=26600)\n', (5200, 5215), False, 'from datetime import timedelta\n'), ((5379, 5402), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(3600)'}), '(seconds=3600)\n', (5388, 5402), False, 'from datetime import timedelta\n'), ((5565, 5588), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(3600)'}), '(seconds=3600)\n', (5574, 5588), False, 'from datetime import timedelta\n'), ((5751, 5774), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(3600)'}), '(seconds=3600)\n', (5760, 5774), False, 'from datetime import timedelta\n'), ((5943, 5965), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(600)'}), '(seconds=600)\n', (5952, 5965), False, 'from datetime import timedelta\n'), ((10234, 10257), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (10255, 10257), False, 'import datetime\n'), ((23605, 23627), 'operator.itemgetter', 'operator.itemgetter', (['(0)'], {}), '(0)\n', (23624, 23627), False, 'import operator\n')] |
import json
import os
from rest_framework.renderers import JSONRenderer
from api.models import Project, DOCUMENT_CLASSIFICATION, SEQUENCE_LABELING
from api.serializers import LabelSerializer
from api.utils import JSONPainter
def extract_document_classification(label, labels):
return labels.get(pk=label["label"]).text
def extract_label_seq_labeling(label, labels):
return [
label["start_offset"],
label["end_offset"],
labels.get(pk=label["label"]).text,
]
def get_extract_label(project):
return {
DOCUMENT_CLASSIFICATION: extract_document_classification,
SEQUENCE_LABELING: extract_label_seq_labeling,
}[project.project_type]
def get_all_projects_json():
dump_dir = "projects_dump"
if not os.path.exists(dump_dir):
os.makedirs(dump_dir)
for project in Project.objects.all():
try:
project_dir = f"{dump_dir}/dump_{project.name.replace('/', '_')}"
if not os.path.exists(project_dir):
os.makedirs(project_dir)
print(f"Dumping {project.name}")
labels = project.labels.all()
label_serializer = LabelSerializer(labels, many=True)
documents = project.documents.all()
data = JSONPainter().paint(documents)
data = map(
lambda x: {
**x,
"labels": list(
map(
lambda y: get_extract_label(project)(y, labels),
x["annotations"],
)
),
},
data,
)
data = map(json.dumps, data)
data = map(lambda x: x + "\n", data)
with open(f"{project_dir}/labels.json", "wb") as f:
f.write(JSONRenderer().render(label_serializer.data))
with open(f"{project_dir}/data.jsonl", "w") as f:
f.writelines(data)
except Exception as ex:
print(f"Error {project.name} {ex}")
| [
"os.makedirs",
"api.utils.JSONPainter",
"os.path.exists",
"rest_framework.renderers.JSONRenderer",
"api.models.Project.objects.all",
"api.serializers.LabelSerializer"
] | [((843, 864), 'api.models.Project.objects.all', 'Project.objects.all', ([], {}), '()\n', (862, 864), False, 'from api.models import Project, DOCUMENT_CLASSIFICATION, SEQUENCE_LABELING\n'), ((768, 792), 'os.path.exists', 'os.path.exists', (['dump_dir'], {}), '(dump_dir)\n', (782, 792), False, 'import os\n'), ((802, 823), 'os.makedirs', 'os.makedirs', (['dump_dir'], {}), '(dump_dir)\n', (813, 823), False, 'import os\n'), ((1164, 1198), 'api.serializers.LabelSerializer', 'LabelSerializer', (['labels'], {'many': '(True)'}), '(labels, many=True)\n', (1179, 1198), False, 'from api.serializers import LabelSerializer\n'), ((976, 1003), 'os.path.exists', 'os.path.exists', (['project_dir'], {}), '(project_dir)\n', (990, 1003), False, 'import os\n'), ((1021, 1045), 'os.makedirs', 'os.makedirs', (['project_dir'], {}), '(project_dir)\n', (1032, 1045), False, 'import os\n'), ((1266, 1279), 'api.utils.JSONPainter', 'JSONPainter', ([], {}), '()\n', (1277, 1279), False, 'from api.utils import JSONPainter\n'), ((1844, 1858), 'rest_framework.renderers.JSONRenderer', 'JSONRenderer', ([], {}), '()\n', (1856, 1858), False, 'from rest_framework.renderers import JSONRenderer\n')] |
# -*- coding: utf-8 -*-
from django.db import models
from gestao.contrato.models.contrato.Contrato import Contrato
from gestao.financeiro.models.movimentacoes.Despesa import Despesa
from gestao.financeiro.models.pagamento.PagamentoDespesa import PagamentoDespesa
class ContratoDespesas(models.Model):
contrato = models.ForeignKey(Contrato, verbose_name="Contrato")
despesa = models.ForeignKey(Despesa, verbose_name="Despesa")
def __unicode__(self):
return u'%s: %s' % (self.contrato.titulo, self.despesa.valor_total)
def pagamento(self):
pagamento_despesa = PagamentoDespesa.objects.filter(despesa=self.despesa)
if pagamento_despesa:
return pagamento_despesa[0]
return None
class Meta:
app_label = 'contrato'
verbose_name = 'Despesa do Contrato'
verbose_name_plural = 'Despesas do Contrato'
| [
"django.db.models.ForeignKey",
"gestao.financeiro.models.pagamento.PagamentoDespesa.PagamentoDespesa.objects.filter"
] | [((318, 370), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Contrato'], {'verbose_name': '"""Contrato"""'}), "(Contrato, verbose_name='Contrato')\n", (335, 370), False, 'from django.db import models\n'), ((385, 435), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Despesa'], {'verbose_name': '"""Despesa"""'}), "(Despesa, verbose_name='Despesa')\n", (402, 435), False, 'from django.db import models\n'), ((602, 655), 'gestao.financeiro.models.pagamento.PagamentoDespesa.PagamentoDespesa.objects.filter', 'PagamentoDespesa.objects.filter', ([], {'despesa': 'self.despesa'}), '(despesa=self.despesa)\n', (633, 655), False, 'from gestao.financeiro.models.pagamento.PagamentoDespesa import PagamentoDespesa\n')] |
from unittest import TestCase
from simplifiedpytrends.request import TrendReq
class TestTrendReq(TestCase):
def test__get_data(self):
"""Should use same values as in the documentation"""
pytrend = TrendReq()
self.assertEqual(pytrend.hl, 'en-US')
self.assertEqual(pytrend.tz, 360)
self.assertEqual(pytrend.geo, '')
self.assertTrue(pytrend.cookies['NID'])
def test_interest_over_time(self):
pytrend = TrendReq()
pytrend.build_payload(kw_list=['pizza', 'bagel'])
self.assertIsNotNone(pytrend.interest_over_time())
| [
"simplifiedpytrends.request.TrendReq"
] | [((221, 231), 'simplifiedpytrends.request.TrendReq', 'TrendReq', ([], {}), '()\n', (229, 231), False, 'from simplifiedpytrends.request import TrendReq\n'), ((468, 478), 'simplifiedpytrends.request.TrendReq', 'TrendReq', ([], {}), '()\n', (476, 478), False, 'from simplifiedpytrends.request import TrendReq\n')] |
from __future__ import absolute_import, print_function
from flask import Blueprint
posts = Blueprint('posts', __name__)
from . import views
from . import models
| [
"flask.Blueprint"
] | [((93, 121), 'flask.Blueprint', 'Blueprint', (['"""posts"""', '__name__'], {}), "('posts', __name__)\n", (102, 121), False, 'from flask import Blueprint\n')] |
# -*- coding:utf-8 -*-
# ###########################
# File Name: hdataset.py
# Author: geekinglcq
# Mail: <EMAIL>
# Created Time: 2020-12-28 20:17:47
# ###########################
import pandas as pd
import os
import logging
from collections import defaultdict
from torch.utils.data import DataLoader, Dataset
from .enum_type import FeatureSource as FS
from .enum_type import item_type_dict
from .dataset import DataSet, SubSet
class HDataSet(DataSet):
"""
Dataset used for heterogenous items
"""
def __init__(self, config, restore_path=None):
self.config = config
self._init_setting()
if restore_path is None:
self._load_feats()
else:
# TODO
pass
self._preprocessing()
def _load_feats(self):
self.user_feat = self._load_meta_feats(self.config["user_feat_path"],
FS.USER, "user_id")
self.item_feat = self._load_item_feats(self.config["item_feat_path"],
FS.ITEM)
self.inter_feat = pd.read_csv(self.config["inter_feat_path"]).sample(
frac=1, random_state=28)
mask = None
if len(self.types) < 3:
for item_type, item_feat in self.item_feat.items():
new_mask = self.inter_feat[self.iid_field].isin(
item_feat[self.iid_field])
if mask is not None:
mask = mask | new_mask
else:
mask = new_mask
self.inter_feat = self.inter_feat[mask]
self.h_inter_feat = {}
self.user_num = len(self.user_feat)
self.item_num = sum([len(i) for i in self.item_feat.values()])
self.item_nums = {k: len(v) for k, v in self.item_feat.items()}
print(f'user num: {self.user_num}')
print(f'item num: {self.item_num}')
print(f'item nums: {self.item_nums}')
def _preprocessing(self):
self._normalize()
if len(self.types) < 3:
self._reID(self.iid_field)
self._reID(self.uid_field)
def _load_item_feats(self, paths, source):
item_feat = {}
for item_type, item_path in paths.items():
if item_type not in self.types:
continue
if os.path.isfile(item_path):
feat = pd.read_csv(item_path)
item_feat[item_type] = feat
else:
raise ValueError("Dataset file not fountd.")
return item_feat
def _init_setting(self):
self.logger = logging.getLogger()
self.name = self.config['name']
print(self.config)
self.uid_field = self.config["USER_ID_FIELD"]
self.iid_field = self.config["ITEM_ID_FIELD"]
self.label_field = self.config["LABEL_FIELD"]
self.itype_field = self.config["TYPE_FIELD"]
self.types = self.config["type"]
self.field2type = {}
self.field2source = {}
self.field2id_token = defaultdict(dict)
self.field2token_id = defaultdict(dict)
self.user_feat_fields = []
self.item_feat_fields = defaultdict(list)
for feat_name, feat_value in self.config['feat'].items():
source = feat_value['source']
self.field2type[feat_name] = feat_value['type']
self.field2source[feat_name] = feat_value['source']
if source == 'user' and feat_name != self.uid_field:
self.user_feat_fields.append(feat_name)
if source.startswith('item') and feat_name != self.iid_field:
item_type = source.split("_")[1]
if item_type in self.types:
self.item_feat_fields[item_type].append(feat_name)
def num(self, field):
if field == self.uid_field:
return self.user_num
if field == self.iid_field:
return self.item_num
if field not in self.field2type:
raise ValueError('field {} not in dataset'.format(field))
# if field not in self.field2token_id:
# raise ValueError('field {} is not token type'.format(field))
if len(self.field2token_id[field]) == 0:
if field in self.user_feat_fields:
return len(self.user_feat[field].unique())
else:
for item_type, item_feat_fields in self.item_feat_fields.items(
):
if field in item_feat_fields:
return len(self.item_feat[item_type][field].unique())
return len(self.field2token_id[field])
def _reID(self, field):
"""
Re-ID the token-type feature, save the id map in self.field2token_id
"""
self.logger.info(f'ReID field {field}.')
ftype = self.field2type.get(field)
assert ftype == 'token'
source = self.field2source.get(field)
if type(source) is str and source.startswith("item_"):
item_type = source.split("_")[1]
dataframe = self.item_feat[item_type]
elif source is FS.ITEM_ID or source == "item":
dataframe = pd.concat(list(self.item_feat.values()), join='inner')
elif source == 'user' or source is FS.USER_ID:
dataframe = self.user_feat
else:
dataframe = self.inter_feat
id_map = {v: k for k, v in enumerate(dataframe[field].unique())}
self.field2token_id[field].update(id_map)
dataframe[field] = dataframe[field].map(id_map)
if source in ['item', 'user', FS.ITEM_ID, FS.USER_ID]:
if field in self.inter_feat:
self.inter_feat[field] = self.inter_feat[field].map(id_map)
for item_type, item_feat in self.item_feat.items():
if field in item_feat:
item_feat[field] = item_feat[field].map(id_map)
def join(self, df):
"""
Join user/item features to interactions.
"""
if self.user_feat is not None and self.uid_field in df:
df = pd.merge(df,
self.user_feat,
on=self.uid_field,
how='left',
suffixes=('_inter', '_user'))
if self.item_feat is not None and self.iid_field in df:
for item_type, item_feat in self.item_feat.items():
df = pd.merge(df,
item_feat,
on=self.iid_field,
how='left',
suffixes=(f'_{item_type}', '_inter'))
type_c = [i for i in df.columns if i.startswith(self.itype_field)]
df[self.itype_field] = df[type_c].agg(sum, axis=1)
return df
def join_interaction(self):
self.inter_feat = self.join(self.inter_feat)
if 'sample' in self.config:
sample_ratio = self.config['sample']
sampled = []
for kind in self.types:
ratio = sample_ratio.get(kind, 1.0)
kind_id = item_type_dict[kind]
# preverse the data for val & test
new_df = self.inter_feat[self.inter_feat['type'] ==
kind_id].sample(frac=ratio * 0.7 +
0.3,
random_state=16)
print(kind, kind_id, ratio, new_df.shape)
sampled.append(new_df)
self.inter_feat = pd.concat(sampled, ignore_index=True)
self.inter_feat = self.inter_feat.sample(frac=1.).reset_index(
drop=True)
def train_val_test_split(self,
ratios=[0.7, 0.2, 0.1],
group_by=None,
**kwargs):
assert len(ratios) == 3
if 'sample' in self.config:
train, val, test = self.split_by_ratio_sampled(
ratios, create_new_dataset=False)
else:
train, val, test = self.split_by_ratio(ratios,
group_by=group_by,
create_new_dataset=False)
user_fs = self.user_feat_fields
item_fs = self.item_feat_fields
type_field = self.itype_field
self.train_inter_subset = {}
self.val_inter_subset = {}
self.test_inter_subset = {}
for item_type in self.types:
item_type_id = item_type_dict[item_type]
self.train_inter_subset[item_type] = SubSet(
train[train[type_field] == item_type_id], self.uid_field,
self.iid_field, self.itype_field, self.label_field, user_fs,
item_fs[item_type])
self.val_inter_subset[item_type] = SubSet(
val[val[type_field] == item_type_id], self.uid_field,
self.iid_field, self.itype_field, self.label_field, user_fs,
item_fs[item_type])
self.test_inter_subset[item_type] = SubSet(
test[test[type_field] == item_type_id], self.uid_field,
self.iid_field, self.itype_field, self.label_field, user_fs,
item_fs[item_type])
self.all_inter_feat = self.inter_feat
self.logger.info(
"Replace interaction features with train interaction fatures.")
self.logger.info(
"Interaction features are stored in self.all_inter_feat")
self.inter_feat = train
def init_data_loader(self, batch_size=256, num_workers=1):
self.train_data_loader = {}
self.val_data_loader = {}
self.test_data_loader = {}
for item_type in self.types:
self.train_data_loader[item_type] = DataLoader(
self.train_inter_subset[item_type],
batch_size=batch_size,
# pin_memory=True,
num_workers=num_workers)
self.val_data_loader[item_type] = DataLoader(
self.val_inter_subset[item_type],
batch_size=batch_size,
num_workers=num_workers)
self.test_data_loader[item_type] = DataLoader(
self.test_inter_subset[item_type],
batch_size=batch_size,
num_workers=num_workers)
class HSubSet(Dataset):
def __init__(self, dataframes, uid_field, iid_field, label_field,
u_feat_fields, i_feat_fields):
self.types = dataframes.keys()
self.dfs = dataframes
self.uid = uid_field
self.iid = iid_field
self.label = label_field
def __len__(self):
return min([len(df.index) for df in self.dfs])
| [
"torch.utils.data.DataLoader",
"pandas.read_csv",
"pandas.merge",
"collections.defaultdict",
"os.path.isfile",
"pandas.concat",
"logging.getLogger"
] | [((2605, 2624), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (2622, 2624), False, 'import logging\n'), ((3038, 3055), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (3049, 3055), False, 'from collections import defaultdict\n'), ((3086, 3103), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (3097, 3103), False, 'from collections import defaultdict\n'), ((3171, 3188), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3182, 3188), False, 'from collections import defaultdict\n'), ((2332, 2357), 'os.path.isfile', 'os.path.isfile', (['item_path'], {}), '(item_path)\n', (2346, 2357), False, 'import os\n'), ((6072, 6166), 'pandas.merge', 'pd.merge', (['df', 'self.user_feat'], {'on': 'self.uid_field', 'how': '"""left"""', 'suffixes': "('_inter', '_user')"}), "(df, self.user_feat, on=self.uid_field, how='left', suffixes=(\n '_inter', '_user'))\n", (6080, 6166), True, 'import pandas as pd\n'), ((7577, 7614), 'pandas.concat', 'pd.concat', (['sampled'], {'ignore_index': '(True)'}), '(sampled, ignore_index=True)\n', (7586, 7614), True, 'import pandas as pd\n'), ((9857, 9955), 'torch.utils.data.DataLoader', 'DataLoader', (['self.train_inter_subset[item_type]'], {'batch_size': 'batch_size', 'num_workers': 'num_workers'}), '(self.train_inter_subset[item_type], batch_size=batch_size,\n num_workers=num_workers)\n', (9867, 9955), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((10082, 10178), 'torch.utils.data.DataLoader', 'DataLoader', (['self.val_inter_subset[item_type]'], {'batch_size': 'batch_size', 'num_workers': 'num_workers'}), '(self.val_inter_subset[item_type], batch_size=batch_size,\n num_workers=num_workers)\n', (10092, 10178), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((10271, 10368), 'torch.utils.data.DataLoader', 'DataLoader', (['self.test_inter_subset[item_type]'], {'batch_size': 'batch_size', 'num_workers': 'num_workers'}), '(self.test_inter_subset[item_type], batch_size=batch_size,\n num_workers=num_workers)\n', (10281, 10368), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((1100, 1143), 'pandas.read_csv', 'pd.read_csv', (["self.config['inter_feat_path']"], {}), "(self.config['inter_feat_path'])\n", (1111, 1143), True, 'import pandas as pd\n'), ((2382, 2404), 'pandas.read_csv', 'pd.read_csv', (['item_path'], {}), '(item_path)\n', (2393, 2404), True, 'import pandas as pd\n'), ((6415, 6512), 'pandas.merge', 'pd.merge', (['df', 'item_feat'], {'on': 'self.iid_field', 'how': '"""left"""', 'suffixes': "(f'_{item_type}', '_inter')"}), "(df, item_feat, on=self.iid_field, how='left', suffixes=(\n f'_{item_type}', '_inter'))\n", (6423, 6512), True, 'import pandas as pd\n')] |
"""
Django settings for farmblr project.
Generated by 'django-admin startproject' using Django 4.0.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
import os
from pathlib import Path
from decouple import config, Csv
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
TEMPLATE_DIR = os.path.join(BASE_DIR, 'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# TODO: Make secret key secret
SECRET_KEY = 'django-insecure-xyjd9zz!%+e^k9emeu8--hvpp1zqv01e_85eis(dux3li8t2!$'
# SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
# TODO: Uncomment the below 2 and delete defaults (for production)
# DEBUG = config('DEBUG', default=True, cast=bool)
#
# ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'web',
'blog',
'accounts',
'products'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'farmblr.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'farmblr.wsgi.application'
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = 'static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
BASE_DIR / "static",
]
# User uploaded files
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Email configuration
# EMAIL_BACKEND = config('EMAIL_BACKEND')
# EMAIL_USE_TLS = config('EMAIL_USE_TLS', cast=bool)
# EMAIL_ACTIVE_FIELD = config('EMAIL_ACTIVE_FIELD')
# EMAIL_SERVER = EMAIL_HOST = config('EMAIL_HOST')
# EMAIL_PORT = config('EMAIL_PORT', cast=int)
# EMAIL_ADDRESS = EMAIL_HOST_USER = config('EMAIL_HOST_USER')
# EMAIL_FROM_ADDRESS = config('EMAIL_HOST_USER')
# EMAIL_PASSWORD = EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD') # os.environ['password_key'] suggested
# EMAIL_MAIL_SUBJECT = config('EMAIL_MAIL_SUBJECT')
# EMAIL_MAIL_HTML = config('EMAIL_MAIL_HTML')
# EMAIL_PAGE_TEMPLATE = config('EMAIL_PAGE_TEMPLATE')
# EMAIL_PAGE_DOMAIN = config('EMAIL_PAGE_DOMAIN')
# DEFAULT_FROM_EMAIL = config('EMAIL_FROM_ADDRESS')
| [
"pathlib.Path",
"os.path.join"
] | [((505, 540), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""templates"""'], {}), "(BASE_DIR, 'templates')\n", (517, 540), False, 'import os\n'), ((3482, 3519), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""staticfiles"""'], {}), "(BASE_DIR, 'staticfiles')\n", (3494, 3519), False, 'import os\n'), ((3627, 3659), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""media/"""'], {}), "(BASE_DIR, 'media/')\n", (3639, 3659), False, 'import os\n'), ((451, 465), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (455, 465), False, 'from pathlib import Path\n')] |
import os
import shutil
import pandas as pd
import torch
import PIL.Image as Image
import torchvision.transforms as transforms
import time
t = transforms.Compose([transforms.Resize((224,224))])
data_dir = '../../data'
image_dir = os.path.join(data_dir, 'isic/Images')
def main(csv_filename, include_sonic):
if include_sonic:
new_image_dir = image_dir.replace('Images','ImagesSmallerWithSonic')
p = pd.read_csv(os.path.join(data_dir,csv_filename))
else:
new_image_dir = image_dir.replace('Images','ImagesSmaller')
p = pd.read_csv(os.path.join(data_dir,csv_filename))
image_names = p['image_name'].values
if not os.path.exists(new_image_dir):
print('making ',new_image_dir)
os.mkdir(new_image_dir)
t1 = time.time()
print('resizing images')
for i,image_name in enumerate(image_names):
if i % 1000 == 0:
t2 = time.time()
print(i, t2-t1)
original = os.path.join(image_dir, image_name)
target = os.path.join(new_image_dir, image_name)
#shutil.copyfile(original, target)
#print(image_name)
img = Image.open(os.path.join(image_dir,image_name))
# tranform
img_t = t(img).convert("RGB")
img_t.save(os.path.join(new_image_dir,image_name),"JPEG")
if __name__ == '__main__':
main(csv_filename='isic/df_with_sonic_age_over_50_id.csv',include_sonic=False) | [
"os.mkdir",
"os.path.exists",
"time.time",
"os.path.join",
"torchvision.transforms.Resize"
] | [((233, 270), 'os.path.join', 'os.path.join', (['data_dir', '"""isic/Images"""'], {}), "(data_dir, 'isic/Images')\n", (245, 270), False, 'import os\n'), ((777, 788), 'time.time', 'time.time', ([], {}), '()\n', (786, 788), False, 'import time\n'), ((165, 194), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224, 224)'], {}), '((224, 224))\n', (182, 194), True, 'import torchvision.transforms as transforms\n'), ((665, 694), 'os.path.exists', 'os.path.exists', (['new_image_dir'], {}), '(new_image_dir)\n', (679, 694), False, 'import os\n'), ((743, 766), 'os.mkdir', 'os.mkdir', (['new_image_dir'], {}), '(new_image_dir)\n', (751, 766), False, 'import os\n'), ((969, 1004), 'os.path.join', 'os.path.join', (['image_dir', 'image_name'], {}), '(image_dir, image_name)\n', (981, 1004), False, 'import os\n'), ((1022, 1061), 'os.path.join', 'os.path.join', (['new_image_dir', 'image_name'], {}), '(new_image_dir, image_name)\n', (1034, 1061), False, 'import os\n'), ((434, 470), 'os.path.join', 'os.path.join', (['data_dir', 'csv_filename'], {}), '(data_dir, csv_filename)\n', (446, 470), False, 'import os\n'), ((573, 609), 'os.path.join', 'os.path.join', (['data_dir', 'csv_filename'], {}), '(data_dir, csv_filename)\n', (585, 609), False, 'import os\n'), ((909, 920), 'time.time', 'time.time', ([], {}), '()\n', (918, 920), False, 'import time\n'), ((1158, 1193), 'os.path.join', 'os.path.join', (['image_dir', 'image_name'], {}), '(image_dir, image_name)\n', (1170, 1193), False, 'import os\n'), ((1270, 1309), 'os.path.join', 'os.path.join', (['new_image_dir', 'image_name'], {}), '(new_image_dir, image_name)\n', (1282, 1309), False, 'import os\n')] |
from turtle import Screen
from snake import Snake
from food import Food
from scoreboard import Score
import time
screener = Screen()
screener.setup(width=600, height=600)
screener.bgcolor("black")
screener.title("SNAKE GAME")
screener.tracer(0)
snake = Snake()
food = Food()
scoreboard = Score()
screener.listen()
screener.onkey(snake.up, "Up")
screener.onkey(snake.down, "Down")
screener.onkey(snake.left, "Left")
screener.onkey(snake.right, "Right")
game_is_on = True
while game_is_on:
screener.update()
time.sleep(.1)
snake.move()
# Collision with food
if snake.head.distance(food) < 15:
food.refresh()
snake.extent()
scoreboard.increase_score()
# Detect collision with wall
if snake.head.xcor() > 280 or snake.head.xcor() < -280 or snake.head.ycor() > 280 or snake.head.ycor() < -280:
scoreboard.reset()
snake.reset()
# Detect collision with tail
for segment in snake.segments[1:]:
if segment == snake.head:
pass
elif snake.head.distance(segment) < 10:
scoreboard.reset()
snake.reset()
screener.exitonclick()
| [
"time.sleep",
"turtle.Screen",
"scoreboard.Score",
"food.Food",
"snake.Snake"
] | [((131, 139), 'turtle.Screen', 'Screen', ([], {}), '()\n', (137, 139), False, 'from turtle import Screen\n'), ((267, 274), 'snake.Snake', 'Snake', ([], {}), '()\n', (272, 274), False, 'from snake import Snake\n'), ((283, 289), 'food.Food', 'Food', ([], {}), '()\n', (287, 289), False, 'from food import Food\n'), ((304, 311), 'scoreboard.Score', 'Score', ([], {}), '()\n', (309, 311), False, 'from scoreboard import Score\n'), ((543, 558), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (553, 558), False, 'import time\n')] |
import os
import cv2
import numpy as np
def get_emojis():
emojis_folder = 'emoji/'
emojis = []
for emoji in range(len(os.listdir(emojis_folder))):
print(emoji)
emojis.append(cv2.imread(emojis_folder + str(emoji) + '.png', -1))
return emojis[0:len(emojis) - 1]
def overlay(image, emoji, x, y, w, h):
emoji = cv2.resize(emoji, (w, h))
try:
image[y:y + h, x:x + w] = blend_transparent(image[y:y + h, x:x + w], emoji)
except:
pass
return image
def blend_transparent(face_img, overlay_t_img):
# Split out the transparency mask from the colour info
overlay_img = overlay_t_img[:, :, :3] # Grab the BRG planes
overlay_mask = overlay_t_img[:, :, 3:] # And the alpha plane
# Again calculate the inverse mask
background_mask = 255 - overlay_mask
# Turn the masks into three channel, so we can use them as weights
overlay_mask = cv2.cvtColor(overlay_mask, cv2.COLOR_GRAY2BGR)
background_mask = cv2.cvtColor(background_mask, cv2.COLOR_GRAY2BGR)
# Create a masked out face image, and masked out overlay
# We convert the images to floating point in range 0.0 - 1.0
face_part = (face_img * (1 / 255.0)) * (background_mask * (1 / 255.0))
overlay_part = (overlay_img * (1 / 255.0)) * (overlay_mask * (1 / 255.0))
# And finally just add them together, and rescale it back to an 8bit integer image
return np.uint8(cv2.addWeighted(face_part, 255.0, overlay_part, 255.0, 0.0))
def rescale_frame(frame, percent=75):
width = int(frame.shape[1] * percent / 100)
height = int(frame.shape[0] * percent / 100)
dim = (width, height)
return cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
| [
"cv2.cvtColor",
"os.listdir",
"cv2.resize",
"cv2.addWeighted"
] | [((348, 373), 'cv2.resize', 'cv2.resize', (['emoji', '(w, h)'], {}), '(emoji, (w, h))\n', (358, 373), False, 'import cv2\n'), ((921, 967), 'cv2.cvtColor', 'cv2.cvtColor', (['overlay_mask', 'cv2.COLOR_GRAY2BGR'], {}), '(overlay_mask, cv2.COLOR_GRAY2BGR)\n', (933, 967), False, 'import cv2\n'), ((990, 1039), 'cv2.cvtColor', 'cv2.cvtColor', (['background_mask', 'cv2.COLOR_GRAY2BGR'], {}), '(background_mask, cv2.COLOR_GRAY2BGR)\n', (1002, 1039), False, 'import cv2\n'), ((1663, 1715), 'cv2.resize', 'cv2.resize', (['frame', 'dim'], {'interpolation': 'cv2.INTER_AREA'}), '(frame, dim, interpolation=cv2.INTER_AREA)\n', (1673, 1715), False, 'import cv2\n'), ((1428, 1487), 'cv2.addWeighted', 'cv2.addWeighted', (['face_part', '(255.0)', 'overlay_part', '(255.0)', '(0.0)'], {}), '(face_part, 255.0, overlay_part, 255.0, 0.0)\n', (1443, 1487), False, 'import cv2\n'), ((133, 158), 'os.listdir', 'os.listdir', (['emojis_folder'], {}), '(emojis_folder)\n', (143, 158), False, 'import os\n')] |
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import print_function
from caffe2.python import core, workspace
from ngraph.frontends.caffe2.c2_importer.importer import C2Importer
from ngraph.testing import ExecutorFactory
import numpy as np
import random as random
def run_all_close_compare_initiated_with_random_gauss(c2_op_name,
shape=None,
data=None,
expected=None):
workspace.ResetWorkspace()
if not shape:
shape = [2, 7]
if not data:
data = [random.gauss(mu=0, sigma=10) for i in range(np.prod(shape))]
net = core.Net("net")
net.GivenTensorFill([], "X", shape=shape, values=data, name="X")
getattr(net, c2_op_name)(["X"], ["Y"], name="Y")
# Execute via Caffe2
workspace.RunNetOnce(net)
# Import caffe2 network into ngraph
importer = C2Importer()
importer.parse_net_def(net.Proto(), verbose=False)
# Get handle
f_ng = importer.get_op_handle("Y")
# Execute
with ExecutorFactory() as ex:
f_result = ex.executor(f_ng)()
c2_y = workspace.FetchBlob("Y")
# compare Caffe2 and ngraph results
assert(np.allclose(f_result, c2_y, atol=1e-4, rtol=0, equal_nan=False))
# compare expected results and ngraph results
if expected:
assert(np.allclose(f_result, expected, atol=1e-3, rtol=0, equal_nan=False))
def test_relu():
run_all_close_compare_initiated_with_random_gauss('Relu',
shape=[10, 10])
def test_softmax():
shape = [2, 7]
data = [
1., 2., 3., 4., 1., 2., 3.,
1., 2., 3., 4., 1., 2., 3.
]
expected = [
[0.024, 0.064, 0.175, 0.475, 0.024, 0.064, 0.175],
[0.024, 0.064, 0.175, 0.475, 0.024, 0.064, 0.175],
]
run_all_close_compare_initiated_with_random_gauss('Softmax',
shape=shape,
data=data,
expected=expected)
def test_negative():
run_all_close_compare_initiated_with_random_gauss('Negative')
def test_sigmoid():
run_all_close_compare_initiated_with_random_gauss('Sigmoid')
def test_tanh():
run_all_close_compare_initiated_with_random_gauss('Tanh')
def test_exp():
workspace.ResetWorkspace()
shape = [2, 7]
data = [
1., 2., 3., 4., 1., 2., 3.,
1., 2., 3., 4., 1., 2., 3.
]
expected = [
[2.71828, 7.3890, 20.08553, 54.59815, 2.71828, 7.3890, 20.08553],
[2.71828, 7.3890, 20.08553, 54.59815, 2.71828, 7.3890, 20.08553],
]
run_all_close_compare_initiated_with_random_gauss('Exp',
shape=shape,
data=data,
expected=expected)
def test_NCHW2NHWC():
workspace.ResetWorkspace()
# NCHW
shape = [2, 3, 4, 5]
data1 = [float(i) for i in range(np.prod(shape))]
net = core.Net("net")
X = net.GivenTensorFill([], ["X"], shape=shape, values=data1, name="X")
X.NCHW2NHWC([], ["Y"], name="Y")
# Execute via Caffe2
workspace.RunNetOnce(net)
# Import caffe2 network into ngraph
importer = C2Importer()
importer.parse_net_def(net.Proto(), verbose=False)
# Get handle
f_ng = importer.get_op_handle("Y")
# Execute
with ExecutorFactory() as ex:
f_result = ex.executor(f_ng)()
# compare Caffe2 and ngraph results
assert(np.array_equal(f_result, workspace.FetchBlob("Y")))
def test_NHWC2NCHW():
workspace.ResetWorkspace()
# NHWC
shape = [2, 3, 4, 5]
data1 = [float(i) for i in range(np.prod(shape))]
net = core.Net("net")
X = net.GivenTensorFill([], ["X"], shape=shape, values=data1, name="X")
X.NCHW2NHWC([], ["Y"], name="Y")
# Execute via Caffe2
workspace.RunNetOnce(net)
# Import caffe2 network into ngraph
importer = C2Importer()
importer.parse_net_def(net.Proto(), verbose=False)
# Get handle
f_ng = importer.get_op_handle("Y")
# Execute
with ExecutorFactory() as ex:
f_result = ex.executor(f_ng)()
# compare Caffe2 and ngraph results
assert(np.array_equal(f_result, workspace.FetchBlob("Y")))
| [
"caffe2.python.workspace.FetchBlob",
"caffe2.python.core.Net",
"numpy.allclose",
"caffe2.python.workspace.RunNetOnce",
"numpy.prod",
"ngraph.frontends.caffe2.c2_importer.importer.C2Importer",
"caffe2.python.workspace.ResetWorkspace",
"random.gauss",
"ngraph.testing.ExecutorFactory"
] | [((1256, 1282), 'caffe2.python.workspace.ResetWorkspace', 'workspace.ResetWorkspace', ([], {}), '()\n', (1280, 1282), False, 'from caffe2.python import core, workspace\n'), ((1429, 1444), 'caffe2.python.core.Net', 'core.Net', (['"""net"""'], {}), "('net')\n", (1437, 1444), False, 'from caffe2.python import core, workspace\n'), ((1597, 1622), 'caffe2.python.workspace.RunNetOnce', 'workspace.RunNetOnce', (['net'], {}), '(net)\n', (1617, 1622), False, 'from caffe2.python import core, workspace\n'), ((1679, 1691), 'ngraph.frontends.caffe2.c2_importer.importer.C2Importer', 'C2Importer', ([], {}), '()\n', (1689, 1691), False, 'from ngraph.frontends.caffe2.c2_importer.importer import C2Importer\n'), ((3194, 3220), 'caffe2.python.workspace.ResetWorkspace', 'workspace.ResetWorkspace', ([], {}), '()\n', (3218, 3220), False, 'from caffe2.python import core, workspace\n'), ((3796, 3822), 'caffe2.python.workspace.ResetWorkspace', 'workspace.ResetWorkspace', ([], {}), '()\n', (3820, 3822), False, 'from caffe2.python import core, workspace\n'), ((3925, 3940), 'caffe2.python.core.Net', 'core.Net', (['"""net"""'], {}), "('net')\n", (3933, 3940), False, 'from caffe2.python import core, workspace\n'), ((4084, 4109), 'caffe2.python.workspace.RunNetOnce', 'workspace.RunNetOnce', (['net'], {}), '(net)\n', (4104, 4109), False, 'from caffe2.python import core, workspace\n'), ((4166, 4178), 'ngraph.frontends.caffe2.c2_importer.importer.C2Importer', 'C2Importer', ([], {}), '()\n', (4176, 4178), False, 'from ngraph.frontends.caffe2.c2_importer.importer import C2Importer\n'), ((4519, 4545), 'caffe2.python.workspace.ResetWorkspace', 'workspace.ResetWorkspace', ([], {}), '()\n', (4543, 4545), False, 'from caffe2.python import core, workspace\n'), ((4648, 4663), 'caffe2.python.core.Net', 'core.Net', (['"""net"""'], {}), "('net')\n", (4656, 4663), False, 'from caffe2.python import core, workspace\n'), ((4807, 4832), 'caffe2.python.workspace.RunNetOnce', 'workspace.RunNetOnce', (['net'], {}), '(net)\n', (4827, 4832), False, 'from caffe2.python import core, workspace\n'), ((4889, 4901), 'ngraph.frontends.caffe2.c2_importer.importer.C2Importer', 'C2Importer', ([], {}), '()\n', (4899, 4901), False, 'from ngraph.frontends.caffe2.c2_importer.importer import C2Importer\n'), ((1828, 1845), 'ngraph.testing.ExecutorFactory', 'ExecutorFactory', ([], {}), '()\n', (1843, 1845), False, 'from ngraph.testing import ExecutorFactory\n'), ((1908, 1932), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', (['"""Y"""'], {}), "('Y')\n", (1927, 1932), False, 'from caffe2.python import core, workspace\n'), ((1993, 2058), 'numpy.allclose', 'np.allclose', (['f_result', 'c2_y'], {'atol': '(0.0001)', 'rtol': '(0)', 'equal_nan': '(False)'}), '(f_result, c2_y, atol=0.0001, rtol=0, equal_nan=False)\n', (2004, 2058), True, 'import numpy as np\n'), ((4315, 4332), 'ngraph.testing.ExecutorFactory', 'ExecutorFactory', ([], {}), '()\n', (4330, 4332), False, 'from ngraph.testing import ExecutorFactory\n'), ((5038, 5055), 'ngraph.testing.ExecutorFactory', 'ExecutorFactory', ([], {}), '()\n', (5053, 5055), False, 'from ngraph.testing import ExecutorFactory\n'), ((1357, 1385), 'random.gauss', 'random.gauss', ([], {'mu': '(0)', 'sigma': '(10)'}), '(mu=0, sigma=10)\n', (1369, 1385), True, 'import random as random\n'), ((2153, 2221), 'numpy.allclose', 'np.allclose', (['f_result', 'expected'], {'atol': '(0.001)', 'rtol': '(0)', 'equal_nan': '(False)'}), '(f_result, expected, atol=0.001, rtol=0, equal_nan=False)\n', (2164, 2221), True, 'import numpy as np\n'), ((4464, 4488), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', (['"""Y"""'], {}), "('Y')\n", (4483, 4488), False, 'from caffe2.python import core, workspace\n'), ((5187, 5211), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', (['"""Y"""'], {}), "('Y')\n", (5206, 5211), False, 'from caffe2.python import core, workspace\n'), ((3897, 3911), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (3904, 3911), True, 'import numpy as np\n'), ((4620, 4634), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (4627, 4634), True, 'import numpy as np\n'), ((1401, 1415), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (1408, 1415), True, 'import numpy as np\n')] |
# This is a module with functions that can be used to calculate the Froude
# number in a simple 2D system
# <NAME>, 2015
import numpy as np
import datetime
from salishsea_tools.nowcast import analyze
def find_mixed_depth_indices(n2, n2_thres=5e-6):
"""Finds the index of the mixed layer depth for each x-position.
The mixed layer depth is chosen based on the lowest near-surface vertical
grid cell where n2 >= n2_thres
A resaonable value for n2_thres is 5e-6.
If n2_thres = 'None' then the index of the maximum n2 is returned.
n2 is the masked array of buoyancy frequencies with dimensions (depth, x)
returns a list of indices of mixed layer depth cell for each x-position
"""
if n2_thres == 'None':
dinds = np.argmax(n2, axis=0)
else:
dinds = []
for ii in np.arange(n2.shape[-1]):
inds = np.where(n2[:, ii] >= n2_thres)
# exlclude first vertical index less <=1 because the
# buoyancy frequency is hard to define there
if inds[0].size:
inds = filter(lambda x: x > 1, inds[0])
if inds:
dinds.append(min(inds))
else:
dinds.append(0) # if no mixed layer depth found, set to 0
else:
dinds.append(0) # if no mixed layer depth found, set it to 0
return dinds
def average_mixed_layer_depth(mixed_depths, xmin, xmax):
"""Averages the mixed layer depths over indices xmin and xmax
mixed_depths is a 1d array of mixed layer depths
returns the mean mixed layer depth in the defined region
"""
mean_md = np.mean(mixed_depths[xmin:xmax+1])
return mean_md
def mld_time_series(n2, deps, times, time_origin,
xmin=300, xmax=700, n2_thres=5e-6):
"""Calculates the mean mixed layer depth in a region defined by
xmin and xmax over time
n2 is the buoyancy frequency array with dimensions (time, depth, x)
deps is the model depth array
times is the model time_counter array
time_origin is the model's time_origin as a datetime
returns a list of mixed layer depths mlds and dates
"""
mlds = []
dates = []
for t in np.arange(n2.shape[0]):
dinds = find_mixed_depth_indices(n2[t, ...], n2_thres=n2_thres)
mld = average_mixed_layer_depth(deps[dinds], xmin, xmax,)
mlds.append(mld)
dates.append(time_origin + datetime.timedelta(seconds=times[t]))
return mlds, dates
def calculate_density(t, s):
"""Caluclates the density given temperature in deg C (t)
and salinity in psu (s).
returns the density as an array (rho)
"""
rho = (
999.842594 + 6.793952e-2 * t
- 9.095290e-3 * t*t + 1.001685e-4 * t*t*t
- 1.120083e-6 * t*t*t*t + 6.536332e-9 * t*t*t*t*t
+ 8.24493e-1 * s - 4.0899e-3 * t*s
+ 7.6438e-5 * t*t*s - 8.2467e-7 * t*t*t*s
+ 5.3875e-9 * t*t*t*t*s - 5.72466e-3 * s**1.5
+ 1.0227e-4 * t*s**1.5 - 1.6546e-6 * t*t*s**1.5
+ 4.8314e-4 * s*s
)
return rho
def calculate_internal_wave_speed(rho, deps, dinds):
"""Calculates the internal wave speed
c = sqrt(g*(rho2-rho1)/rho2*h1)
where g is acceleration due to gravity, rho2 is denisty of lower layer,
rho1 is density of upper layer and h1 is thickness of upper layer.
rho is the model density (shape is depth, x), deps is the array of depths
and dinds is a list of indices that define the mixed layer depth.
rho must be a masked array
returns c, an array of internal wave speeds at each x-index in rho
"""
# acceleration due to gravity (m/s^2)
g = 9.81
# calculate average density in upper and lower layers
rho_1 = np.zeros((rho.shape[-1]))
rho_2 = np.zeros((rho.shape[-1]))
for ind, d in enumerate(dinds):
rho_1[ind] = analyze.depth_average(rho[0:d+1, ind],
deps[0:d+1], depth_axis=0)
rho_2[ind] = analyze.depth_average(rho[d+1:, ind],
deps[d+1:], depth_axis=0)
# calculate mixed layer depth
h_1 = deps[dinds]
# calcuate wave speed
c = np.sqrt(g*(rho_2-rho_1)/rho_2*h_1)
return c
def depth_averaged_current(u, deps):
"""Calculates the depth averaged current
u is the array with current speeds (shape is depth, x).
u must be a masked array
deps is the array of depths
returns u_avg, the depths averaged current (shape x)
"""
u_avg = analyze.depth_average(u, deps, depth_axis=0)
return u_avg
def calculate_froude_number(n2, rho, u, deps, depsU, n2_thres=5e-6):
"""Calculates the Froude number
n2, rho, u are buoyancy frequency, density and current arrays
(shape depth, x)
deps is the depth array
depsU is the depth array at U poinnts
returns: Fr, c, u_avg - the Froude number, wave speed, and depth averaged
velocity for each x-index
"""
# calculate mixed layers
dinds = find_mixed_depth_indices(n2, n2_thres=n2_thres)
# calculate internal wave speed
c = calculate_internal_wave_speed(rho, deps, dinds)
# calculate depth averaged currents
u_avg = depth_averaged_current(u, depsU)
# Froude numer
Fr = np.abs(u_avg)/c
return Fr, c, u_avg
def froude_time_series(n2, rho, u, deps, depsU, times, time_origin,
xmin=300, xmax=700, n2_thres=5e-6):
"""Calculates the Froude number time series
n2, rho, u are buoyancy frequency, density and current arrays
(shape time, depth, x)
deps is the model depth array
depsU is the model deps array at U points
times is the model time_counter array
time_origin is the mode's time_origin as a datetime
xmin,xmax define the averaging area
returns: Frs, cs, u_avgs, dates
the Froude number, internal wave speed, and depth averaged current
for each time associated with dates
"""
Frs = []
cs = []
u_avgs = []
dates = []
for t in np.arange(n2.shape[0]):
Fr, c, u_avg = calculate_froude_number(n2[t, ...], rho[t, ...],
u[t, ...], deps, depsU,
n2_thres=n2_thres)
Frs.append(np.mean(Fr[xmin:xmax+1]))
cs.append(np.mean(c[xmin:xmax+1]))
u_avgs.append(np.mean(u_avg[xmin:xmax+1]))
dates.append(time_origin + datetime.timedelta(seconds=times[t]))
return Frs, cs, u_avgs, dates
def calculate_buoyancy_frequency(temp, sal, e3, depth_axis=1):
""" Calculate the squared buoyancy frequency (n2) given temperature and
salinity profiles. N2 is set to g*drho/dz/rho. Note that NEMO uses a defini tion based on an question of state: g* (alpha dk[T] + beta dk[S] ) / e3w
temp and sal are the temperature and salinity arrays
e3 is an array of the vertical scale factors (grid spacing). Use e3w for
constistency with NEMO.
depth_axis defines the axis which corresponds to depth in the temp/sal
arrays
returns n2, an array of square buoyancy frequency at each point in temp/sal.
"""
# acceleration due to gravity
g = 9.80665
# First calculate density.
rho = calculate_density(temp, sal)
# Density gradient
drho = np.zeros(rho.shape)
# roll depth axis in rho and drho to first axis
# assume e3 already has depth axis in first axis
drho_r = np.rollaxis(drho, depth_axis)
rho_r = np.rollaxis(rho, depth_axis)
for k in np.arange(1, drho.shape[depth_axis]-1):
drho_r[k, ...] = 1/e3[k, ...]*(rho_r[k+1, ...] - rho_r[k, ...])
# Unroll drho
drho = np.rollaxis(drho_r, 0, depth_axis+1)
rho = np.rollaxis(rho_r, 0, depth_axis+1)
# Define N2
n2 = g*drho/rho # no negative because depth increases with increasking k
return n2
| [
"numpy.abs",
"numpy.argmax",
"numpy.zeros",
"numpy.mean",
"numpy.arange",
"numpy.where",
"datetime.timedelta",
"numpy.rollaxis",
"salishsea_tools.nowcast.analyze.depth_average",
"numpy.sqrt"
] | [((1656, 1692), 'numpy.mean', 'np.mean', (['mixed_depths[xmin:xmax + 1]'], {}), '(mixed_depths[xmin:xmax + 1])\n', (1663, 1692), True, 'import numpy as np\n'), ((2228, 2250), 'numpy.arange', 'np.arange', (['n2.shape[0]'], {}), '(n2.shape[0])\n', (2237, 2250), True, 'import numpy as np\n'), ((3763, 3786), 'numpy.zeros', 'np.zeros', (['rho.shape[-1]'], {}), '(rho.shape[-1])\n', (3771, 3786), True, 'import numpy as np\n'), ((3801, 3824), 'numpy.zeros', 'np.zeros', (['rho.shape[-1]'], {}), '(rho.shape[-1])\n', (3809, 3824), True, 'import numpy as np\n'), ((4211, 4253), 'numpy.sqrt', 'np.sqrt', (['(g * (rho_2 - rho_1) / rho_2 * h_1)'], {}), '(g * (rho_2 - rho_1) / rho_2 * h_1)\n', (4218, 4253), True, 'import numpy as np\n'), ((4544, 4588), 'salishsea_tools.nowcast.analyze.depth_average', 'analyze.depth_average', (['u', 'deps'], {'depth_axis': '(0)'}), '(u, deps, depth_axis=0)\n', (4565, 4588), False, 'from salishsea_tools.nowcast import analyze\n'), ((6043, 6065), 'numpy.arange', 'np.arange', (['n2.shape[0]'], {}), '(n2.shape[0])\n', (6052, 6065), True, 'import numpy as np\n'), ((7319, 7338), 'numpy.zeros', 'np.zeros', (['rho.shape'], {}), '(rho.shape)\n', (7327, 7338), True, 'import numpy as np\n'), ((7457, 7486), 'numpy.rollaxis', 'np.rollaxis', (['drho', 'depth_axis'], {}), '(drho, depth_axis)\n', (7468, 7486), True, 'import numpy as np\n'), ((7499, 7527), 'numpy.rollaxis', 'np.rollaxis', (['rho', 'depth_axis'], {}), '(rho, depth_axis)\n', (7510, 7527), True, 'import numpy as np\n'), ((7541, 7581), 'numpy.arange', 'np.arange', (['(1)', '(drho.shape[depth_axis] - 1)'], {}), '(1, drho.shape[depth_axis] - 1)\n', (7550, 7581), True, 'import numpy as np\n'), ((7682, 7720), 'numpy.rollaxis', 'np.rollaxis', (['drho_r', '(0)', '(depth_axis + 1)'], {}), '(drho_r, 0, depth_axis + 1)\n', (7693, 7720), True, 'import numpy as np\n'), ((7729, 7766), 'numpy.rollaxis', 'np.rollaxis', (['rho_r', '(0)', '(depth_axis + 1)'], {}), '(rho_r, 0, depth_axis + 1)\n', (7740, 7766), True, 'import numpy as np\n'), ((758, 779), 'numpy.argmax', 'np.argmax', (['n2'], {'axis': '(0)'}), '(n2, axis=0)\n', (767, 779), True, 'import numpy as np\n'), ((827, 850), 'numpy.arange', 'np.arange', (['n2.shape[-1]'], {}), '(n2.shape[-1])\n', (836, 850), True, 'import numpy as np\n'), ((3884, 3953), 'salishsea_tools.nowcast.analyze.depth_average', 'analyze.depth_average', (['rho[0:d + 1, ind]', 'deps[0:d + 1]'], {'depth_axis': '(0)'}), '(rho[0:d + 1, ind], deps[0:d + 1], depth_axis=0)\n', (3905, 3953), False, 'from salishsea_tools.nowcast import analyze\n'), ((4014, 4081), 'salishsea_tools.nowcast.analyze.depth_average', 'analyze.depth_average', (['rho[d + 1:, ind]', 'deps[d + 1:]'], {'depth_axis': '(0)'}), '(rho[d + 1:, ind], deps[d + 1:], depth_axis=0)\n', (4035, 4081), False, 'from salishsea_tools.nowcast import analyze\n'), ((5285, 5298), 'numpy.abs', 'np.abs', (['u_avg'], {}), '(u_avg)\n', (5291, 5298), True, 'import numpy as np\n'), ((871, 902), 'numpy.where', 'np.where', (['(n2[:, ii] >= n2_thres)'], {}), '(n2[:, ii] >= n2_thres)\n', (879, 902), True, 'import numpy as np\n'), ((6295, 6321), 'numpy.mean', 'np.mean', (['Fr[xmin:xmax + 1]'], {}), '(Fr[xmin:xmax + 1])\n', (6302, 6321), True, 'import numpy as np\n'), ((6339, 6364), 'numpy.mean', 'np.mean', (['c[xmin:xmax + 1]'], {}), '(c[xmin:xmax + 1])\n', (6346, 6364), True, 'import numpy as np\n'), ((6386, 6415), 'numpy.mean', 'np.mean', (['u_avg[xmin:xmax + 1]'], {}), '(u_avg[xmin:xmax + 1])\n', (6393, 6415), True, 'import numpy as np\n'), ((2450, 2486), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'times[t]'}), '(seconds=times[t])\n', (2468, 2486), False, 'import datetime\n'), ((6450, 6486), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'times[t]'}), '(seconds=times[t])\n', (6468, 6486), False, 'import datetime\n')] |
__copyright__ = "Copyright (C) 2018 CVision AI."
__license__ = "GPLv3"
# This file is part of OpenEM, released under GPLv3.
# OpenEM is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenEM. If not, see <http://www.gnu.org/licenses/>.
"""SSD training utils.
"""
import tensorflow as tf
def _l1_smooth_loss(y_true, y_pred):
"""Compute L1-smooth loss.
# Arguments
y_true: Ground truth bounding boxes,
tensor of shape (?, num_boxes, 4).
y_pred: Predicted bounding boxes,
tensor of shape (?, num_boxes, 4).
# Returns
l1_loss: L1-smooth loss, tensor of shape (?, num_boxes).
# References
https://arxiv.org/abs/1504.08083
"""
abs_loss = tf.abs(y_true - y_pred)
sq_loss = 0.5 * (y_true - y_pred)**2
l1_loss = tf.where(tf.less(abs_loss, 1.0), sq_loss, abs_loss - 0.5)
return tf.reduce_sum(l1_loss, -1)
def _softmax_loss(y_true, y_pred):
"""Compute softmax loss.
# Arguments
y_true: Ground truth targets,
tensor of shape (?, num_boxes, num_classes).
y_pred: Predicted logits,
tensor of shape (?, num_boxes, num_classes).
# Returns
softmax_loss: Softmax loss, tensor of shape (?, num_boxes).
"""
y_pred = tf.maximum(tf.minimum(y_pred, 1 - 1e-15), 1e-15)
softmax_loss = -tf.reduce_sum(y_true * tf.log(y_pred),
axis=-1)
return softmax_loss
class MultiboxLoss:
"""Multibox loss with some helper functions.
# Arguments
num_classes: Number of classes including background.
alpha: Weight of L1-smooth loss.
neg_pos_ratio: Max ratio of negative to positive boxes in loss.
background_label_id: Id of background label.
negatives_for_hard: Number of negative boxes to consider
it there is no positive boxes in batch.
# References
https://arxiv.org/abs/1512.02325
"""
def __init__(self, num_classes, alpha=1.0, neg_pos_ratio=3.0,
background_label_id=0, negatives_for_hard=100.0,
pos_cost_multiplier=1.0):
self.pos_cost_multiplier = pos_cost_multiplier
self.num_classes = num_classes
self.alpha = alpha
self.neg_pos_ratio = neg_pos_ratio
if background_label_id != 0:
raise Exception('Only 0 as background label id is supported')
self.background_label_id = background_label_id
self.negatives_for_hard = negatives_for_hard
def compute_loss(self, y_true, y_pred):
"""Compute mutlibox loss.
# Arguments
y_true: Ground truth targets,
tensor of shape (?, num_boxes, 4 + num_classes + 8),
priors in ground truth are fictitious,
y_true[:, :, -8] has 1 if prior should be penalized
or in other words is assigned to some ground truth box,
y_true[:, :, -7:] are all 0.
y_pred: Predicted logits,
tensor of shape (?, num_boxes, 4 + num_classes + 8).
# Returns
loss: Loss for prediction, tensor of shape (?,).
"""
batch_size = tf.shape(y_true)[0]
num_boxes = tf.to_float(tf.shape(y_true)[1])
# loss for all priors
conf_loss = _softmax_loss(y_true[:, :, 4:-8],
y_pred[:, :, 4:-8])
loc_loss = _l1_smooth_loss(y_true[:, :, :4],
y_pred[:, :, :4])
# get positives loss
num_pos = tf.reduce_sum(y_true[:, :, -8], axis=-1)
pos_loc_loss = tf.reduce_sum(loc_loss * y_true[:, :, -8],
axis=1)
pos_conf_loss = tf.reduce_sum(conf_loss * y_true[:, :, -8],
axis=1)
# get negatives loss, we penalize only confidence here
num_neg = tf.minimum(self.neg_pos_ratio * num_pos,
num_boxes - num_pos)
pos_num_neg_mask = tf.greater(num_neg, 0)
has_min = tf.to_float(tf.reduce_any(pos_num_neg_mask))
num_neg = tf.concat(
axis=0,
values=[num_neg, [(1 - has_min) * self.negatives_for_hard]])
num_neg_batch = tf.reduce_min(tf.boolean_mask(num_neg,
tf.greater(num_neg, 0)))
num_neg_batch = tf.to_int32(num_neg_batch)
confs_start = 4 + self.background_label_id + 1
confs_end = confs_start + self.num_classes - 1
max_confs = tf.reduce_max(y_pred[:, :, confs_start:confs_end],
axis=2)
_, indices = tf.nn.top_k(max_confs * (1 - y_true[:, :, -8]),
k=num_neg_batch)
batch_idx = tf.expand_dims(tf.range(0, batch_size), 1)
batch_idx = tf.tile(batch_idx, (1, num_neg_batch))
full_indices = (tf.reshape(batch_idx, [-1]) * tf.to_int32(num_boxes) +
tf.reshape(indices, [-1]))
# full_indices = tf.concat(2, [tf.expand_dims(batch_idx, 2),
# tf.expand_dims(indices, 2)])
# neg_conf_loss = tf.gather_nd(conf_loss, full_indices)
neg_conf_loss = tf.gather(tf.reshape(conf_loss, [-1]),
full_indices)
neg_conf_loss = tf.reshape(neg_conf_loss,
[batch_size, num_neg_batch])
neg_conf_loss = tf.reduce_sum(neg_conf_loss, axis=1)
# loss is sum of positives and negatives
total_loss = pos_conf_loss * self.pos_cost_multiplier + neg_conf_loss
total_loss /= (num_pos + tf.to_float(num_neg_batch))
num_pos = tf.where(tf.not_equal(num_pos, 0), num_pos,
tf.ones_like(num_pos))
total_loss += (self.alpha * pos_loc_loss) / num_pos
return total_loss
| [
"tensorflow.range",
"tensorflow.abs",
"tensorflow.reduce_sum",
"tensorflow.not_equal",
"tensorflow.less",
"tensorflow.nn.top_k",
"tensorflow.reduce_max",
"tensorflow.reshape",
"tensorflow.concat",
"tensorflow.minimum",
"tensorflow.ones_like",
"tensorflow.to_int32",
"tensorflow.tile",
"tensorflow.shape",
"tensorflow.to_float",
"tensorflow.log",
"tensorflow.greater",
"tensorflow.reduce_any"
] | [((1212, 1235), 'tensorflow.abs', 'tf.abs', (['(y_true - y_pred)'], {}), '(y_true - y_pred)\n', (1218, 1235), True, 'import tensorflow as tf\n'), ((1360, 1386), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['l1_loss', '(-1)'], {}), '(l1_loss, -1)\n', (1373, 1386), True, 'import tensorflow as tf\n'), ((1300, 1322), 'tensorflow.less', 'tf.less', (['abs_loss', '(1.0)'], {}), '(abs_loss, 1.0)\n', (1307, 1322), True, 'import tensorflow as tf\n'), ((1770, 1799), 'tensorflow.minimum', 'tf.minimum', (['y_pred', '(1 - 1e-15)'], {}), '(y_pred, 1 - 1e-15)\n', (1780, 1799), True, 'import tensorflow as tf\n'), ((4031, 4071), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['y_true[:, :, -8]'], {'axis': '(-1)'}), '(y_true[:, :, -8], axis=-1)\n', (4044, 4071), True, 'import tensorflow as tf\n'), ((4095, 4145), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(loc_loss * y_true[:, :, -8])'], {'axis': '(1)'}), '(loc_loss * y_true[:, :, -8], axis=1)\n', (4108, 4145), True, 'import tensorflow as tf\n'), ((4207, 4258), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(conf_loss * y_true[:, :, -8])'], {'axis': '(1)'}), '(conf_loss * y_true[:, :, -8], axis=1)\n', (4220, 4258), True, 'import tensorflow as tf\n'), ((4379, 4440), 'tensorflow.minimum', 'tf.minimum', (['(self.neg_pos_ratio * num_pos)', '(num_boxes - num_pos)'], {}), '(self.neg_pos_ratio * num_pos, num_boxes - num_pos)\n', (4389, 4440), True, 'import tensorflow as tf\n'), ((4497, 4519), 'tensorflow.greater', 'tf.greater', (['num_neg', '(0)'], {}), '(num_neg, 0)\n', (4507, 4519), True, 'import tensorflow as tf\n'), ((4601, 4679), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(0)', 'values': '[num_neg, [(1 - has_min) * self.negatives_for_hard]]'}), '(axis=0, values=[num_neg, [(1 - has_min) * self.negatives_for_hard]])\n', (4610, 4679), True, 'import tensorflow as tf\n'), ((4871, 4897), 'tensorflow.to_int32', 'tf.to_int32', (['num_neg_batch'], {}), '(num_neg_batch)\n', (4882, 4897), True, 'import tensorflow as tf\n'), ((5028, 5086), 'tensorflow.reduce_max', 'tf.reduce_max', (['y_pred[:, :, confs_start:confs_end]'], {'axis': '(2)'}), '(y_pred[:, :, confs_start:confs_end], axis=2)\n', (5041, 5086), True, 'import tensorflow as tf\n'), ((5142, 5206), 'tensorflow.nn.top_k', 'tf.nn.top_k', (['(max_confs * (1 - y_true[:, :, -8]))'], {'k': 'num_neg_batch'}), '(max_confs * (1 - y_true[:, :, -8]), k=num_neg_batch)\n', (5153, 5206), True, 'import tensorflow as tf\n'), ((5323, 5361), 'tensorflow.tile', 'tf.tile', (['batch_idx', '(1, num_neg_batch)'], {}), '(batch_idx, (1, num_neg_batch))\n', (5330, 5361), True, 'import tensorflow as tf\n'), ((5828, 5882), 'tensorflow.reshape', 'tf.reshape', (['neg_conf_loss', '[batch_size, num_neg_batch]'], {}), '(neg_conf_loss, [batch_size, num_neg_batch])\n', (5838, 5882), True, 'import tensorflow as tf\n'), ((5942, 5978), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['neg_conf_loss'], {'axis': '(1)'}), '(neg_conf_loss, axis=1)\n', (5955, 5978), True, 'import tensorflow as tf\n'), ((3665, 3681), 'tensorflow.shape', 'tf.shape', (['y_true'], {}), '(y_true)\n', (3673, 3681), True, 'import tensorflow as tf\n'), ((4550, 4581), 'tensorflow.reduce_any', 'tf.reduce_any', (['pos_num_neg_mask'], {}), '(pos_num_neg_mask)\n', (4563, 4581), True, 'import tensorflow as tf\n'), ((5275, 5298), 'tensorflow.range', 'tf.range', (['(0)', 'batch_size'], {}), '(0, batch_size)\n', (5283, 5298), True, 'import tensorflow as tf\n'), ((5465, 5490), 'tensorflow.reshape', 'tf.reshape', (['indices', '[-1]'], {}), '(indices, [-1])\n', (5475, 5490), True, 'import tensorflow as tf\n'), ((5727, 5754), 'tensorflow.reshape', 'tf.reshape', (['conf_loss', '[-1]'], {}), '(conf_loss, [-1])\n', (5737, 5754), True, 'import tensorflow as tf\n'), ((6140, 6166), 'tensorflow.to_float', 'tf.to_float', (['num_neg_batch'], {}), '(num_neg_batch)\n', (6151, 6166), True, 'import tensorflow as tf\n'), ((6195, 6219), 'tensorflow.not_equal', 'tf.not_equal', (['num_pos', '(0)'], {}), '(num_pos, 0)\n', (6207, 6219), True, 'import tensorflow as tf\n'), ((6257, 6278), 'tensorflow.ones_like', 'tf.ones_like', (['num_pos'], {}), '(num_pos)\n', (6269, 6278), True, 'import tensorflow as tf\n'), ((1851, 1865), 'tensorflow.log', 'tf.log', (['y_pred'], {}), '(y_pred)\n', (1857, 1865), True, 'import tensorflow as tf\n'), ((3717, 3733), 'tensorflow.shape', 'tf.shape', (['y_true'], {}), '(y_true)\n', (3725, 3733), True, 'import tensorflow as tf\n'), ((4822, 4844), 'tensorflow.greater', 'tf.greater', (['num_neg', '(0)'], {}), '(num_neg, 0)\n', (4832, 4844), True, 'import tensorflow as tf\n'), ((5386, 5413), 'tensorflow.reshape', 'tf.reshape', (['batch_idx', '[-1]'], {}), '(batch_idx, [-1])\n', (5396, 5413), True, 'import tensorflow as tf\n'), ((5416, 5438), 'tensorflow.to_int32', 'tf.to_int32', (['num_boxes'], {}), '(num_boxes)\n', (5427, 5438), True, 'import tensorflow as tf\n')] |
import os
from setuptools import find_packages
if __name__ == '__main__':
for package in find_packages():
if '.test' in package:
continue
os.system(f'cmd /c "python -m pytest -s {package}/test"')
| [
"os.system",
"setuptools.find_packages"
] | [((95, 110), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (108, 110), False, 'from setuptools import find_packages\n'), ((172, 229), 'os.system', 'os.system', (['f"""cmd /c "python -m pytest -s {package}/test\\""""'], {}), '(f\'cmd /c "python -m pytest -s {package}/test"\')\n', (181, 229), False, 'import os\n')] |
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
"""Observation Log plugin.
**Plugin Type: Global**
``ObsLog`` is a global plugin. Only one instance can be opened.
**Usage**
***Saving the log to a file***
Put in values for the Observation Log folder and filename. The format
of the file saved will depend on the file extension of the filename;
use the type selector combobox to pick the right extension:
* csv:
* xlsx: MS Excel file format
The file is rewritten out every time a new entry is added to the log
***Adding a memo to one or more log entries***
Write a memo in the memo box. Select one or more frames to add the memo
to and press the "Add Memo" button. Multiple selection follows the usual
rules about holding down CTRL and/or SHIFT keys.
***Displaying an image***
Double-click on a log entry.
"""
import os
from collections import OrderedDict
from ginga import GingaPlugin, AstroImage
from ginga.gw import Widgets
__all__ = ['ObsLog']
class ObsLog(GingaPlugin.GlobalPlugin):
def __init__(self, fv):
super(ObsLog, self).__init__(fv)
self.chname = None
self.file_prefixes = []
# columns to be shown in the table
columns = [("Obs Mod", 'OBS-MOD'),
("Datatype", 'DATA-TYP'),
("FrameID", 'FRAMEID'),
("Object", 'OBJECT'),
("UT", 'UT'),
("PropId", 'PROP-ID'),
("Exp Time", 'EXPTIME'),
("Air Mass", 'AIRMASS'),
#("Pos Ang", 'INST-PA'),
#("Ins Rot", 'INSROT'),
#("Foc Val", 'FOC-VAL'),
#("Filter01", 'FILTER01'),
#("Filter02", 'FILTER02'),
#("Filter03", 'FILTER03'),
("RA", 'RA'),
("DEC", 'DEC'),
("EQUINOX", 'EQUINOX'),
("Memo", 'G_MEMO'),
]
prefs = self.fv.get_preferences()
self.settings = prefs.create_category('plugin_ObsLog')
self.settings.add_defaults(sortable=True,
color_alternate_rows=True,
#max_rows_for_col_resize=5000,
report_columns=columns,
cache_normalized_images=True)
self.settings.load(onError='silent')
self.rpt_dict = OrderedDict({})
self.rpt_columns = []
self.fv.add_callback('add-image', self.incoming_data_cb)
self.gui_up = False
def build_gui(self, container):
vbox = Widgets.VBox()
vbox.set_border_width(1)
vbox.set_spacing(1)
tv = Widgets.TreeView(sortable=self.settings.get('sortable'),
use_alt_row_color=self.settings.get('color_alternate_rows'),
selection='multiple')
self.w.rpt_tbl = tv
vbox.add_widget(tv, stretch=1)
tv.add_callback('activated', self.dblclick_cb)
tv.add_callback('selected', self.select_cb)
self.rpt_columns = self.settings.get('report_columns')
tv.setup_table(self.rpt_columns, 1, 'FRAMEID')
captions = (("Memo:", 'label', "memo", 'entry', 'Add Memo', 'button'),
)
w, b = Widgets.build_info(captions, orientation='vertical')
self.w.update(b)
vbox.add_widget(w, stretch=0)
b.memo.set_tooltip('Set memo for selected frames')
b.add_memo.add_callback('activated', self.add_memo_cb)
b.add_memo.set_enabled(False)
captions = (("Folder:", 'label', "obslog_dir", 'entry',
"Name:", 'label', "obslog_name", 'entryset',
"Type", 'combobox', "Load", 'button'),
)
w, b = Widgets.build_info(captions, orientation='vertical')
self.w.update(b)
vbox.add_widget(w, stretch=0)
obs_log = self.settings.get('obslog_name', None)
if obs_log is None:
obs_log = ''
b.obslog_name.set_text(obs_log)
b.obslog_name.set_tooltip('File name for observation log')
b.obslog_name.add_callback('activated', self.write_obslog_cb)
b.obslog_dir.set_text("/tmp")
b.obslog_dir.set_tooltip('Folder path for observation log')
b.obslog_dir.add_callback('activated', self.write_obslog_cb)
b.type.insert_alpha("csv")
b.type.insert_alpha("xlsx")
b.type.set_tooltip("Format for saving/loading ObsLog")
b.type.add_callback('activated', self.set_obslog_format_cb)
b.load.set_tooltip("Load a saved ObsLog")
b.load.add_callback('activated', self.load_obslog_cb)
btns = Widgets.HBox()
btns.set_border_width(4)
btns.set_spacing(4)
btn = Widgets.Button("Close")
btn.add_callback('activated', lambda w: self.close())
btn.set_enabled(False)
btns.add_widget(btn)
btn = Widgets.Button("Help")
btn.add_callback('activated', lambda w: self.help())
btns.add_widget(btn, stretch=0)
btns.add_widget(Widgets.Label(''), stretch=1)
vbox.add_widget(btns, stretch=0)
container.add_widget(vbox, stretch=1)
self.gui_up = True
def replace_kwds(self, header):
"""Subclass this method to do munge the data for special reports."""
d = dict()
d.update(header)
return d
def add_to_obslog(self, header, image):
frameid = header['FRAMEID']
# replace some kwds as needed in the table
d = self.replace_kwds(header.asdict())
# Hack to insure that we get the columns in the desired order
d = OrderedDict([(kwd, d.get(kwd, ''))
for col, kwd in self.rpt_columns])
self.rpt_dict[frameid] = d
self.update_obslog()
def stop(self):
self.gui_up = False
def process_image(self, chname, header, image):
"""Override this method to do something special with the data."""
pass
def incoming_data_cb(self, fv, chname, image, info):
if chname != self.chname:
return
imname = image.get('name', None)
if imname is None:
return
# only accepted list of frames
accepted = False
for prefix in self.file_prefixes:
if imname.startswith(prefix):
accepted = True
break
if not accepted:
return
header = image.get_header()
# add image to obslog
self.fv.gui_do(self.add_to_obslog, header, image)
try:
self.process_image(chname, header, image)
except Exception as e:
self.logger.error("Failed to process image: {}".format(e),
exc_info=True)
def update_obslog(self):
if not self.gui_up:
return
self.w.rpt_tbl.set_tree(self.rpt_dict)
obslog_name = self.w.obslog_name.get_text().strip()
if len(obslog_name) > 0:
obslog_path = os.path.join(self.w.obslog_dir.get_text().strip(),
obslog_name)
self.write_obslog(obslog_path)
def write_obslog(self, filepath):
if len(self.rpt_dict) == 0:
return
try:
import pandas as pd
except ImportError:
self.fv.show_error("Please install 'pandas' and "
"'openpyxl' to use this feature")
return
try:
self.logger.info("writing obslog: {}".format(filepath))
col_hdr = [colname for colname, key in self.rpt_columns]
rows = [list(d.values()) for d in self.rpt_dict.values()]
df = pd.DataFrame(rows, columns=col_hdr)
if filepath.endswith('.csv'):
df.to_csv(filepath, index=False, header=True)
else:
df.to_excel(filepath, index=False, header=True)
except Exception as e:
self.logger.error("Error writing obslog: {}".format(e),
exc_info=True)
def load_obslog(self, filepath):
try:
import pandas as pd
except ImportError:
self.fv.show_error("Please install 'pandas' and "
"'openpyxl' to use this feature")
return
try:
self.logger.info("loading obslog: {}".format(filepath))
col_hdr = [colname for colname, key in self.rpt_columns]
if filepath.endswith('.csv'):
df = pd.read_csv(filepath, header=0, #names=col_hdr,
index_col=None)
else:
df = pd.read_excel(filepath, header=0, #names=col_hdr,
index_col=None)
self.rpt_dict = OrderedDict({})
res = df.to_dict('index')
for row in res.values():
frameid = row['FrameID']
d = OrderedDict([(kwd, row.get(col, ''))
for col, kwd in self.rpt_columns])
self.rpt_dict[frameid] = d
self.w.rpt_tbl.set_tree(self.rpt_dict)
except Exception as e:
self.logger.error("Error loading obslog: {}".format(e),
exc_info=True)
def write_obslog_cb(self, w):
obslog_path = os.path.join(self.w.obslog_dir.get_text().strip(),
self.w.obslog_name.get_text().strip())
self.write_obslog(obslog_path)
def load_obslog_cb(self, w):
obslog_path = os.path.join(self.w.obslog_dir.get_text().strip(),
self.w.obslog_name.get_text().strip())
self.load_obslog(obslog_path)
def get_selected(self):
res_dict = self.w.rpt_tbl.get_selected()
return res_dict
def dblclick_cb(self, widget, d):
"""Switch to the image that was double-clicked in the obslog"""
frameid = list(d.keys())[0]
info = d[frameid]
self.view_image(frameid, info)
def view_image(self, imname, info):
chname = self.chname
channel = self.fv.get_current_channel()
if channel.name != chname:
channel = self.fv.get_channel(chname)
self.fv.change_channel(chname)
channel.switch_name(imname)
def select_cb(self, widget, d):
res = self.get_selected()
if len(res) == 0:
self.w.add_memo.set_enabled(False)
else:
self.w.add_memo.set_enabled(True)
def add_memo_cb(self, widget):
memo_txt = self.w.memo.get_text().strip()
res = self.get_selected()
if len(res) == 0:
self.fv.show_error("No frames selected for memo!")
return
for key in res.keys():
self.rpt_dict[key]['G_MEMO'] = memo_txt
self.w.rpt_tbl.set_tree(self.rpt_dict)
def set_obslog_format_cb(self, w, idx):
ext = w.get_text()
obslog_name = self.w.obslog_name.get_text().strip()
name, old_ext = os.path.splitext(obslog_name)
self.w.obslog_name.set_text(name + '.' + ext)
self.write_obslog_cb(None)
def close(self):
self.fv.stop_global_plugin(str(self))
return True
def __str__(self):
return 'obslog'
| [
"pandas.DataFrame",
"pandas.read_csv",
"ginga.gw.Widgets.HBox",
"ginga.gw.Widgets.build_info",
"ginga.gw.Widgets.Button",
"ginga.gw.Widgets.Label",
"ginga.gw.Widgets.VBox",
"pandas.read_excel",
"os.path.splitext",
"collections.OrderedDict"
] | [((2491, 2506), 'collections.OrderedDict', 'OrderedDict', (['{}'], {}), '({})\n', (2502, 2506), False, 'from collections import OrderedDict\n'), ((2683, 2697), 'ginga.gw.Widgets.VBox', 'Widgets.VBox', ([], {}), '()\n', (2695, 2697), False, 'from ginga.gw import Widgets\n'), ((3384, 3436), 'ginga.gw.Widgets.build_info', 'Widgets.build_info', (['captions'], {'orientation': '"""vertical"""'}), "(captions, orientation='vertical')\n", (3402, 3436), False, 'from ginga.gw import Widgets\n'), ((3889, 3941), 'ginga.gw.Widgets.build_info', 'Widgets.build_info', (['captions'], {'orientation': '"""vertical"""'}), "(captions, orientation='vertical')\n", (3907, 3941), False, 'from ginga.gw import Widgets\n'), ((4801, 4815), 'ginga.gw.Widgets.HBox', 'Widgets.HBox', ([], {}), '()\n', (4813, 4815), False, 'from ginga.gw import Widgets\n'), ((4892, 4915), 'ginga.gw.Widgets.Button', 'Widgets.Button', (['"""Close"""'], {}), "('Close')\n", (4906, 4915), False, 'from ginga.gw import Widgets\n'), ((5052, 5074), 'ginga.gw.Widgets.Button', 'Widgets.Button', (['"""Help"""'], {}), "('Help')\n", (5066, 5074), False, 'from ginga.gw import Widgets\n'), ((11231, 11260), 'os.path.splitext', 'os.path.splitext', (['obslog_name'], {}), '(obslog_name)\n', (11247, 11260), False, 'import os\n'), ((5200, 5217), 'ginga.gw.Widgets.Label', 'Widgets.Label', (['""""""'], {}), "('')\n", (5213, 5217), False, 'from ginga.gw import Widgets\n'), ((7864, 7899), 'pandas.DataFrame', 'pd.DataFrame', (['rows'], {'columns': 'col_hdr'}), '(rows, columns=col_hdr)\n', (7876, 7899), True, 'import pandas as pd\n'), ((8974, 8989), 'collections.OrderedDict', 'OrderedDict', (['{}'], {}), '({})\n', (8985, 8989), False, 'from collections import OrderedDict\n'), ((8705, 8752), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {'header': '(0)', 'index_col': 'None'}), '(filepath, header=0, index_col=None)\n', (8716, 8752), True, 'import pandas as pd\n'), ((8844, 8893), 'pandas.read_excel', 'pd.read_excel', (['filepath'], {'header': '(0)', 'index_col': 'None'}), '(filepath, header=0, index_col=None)\n', (8857, 8893), True, 'import pandas as pd\n')] |
import argparse
import torch
import sys
import os
import json
from collections import defaultdict
import h5py
from sentence_transformers import SentenceTransformer, util
import numpy
import tqdm
from itertools import zip_longest
from utils import grouper, load_sentences, load_bnids, load_visualsem_bnids
def retrieve_nodes_given_sentences(out_fname, batch_size, all_input_sentences, glosses_bnids, glosses_feats, topk):
"""
out_fname(str): Output file to write retrieved node ids to.
batch_size(int): Batch size for Sentence BERT.
all_input_sentences(list[str]): All input sentences loaded from `input_file`.
glosses_bnids(list[str]): All gloss BNids loaded from `args.glosses_bnids`. Aligned with `glosses_feats`.
glosses_feats(numpy.array): Numpy array with VisualSem gloss features computed with Sentence BERT.
topk(int): Number of nodes to retrieve for each input sentence.
"""
if os.path.isfile(out_fname):
raise Exception("File already exists: '%s'. Please remove it manually to avoid tampering."%out_fname)
n_examples = len(all_input_sentences)
print("Number of input examples to extract BNIDs for: ", n_examples)
model_name = "paraphrase-multilingual-mpnet-base-v2"
model = SentenceTransformer(model_name)
with open(out_fname, 'w', encoding='utf8') as fh_out:
ranks_predicted = []
for idxs_ in grouper(batch_size, range(n_examples)):
idxs = []
queries = []
for i in idxs_:
if not i is None:
idxs.append(i)
queries.append( all_input_sentences[i] )
queries_embs = model.encode(queries, convert_to_tensor=True)
if torch.cuda.is_available():
queries_embs = queries_embs.cuda()
scores = util.pytorch_cos_sim(queries_embs, glosses_feats)
scores = scores.cpu().numpy()
ranks = numpy.argsort(scores) # sort scores by cosine similarity (low to high)
ranks = ranks[:,::-1] # sort by cosine similarity (high to low)
for rank_idx in range(len(idxs[:ranks.shape[0]])):
bnids_predicted = []
for rank_predicted in range(topk*10):
bnid_pred = glosses_bnids[ ranks[rank_idx,rank_predicted] ]
bnid_pred_score = scores[rank_idx, ranks[rank_idx, rank_predicted]]
if not bnid_pred in bnids_predicted:
bnids_predicted.append((bnid_pred,bnid_pred_score))
if len(bnids_predicted)>=topk:
break
# write top-k predicted BNids
for iii, (bnid, score) in enumerate(bnids_predicted[:topk]):
fh_out.write(bnid+"\t"+"%.4f"%score)
if iii < topk-1:
fh_out.write("\t")
else: # iii == topk-1
fh_out.write("\n")
def encode_query(out_fname, batch_size, all_sentences):
"""
out_fname(str): Output file to write SBERT features for query.
batch_size(int): Batch size for Sentence BERT.
all_sentences(list[str]): Sentences to be used for retrieval.
"""
n_lines = len(all_sentences)
model_name = "paraphrase-multilingual-mpnet-base-v2"
model = SentenceTransformer(model_name)
shape_features = (n_lines, 768)
with h5py.File(out_fname, 'w') as fh_out:
fh_out.create_dataset("features", shape_features, dtype='float32', chunks=(1,768), maxshape=(None, 768), compression="gzip")
for from_idx in tqdm.trange(0,n_lines,batch_size):
to_idx = from_idx+batch_size if from_idx+batch_size <= n_lines else n_lines
batch_sentences = all_sentences[ from_idx: to_idx ]
emb_sentences = model.encode(batch_sentences, convert_to_tensor=True)
#test_queries(emb_sentences, all_sentences, model)
fh_out["features"][from_idx:to_idx] = emb_sentences.cpu().numpy()
if __name__=="__main__":
visualsem_path = os.path.dirname(os.path.realpath(__file__))
visualsem_nodes_path = "%s/dataset/nodes.v2.json"%visualsem_path
visualsem_images_path = "%s/dataset/images/"%visualsem_path
glosses_sentence_bert_path = "%s/dataset/gloss_files/glosses.en.txt.sentencebert.h5"%visualsem_path
glosses_bnids_path = "%s/dataset/gloss_files/glosses.en.txt.bnids"%visualsem_path
os.makedirs("%s/dataset/gloss_files/"%visualsem_path, exist_ok=True)
p = argparse.ArgumentParser()
p.add_argument('--input_files', type=str, nargs="+", default=["example_data/queries.txt"],
help="""Input file(s) to use for retrieval. Each line in each file should contain a detokenized sentence.""")
p.add_argument('--topk', type=int, default=1, help="Retrieve topk nodes for each input sentence.")
p.add_argument('--batch_size', type=int, default=1000)
p.add_argument('--visualsem_path', type=str, default=visualsem_path,
help="Path to directory containing VisualSem knowledge graph.")
p.add_argument('--visualsem_nodes_path', type=str, default=visualsem_nodes_path,
help="Path to file containing VisualSem nodes.")
p.add_argument('--visualsem_images_path', type=str, default=visualsem_images_path,
help="Path to directory containing VisualSem images.")
p.add_argument('--glosses_sentence_bert_path', type=str, default=glosses_sentence_bert_path,
help="""HDF5 file containing glosses index computed with Sentence BERT (computed with `extract_glosses_visualsem.py`).""")
p.add_argument('--glosses_bnids_path', type=str, default=glosses_bnids_path,
help="""Text file containing glosses BabelNet ids, one per line (computed with `extract_glosses_visualsem.py`).""")
p.add_argument('--input_valid', action='store_true',
help="""Perform retrieval for the glosses in the validation set. (See paper for reference)""")
p.add_argument('--input_test', action='store_true',
help="""Perform retrieval for the glosses in the test set. (See paper for reference)""")
args = p.parse_args()
# load all nodes in VisualSem
all_bnids = load_visualsem_bnids(args.visualsem_nodes_path, args.visualsem_images_path)
gloss_bnids = load_bnids( args.glosses_bnids_path )
gloss_bnids = numpy.array(gloss_bnids, dtype='object')
with h5py.File(args.glosses_sentence_bert_path, 'r') as fh_glosses:
glosses_feats = fh_glosses["features"][:]
glosses_feats = torch.tensor(glosses_feats)
if torch.cuda.is_available():
glosses_feats = glosses_feats.cuda()
# load train/valid/test gloss splits
glosses_splits = fh_glosses["split_idxs"][:]
train_idxs = (glosses_splits==0).nonzero()[0]
valid_idxs = (glosses_splits==1).nonzero()[0]
test_idxs = (glosses_splits==2).nonzero()[0]
# load gloss language splits
language_splits = fh_glosses["language_idxs"][:]
for input_file in args.input_files:
print("Processing input file: %s ..."%input_file)
sbert_out_fname = input_file+".sentencebert.h5"
if os.path.isfile( sbert_out_fname ):
raise Exception("File already exists: '%s'. Please remove it manually to avoid tampering."%sbert_out_fname)
input_sentences = load_sentences( input_file )
encode_query(sbert_out_fname, args.batch_size, input_sentences)
out_fname = input_file+".bnids"
retrieve_nodes_given_sentences(out_fname, args.batch_size, input_sentences, gloss_bnids, glosses_feats, args.topk)
# remove temporary SBERT index created for input file(s)
os.remove( sbert_out_fname )
print("Retrieved glosses: %s"%out_fname)
| [
"utils.load_sentences",
"h5py.File",
"os.remove",
"os.makedirs",
"argparse.ArgumentParser",
"tqdm.trange",
"os.path.realpath",
"numpy.argsort",
"sentence_transformers.util.pytorch_cos_sim",
"os.path.isfile",
"utils.load_bnids",
"numpy.array",
"torch.cuda.is_available",
"utils.load_visualsem_bnids",
"sentence_transformers.SentenceTransformer",
"torch.tensor"
] | [((1034, 1059), 'os.path.isfile', 'os.path.isfile', (['out_fname'], {}), '(out_fname)\n', (1048, 1059), False, 'import os\n'), ((1356, 1387), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['model_name'], {}), '(model_name)\n', (1375, 1387), False, 'from sentence_transformers import SentenceTransformer, util\n'), ((3493, 3524), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['model_name'], {}), '(model_name)\n', (3512, 3524), False, 'from sentence_transformers import SentenceTransformer, util\n'), ((4625, 4695), 'os.makedirs', 'os.makedirs', (["('%s/dataset/gloss_files/' % visualsem_path)"], {'exist_ok': '(True)'}), "('%s/dataset/gloss_files/' % visualsem_path, exist_ok=True)\n", (4636, 4695), False, 'import os\n'), ((4703, 4728), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4726, 4728), False, 'import argparse\n'), ((6396, 6471), 'utils.load_visualsem_bnids', 'load_visualsem_bnids', (['args.visualsem_nodes_path', 'args.visualsem_images_path'], {}), '(args.visualsem_nodes_path, args.visualsem_images_path)\n', (6416, 6471), False, 'from utils import grouper, load_sentences, load_bnids, load_visualsem_bnids\n'), ((6490, 6525), 'utils.load_bnids', 'load_bnids', (['args.glosses_bnids_path'], {}), '(args.glosses_bnids_path)\n', (6500, 6525), False, 'from utils import grouper, load_sentences, load_bnids, load_visualsem_bnids\n'), ((6546, 6586), 'numpy.array', 'numpy.array', (['gloss_bnids'], {'dtype': '"""object"""'}), "(gloss_bnids, dtype='object')\n", (6557, 6586), False, 'import numpy\n'), ((3570, 3595), 'h5py.File', 'h5py.File', (['out_fname', '"""w"""'], {}), "(out_fname, 'w')\n", (3579, 3595), False, 'import h5py\n'), ((3765, 3800), 'tqdm.trange', 'tqdm.trange', (['(0)', 'n_lines', 'batch_size'], {}), '(0, n_lines, batch_size)\n', (3776, 3800), False, 'import tqdm\n'), ((4251, 4277), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (4267, 4277), False, 'import os\n'), ((6597, 6644), 'h5py.File', 'h5py.File', (['args.glosses_sentence_bert_path', '"""r"""'], {}), "(args.glosses_sentence_bert_path, 'r')\n", (6606, 6644), False, 'import h5py\n'), ((6736, 6763), 'torch.tensor', 'torch.tensor', (['glosses_feats'], {}), '(glosses_feats)\n', (6748, 6763), False, 'import torch\n'), ((6775, 6800), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6798, 6800), False, 'import torch\n'), ((1831, 1856), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1854, 1856), False, 'import torch\n'), ((1930, 1979), 'sentence_transformers.util.pytorch_cos_sim', 'util.pytorch_cos_sim', (['queries_embs', 'glosses_feats'], {}), '(queries_embs, glosses_feats)\n', (1950, 1979), False, 'from sentence_transformers import SentenceTransformer, util\n'), ((2043, 2064), 'numpy.argsort', 'numpy.argsort', (['scores'], {}), '(scores)\n', (2056, 2064), False, 'import numpy\n'), ((7389, 7420), 'os.path.isfile', 'os.path.isfile', (['sbert_out_fname'], {}), '(sbert_out_fname)\n', (7403, 7420), False, 'import os\n'), ((7579, 7605), 'utils.load_sentences', 'load_sentences', (['input_file'], {}), '(input_file)\n', (7593, 7605), False, 'from utils import grouper, load_sentences, load_bnids, load_visualsem_bnids\n'), ((7936, 7962), 'os.remove', 'os.remove', (['sbert_out_fname'], {}), '(sbert_out_fname)\n', (7945, 7962), False, 'import os\n')] |
import hcat.lib.functional
import hcat.lib.functional as functional
from hcat.lib.utils import calculate_indexes, load, cochlea_to_xml, correct_pixel_size, scale_to_hair_cell_diameter
from hcat.lib.cell import Cell
from hcat.lib.cochlea import Cochlea
from hcat.backends.detection import FasterRCNN_from_url
from hcat.backends.detection import HairCellFasterRCNN
from hcat.lib.utils import warn
import torch
from torch import Tensor
from tqdm import tqdm
from itertools import product
import numpy as np
from hcat.lib.explore_lif import get_xml
import torchvision.ops
import skimage.io as io
import os.path
from typing import Optional, List, Dict
# DOCUMENTED
def _detect(f: str, curve_path: str = None, cell_detection_threshold: float = 0.86, dtype=None,
nms_threshold: float = 0.2, save_xml=False, save_fig=False, pixel_size=None, cell_diameter=None):
"""
2D hair cell detection algorithm.
Loads arbitrarily large 2d image and performs iterative faster rcnn detection on the entire image.
:param *str* f: path to image by which to analyze
:param *float* cell_detection_threshold: cells below threshold are rejected
:param *float* nms_threshold: iou rejection threshold for nms.
:return: *Cochlea* object containing data of analysis.
"""
print('Initializing hair cell detection algorithm...')
if f is None:
warn('ERROR: No File to Analyze... \nAborting.', color='red')
return None
if not pixel_size:
warn('WARNING: Pixel Size is not set. Defaults to 288.88 nm x/y. '
'Consider suplying value for optimal performance.', color='yellow')
with torch.no_grad():
# Load and preprocess Image
image_base = load(f, 'TileScan 1 Merged', verbose=True) # from hcat.lib.utils
image_base = image_base[[2, 3],...].max(-1) if image_base.ndim == 4 else image_base
shape = list(image_base.shape)
shape[0] = 1
dtype = image_base.dtype if dtype is None else dtype
scale: int = hcat.lib.utils.get_dtype_offset(dtype)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
temp = np.zeros(shape)
temp = np.concatenate((temp, image_base)) / scale * 255
c, x, y = image_base.shape
print(
f'DONE: shape: {image_base.shape}, min: {image_base.min()}, max: {image_base.max()}, dtype: {image_base.dtype}')
if image_base.max() < scale * 0.33:
warn(f'WARNING: Image max value less than 1/3 the scale factor for bit depth. Image Max: {image_base.max()},'
f' Scale Factor: {scale}, dtype: {dtype}. Readjusting scale to 1.5 time Image max.', color='yellow')
scale = image_base.max() * 1.5
image_base = torch.from_numpy(image_base.astype(np.uint16) / scale).to(device)
if pixel_size is not None:
image_base: Tensor = correct_pixel_size(image_base, pixel_size) #model expects pixel size of 288.88
print(f'Rescaled Image to match pixel size of 288.88nm with a new shape of: {image_base.shape}')
elif cell_diameter is not None:
image_base: Tensor = scale_to_hair_cell_diameter(image_base, cell_diameter)
print(f'Rescaled Image to match pixel size of 288.88nm with a new shape of: {image_base.shape}')
# normalize around zero
image_base.sub_(0.5).div_(0.5)
if device == 'cuda':
warn('CUDA: GPU successfully initialized!', color='green')
else:
warn('WARNING: GPU not present or CUDA is not correctly intialized for GPU accelerated computation. '
'Analysis may be slow.', color='yellow')
# Initalize the model...
model = FasterRCNN_from_url(url='https://github.com/buswinka/hcat/blob/master/modelfiles/detection.trch?raw=true', device=device)
model.eval()
# Initalize curvature detection
predict_curvature = hcat.lib.functional.PredictCurvature(erode=3)
# Get the indicies for evaluating cropped regions
c, x, y = image_base.shape
image_base = torch.cat((torch.zeros((1, x, y), device=device), image_base), dim=0)
x_ind: List[List[int]] = calculate_indexes(10, 235, x, x) # [[0, 255], [30, 285], ...]
y_ind: List[List[int]] = calculate_indexes(10, 235, y, y) # [[0, 255], [30, 285], ...]
total: int = len(x_ind) * len(y_ind)
# Initalize other small things
cell_id = 1
cells = []
add_cell = cells.append # stupid but done for speed
for x, y in tqdm(product(x_ind, y_ind), total=total, desc='Detecting: '):
# Load and prepare image crop for ML model evaluation
image: Tensor = image_base[:, x[0]:x[1], y[0]:y[1]].unsqueeze(0)
# If the image has nothing in it we can skip for speed
if image.max() == -1:
continue
# Evaluate Deep Learning Model
out: Dict[str, Tensor] = model(image.float())[0]
scores: Tensor = out['scores'].cpu()
boxes: Tensor = out['boxes'].cpu()
labels: Tensor = out['labels'].cpu()
# The model output coords with respect to the crop of image_base. We have to adjust
# idk why the y and x are flipped. Breaks otherwise.
boxes[:, [0, 2]] += y[0]
boxes[:, [1, 3]] += x[0]
# center x, center y, width, height
centers: Tensor = torchvision.ops.box_convert(boxes, 'xyxy', 'cxcywh').cpu()
cx = centers[:, 0]
cy = centers[:, 1]
for i, score in enumerate(scores):
if score > cell_detection_threshold:
add_cell(Cell(id=cell_id,
loc=torch.tensor([0, cx[i], cy[i], 0]),
image=None,
mask=None,
cell_type='OHC' if labels[i] == 1 else 'IHC',
boxes=boxes[i, :],
scores=scores[i]))
cell_id += 1
# some cells may overlap. We remove cells after analysis is complete.
cells: List[Cell] = _cell_nms(cells, nms_threshold)
ohc = sum([int(c.type == 'OHC') for c in cells]) # number of ohc
ihc = sum([int(c.type == 'IHC') for c in cells]) # number of ihc
print(f'Total Cells: {len(cells)}\n OHC: {ohc}\n IHC: {ihc}' )
max_projection: Tensor = image_base[[1], ...].mul(0.5).add(0.5).unsqueeze(-1).cpu()
curvature, distance, apex = predict_curvature(max_projection, cells, curve_path)
if curvature is None:
warn('WARNING: All three methods to predict hair cell path have failed. Frequency Mapping functionality is '
'limited. Consider Manual Calculation.', color='yellow')
# curvature estimation really only works if there is a lot of tissue...
if distance is not None and distance.max() > 4000:
for c in cells: c.calculate_frequency(curvature[[0, 1], :], distance) # calculate cell's best frequency
cells = [c for c in cells if not c._distance_is_far_away] # remove a cell if its far away from curve
else:
curvature, distance, apex = None, None, None
warn('WARNING: Predicted Cochlear Distance is below 4000um. Not sufficient '
'information to determine cell frequency.', color='yellow')
xml = get_xml(f) if f.endswith('.lif') else None
filename = os.path.split(f)[-1]
# remove weird cell ID's
for i, c in enumerate(cells): c.id = i+1
# Store in compressible object for further use
c = Cochlea(mask=None,
filename=filename,
path=f,
analysis_type='detect',
leica_metadata=xml,
im_shape=image_base.shape,
cochlear_distance=distance,
curvature=curvature,
cells=cells,
apex=apex)
c.write_csv()
if save_xml: cochlea_to_xml(c)
if save_fig: c.make_detect_fig(image_base)
print('')
return c
def _cell_nms(cells: List[Cell], nms_threshold: float) -> List[Cell]:
"""
Perforns non maximum supression on the resulting cell predictions
:param cells: Iterable of cells
:param nms_threshold: cell iou threshold
:return: Iterable of cells
"""
# nms to get rid of cells
boxes = torch.zeros((len(cells), 4))
scores = torch.zeros(len(cells))
for i, c in enumerate(cells):
boxes[i, :] = c.boxes
scores[i] = c.scores
ind = torchvision.ops.nms(boxes, scores, nms_threshold)
# need to pop off list elements from an int64 tensor
ind_bool = torch.zeros(len(cells))
ind_bool[ind] = 1
for i, val in enumerate(ind_bool):
if val == 0:
cells[i] = None
return [c for c in cells if c]
| [
"hcat.lib.utils.correct_pixel_size",
"hcat.lib.utils.warn",
"hcat.lib.utils.scale_to_hair_cell_diameter",
"numpy.zeros",
"hcat.lib.explore_lif.get_xml",
"hcat.lib.utils.calculate_indexes",
"hcat.lib.cochlea.Cochlea",
"hcat.backends.detection.FasterRCNN_from_url",
"hcat.lib.utils.load",
"torch.cuda.is_available",
"hcat.lib.utils.cochlea_to_xml",
"itertools.product",
"torch.zeros",
"torch.tensor",
"torch.no_grad",
"numpy.concatenate"
] | [((1374, 1438), 'hcat.lib.utils.warn', 'warn', (['"""ERROR: No File to Analyze... \nAborting."""'], {'color': '"""red"""'}), '("""ERROR: No File to Analyze... \nAborting.""", color=\'red\')\n', (1378, 1438), False, 'from hcat.lib.utils import warn\n'), ((1487, 1628), 'hcat.lib.utils.warn', 'warn', (['"""WARNING: Pixel Size is not set. Defaults to 288.88 nm x/y. Consider suplying value for optimal performance."""'], {'color': '"""yellow"""'}), "(\n 'WARNING: Pixel Size is not set. Defaults to 288.88 nm x/y. Consider suplying value for optimal performance.'\n , color='yellow')\n", (1491, 1628), False, 'from hcat.lib.utils import warn\n'), ((1645, 1660), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1658, 1660), False, 'import torch\n'), ((1720, 1762), 'hcat.lib.utils.load', 'load', (['f', '"""TileScan 1 Merged"""'], {'verbose': '(True)'}), "(f, 'TileScan 1 Merged', verbose=True)\n", (1724, 1762), False, 'from hcat.lib.utils import calculate_indexes, load, cochlea_to_xml, correct_pixel_size, scale_to_hair_cell_diameter\n'), ((2141, 2156), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (2149, 2156), True, 'import numpy as np\n'), ((3724, 3855), 'hcat.backends.detection.FasterRCNN_from_url', 'FasterRCNN_from_url', ([], {'url': '"""https://github.com/buswinka/hcat/blob/master/modelfiles/detection.trch?raw=true"""', 'device': 'device'}), "(url=\n 'https://github.com/buswinka/hcat/blob/master/modelfiles/detection.trch?raw=true'\n , device=device)\n", (3743, 3855), False, 'from hcat.backends.detection import FasterRCNN_from_url\n'), ((4201, 4233), 'hcat.lib.utils.calculate_indexes', 'calculate_indexes', (['(10)', '(235)', 'x', 'x'], {}), '(10, 235, x, x)\n', (4218, 4233), False, 'from hcat.lib.utils import calculate_indexes, load, cochlea_to_xml, correct_pixel_size, scale_to_hair_cell_diameter\n'), ((4297, 4329), 'hcat.lib.utils.calculate_indexes', 'calculate_indexes', (['(10)', '(235)', 'y', 'y'], {}), '(10, 235, y, y)\n', (4314, 4329), False, 'from hcat.lib.utils import calculate_indexes, load, cochlea_to_xml, correct_pixel_size, scale_to_hair_cell_diameter\n'), ((7743, 7941), 'hcat.lib.cochlea.Cochlea', 'Cochlea', ([], {'mask': 'None', 'filename': 'filename', 'path': 'f', 'analysis_type': '"""detect"""', 'leica_metadata': 'xml', 'im_shape': 'image_base.shape', 'cochlear_distance': 'distance', 'curvature': 'curvature', 'cells': 'cells', 'apex': 'apex'}), "(mask=None, filename=filename, path=f, analysis_type='detect',\n leica_metadata=xml, im_shape=image_base.shape, cochlear_distance=\n distance, curvature=curvature, cells=cells, apex=apex)\n", (7750, 7941), False, 'from hcat.lib.cochlea import Cochlea\n'), ((2088, 2113), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2111, 2113), False, 'import torch\n'), ((2888, 2930), 'hcat.lib.utils.correct_pixel_size', 'correct_pixel_size', (['image_base', 'pixel_size'], {}), '(image_base, pixel_size)\n', (2906, 2930), False, 'from hcat.lib.utils import calculate_indexes, load, cochlea_to_xml, correct_pixel_size, scale_to_hair_cell_diameter\n'), ((3428, 3486), 'hcat.lib.utils.warn', 'warn', (['"""CUDA: GPU successfully initialized!"""'], {'color': '"""green"""'}), "('CUDA: GPU successfully initialized!', color='green')\n", (3432, 3486), False, 'from hcat.lib.utils import warn\n'), ((3513, 3662), 'hcat.lib.utils.warn', 'warn', (['"""WARNING: GPU not present or CUDA is not correctly intialized for GPU accelerated computation. Analysis may be slow."""'], {'color': '"""yellow"""'}), "(\n 'WARNING: GPU not present or CUDA is not correctly intialized for GPU accelerated computation. Analysis may be slow.'\n , color='yellow')\n", (3517, 3662), False, 'from hcat.lib.utils import warn\n'), ((4571, 4592), 'itertools.product', 'product', (['x_ind', 'y_ind'], {}), '(x_ind, y_ind)\n', (4578, 4592), False, 'from itertools import product\n'), ((6701, 6873), 'hcat.lib.utils.warn', 'warn', (['"""WARNING: All three methods to predict hair cell path have failed. Frequency Mapping functionality is limited. Consider Manual Calculation."""'], {'color': '"""yellow"""'}), "(\n 'WARNING: All three methods to predict hair cell path have failed. Frequency Mapping functionality is limited. Consider Manual Calculation.'\n , color='yellow')\n", (6705, 6873), False, 'from hcat.lib.utils import warn\n'), ((7339, 7482), 'hcat.lib.utils.warn', 'warn', (['"""WARNING: Predicted Cochlear Distance is below 4000um. Not sufficient information to determine cell frequency."""'], {'color': '"""yellow"""'}), "(\n 'WARNING: Predicted Cochlear Distance is below 4000um. Not sufficient information to determine cell frequency.'\n , color='yellow')\n", (7343, 7482), False, 'from hcat.lib.utils import warn\n'), ((7508, 7518), 'hcat.lib.explore_lif.get_xml', 'get_xml', (['f'], {}), '(f)\n', (7515, 7518), False, 'from hcat.lib.explore_lif import get_xml\n'), ((8158, 8175), 'hcat.lib.utils.cochlea_to_xml', 'cochlea_to_xml', (['c'], {}), '(c)\n', (8172, 8175), False, 'from hcat.lib.utils import calculate_indexes, load, cochlea_to_xml, correct_pixel_size, scale_to_hair_cell_diameter\n'), ((2172, 2206), 'numpy.concatenate', 'np.concatenate', (['(temp, image_base)'], {}), '((temp, image_base))\n', (2186, 2206), True, 'import numpy as np\n'), ((3150, 3204), 'hcat.lib.utils.scale_to_hair_cell_diameter', 'scale_to_hair_cell_diameter', (['image_base', 'cell_diameter'], {}), '(image_base, cell_diameter)\n', (3177, 3204), False, 'from hcat.lib.utils import calculate_indexes, load, cochlea_to_xml, correct_pixel_size, scale_to_hair_cell_diameter\n'), ((4109, 4146), 'torch.zeros', 'torch.zeros', (['(1, x, y)'], {'device': 'device'}), '((1, x, y), device=device)\n', (4120, 4146), False, 'import torch\n'), ((5771, 5805), 'torch.tensor', 'torch.tensor', (['[0, cx[i], cy[i], 0]'], {}), '([0, cx[i], cy[i], 0])\n', (5783, 5805), False, 'import torch\n')] |
import json
from . import api, root
from .decorators import gateway_belong_to_user, require_basic_or_oauth
from userver.object.gateway import Gateway, Location
from utils.errors import KeyDuplicateError, PatchError
from .forms.form_gateway import AddGatewayForm, PatchGateway
from flask import request, Response
from .forms import get_formdata_from_json_or_form
@api.route(root + 'gateways', methods=['GET', 'POST'])
@require_basic_or_oauth
def gateways(user):
if request.method == 'GET':
gateways_list = []
gateways = Gateway.query.filter_by(user_id=user.id)
for gateway in gateways:
dict = gateway.obj_to_dict()
gateways_list.append(dict)
data = json.dumps(gateways_list)
return Response(status=200, response=data)
elif request.method == 'POST':
formdata = get_formdata_from_json_or_form(request)
add_gateway = AddGatewayForm(formdata)
if add_gateway.validate():
try:
gateway = import_gateway(user, add_gateway)
gateway.save()
new_gateway = Gateway.query.get(gateway.id)
return Response(status=201, response=json.dumps(new_gateway.obj_to_dict()))
except KeyDuplicateError as error:
errors = {'mac_addr': str(error)}
return Response(status=406, response=json.dumps({"errors": errors}))
except AssertionError as error:
return Response(status=406, response=json.dumps({"errors": {"other": str(error)}}))
else:
errors = {}
for key, value in add_gateway.errors.items():
errors[key] = value[0]
return Response(status=406, response=json.dumps({"errors": errors}))
@api.route(root + 'gateways/<gateway_id>/pull_info', methods=['GET'])
@require_basic_or_oauth
@gateway_belong_to_user
def gateway_pull_info(user, gateway):
"""
:param user:
:param gateway:
:return:
"""
gateway.get_pull_info()
@api.route(root + 'gateways/<gateway_id>', methods=['GET', 'DELETE', 'PATCH', 'POST'])
@require_basic_or_oauth
@gateway_belong_to_user
def gateway(user, gateway):
if request.method == 'GET':
return Response(status=200, response=json.dumps(gateway.obj_to_dict()))
elif request.method == 'PATCH':
try:
formdata = get_formdata_from_json_or_form(request)
PatchGateway.patch(gateway, formdata)
return json.dumps(gateway.obj_to_dict()), 200
except (AssertionError, PatchError, ValueError) as error:
return json.dumps({'errors': str(error)}), 406
elif request.method == 'DELETE':
gateway.delete()
return json.dumps({'success': True}), 200
elif request.method == 'POST':
formdata = get_formdata_from_json_or_form(request)
if formdata and formdata.get('cmd') is not None:
if formdata['cmd'] == 'restart':
gateway.send_restart_request()
return '', 204
else:
return 'Unknown cmd %s ' % formdata['cmd'], 406
else:
return '', 406
def import_gateway(user, add_gateway):
mac_addr = add_gateway['mac_addr'].data
name = add_gateway['name'].data
platform = add_gateway['platform'].data
freq_plan = add_gateway['freq_plan'].data
model = add_gateway['model'].data
location = Location(add_gateway['longitude'].data, add_gateway['latitude'].data, add_gateway['altitude'].data)
return Gateway(user.id, mac_addr, name, platform, model, freq_plan=freq_plan, location=location) | [
"userver.object.gateway.Gateway.query.get",
"userver.object.gateway.Gateway.query.filter_by",
"userver.object.gateway.Location",
"json.dumps",
"userver.object.gateway.Gateway",
"flask.Response"
] | [((3413, 3516), 'userver.object.gateway.Location', 'Location', (["add_gateway['longitude'].data", "add_gateway['latitude'].data", "add_gateway['altitude'].data"], {}), "(add_gateway['longitude'].data, add_gateway['latitude'].data,\n add_gateway['altitude'].data)\n", (3421, 3516), False, 'from userver.object.gateway import Gateway, Location\n'), ((3524, 3617), 'userver.object.gateway.Gateway', 'Gateway', (['user.id', 'mac_addr', 'name', 'platform', 'model'], {'freq_plan': 'freq_plan', 'location': 'location'}), '(user.id, mac_addr, name, platform, model, freq_plan=freq_plan,\n location=location)\n', (3531, 3617), False, 'from userver.object.gateway import Gateway, Location\n'), ((542, 582), 'userver.object.gateway.Gateway.query.filter_by', 'Gateway.query.filter_by', ([], {'user_id': 'user.id'}), '(user_id=user.id)\n', (565, 582), False, 'from userver.object.gateway import Gateway, Location\n'), ((711, 736), 'json.dumps', 'json.dumps', (['gateways_list'], {}), '(gateways_list)\n', (721, 736), False, 'import json\n'), ((752, 787), 'flask.Response', 'Response', ([], {'status': '(200)', 'response': 'data'}), '(status=200, response=data)\n', (760, 787), False, 'from flask import request, Response\n'), ((1102, 1131), 'userver.object.gateway.Gateway.query.get', 'Gateway.query.get', (['gateway.id'], {}), '(gateway.id)\n', (1119, 1131), False, 'from userver.object.gateway import Gateway, Location\n'), ((2717, 2746), 'json.dumps', 'json.dumps', (["{'success': True}"], {}), "({'success': True})\n", (2727, 2746), False, 'import json\n'), ((1734, 1764), 'json.dumps', 'json.dumps', (["{'errors': errors}"], {}), "({'errors': errors})\n", (1744, 1764), False, 'import json\n'), ((1374, 1404), 'json.dumps', 'json.dumps', (["{'errors': errors}"], {}), "({'errors': errors})\n", (1384, 1404), False, 'import json\n')] |
from django.contrib import admin
from products.views import portfolio
# Register your models here.
from . models import Product, New, About, LatestNew
class ProductAdmin(admin.ModelAdmin):
list_display = ('name','price','created_at')
list_links = ('id', 'name')
list_filter = ('name','price','created_at')
search_fields = ('name','price')
ordering =('name','price','created_at')
class NewAdmin(admin.ModelAdmin):
list_display=('title','time')
list_filter=('title','time')
search_fields = ('title','time')
admin.site.register(Product, ProductAdmin)
admin.site.register(New, NewAdmin)
admin.site.register(LatestNew)
admin.site.register(About) | [
"django.contrib.admin.site.register"
] | [((543, 585), 'django.contrib.admin.site.register', 'admin.site.register', (['Product', 'ProductAdmin'], {}), '(Product, ProductAdmin)\n', (562, 585), False, 'from django.contrib import admin\n'), ((586, 620), 'django.contrib.admin.site.register', 'admin.site.register', (['New', 'NewAdmin'], {}), '(New, NewAdmin)\n', (605, 620), False, 'from django.contrib import admin\n'), ((621, 651), 'django.contrib.admin.site.register', 'admin.site.register', (['LatestNew'], {}), '(LatestNew)\n', (640, 651), False, 'from django.contrib import admin\n'), ((652, 678), 'django.contrib.admin.site.register', 'admin.site.register', (['About'], {}), '(About)\n', (671, 678), False, 'from django.contrib import admin\n')] |
"""
@author <NAME>
A.I. Engineer & Software developer
<EMAIL>
Created on 27 December, 2017 @ 12:40 AM.
Copyright © 2017. Victor. All rights reserved.
"""
import os
APP_NAME = 'folktales'
PROJECT_DIR = os.getcwd()
STATIC_DIR = os.path.join(PROJECT_DIR, 'static')
DATASET_DIR = os.path.join(STATIC_DIR, 'datasets')
| [
"os.getcwd",
"os.path.join"
] | [((219, 230), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (228, 230), False, 'import os\n'), ((245, 280), 'os.path.join', 'os.path.join', (['PROJECT_DIR', '"""static"""'], {}), "(PROJECT_DIR, 'static')\n", (257, 280), False, 'import os\n'), ((296, 332), 'os.path.join', 'os.path.join', (['STATIC_DIR', '"""datasets"""'], {}), "(STATIC_DIR, 'datasets')\n", (308, 332), False, 'import os\n')] |
import csv
import time
import numpy as np
import argparse
import warnings
warnings.filterwarnings('ignore')
from sklearn.preprocessing import scale
from sklearn.model_selection import GridSearchCV
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction import DictVectorizer
def load_eyedata(data_folder):
datafile = '{}/eyedata.csv'.format(data_folder)
data = np.loadtxt(datafile, skiprows=1, delimiter=',')
data = scale(data)
X, y = data[:, :-1], data[:, -1]
featnames = np.array(
list(map(lambda i: '{:03}'.format(i), range(X.shape[1]))))
return X, y, featnames
def load_iwpc(data_folder):
datafile = '{}/iwpc-scaled.csv'.format(data_folder)
col_types = {'race': str,
'age': float,
'height': float,
'weight': float,
'amiodarone': int,
'decr': int,
'cyp2c9': str,
'vkorc1': str,
'dose': float}
X, y = [], []
with open(datafile) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
for col_name in reader.fieldnames:
col_type = col_types[col_name]
row[col_name] = col_type(row[col_name]) # cast to correct type
if col_name == 'dose':
y.append(row[col_name])
del row[col_name]
X.append(row)
dv = DictVectorizer()
X = dv.fit_transform(X)
y = np.array(y)
featnames = np.array(dv.get_feature_names())
return X, y, featnames
if __name__ == '__main__':
data_folder = '../data'
parser = argparse.ArgumentParser()
parser.add_argument('data', choices=['eyedata', 'iwpc'], help='Specify the data to use')
args = parser.parse_args()
dataset = args.data
if dataset == 'eyedata':
X, y, featnames = load_eyedata(data_folder)
if dataset == 'iwpc':
X, y, featnames = load_iwpc(data_folder)
train_X, test_X, train_y, test_y = train_test_split(X, y, random_state=9)
time
params = {
'activation' : ['identity', 'logistic', 'tanh', 'relu'],
'solver' : ['lbfgs', 'sgd', 'adam'],
'hidden_layer_sizes': [(100,),(150,),(200,),(300,), (50,100,),(50,150,),(100,100,),(100,150,),(50,75,100,)],
'max_iter':[200,250,300,350]
}
mlp_clf_grid = GridSearchCV(MLPRegressor(random_state=9), param_grid=params, n_jobs=-1, cv=5, verbose=1)
mlp_clf_grid.fit(train_X,train_y)
print('Train Accuracy : ',mlp_clf_grid.best_estimator_.score(train_X,train_y))
print('Test Accuracy : ',mlp_clf_grid.best_estimator_.score(test_X, test_y))
print('Grid Search Best Accuracy :',mlp_clf_grid.best_score_)
print('Best Parameters : ',mlp_clf_grid.best_params_)
print('Best Estimators: ',mlp_clf_grid.best_estimator_)
| [
"argparse.ArgumentParser",
"sklearn.preprocessing.scale",
"warnings.filterwarnings",
"sklearn.model_selection.train_test_split",
"csv.DictReader",
"sklearn.neural_network.MLPRegressor",
"numpy.array",
"sklearn.feature_extraction.DictVectorizer",
"numpy.loadtxt"
] | [((74, 107), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (97, 107), False, 'import warnings\n'), ((449, 496), 'numpy.loadtxt', 'np.loadtxt', (['datafile'], {'skiprows': '(1)', 'delimiter': '""","""'}), "(datafile, skiprows=1, delimiter=',')\n", (459, 496), True, 'import numpy as np\n'), ((508, 519), 'sklearn.preprocessing.scale', 'scale', (['data'], {}), '(data)\n', (513, 519), False, 'from sklearn.preprocessing import scale\n'), ((1507, 1523), 'sklearn.feature_extraction.DictVectorizer', 'DictVectorizer', ([], {}), '()\n', (1521, 1523), False, 'from sklearn.feature_extraction import DictVectorizer\n'), ((1560, 1571), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (1568, 1571), True, 'import numpy as np\n'), ((1719, 1744), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1742, 1744), False, 'import argparse\n'), ((2091, 2129), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'random_state': '(9)'}), '(X, y, random_state=9)\n', (2107, 2129), False, 'from sklearn.model_selection import train_test_split\n'), ((1125, 1148), 'csv.DictReader', 'csv.DictReader', (['csvfile'], {}), '(csvfile)\n', (1139, 1148), False, 'import csv\n'), ((2482, 2510), 'sklearn.neural_network.MLPRegressor', 'MLPRegressor', ([], {'random_state': '(9)'}), '(random_state=9)\n', (2494, 2510), False, 'from sklearn.neural_network import MLPRegressor\n')] |
# (C) Copyright 2005-2021 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
import inspect
import unittest
from traits.observation import expression
from traits.observation._dict_item_observer import DictItemObserver
from traits.observation._filtered_trait_observer import FilteredTraitObserver
from traits.observation._list_item_observer import ListItemObserver
from traits.observation._metadata_filter import MetadataFilter
from traits.observation._named_trait_observer import NamedTraitObserver
from traits.observation._set_item_observer import SetItemObserver
from traits.observation._observer_graph import ObserverGraph
def create_graph(*nodes):
""" Create an ObserverGraph with the given nodes joined one after another.
Parameters
----------
*nodes : hashable
Items to be attached as nodes
Returns
-------
ObserverGraph
"""
node = nodes[-1]
graph = ObserverGraph(node=node)
for node in nodes[:-1][::-1]:
graph = ObserverGraph(node=node, children=[graph])
return graph
def create_expression(observer):
""" Create an expression with a dummy observer for testing purposes.
Parameters
----------
observer : hashable
Item to be used as a node on ObserverGraph
Returns
-------
expression : ObserverExpression
"""
return expression.SingleObserverExpression(observer)
class TestObserverExpressionComposition(unittest.TestCase):
""" Test composition of ObserverExpression with generic observers."""
def test_new_with_branches(self):
observer = 1
expr = create_expression(observer)
expected = [
create_graph(observer),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_or_operator(self):
observer1 = 1
observer2 = 2
expr1 = create_expression(observer1)
expr2 = create_expression(observer2)
expr = expr1 | expr2
expected = [
create_graph(observer1),
create_graph(observer2),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_or_maintain_order(self):
# Test __or__ will maintain the order provided by the user.
observer1 = 1
observer2 = 2
expr1 = create_expression(observer1)
expr2 = create_expression(observer2)
combined1 = expr1 | expr2
combined2 = expr2 | expr1
self.assertEqual(combined1._as_graphs(), combined2._as_graphs()[::-1])
def test_then_operator(self):
observer1 = 1
observer2 = 2
expr1 = create_expression(observer1)
expr2 = create_expression(observer2)
expr = expr1.then(expr2)
expected = [
create_graph(
observer1,
observer2,
)
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_chained_then_or(self):
observer1 = 1
observer2 = 2
observer3 = 3
observer4 = 4
expr1 = create_expression(observer1)
expr2 = create_expression(observer2)
expr3 = create_expression(observer3)
expr4 = create_expression(observer4)
expr = (expr1.then(expr2)) | (expr3.then(expr4))
expected = [
create_graph(
observer1,
observer2,
),
create_graph(
observer3,
observer4,
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_or_then_chained(self):
observer1 = 1
observer2 = 2
observer3 = 3
observer4 = 4
expr1 = create_expression(observer1)
expr2 = create_expression(observer2)
expr3 = create_expression(observer3)
expr4 = create_expression(observer4)
expr = (expr1 | expr2).then(expr3 | expr4)
expected = [
ObserverGraph(
node=observer1,
children=[
create_graph(observer3),
create_graph(observer4),
],
),
ObserverGraph(
node=observer2,
children=[
create_graph(observer3),
create_graph(observer4),
],
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_join_expressions(self):
observer1 = 1
observer2 = 2
expr1 = create_expression(observer1)
expr2 = create_expression(observer2)
expr = expression.join(expr1, expr2)
expected = [
create_graph(
observer1,
observer2,
)
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
class TestObserverExpressionFilter(unittest.TestCase):
""" Test ObserverExpression.match """
def setUp(self):
def anytrait(name, trait):
return True
self.anytrait = anytrait
def test_match_notify_true(self):
# Test the top-level function
expr = expression.match(filter=self.anytrait)
expected = [
create_graph(
FilteredTraitObserver(filter=self.anytrait, notify=True),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_match_notify_false(self):
# Test the top-level function
expr = expression.match(filter=self.anytrait, notify=False)
expected = [
create_graph(
FilteredTraitObserver(filter=self.anytrait, notify=False),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_match_method_notify_true(self):
# Test the instance method calls the top-level function correctly.
expr = expression.match(filter=self.anytrait).match(
filter=self.anytrait
)
expected = [
create_graph(
FilteredTraitObserver(filter=self.anytrait, notify=True),
FilteredTraitObserver(filter=self.anytrait, notify=True),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_match_method_notify_false(self):
# Test the instance method calls the top-level function correctly.
expr = expression.match(filter=self.anytrait).match(
filter=self.anytrait, notify=False,
)
expected = [
create_graph(
FilteredTraitObserver(filter=self.anytrait, notify=True),
FilteredTraitObserver(filter=self.anytrait, notify=False),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_call_signatures(self):
# Test to help developers keeping the two function signatures in-sync.
# Remove this if the two need to divert in the future.
top_level = expression.match
method = expression.ObserverExpression().match
self.assertEqual(
inspect.signature(top_level), inspect.signature(method)
)
class TestObserverExpressionFilterMetadata(unittest.TestCase):
""" Test ObserverExpression.metadata """
def test_metadata_notify_true(self):
# Test the top-level function
expr = expression.metadata("butterfly")
expected = [
create_graph(
FilteredTraitObserver(
filter=MetadataFilter(metadata_name="butterfly"),
notify=True,
),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_metadata_notify_false(self):
# Test the top-level function
expr = expression.metadata("butterfly", notify=False)
expected = [
create_graph(
FilteredTraitObserver(
filter=MetadataFilter(metadata_name="butterfly"),
notify=False,
),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_metadata_method_notify_true(self):
# Test the instance method calls the top-level function correctly.
expr = expression.metadata("bee").metadata("ant")
expected = [
create_graph(
FilteredTraitObserver(
filter=MetadataFilter(metadata_name="bee"),
notify=True,
),
FilteredTraitObserver(
filter=MetadataFilter(metadata_name="ant"),
notify=True,
),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_metadata_method_notify_false(self):
# Test the instance method calls the top-level function correctly.
expr = expression.metadata("bee").metadata("ant", notify=False)
expected = [
create_graph(
FilteredTraitObserver(
filter=MetadataFilter(metadata_name="bee"),
notify=True,
),
FilteredTraitObserver(
filter=MetadataFilter(metadata_name="ant"),
notify=False,
),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_call_signatures(self):
# Test to help developers keeping the two function signatures in-sync.
# Remove this if the two need to divert in the future.
top_level = expression.metadata
method = expression.ObserverExpression().metadata
self.assertEqual(
inspect.signature(top_level), inspect.signature(method)
)
class TestObserverExpressionTrait(unittest.TestCase):
""" Test ObserverExpression.trait """
def test_trait_name(self):
# Test the top-level function
expr = expression.trait("name")
expected = [
create_graph(
NamedTraitObserver(name="name", notify=True, optional=False)
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_trait_name_notify_false(self):
# Test the top-level function
expr = expression.trait("name", notify=False)
expected = [
create_graph(
NamedTraitObserver(name="name", notify=False, optional=False)
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_trait_name_optional_true(self):
# Test the top-level function
expr = expression.trait("name", optional=True)
expected = [
create_graph(
NamedTraitObserver(name="name", notify=True, optional=True)
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_trait_method(self):
# Test the instance method calls the top-level function correctly.
expr = expression.trait("name").trait("attr")
expected = [
create_graph(
NamedTraitObserver(name="name", notify=True, optional=False),
NamedTraitObserver(name="attr", notify=True, optional=False),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_trait_method_notify_false(self):
# Test the instance method calls the top-level function correctly.
expr = expression.trait("name").trait("attr", notify=False)
expected = [
create_graph(
NamedTraitObserver(name="name", notify=True, optional=False),
NamedTraitObserver(name="attr", notify=False, optional=False),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_trait_method_optional_true(self):
# Test the instance method calls the top-level function correctly.
expr = expression.trait("name").trait("attr", optional=True)
expected = [
create_graph(
NamedTraitObserver(name="name", notify=True, optional=False),
NamedTraitObserver(name="attr", notify=True, optional=True),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_call_signatures(self):
# Test to help developers keeping the two function signatures in-sync.
# Remove this if the two need to divert in the future.
top_level_trait = expression.trait
method_trait = expression.ObserverExpression().trait
self.assertEqual(
inspect.signature(top_level_trait), inspect.signature(method_trait)
)
class TestObserverExpressionDictItem(unittest.TestCase):
""" Test ObserverExpression.dict_items """
def test_dict_items(self):
expr = expression.dict_items()
expected = [
create_graph(
DictItemObserver(notify=True, optional=False),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_dict_items_notify_false(self):
expr = expression.dict_items(notify=False)
expected = [
create_graph(
DictItemObserver(notify=False, optional=False),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_dict_items_optional_true(self):
expr = expression.dict_items(optional=True)
expected = [
create_graph(
DictItemObserver(notify=True, optional=True),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_dict_items_method_notify(self):
# Test the instance method calls the top-level function correctly.
expr = expression.dict_items().dict_items(notify=False)
expected = [
create_graph(
DictItemObserver(notify=True, optional=False),
DictItemObserver(notify=False, optional=False),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_dict_items_method_optional(self):
# Test the instance method calls the top-level function correctly.
expr = expression.dict_items().dict_items(optional=True)
expected = [
create_graph(
DictItemObserver(notify=True, optional=False),
DictItemObserver(notify=True, optional=True),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_call_signatures(self):
# Test to help developers keeping the two function signatures in-sync.
# Remove this if the two need to divert in the future.
top_level = expression.dict_items
method = expression.ObserverExpression().dict_items
self.assertEqual(
inspect.signature(top_level), inspect.signature(method)
)
class TestObserverExpressionListItem(unittest.TestCase):
""" Test ObserverExpression.list_items """
def test_list_items(self):
expr = expression.list_items()
expected = [
create_graph(
ListItemObserver(notify=True, optional=False),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_list_items_notify_false(self):
expr = expression.list_items(notify=False)
expected = [
create_graph(
ListItemObserver(notify=False, optional=False),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_list_items_optional_true(self):
expr = expression.list_items(optional=True)
expected = [
create_graph(
ListItemObserver(notify=True, optional=True),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_list_items_method_notify(self):
# Test the instance method calls the top-level function correctly.
expr = expression.list_items().list_items(notify=False)
expected = [
create_graph(
ListItemObserver(notify=True, optional=False),
ListItemObserver(notify=False, optional=False),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_list_items_method_optional(self):
# Test the instance method calls the top-level function correctly.
expr = expression.list_items().list_items(optional=True)
expected = [
create_graph(
ListItemObserver(notify=True, optional=False),
ListItemObserver(notify=True, optional=True),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_call_signatures(self):
# Test to help developers keeping the two function signatures in-sync.
# Remove this if the two need to divert in the future.
top_level = expression.list_items
method = expression.ObserverExpression().list_items
self.assertEqual(
inspect.signature(top_level), inspect.signature(method)
)
class TestObserverExpressionSetItem(unittest.TestCase):
""" Test ObserverExpression.set_items """
def test_set_items(self):
expr = expression.set_items()
expected = [
create_graph(
SetItemObserver(notify=True, optional=False),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_set_items_notify_false(self):
expr = expression.set_items(notify=False)
expected = [
create_graph(
SetItemObserver(notify=False, optional=False),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_set_items_optional_true(self):
expr = expression.set_items(optional=True)
expected = [
create_graph(
SetItemObserver(notify=True, optional=True),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_set_items_method_notify(self):
# Test the instance method calls the top-level function correctly.
expr = expression.set_items().set_items(notify=False)
expected = [
create_graph(
SetItemObserver(notify=True, optional=False),
SetItemObserver(notify=False, optional=False),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_set_items_method_optional(self):
# Test the instance method calls the top-level function correctly.
expr = expression.set_items().set_items(optional=True)
expected = [
create_graph(
SetItemObserver(notify=True, optional=False),
SetItemObserver(notify=True, optional=True),
),
]
actual = expr._as_graphs()
self.assertEqual(actual, expected)
def test_call_signatures(self):
# Test to help developers keeping the two function signatures in-sync.
# Remove this if the two need to divert in the future.
top_level = expression.set_items
method = expression.ObserverExpression().set_items
self.assertEqual(
inspect.signature(top_level), inspect.signature(method)
)
class TestObserverExpressionEquality(unittest.TestCase):
""" Test ObserverExpression.__eq__ """
def test_trait_equality(self):
expr1 = create_expression(1)
expr2 = create_expression(1)
self.assertEqual(expr1, expr2)
def test_join_equality_with_then(self):
# The following all result in the same graphs
expr1 = create_expression(1)
expr2 = create_expression(2)
combined1 = expression.join(expr1, expr2)
combined2 = expr1.then(expr2)
self.assertEqual(combined1, combined2)
def test_equality_different_type(self):
expr = create_expression(1)
self.assertNotEqual(expr, "1")
| [
"traits.observation._list_item_observer.ListItemObserver",
"traits.observation._metadata_filter.MetadataFilter",
"traits.observation.expression.metadata",
"traits.observation.expression.dict_items",
"traits.observation.expression.ObserverExpression",
"traits.observation.expression.match",
"traits.observation.expression.trait",
"traits.observation.expression.list_items",
"traits.observation.expression.set_items",
"inspect.signature",
"traits.observation._observer_graph.ObserverGraph",
"traits.observation.expression.SingleObserverExpression",
"traits.observation._named_trait_observer.NamedTraitObserver",
"traits.observation._dict_item_observer.DictItemObserver",
"traits.observation.expression.join",
"traits.observation._set_item_observer.SetItemObserver",
"traits.observation._filtered_trait_observer.FilteredTraitObserver"
] | [((1239, 1263), 'traits.observation._observer_graph.ObserverGraph', 'ObserverGraph', ([], {'node': 'node'}), '(node=node)\n', (1252, 1263), False, 'from traits.observation._observer_graph import ObserverGraph\n'), ((1668, 1713), 'traits.observation.expression.SingleObserverExpression', 'expression.SingleObserverExpression', (['observer'], {}), '(observer)\n', (1703, 1713), False, 'from traits.observation import expression\n'), ((1314, 1356), 'traits.observation._observer_graph.ObserverGraph', 'ObserverGraph', ([], {'node': 'node', 'children': '[graph]'}), '(node=node, children=[graph])\n', (1327, 1356), False, 'from traits.observation._observer_graph import ObserverGraph\n'), ((5013, 5042), 'traits.observation.expression.join', 'expression.join', (['expr1', 'expr2'], {}), '(expr1, expr2)\n', (5028, 5042), False, 'from traits.observation import expression\n'), ((5554, 5592), 'traits.observation.expression.match', 'expression.match', ([], {'filter': 'self.anytrait'}), '(filter=self.anytrait)\n', (5570, 5592), False, 'from traits.observation import expression\n'), ((5910, 5962), 'traits.observation.expression.match', 'expression.match', ([], {'filter': 'self.anytrait', 'notify': '(False)'}), '(filter=self.anytrait, notify=False)\n', (5926, 5962), False, 'from traits.observation import expression\n'), ((7831, 7863), 'traits.observation.expression.metadata', 'expression.metadata', (['"""butterfly"""'], {}), "('butterfly')\n", (7850, 7863), False, 'from traits.observation import expression\n'), ((8271, 8317), 'traits.observation.expression.metadata', 'expression.metadata', (['"""butterfly"""'], {'notify': '(False)'}), "('butterfly', notify=False)\n", (8290, 8317), False, 'from traits.observation import expression\n'), ((10494, 10518), 'traits.observation.expression.trait', 'expression.trait', (['"""name"""'], {}), "('name')\n", (10510, 10518), False, 'from traits.observation import expression\n'), ((10844, 10882), 'traits.observation.expression.trait', 'expression.trait', (['"""name"""'], {'notify': '(False)'}), "('name', notify=False)\n", (10860, 10882), False, 'from traits.observation import expression\n'), ((11210, 11249), 'traits.observation.expression.trait', 'expression.trait', (['"""name"""'], {'optional': '(True)'}), "('name', optional=True)\n", (11226, 11249), False, 'from traits.observation import expression\n'), ((13491, 13514), 'traits.observation.expression.dict_items', 'expression.dict_items', ([], {}), '()\n', (13512, 13514), False, 'from traits.observation import expression\n'), ((13788, 13823), 'traits.observation.expression.dict_items', 'expression.dict_items', ([], {'notify': '(False)'}), '(notify=False)\n', (13809, 13823), False, 'from traits.observation import expression\n'), ((14099, 14135), 'traits.observation.expression.dict_items', 'expression.dict_items', ([], {'optional': '(True)'}), '(optional=True)\n', (14120, 14135), False, 'from traits.observation import expression\n'), ((15811, 15834), 'traits.observation.expression.list_items', 'expression.list_items', ([], {}), '()\n', (15832, 15834), False, 'from traits.observation import expression\n'), ((16108, 16143), 'traits.observation.expression.list_items', 'expression.list_items', ([], {'notify': '(False)'}), '(notify=False)\n', (16129, 16143), False, 'from traits.observation import expression\n'), ((16419, 16455), 'traits.observation.expression.list_items', 'expression.list_items', ([], {'optional': '(True)'}), '(optional=True)\n', (16440, 16455), False, 'from traits.observation import expression\n'), ((18128, 18150), 'traits.observation.expression.set_items', 'expression.set_items', ([], {}), '()\n', (18148, 18150), False, 'from traits.observation import expression\n'), ((18422, 18456), 'traits.observation.expression.set_items', 'expression.set_items', ([], {'notify': '(False)'}), '(notify=False)\n', (18442, 18456), False, 'from traits.observation import expression\n'), ((18730, 18765), 'traits.observation.expression.set_items', 'expression.set_items', ([], {'optional': '(True)'}), '(optional=True)\n', (18750, 18765), False, 'from traits.observation import expression\n'), ((20720, 20749), 'traits.observation.expression.join', 'expression.join', (['expr1', 'expr2'], {}), '(expr1, expr2)\n', (20735, 20749), False, 'from traits.observation import expression\n'), ((7484, 7515), 'traits.observation.expression.ObserverExpression', 'expression.ObserverExpression', ([], {}), '()\n', (7513, 7515), False, 'from traits.observation import expression\n'), ((7560, 7588), 'inspect.signature', 'inspect.signature', (['top_level'], {}), '(top_level)\n', (7577, 7588), False, 'import inspect\n'), ((7590, 7615), 'inspect.signature', 'inspect.signature', (['method'], {}), '(method)\n', (7607, 7615), False, 'import inspect\n'), ((10166, 10197), 'traits.observation.expression.ObserverExpression', 'expression.ObserverExpression', ([], {}), '()\n', (10195, 10197), False, 'from traits.observation import expression\n'), ((10245, 10273), 'inspect.signature', 'inspect.signature', (['top_level'], {}), '(top_level)\n', (10262, 10273), False, 'import inspect\n'), ((10275, 10300), 'inspect.signature', 'inspect.signature', (['method'], {}), '(method)\n', (10292, 10300), False, 'import inspect\n'), ((13184, 13215), 'traits.observation.expression.ObserverExpression', 'expression.ObserverExpression', ([], {}), '()\n', (13213, 13215), False, 'from traits.observation import expression\n'), ((13260, 13294), 'inspect.signature', 'inspect.signature', (['top_level_trait'], {}), '(top_level_trait)\n', (13277, 13294), False, 'import inspect\n'), ((13296, 13327), 'inspect.signature', 'inspect.signature', (['method_trait'], {}), '(method_trait)\n', (13313, 13327), False, 'import inspect\n'), ((15511, 15542), 'traits.observation.expression.ObserverExpression', 'expression.ObserverExpression', ([], {}), '()\n', (15540, 15542), False, 'from traits.observation import expression\n'), ((15592, 15620), 'inspect.signature', 'inspect.signature', (['top_level'], {}), '(top_level)\n', (15609, 15620), False, 'import inspect\n'), ((15622, 15647), 'inspect.signature', 'inspect.signature', (['method'], {}), '(method)\n', (15639, 15647), False, 'import inspect\n'), ((17831, 17862), 'traits.observation.expression.ObserverExpression', 'expression.ObserverExpression', ([], {}), '()\n', (17860, 17862), False, 'from traits.observation import expression\n'), ((17912, 17940), 'inspect.signature', 'inspect.signature', (['top_level'], {}), '(top_level)\n', (17929, 17940), False, 'import inspect\n'), ((17942, 17967), 'inspect.signature', 'inspect.signature', (['method'], {}), '(method)\n', (17959, 17967), False, 'import inspect\n'), ((20129, 20160), 'traits.observation.expression.ObserverExpression', 'expression.ObserverExpression', ([], {}), '()\n', (20158, 20160), False, 'from traits.observation import expression\n'), ((20209, 20237), 'inspect.signature', 'inspect.signature', (['top_level'], {}), '(top_level)\n', (20226, 20237), False, 'import inspect\n'), ((20239, 20264), 'inspect.signature', 'inspect.signature', (['method'], {}), '(method)\n', (20256, 20264), False, 'import inspect\n'), ((5656, 5712), 'traits.observation._filtered_trait_observer.FilteredTraitObserver', 'FilteredTraitObserver', ([], {'filter': 'self.anytrait', 'notify': '(True)'}), '(filter=self.anytrait, notify=True)\n', (5677, 5712), False, 'from traits.observation._filtered_trait_observer import FilteredTraitObserver\n'), ((6026, 6083), 'traits.observation._filtered_trait_observer.FilteredTraitObserver', 'FilteredTraitObserver', ([], {'filter': 'self.anytrait', 'notify': '(False)'}), '(filter=self.anytrait, notify=False)\n', (6047, 6083), False, 'from traits.observation._filtered_trait_observer import FilteredTraitObserver\n'), ((6324, 6362), 'traits.observation.expression.match', 'expression.match', ([], {'filter': 'self.anytrait'}), '(filter=self.anytrait)\n', (6340, 6362), False, 'from traits.observation import expression\n'), ((6476, 6532), 'traits.observation._filtered_trait_observer.FilteredTraitObserver', 'FilteredTraitObserver', ([], {'filter': 'self.anytrait', 'notify': '(True)'}), '(filter=self.anytrait, notify=True)\n', (6497, 6532), False, 'from traits.observation._filtered_trait_observer import FilteredTraitObserver\n'), ((6550, 6606), 'traits.observation._filtered_trait_observer.FilteredTraitObserver', 'FilteredTraitObserver', ([], {'filter': 'self.anytrait', 'notify': '(True)'}), '(filter=self.anytrait, notify=True)\n', (6571, 6606), False, 'from traits.observation._filtered_trait_observer import FilteredTraitObserver\n'), ((6848, 6886), 'traits.observation.expression.match', 'expression.match', ([], {'filter': 'self.anytrait'}), '(filter=self.anytrait)\n', (6864, 6886), False, 'from traits.observation import expression\n'), ((7015, 7071), 'traits.observation._filtered_trait_observer.FilteredTraitObserver', 'FilteredTraitObserver', ([], {'filter': 'self.anytrait', 'notify': '(True)'}), '(filter=self.anytrait, notify=True)\n', (7036, 7071), False, 'from traits.observation._filtered_trait_observer import FilteredTraitObserver\n'), ((7089, 7146), 'traits.observation._filtered_trait_observer.FilteredTraitObserver', 'FilteredTraitObserver', ([], {'filter': 'self.anytrait', 'notify': '(False)'}), '(filter=self.anytrait, notify=False)\n', (7110, 7146), False, 'from traits.observation._filtered_trait_observer import FilteredTraitObserver\n'), ((8769, 8795), 'traits.observation.expression.metadata', 'expression.metadata', (['"""bee"""'], {}), "('bee')\n", (8788, 8795), False, 'from traits.observation import expression\n'), ((9412, 9438), 'traits.observation.expression.metadata', 'expression.metadata', (['"""bee"""'], {}), "('bee')\n", (9431, 9438), False, 'from traits.observation import expression\n'), ((10582, 10642), 'traits.observation._named_trait_observer.NamedTraitObserver', 'NamedTraitObserver', ([], {'name': '"""name"""', 'notify': '(True)', 'optional': '(False)'}), "(name='name', notify=True, optional=False)\n", (10600, 10642), False, 'from traits.observation._named_trait_observer import NamedTraitObserver\n'), ((10946, 11007), 'traits.observation._named_trait_observer.NamedTraitObserver', 'NamedTraitObserver', ([], {'name': '"""name"""', 'notify': '(False)', 'optional': '(False)'}), "(name='name', notify=False, optional=False)\n", (10964, 11007), False, 'from traits.observation._named_trait_observer import NamedTraitObserver\n'), ((11313, 11372), 'traits.observation._named_trait_observer.NamedTraitObserver', 'NamedTraitObserver', ([], {'name': '"""name"""', 'notify': '(True)', 'optional': '(True)'}), "(name='name', notify=True, optional=True)\n", (11331, 11372), False, 'from traits.observation._named_trait_observer import NamedTraitObserver\n'), ((11600, 11624), 'traits.observation.expression.trait', 'expression.trait', (['"""name"""'], {}), "('name')\n", (11616, 11624), False, 'from traits.observation import expression\n'), ((11702, 11762), 'traits.observation._named_trait_observer.NamedTraitObserver', 'NamedTraitObserver', ([], {'name': '"""name"""', 'notify': '(True)', 'optional': '(False)'}), "(name='name', notify=True, optional=False)\n", (11720, 11762), False, 'from traits.observation._named_trait_observer import NamedTraitObserver\n'), ((11780, 11840), 'traits.observation._named_trait_observer.NamedTraitObserver', 'NamedTraitObserver', ([], {'name': '"""attr"""', 'notify': '(True)', 'optional': '(False)'}), "(name='attr', notify=True, optional=False)\n", (11798, 11840), False, 'from traits.observation._named_trait_observer import NamedTraitObserver\n'), ((12082, 12106), 'traits.observation.expression.trait', 'expression.trait', (['"""name"""'], {}), "('name')\n", (12098, 12106), False, 'from traits.observation import expression\n'), ((12198, 12258), 'traits.observation._named_trait_observer.NamedTraitObserver', 'NamedTraitObserver', ([], {'name': '"""name"""', 'notify': '(True)', 'optional': '(False)'}), "(name='name', notify=True, optional=False)\n", (12216, 12258), False, 'from traits.observation._named_trait_observer import NamedTraitObserver\n'), ((12276, 12337), 'traits.observation._named_trait_observer.NamedTraitObserver', 'NamedTraitObserver', ([], {'name': '"""attr"""', 'notify': '(False)', 'optional': '(False)'}), "(name='attr', notify=False, optional=False)\n", (12294, 12337), False, 'from traits.observation._named_trait_observer import NamedTraitObserver\n'), ((12580, 12604), 'traits.observation.expression.trait', 'expression.trait', (['"""name"""'], {}), "('name')\n", (12596, 12604), False, 'from traits.observation import expression\n'), ((12697, 12757), 'traits.observation._named_trait_observer.NamedTraitObserver', 'NamedTraitObserver', ([], {'name': '"""name"""', 'notify': '(True)', 'optional': '(False)'}), "(name='name', notify=True, optional=False)\n", (12715, 12757), False, 'from traits.observation._named_trait_observer import NamedTraitObserver\n'), ((12775, 12834), 'traits.observation._named_trait_observer.NamedTraitObserver', 'NamedTraitObserver', ([], {'name': '"""attr"""', 'notify': '(True)', 'optional': '(True)'}), "(name='attr', notify=True, optional=True)\n", (12793, 12834), False, 'from traits.observation._named_trait_observer import NamedTraitObserver\n'), ((13578, 13623), 'traits.observation._dict_item_observer.DictItemObserver', 'DictItemObserver', ([], {'notify': '(True)', 'optional': '(False)'}), '(notify=True, optional=False)\n', (13594, 13623), False, 'from traits.observation._dict_item_observer import DictItemObserver\n'), ((13887, 13933), 'traits.observation._dict_item_observer.DictItemObserver', 'DictItemObserver', ([], {'notify': '(False)', 'optional': '(False)'}), '(notify=False, optional=False)\n', (13903, 13933), False, 'from traits.observation._dict_item_observer import DictItemObserver\n'), ((14199, 14243), 'traits.observation._dict_item_observer.DictItemObserver', 'DictItemObserver', ([], {'notify': '(True)', 'optional': '(True)'}), '(notify=True, optional=True)\n', (14215, 14243), False, 'from traits.observation._dict_item_observer import DictItemObserver\n'), ((14484, 14507), 'traits.observation.expression.dict_items', 'expression.dict_items', ([], {}), '()\n', (14505, 14507), False, 'from traits.observation import expression\n'), ((14596, 14641), 'traits.observation._dict_item_observer.DictItemObserver', 'DictItemObserver', ([], {'notify': '(True)', 'optional': '(False)'}), '(notify=True, optional=False)\n', (14612, 14641), False, 'from traits.observation._dict_item_observer import DictItemObserver\n'), ((14659, 14705), 'traits.observation._dict_item_observer.DictItemObserver', 'DictItemObserver', ([], {'notify': '(False)', 'optional': '(False)'}), '(notify=False, optional=False)\n', (14675, 14705), False, 'from traits.observation._dict_item_observer import DictItemObserver\n'), ((14948, 14971), 'traits.observation.expression.dict_items', 'expression.dict_items', ([], {}), '()\n', (14969, 14971), False, 'from traits.observation import expression\n'), ((15061, 15106), 'traits.observation._dict_item_observer.DictItemObserver', 'DictItemObserver', ([], {'notify': '(True)', 'optional': '(False)'}), '(notify=True, optional=False)\n', (15077, 15106), False, 'from traits.observation._dict_item_observer import DictItemObserver\n'), ((15124, 15168), 'traits.observation._dict_item_observer.DictItemObserver', 'DictItemObserver', ([], {'notify': '(True)', 'optional': '(True)'}), '(notify=True, optional=True)\n', (15140, 15168), False, 'from traits.observation._dict_item_observer import DictItemObserver\n'), ((15898, 15943), 'traits.observation._list_item_observer.ListItemObserver', 'ListItemObserver', ([], {'notify': '(True)', 'optional': '(False)'}), '(notify=True, optional=False)\n', (15914, 15943), False, 'from traits.observation._list_item_observer import ListItemObserver\n'), ((16207, 16253), 'traits.observation._list_item_observer.ListItemObserver', 'ListItemObserver', ([], {'notify': '(False)', 'optional': '(False)'}), '(notify=False, optional=False)\n', (16223, 16253), False, 'from traits.observation._list_item_observer import ListItemObserver\n'), ((16519, 16563), 'traits.observation._list_item_observer.ListItemObserver', 'ListItemObserver', ([], {'notify': '(True)', 'optional': '(True)'}), '(notify=True, optional=True)\n', (16535, 16563), False, 'from traits.observation._list_item_observer import ListItemObserver\n'), ((16804, 16827), 'traits.observation.expression.list_items', 'expression.list_items', ([], {}), '()\n', (16825, 16827), False, 'from traits.observation import expression\n'), ((16916, 16961), 'traits.observation._list_item_observer.ListItemObserver', 'ListItemObserver', ([], {'notify': '(True)', 'optional': '(False)'}), '(notify=True, optional=False)\n', (16932, 16961), False, 'from traits.observation._list_item_observer import ListItemObserver\n'), ((16979, 17025), 'traits.observation._list_item_observer.ListItemObserver', 'ListItemObserver', ([], {'notify': '(False)', 'optional': '(False)'}), '(notify=False, optional=False)\n', (16995, 17025), False, 'from traits.observation._list_item_observer import ListItemObserver\n'), ((17268, 17291), 'traits.observation.expression.list_items', 'expression.list_items', ([], {}), '()\n', (17289, 17291), False, 'from traits.observation import expression\n'), ((17381, 17426), 'traits.observation._list_item_observer.ListItemObserver', 'ListItemObserver', ([], {'notify': '(True)', 'optional': '(False)'}), '(notify=True, optional=False)\n', (17397, 17426), False, 'from traits.observation._list_item_observer import ListItemObserver\n'), ((17444, 17488), 'traits.observation._list_item_observer.ListItemObserver', 'ListItemObserver', ([], {'notify': '(True)', 'optional': '(True)'}), '(notify=True, optional=True)\n', (17460, 17488), False, 'from traits.observation._list_item_observer import ListItemObserver\n'), ((18214, 18258), 'traits.observation._set_item_observer.SetItemObserver', 'SetItemObserver', ([], {'notify': '(True)', 'optional': '(False)'}), '(notify=True, optional=False)\n', (18229, 18258), False, 'from traits.observation._set_item_observer import SetItemObserver\n'), ((18520, 18565), 'traits.observation._set_item_observer.SetItemObserver', 'SetItemObserver', ([], {'notify': '(False)', 'optional': '(False)'}), '(notify=False, optional=False)\n', (18535, 18565), False, 'from traits.observation._set_item_observer import SetItemObserver\n'), ((18829, 18872), 'traits.observation._set_item_observer.SetItemObserver', 'SetItemObserver', ([], {'notify': '(True)', 'optional': '(True)'}), '(notify=True, optional=True)\n', (18844, 18872), False, 'from traits.observation._set_item_observer import SetItemObserver\n'), ((19112, 19134), 'traits.observation.expression.set_items', 'expression.set_items', ([], {}), '()\n', (19132, 19134), False, 'from traits.observation import expression\n'), ((19222, 19266), 'traits.observation._set_item_observer.SetItemObserver', 'SetItemObserver', ([], {'notify': '(True)', 'optional': '(False)'}), '(notify=True, optional=False)\n', (19237, 19266), False, 'from traits.observation._set_item_observer import SetItemObserver\n'), ((19284, 19329), 'traits.observation._set_item_observer.SetItemObserver', 'SetItemObserver', ([], {'notify': '(False)', 'optional': '(False)'}), '(notify=False, optional=False)\n', (19299, 19329), False, 'from traits.observation._set_item_observer import SetItemObserver\n'), ((19571, 19593), 'traits.observation.expression.set_items', 'expression.set_items', ([], {}), '()\n', (19591, 19593), False, 'from traits.observation import expression\n'), ((19682, 19726), 'traits.observation._set_item_observer.SetItemObserver', 'SetItemObserver', ([], {'notify': '(True)', 'optional': '(False)'}), '(notify=True, optional=False)\n', (19697, 19726), False, 'from traits.observation._set_item_observer import SetItemObserver\n'), ((19744, 19787), 'traits.observation._set_item_observer.SetItemObserver', 'SetItemObserver', ([], {'notify': '(True)', 'optional': '(True)'}), '(notify=True, optional=True)\n', (19759, 19787), False, 'from traits.observation._set_item_observer import SetItemObserver\n'), ((7977, 8018), 'traits.observation._metadata_filter.MetadataFilter', 'MetadataFilter', ([], {'metadata_name': '"""butterfly"""'}), "(metadata_name='butterfly')\n", (7991, 8018), False, 'from traits.observation._metadata_filter import MetadataFilter\n'), ((8431, 8472), 'traits.observation._metadata_filter.MetadataFilter', 'MetadataFilter', ([], {'metadata_name': '"""butterfly"""'}), "(metadata_name='butterfly')\n", (8445, 8472), False, 'from traits.observation._metadata_filter import MetadataFilter\n'), ((8925, 8960), 'traits.observation._metadata_filter.MetadataFilter', 'MetadataFilter', ([], {'metadata_name': '"""bee"""'}), "(metadata_name='bee')\n", (8939, 8960), False, 'from traits.observation._metadata_filter import MetadataFilter\n'), ((9080, 9115), 'traits.observation._metadata_filter.MetadataFilter', 'MetadataFilter', ([], {'metadata_name': '"""ant"""'}), "(metadata_name='ant')\n", (9094, 9115), False, 'from traits.observation._metadata_filter import MetadataFilter\n'), ((9582, 9617), 'traits.observation._metadata_filter.MetadataFilter', 'MetadataFilter', ([], {'metadata_name': '"""bee"""'}), "(metadata_name='bee')\n", (9596, 9617), False, 'from traits.observation._metadata_filter import MetadataFilter\n'), ((9737, 9772), 'traits.observation._metadata_filter.MetadataFilter', 'MetadataFilter', ([], {'metadata_name': '"""ant"""'}), "(metadata_name='ant')\n", (9751, 9772), False, 'from traits.observation._metadata_filter import MetadataFilter\n')] |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import requests
import requests.adapters
from six.moves import http_client
import google.auth.transport.requests
from tests.transport import compliance
class TestRequestResponse(compliance.RequestResponseTests):
def make_request(self):
return google.auth.transport.requests.Request()
def test_timeout(self):
http = mock.Mock()
request = google.auth.transport.requests.Request(http)
request(url='http://example.com', method='GET', timeout=5)
assert http.request.call_args[1]['timeout'] == 5
class MockCredentials(object):
def __init__(self, token='token'):
self.token = token
def apply(self, headers):
headers['authorization'] = self.token
def before_request(self, request, method, url, headers):
self.apply(headers)
def refresh(self, request):
self.token += '1'
class MockAdapter(requests.adapters.BaseAdapter):
def __init__(self, responses, headers=None):
self.responses = responses
self.requests = []
self.headers = headers or {}
def send(self, request, **kwargs):
self.requests.append(request)
return self.responses.pop(0)
def make_response(status=http_client.OK, data=None):
response = requests.Response()
response.status_code = status
response._content = data
return response
class TestAuthorizedHttp(object):
TEST_URL = 'http://example.com/'
def test_constructor(self):
authed_session = google.auth.transport.requests.AuthorizedSession(
mock.sentinel.credentials)
assert authed_session.credentials == mock.sentinel.credentials
def test_request_no_refresh(self):
mock_credentials = mock.Mock(wraps=MockCredentials())
mock_response = make_response()
mock_adapter = MockAdapter([mock_response])
authed_session = google.auth.transport.requests.AuthorizedSession(
mock_credentials)
authed_session.mount(self.TEST_URL, mock_adapter)
response = authed_session.request('GET', self.TEST_URL)
assert response == mock_response
assert mock_credentials.before_request.called
assert not mock_credentials.refresh.called
assert len(mock_adapter.requests) == 1
assert mock_adapter.requests[0].url == self.TEST_URL
assert mock_adapter.requests[0].headers['authorization'] == 'token'
def test_request_refresh(self):
mock_credentials = mock.Mock(wraps=MockCredentials())
mock_final_response = make_response(status=http_client.OK)
# First request will 401, second request will succeed.
mock_adapter = MockAdapter([
make_response(status=http_client.UNAUTHORIZED),
mock_final_response])
authed_session = google.auth.transport.requests.AuthorizedSession(
mock_credentials)
authed_session.mount(self.TEST_URL, mock_adapter)
response = authed_session.request('GET', self.TEST_URL)
assert response == mock_final_response
assert mock_credentials.before_request.call_count == 2
assert mock_credentials.refresh.called
assert len(mock_adapter.requests) == 2
assert mock_adapter.requests[0].url == self.TEST_URL
assert mock_adapter.requests[0].headers['authorization'] == 'token'
assert mock_adapter.requests[1].url == self.TEST_URL
assert mock_adapter.requests[1].headers['authorization'] == 'token1'
| [
"requests.Response",
"mock.Mock"
] | [((1842, 1861), 'requests.Response', 'requests.Response', ([], {}), '()\n', (1859, 1861), False, 'import requests\n'), ((932, 943), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (941, 943), False, 'import mock\n')] |
import csv
import matplotlib.pyplot as plt
# pip install matplotlib-venn
from matplotlib_venn import venn3
with open('../../Data/Annotations/ethnicity.csv') as f:
reader = csv.DictReader(f)
entries = list(reader)
images_per_language = {'dutch': set(), 'german': set(), 'english': set()}
for entry in entries:
language = entry['language']
flickr_id = entry['flickr_id']
images_per_language[language].add(flickr_id)
diagram = venn3([images_per_language['dutch'],
images_per_language['german'],
images_per_language['english']],
['Dutch','German','English'])
for patch in diagram.patches:
patch.set_facecolor('white')
patch.set_linewidth(1)
patch.set_edgecolor('black')
patch.set_alpha(1.0)
for label in diagram.set_labels:
label.set_size(20)
for label in diagram.subset_labels:
label.set_size(20)
# Minor tweaks
label_12 = diagram.subset_labels[2]
x,y = label_12.get_position()
label_12.set_y(y+0.03)
label_12.set_x(x+0.02)
label_11 = diagram.subset_labels[4]
x,y = label_11.get_position()
#label_11.set_x(x-0.025)
label_11.set_y(y-0.07)
plt.savefig('../Output/ethnicity.pdf')
| [
"csv.DictReader",
"matplotlib_venn.venn3",
"matplotlib.pyplot.savefig"
] | [((449, 585), 'matplotlib_venn.venn3', 'venn3', (["[images_per_language['dutch'], images_per_language['german'],\n images_per_language['english']]", "['Dutch', 'German', 'English']"], {}), "([images_per_language['dutch'], images_per_language['german'],\n images_per_language['english']], ['Dutch', 'German', 'English'])\n", (454, 585), False, 'from matplotlib_venn import venn3\n'), ((1141, 1179), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../Output/ethnicity.pdf"""'], {}), "('../Output/ethnicity.pdf')\n", (1152, 1179), True, 'import matplotlib.pyplot as plt\n'), ((178, 195), 'csv.DictReader', 'csv.DictReader', (['f'], {}), '(f)\n', (192, 195), False, 'import csv\n')] |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'loader2.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(500, 426)
MainWindow.setStyleSheet("")
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setStyleSheet("QWidget{\n"
" background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:1, stop:0 rgba(185, 38, 38, 255), stop:0.502778 rgba(118, 61, 93, 255), stop:0.827778 rgba(87, 46, 43, 255));\n"
"}\n"
"\n"
"\n"
".QLabel{\n"
"\n"
" background-color: transparent;\n"
"\n"
"}\n"
"\n"
".QPushButton{\n"
"\n"
" background-color: rgb(31, 0, 1);\n"
" border-radius: 1px;\n"
"\n"
" color: rgb(255, 255, 255);\n"
" width : 60px;\n"
" height: 20px;\n"
" border-radius: 10px;\n"
" border: none;\n"
" padding: 2px;\n"
" \n"
"}\n"
"")
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setEnabled(True)
font = QtGui.QFont()
font.setPointSize(36)
self.label.setFont(font)
self.label.setStyleSheet("")
self.label.setObjectName("label")
self.horizontalLayout_2.addWidget(self.label)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem)
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setObjectName("label_2")
self.horizontalLayout_2.addWidget(self.label_2)
self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit.setMouseTracking(False)
self.lineEdit.setStyleSheet("color: rgb(255, 255, 255);\n"
"background-color: transparent;")
self.lineEdit.setText("")
self.lineEdit.setObjectName("lineEdit")
self.horizontalLayout_2.addWidget(self.lineEdit)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem1)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.tableWidget = QtWidgets.QTableWidget(self.centralwidget)
self.tableWidget.setEnabled(True)
self.tableWidget.setStyleSheet("background-color: rgb(255, 255, 255);")
self.tableWidget.setAutoScroll(False)
self.tableWidget.setCornerButtonEnabled(False)
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(10)
self.tableWidget.setRowCount(0)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(4, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(5, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(6, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(7, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(8, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(9, item)
self.tableWidget.horizontalHeader().setHighlightSections(False)
self.tableWidget.verticalHeader().setVisible(True)
self.tableWidget.verticalHeader().setHighlightSections(False)
self.verticalLayout.addWidget(self.tableWidget)
self.progressBar = QtWidgets.QProgressBar(self.centralwidget)
self.progressBar.setEnabled(True)
self.progressBar.setStyleSheet("QProgressBar{\n"
" background-color: rgb(98,114,164);\n"
" color: rgb(200, 200, 200);\n"
" border-style: none;\n"
" border-radius: 10px;\n"
" text-align: center;\n"
" \n"
"}\n"
"\n"
"QProgressBar::chunk{ \n"
" background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:0.0170455 rgba(226, 0, 185, 255), stop:1 rgba(21, 25, 255, 255));\n"
" border-radius: 10px;\n"
" \n"
"}")
self.progressBar.setProperty("value", 24)
self.progressBar.setObjectName("progressBar")
self.verticalLayout.addWidget(self.progressBar)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_3.setObjectName("pushButton_3")
self.horizontalLayout.addWidget(self.pushButton_3)
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem2)
self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_2.setEnabled(True)
self.pushButton_2.setObjectName("pushButton_2")
self.horizontalLayout.addWidget(self.pushButton_2)
self.pushButton_4 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_4.setObjectName("pushButton_4")
self.horizontalLayout.addWidget(self.pushButton_4)
self.pushButton_5 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_5.setObjectName("pushButton_5")
self.horizontalLayout.addWidget(self.pushButton_5)
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setEnabled(True)
self.pushButton.setObjectName("pushButton")
self.horizontalLayout.addWidget(self.pushButton)
self.verticalLayout.addLayout(self.horizontalLayout)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 500, 21))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
MainWindow.setTabOrder(self.tableWidget, self.lineEdit)
MainWindow.setTabOrder(self.lineEdit, self.pushButton_3)
MainWindow.setTabOrder(self.pushButton_3, self.pushButton_2)
MainWindow.setTabOrder(self.pushButton_2, self.pushButton_4)
MainWindow.setTabOrder(self.pushButton_4, self.pushButton_5)
MainWindow.setTabOrder(self.pushButton_5, self.pushButton)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.label.setText(_translate("MainWindow", "Scrapelancer"))
self.label_2.setText(_translate("MainWindow", "Search"))
self.tableWidget.setSortingEnabled(False)
item = self.tableWidget.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "Username"))
item = self.tableWidget.horizontalHeaderItem(1)
item.setText(_translate("MainWindow", "Tagline"))
item = self.tableWidget.horizontalHeaderItem(2)
item.setText(_translate("MainWindow", "Country"))
item = self.tableWidget.horizontalHeaderItem(3)
item.setText(_translate("MainWindow", "Ratings"))
item = self.tableWidget.horizontalHeaderItem(4)
item.setText(_translate("MainWindow", "EarningLabel"))
item = self.tableWidget.horizontalHeaderItem(5)
item.setText(_translate("MainWindow", "Reviews"))
item = self.tableWidget.horizontalHeaderItem(6)
item.setText(_translate("MainWindow", "Rate"))
item = self.tableWidget.horizontalHeaderItem(7)
item.setText(_translate("MainWindow", "description"))
item = self.tableWidget.horizontalHeaderItem(8)
item.setText(_translate("MainWindow", "skills"))
item = self.tableWidget.horizontalHeaderItem(9)
item.setText(_translate("MainWindow", "imgSrc"))
self.pushButton_3.setText(_translate("MainWindow", "Sort"))
self.pushButton_2.setText(_translate("MainWindow", "OK"))
self.pushButton_4.setText(_translate("MainWindow", "Pause"))
self.pushButton_5.setText(_translate("MainWindow", "Resume"))
self.pushButton.setText(_translate("MainWindow", "Load Data"))
| [
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QProgressBar",
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtWidgets.QTableWidget",
"PyQt5.QtCore.QRect",
"PyQt5.QtWidgets.QStatusBar",
"PyQt5.QtWidgets.QHBoxLayout",
"PyQt5.QtWidgets.QLineEdit",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtGui.QFont",
"PyQt5.QtWidgets.QSpacerItem",
"PyQt5.QtWidgets.QVBoxLayout",
"PyQt5.QtCore.QMetaObject.connectSlotsByName",
"PyQt5.QtWidgets.QTableWidgetItem",
"PyQt5.QtWidgets.QMenuBar"
] | [((548, 577), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['MainWindow'], {}), '(MainWindow)\n', (565, 577), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1266, 1307), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1287, 1307), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1402, 1425), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ([], {}), '()\n', (1423, 1425), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1515, 1551), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1531, 1551), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1603, 1616), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (1614, 1616), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1834, 1932), 'PyQt5.QtWidgets.QSpacerItem', 'QtWidgets.QSpacerItem', (['(40)', '(20)', 'QtWidgets.QSizePolicy.Expanding', 'QtWidgets.QSizePolicy.Minimum'], {}), '(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.\n QSizePolicy.Minimum)\n', (1855, 1932), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2003, 2039), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (2019, 2039), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2166, 2205), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.centralwidget'], {}), '(self.centralwidget)\n', (2185, 2205), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2514, 2612), 'PyQt5.QtWidgets.QSpacerItem', 'QtWidgets.QSpacerItem', (['(40)', '(20)', 'QtWidgets.QSizePolicy.Expanding', 'QtWidgets.QSizePolicy.Minimum'], {}), '(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.\n QSizePolicy.Minimum)\n', (2535, 2612), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2751, 2793), 'PyQt5.QtWidgets.QTableWidget', 'QtWidgets.QTableWidget', (['self.centralwidget'], {}), '(self.centralwidget)\n', (2773, 2793), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3170, 3198), 'PyQt5.QtWidgets.QTableWidgetItem', 'QtWidgets.QTableWidgetItem', ([], {}), '()\n', (3196, 3198), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3272, 3300), 'PyQt5.QtWidgets.QTableWidgetItem', 'QtWidgets.QTableWidgetItem', ([], {}), '()\n', (3298, 3300), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3374, 3402), 'PyQt5.QtWidgets.QTableWidgetItem', 'QtWidgets.QTableWidgetItem', ([], {}), '()\n', (3400, 3402), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3476, 3504), 'PyQt5.QtWidgets.QTableWidgetItem', 'QtWidgets.QTableWidgetItem', ([], {}), '()\n', (3502, 3504), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3578, 3606), 'PyQt5.QtWidgets.QTableWidgetItem', 'QtWidgets.QTableWidgetItem', ([], {}), '()\n', (3604, 3606), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3680, 3708), 'PyQt5.QtWidgets.QTableWidgetItem', 'QtWidgets.QTableWidgetItem', ([], {}), '()\n', (3706, 3708), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3782, 3810), 'PyQt5.QtWidgets.QTableWidgetItem', 'QtWidgets.QTableWidgetItem', ([], {}), '()\n', (3808, 3810), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3884, 3912), 'PyQt5.QtWidgets.QTableWidgetItem', 'QtWidgets.QTableWidgetItem', ([], {}), '()\n', (3910, 3912), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3986, 4014), 'PyQt5.QtWidgets.QTableWidgetItem', 'QtWidgets.QTableWidgetItem', ([], {}), '()\n', (4012, 4014), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4088, 4116), 'PyQt5.QtWidgets.QTableWidgetItem', 'QtWidgets.QTableWidgetItem', ([], {}), '()\n', (4114, 4116), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4459, 4501), 'PyQt5.QtWidgets.QProgressBar', 'QtWidgets.QProgressBar', (['self.centralwidget'], {}), '(self.centralwidget)\n', (4481, 4501), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5267, 5290), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ([], {}), '()\n', (5288, 5290), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5383, 5424), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (5404, 5424), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5562, 5660), 'PyQt5.QtWidgets.QSpacerItem', 'QtWidgets.QSpacerItem', (['(40)', '(20)', 'QtWidgets.QSizePolicy.Expanding', 'QtWidgets.QSizePolicy.Minimum'], {}), '(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.\n QSizePolicy.Minimum)\n', (5583, 5660), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5735, 5776), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (5756, 5776), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5963, 6004), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (5984, 6004), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6148, 6189), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (6169, 6189), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6331, 6372), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (6352, 6372), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6663, 6693), 'PyQt5.QtWidgets.QMenuBar', 'QtWidgets.QMenuBar', (['MainWindow'], {}), '(MainWindow)\n', (6681, 6693), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6871, 6903), 'PyQt5.QtWidgets.QStatusBar', 'QtWidgets.QStatusBar', (['MainWindow'], {}), '(MainWindow)\n', (6891, 6903), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7050, 7099), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['MainWindow'], {}), '(MainWindow)\n', (7087, 7099), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6727, 6754), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(0)', '(0)', '(500)', '(21)'], {}), '(0, 0, 500, 21)\n', (6739, 6754), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')] |
# Script to add photometry from the BDNYC database into SIMPLE
from astrodbkit2.astrodb import Database, and_
from sqlalchemy import types # for BDNYC column overrides
verbose = True
# --------------------------------------------------------------------------------------
# Establish connection to databases
# Note that special parameters have to be passed to allow the BDNYC schema work properly
connection_string = 'sqlite:///../BDNYCdevdb/bdnycdev.db'
bdnyc = Database(connection_string,
reference_tables=['changelog', 'data_requests', 'publications', 'ignore', 'modes',
'systems', 'telescopes', 'versions', 'instruments'],
primary_table='sources',
primary_table_key='id',
foreign_key='source_id',
column_type_overrides={'spectra.spectrum': types.TEXT(),
'spectra.local_spectrum': types.TEXT()})
# SIMPLE
connection_string = 'sqlite:///SIMPLE.db'
db = Database(connection_string)
# --------------------------------------------------------------------------------------
# Reload from directory, if needed
db.load_database('data', verbose=False)
# --------------------------------------------------------------------------------------
# For each source in SIMPLE, search in BDNYC and grab specified photometry
# Will be only grabbing WISE data for now
telescope = 'WISE'
band_list = ['WISE_W1', 'WISE_W2', 'WISE_W3', 'WISE_W4']
# Don't include sources that already have photometry in these bands
temp = db.query(db.Photometry.c.source).filter(db.Photometry.c.band.in_(band_list)).distinct().all()
sources_with_photometry = [s[0] for s in temp]
sources = db.query(db.Sources).\
filter(db.Sources.c.source.notin_(sources_with_photometry)).\
pandas()
# Get the BDNYC source_id values for our SIMPLE sources
source_dict = {}
for i, row in sources.iterrows():
bd_source = bdnyc.search_object(row['source'], output_table='sources',
table_names={'sources': ['designation', 'names']},
fmt='pandas')
if len(bd_source) != 1:
print(f"ERROR matching {row['source']}")
else:
source_dict[row['source']] = int(bd_source['id'].values[0])
# Grab only photometry in the band list that has version flags and publications
for source, bdnyc_id in source_dict.items():
print(f'{source} : {bdnyc_id}')
bd_data = bdnyc.query(bdnyc.photometry).\
filter(and_(bdnyc.photometry.c.source_id == bdnyc_id,
bdnyc.photometry.c.publication_shortname.isnot(None),
bdnyc.photometry.c.version <= 2,
bdnyc.photometry.c.band.in_(band_list))).\
pandas()
if len(bd_data) == 0:
continue
# Insert into the database
new_data = []
for i, row in bd_data.iterrows():
old_data = db.query(db.Photometry).filter(db.Photometry.c.source == source).pandas()
if len(old_data) > 0:
if (row['band'], row['publication_shortname']) in zip(old_data['band'].tolist(),
old_data['reference'].tolist()):
if verbose:
print(f"{source}: {row['band']} already in database for reference {row['publication_shortname']}")
new_data = None
continue
datum = {'source': source,
'band': row['band'],
'magnitude': row['magnitude'],
'magnitude_error': row['magnitude_unc'],
'telescope': 'WISE',
'reference': row['publication_shortname'],
'epoch': row['epoch'],
'comments': row['comments']}
new_data.append(datum)
if new_data is not None:
print(f"{source} : Ingesting new data: {new_data}")
db.Photometry.insert().execute(new_data)
# --------------------------------------------------------------------------------------
# Output changes to directory
db.save_database('data')
| [
"astrodbkit2.astrodb.Database",
"sqlalchemy.types.TEXT"
] | [((1021, 1048), 'astrodbkit2.astrodb.Database', 'Database', (['connection_string'], {}), '(connection_string)\n', (1029, 1048), False, 'from astrodbkit2.astrodb import Database, and_\n'), ((869, 881), 'sqlalchemy.types.TEXT', 'types.TEXT', ([], {}), '()\n', (879, 881), False, 'from sqlalchemy import types\n'), ((949, 961), 'sqlalchemy.types.TEXT', 'types.TEXT', ([], {}), '()\n', (959, 961), False, 'from sqlalchemy import types\n')] |
#!/usr/bin/python3
import scrapy
from yarl import URL
from datetime import datetime
import json
import newspaper.spiders.config as config
from newspaper.spiders.generate_links import generate_links as generate
from newspaper.spiders.makepdf import make_pdf
class IndianExpressSpider(scrapy.Spider):
name = "indian_express"
allowed_domains = [config.INDIAN_EXPRESS_ROOT]
tag = ""
def start_requests(self):
with open(config.JSON_FILE) as json_file:
terms = json.load(json_file)
terms = terms["search"]
for term in terms:
self.tag = term
urls = generate(self.name, term)
for url in urls:
yield scrapy.Request(url, self.parse)
def parse(self, response):
response_links = response.css("div.details")
for response_link in response_links:
anchor = response_link.css("h3 a::attr(href)").get()
name = response_link.css("h3 a::text").get()
article_name = name.replace(" ", "_")
indian_express_link = str(anchor)
try:
date_list = response_link.css("time::text").getall()
date_list.reverse()
date = str(date_list[0])
date = date[14:-11].replace(" ", "")
date = datetime.strptime(date, "%B%d,%Y").strftime("%Y-%b-%d")
print(date)
mpdf = make_pdf(
str(self.name),
str(indian_express_link),
str(date),
str(self.tag),
str(article_name),
)
mpdf.print()
except IndexError:
pass
| [
"newspaper.spiders.generate_links.generate_links",
"datetime.datetime.strptime",
"json.load",
"scrapy.Request"
] | [((494, 514), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (503, 514), False, 'import json\n'), ((637, 662), 'newspaper.spiders.generate_links.generate_links', 'generate', (['self.name', 'term'], {}), '(self.name, term)\n', (645, 662), True, 'from newspaper.spiders.generate_links import generate_links as generate\n'), ((722, 753), 'scrapy.Request', 'scrapy.Request', (['url', 'self.parse'], {}), '(url, self.parse)\n', (736, 753), False, 'import scrapy\n'), ((1341, 1375), 'datetime.datetime.strptime', 'datetime.strptime', (['date', '"""%B%d,%Y"""'], {}), "(date, '%B%d,%Y')\n", (1358, 1375), False, 'from datetime import datetime\n')] |
from django.shortcuts import render
from django.contrib.auth.hashers import make_password
from django.contrib.auth import authenticate, login, logout
from django.views.generic import View, ListView, DetailView,TemplateView
from django.urls import reverse
from django.http import HttpResponseRedirect,HttpRequest, HttpResponse, JsonResponse, QueryDict, Http404
from django.conf import settings
from users.models import UserProfile
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from pure_pagination.mixins import PaginationMixin
from django.db.models import Q
from django.contrib.auth.hashers import make_password
from users.forms import UserProfileForm, UserUpdateForm
from django.contrib.auth.models import Group, Permission
class UserView(LoginRequiredMixin, PaginationMixin, ListView):
"""
组功能
"""
model = UserProfile
template_name = 'user/user_list.html'
context_object_name = 'userlist'
paginate_by = 2
keyword = ''
login_url = '/login'
def get_queryset(self):
queryset = super(UserView, self).get_queryset()
self.keyword = self.request.GET.get("keyword", "").strip()
if self.keyword:
queryset = queryset.filter(Q(name_cn__icontains=self.keyword)|
Q(username__icontains=self.keyword))
return queryset
def get_context_data(self, **kwargs):
context = super(UserView, self).get_context_data(**kwargs)
context['keyword'] = self.keyword
return context
def post(self, request):
_userForm = UserProfileForm(request.POST)
if _userForm.is_valid():
try:
_userForm.cleaned_data['password'] = make_password("<PASSWORD>")
_userForm.cleaned_data['is_active'] = True
data = _userForm.cleaned_data
self.model.objects.create(**data)
res = {'code': 0, 'result': '添加用户成功'}
except:
res = {'code': 1, 'errmsg': '添加用户失败'}
else:
# 获取自定义的表单错误的两种常用方式
# print(_userForm.errors)
print(_userForm.errors.as_json())
# print(_userForm.errors['phone'][0]) # 手机号码非法
# print(_userForm.errors['username'][0]) # 已存在一位使用该名字的用户
res = {'code': 1, 'errmsg': _userForm.errors.as_json()}
return JsonResponse(res, safe=True)
def delete(self, request):
data = QueryDict(request.body).dict()
print(data)
pk = data.get('id')
try:
if pk == 1:
res = {'code': 1, 'result': '不能删除管理员'}
else:
user = self.model.objects.filter(pk=pk)
user.delete()
res = {'code':0,'result':'删除用户成功'}
except:
res = {'code':1, 'result':'删除用户失败'}
return JsonResponse(res,safe=True)
class UserDetailView(LoginRequiredMixin, DetailView):
model = UserProfile
template_name = 'user/user_edit.html'
context_object_name = 'user'
def post(self, request, **kwargs):
print(
request.POST) # <QueryDict: {'id': ['7'], 'username': ['aa'], 'name_cn': ['bb'], 'phone': ['13305779168']}>
print(kwargs) # {'pk': '7'}
print(request.body) # b'id=7&username=aa&name_cn=bb&phone=13305779168'
pk = kwargs.get("pk")
data = QueryDict(request.body).dict()
print(data) # {'id': '7', 'username': 'aa', 'name_cn': 'bb', 'phone': '13305779168'}
_userForm = UserUpdateForm(request.POST)
if _userForm.is_valid():
try:
self.model.objects.filter(pk=pk).update(**data)
res = {'code': 0, "next_url": reverse("users:user_list"), 'result': '更新用户成功'}
except:
res = {'code': 1, "next_url": reverse("users:user_list"), 'errmsg': '更新用户失败'}
else:
# 获取所有的表单错误
print(_userForm.errors)
res = {'code': 1, "next_url": reverse("users:user_list"), 'errmsg': _userForm.errors}
return render(request, settings.JUMP_PAGE, res)
| [
"users.forms.UserUpdateForm",
"django.contrib.auth.hashers.make_password",
"django.db.models.Q",
"django.http.JsonResponse",
"django.urls.reverse",
"users.forms.UserProfileForm",
"django.shortcuts.render",
"django.http.QueryDict"
] | [((1593, 1622), 'users.forms.UserProfileForm', 'UserProfileForm', (['request.POST'], {}), '(request.POST)\n', (1608, 1622), False, 'from users.forms import UserProfileForm, UserUpdateForm\n'), ((2381, 2409), 'django.http.JsonResponse', 'JsonResponse', (['res'], {'safe': '(True)'}), '(res, safe=True)\n', (2393, 2409), False, 'from django.http import HttpResponseRedirect, HttpRequest, HttpResponse, JsonResponse, QueryDict, Http404\n'), ((2865, 2893), 'django.http.JsonResponse', 'JsonResponse', (['res'], {'safe': '(True)'}), '(res, safe=True)\n', (2877, 2893), False, 'from django.http import HttpResponseRedirect, HttpRequest, HttpResponse, JsonResponse, QueryDict, Http404\n'), ((3533, 3561), 'users.forms.UserUpdateForm', 'UserUpdateForm', (['request.POST'], {}), '(request.POST)\n', (3547, 3561), False, 'from users.forms import UserProfileForm, UserUpdateForm\n'), ((4072, 4112), 'django.shortcuts.render', 'render', (['request', 'settings.JUMP_PAGE', 'res'], {}), '(request, settings.JUMP_PAGE, res)\n', (4078, 4112), False, 'from django.shortcuts import render\n'), ((1726, 1753), 'django.contrib.auth.hashers.make_password', 'make_password', (['"""<PASSWORD>"""'], {}), "('<PASSWORD>')\n", (1739, 1753), False, 'from django.contrib.auth.hashers import make_password\n'), ((2456, 2479), 'django.http.QueryDict', 'QueryDict', (['request.body'], {}), '(request.body)\n', (2465, 2479), False, 'from django.http import HttpResponseRedirect, HttpRequest, HttpResponse, JsonResponse, QueryDict, Http404\n'), ((3388, 3411), 'django.http.QueryDict', 'QueryDict', (['request.body'], {}), '(request.body)\n', (3397, 3411), False, 'from django.http import HttpResponseRedirect, HttpRequest, HttpResponse, JsonResponse, QueryDict, Http404\n'), ((4001, 4027), 'django.urls.reverse', 'reverse', (['"""users:user_list"""'], {}), "('users:user_list')\n", (4008, 4027), False, 'from django.urls import reverse\n'), ((1232, 1266), 'django.db.models.Q', 'Q', ([], {'name_cn__icontains': 'self.keyword'}), '(name_cn__icontains=self.keyword)\n', (1233, 1266), False, 'from django.db.models import Q\n'), ((1307, 1342), 'django.db.models.Q', 'Q', ([], {'username__icontains': 'self.keyword'}), '(username__icontains=self.keyword)\n', (1308, 1342), False, 'from django.db.models import Q\n'), ((3722, 3748), 'django.urls.reverse', 'reverse', (['"""users:user_list"""'], {}), "('users:user_list')\n", (3729, 3748), False, 'from django.urls import reverse\n'), ((3836, 3862), 'django.urls.reverse', 'reverse', (['"""users:user_list"""'], {}), "('users:user_list')\n", (3843, 3862), False, 'from django.urls import reverse\n')] |
from django.conf.urls import url
from blog import views
from django.conf import settings
from django.conf.urls.static import static
app_name="blog"
urlpatterns = [
url(r'(?P<id>\d+)/post_edit/$', views.post_edit, name="post_edit"),
url(r'(?P<reqid>\d+)/(?P<proid>\d+)/(?P<posid>\d+)/(?P<comid>\d+)/report/$', views.report, name="report"),
url(r'(?P<id>\d+)/post_delete/$', views.post_delete, name="post_delete"),
url(r'(?P<id>\d+)/cmnt_delete/$', views.cmnt_delete, name="cmnt_delete"),
url(r'(?P<id>\d+)/(?P<slug>[\w-]+)/$', views.post_detail, name="post_detail"),
url(r'post_create/$', views.post_create, name="post_create"),
url(r'edit_profile/$', views.edit_profile, name="edit_profile"),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | [
"django.conf.urls.static.static",
"django.conf.urls.url"
] | [((168, 234), 'django.conf.urls.url', 'url', (['"""(?P<id>\\\\d+)/post_edit/$"""', 'views.post_edit'], {'name': '"""post_edit"""'}), "('(?P<id>\\\\d+)/post_edit/$', views.post_edit, name='post_edit')\n", (171, 234), False, 'from django.conf.urls import url\n'), ((240, 352), 'django.conf.urls.url', 'url', (['"""(?P<reqid>\\\\d+)/(?P<proid>\\\\d+)/(?P<posid>\\\\d+)/(?P<comid>\\\\d+)/report/$"""', 'views.report'], {'name': '"""report"""'}), "('(?P<reqid>\\\\d+)/(?P<proid>\\\\d+)/(?P<posid>\\\\d+)/(?P<comid>\\\\d+)/report/$',\n views.report, name='report')\n", (243, 352), False, 'from django.conf.urls import url\n'), ((351, 423), 'django.conf.urls.url', 'url', (['"""(?P<id>\\\\d+)/post_delete/$"""', 'views.post_delete'], {'name': '"""post_delete"""'}), "('(?P<id>\\\\d+)/post_delete/$', views.post_delete, name='post_delete')\n", (354, 423), False, 'from django.conf.urls import url\n'), ((429, 501), 'django.conf.urls.url', 'url', (['"""(?P<id>\\\\d+)/cmnt_delete/$"""', 'views.cmnt_delete'], {'name': '"""cmnt_delete"""'}), "('(?P<id>\\\\d+)/cmnt_delete/$', views.cmnt_delete, name='cmnt_delete')\n", (432, 501), False, 'from django.conf.urls import url\n'), ((507, 585), 'django.conf.urls.url', 'url', (['"""(?P<id>\\\\d+)/(?P<slug>[\\\\w-]+)/$"""', 'views.post_detail'], {'name': '"""post_detail"""'}), "('(?P<id>\\\\d+)/(?P<slug>[\\\\w-]+)/$', views.post_detail, name='post_detail')\n", (510, 585), False, 'from django.conf.urls import url\n'), ((590, 649), 'django.conf.urls.url', 'url', (['"""post_create/$"""', 'views.post_create'], {'name': '"""post_create"""'}), "('post_create/$', views.post_create, name='post_create')\n", (593, 649), False, 'from django.conf.urls import url\n'), ((656, 718), 'django.conf.urls.url', 'url', (['"""edit_profile/$"""', 'views.edit_profile'], {'name': '"""edit_profile"""'}), "('edit_profile/$', views.edit_profile, name='edit_profile')\n", (659, 718), False, 'from django.conf.urls import url\n'), ((761, 822), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (767, 822), False, 'from django.conf.urls.static import static\n')] |
from django.urls import reverse
from django.contrib.auth import get_user_model
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.test import APITestCase, APIClient
from .fixture import RegiaoFactory, CidadeFactory
User = get_user_model()
class CidadeViewSetTests(APITestCase):
def setUp(self):
self.user = User.objects.create_user(
username='bruce', email='<EMAIL>', password='<PASSWORD>'
)
self.anon_user = User.objects.create_user(
username='jane', email='<EMAIL>', password='<PASSWORD>'
)
self.unath_client = APIClient()
self.client = APIClient()
token, _ = Token.objects.get_or_create(user=self.user)
self.client.credentials(HTTP_AUTHORIZATION=f'Token {token.key}')
self.regiao = RegiaoFactory.create(id=1)
def test_perform_create(self):
data = {
'nome': 'Gotham',
'regiao': self.regiao.id
}
response = self.unath_client.post(reverse('cidade-list'), data=data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.post(reverse('cidade-list'), data=data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data['nome'], data['nome'])
self.assertEqual(response.data['slug'], 'gotham')
def test_list(self):
CidadeFactory.create_batch(5, regiao=self.regiao)
response = self.unath_client.get(reverse('cidade-list'))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.get(reverse('cidade-list'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue(len(response.data), 5)
def test_retrieve(self):
cidade = CidadeFactory.create(id=10, regiao=self.regiao)
response = self.unath_client.get(reverse('cidade-detail', args=[10]))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.get(reverse('cidade-detail', args=[10]))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['nome'], cidade.nome)
def test_update(self):
cidade = CidadeFactory.create(id=21, regiao=self.regiao)
data = {'nome': 'Gotham City', 'regiao': self.regiao.id}
self.assertNotEqual(cidade.nome, data['nome'])
response = self.unath_client.put(reverse('cidade-detail', args=[21]), data=data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.put(reverse('cidade-detail', args=[21]), data=data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['nome'], data['nome'])
self.assertEqual(response.data['slug'], 'gotham-city')
def test_partial_update(self):
cidade = CidadeFactory.create(id=22, regiao=self.regiao)
data = {'nome': 'Gotham City'}
self.assertNotEqual(cidade.nome, data['nome'])
response = self.unath_client.patch(reverse('cidade-detail', args=[22]), data=data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.patch(reverse('cidade-detail', args=[22]), data=data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['nome'], data['nome'])
def test_destroy(self):
CidadeFactory.create(id=15, regiao=self.regiao)
response = self.unath_client.get(reverse('cidade-detail', args=[15]))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.get(reverse('cidade-list'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue(len(response.data), 1)
response = self.client.delete(reverse('cidade-detail', args=[15]))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
response = self.client.get(reverse('cidade-list'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 0)
| [
"rest_framework.authtoken.models.Token.objects.get_or_create",
"rest_framework.test.APIClient",
"django.contrib.auth.get_user_model",
"django.urls.reverse"
] | [((278, 294), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (292, 294), False, 'from django.contrib.auth import get_user_model\n'), ((641, 652), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (650, 652), False, 'from rest_framework.test import APITestCase, APIClient\n'), ((676, 687), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (685, 687), False, 'from rest_framework.test import APITestCase, APIClient\n'), ((707, 750), 'rest_framework.authtoken.models.Token.objects.get_or_create', 'Token.objects.get_or_create', ([], {'user': 'self.user'}), '(user=self.user)\n', (734, 750), False, 'from rest_framework.authtoken.models import Token\n'), ((1046, 1068), 'django.urls.reverse', 'reverse', (['"""cidade-list"""'], {}), "('cidade-list')\n", (1053, 1068), False, 'from django.urls import reverse\n'), ((1195, 1217), 'django.urls.reverse', 'reverse', (['"""cidade-list"""'], {}), "('cidade-list')\n", (1202, 1217), False, 'from django.urls import reverse\n'), ((1548, 1570), 'django.urls.reverse', 'reverse', (['"""cidade-list"""'], {}), "('cidade-list')\n", (1555, 1570), False, 'from django.urls import reverse\n'), ((1685, 1707), 'django.urls.reverse', 'reverse', (['"""cidade-list"""'], {}), "('cidade-list')\n", (1692, 1707), False, 'from django.urls import reverse\n'), ((1960, 1995), 'django.urls.reverse', 'reverse', (['"""cidade-detail"""'], {'args': '[10]'}), "('cidade-detail', args=[10])\n", (1967, 1995), False, 'from django.urls import reverse\n'), ((2110, 2145), 'django.urls.reverse', 'reverse', (['"""cidade-detail"""'], {'args': '[10]'}), "('cidade-detail', args=[10])\n", (2117, 2145), False, 'from django.urls import reverse\n'), ((2530, 2565), 'django.urls.reverse', 'reverse', (['"""cidade-detail"""'], {'args': '[21]'}), "('cidade-detail', args=[21])\n", (2537, 2565), False, 'from django.urls import reverse\n'), ((2691, 2726), 'django.urls.reverse', 'reverse', (['"""cidade-detail"""'], {'args': '[21]'}), "('cidade-detail', args=[21])\n", (2698, 2726), False, 'from django.urls import reverse\n'), ((3170, 3205), 'django.urls.reverse', 'reverse', (['"""cidade-detail"""'], {'args': '[22]'}), "('cidade-detail', args=[22])\n", (3177, 3205), False, 'from django.urls import reverse\n'), ((3333, 3368), 'django.urls.reverse', 'reverse', (['"""cidade-detail"""'], {'args': '[22]'}), "('cidade-detail', args=[22])\n", (3340, 3368), False, 'from django.urls import reverse\n'), ((3636, 3671), 'django.urls.reverse', 'reverse', (['"""cidade-detail"""'], {'args': '[15]'}), "('cidade-detail', args=[15])\n", (3643, 3671), False, 'from django.urls import reverse\n'), ((3786, 3808), 'django.urls.reverse', 'reverse', (['"""cidade-list"""'], {}), "('cidade-list')\n", (3793, 3808), False, 'from django.urls import reverse\n'), ((3963, 3998), 'django.urls.reverse', 'reverse', (['"""cidade-detail"""'], {'args': '[15]'}), "('cidade-detail', args=[15])\n", (3970, 3998), False, 'from django.urls import reverse\n'), ((4111, 4133), 'django.urls.reverse', 'reverse', (['"""cidade-list"""'], {}), "('cidade-list')\n", (4118, 4133), False, 'from django.urls import reverse\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-05 14:24
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('imageledger', '0010_auto_20161130_1814'),
]
operations = [
migrations.AlterModelOptions(
name='favorite',
options={'ordering': ['-updated_on']},
),
migrations.AlterModelOptions(
name='list',
options={'ordering': ['-updated_on']},
),
]
| [
"django.db.migrations.AlterModelOptions"
] | [((296, 385), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""favorite"""', 'options': "{'ordering': ['-updated_on']}"}), "(name='favorite', options={'ordering': [\n '-updated_on']})\n", (324, 385), False, 'from django.db import migrations\n'), ((425, 510), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""list"""', 'options': "{'ordering': ['-updated_on']}"}), "(name='list', options={'ordering': ['-updated_on']}\n )\n", (453, 510), False, 'from django.db import migrations\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pickle
import pandas as pd
import easygui as eg
from collections import defaultdict
from PyQt5 import QtCore as qtc
class file_entry():
def __init__(self,route_response_label,route_destiny_label):
self.route_response_label=route_response_label
self.route_destiny_label=route_destiny_label
self.dwc_terms=self.dict_loader() #Diccionario
def dict_loader(self): #Apertura del dict
dict_path=r"documents\dwc_terms\dwc_fieldName_dict.pkl"
with open(dict_path , 'rb') as dict_file:
return pickle.load(dict_file)
def file_opener(self): #Apertura del archivo, con el index correcto, se borran columnas sin datos
try:
file_path=self.route_response_label
if file_path.endswith('.xlsx') or file_path.endswith('.xls'):
data=pd.read_excel(file_path,header=0)
elif file_path.endswith('.csv'):
data=pd.read_csv(file_path,header=0,sep=';') #ver como variar de ; o ,
except:
print("No hemos podido encontrar la ruta del archivo Excel")
try:
data.dropna(axis=1, how='all',inplace=True)
except:
pass
return data
def darwinizer(self): #Encuentra match entre el df y el diccionario
dataframe=self.file_opener() #proveniente de la funcion file opener
dwc_terms_keys=self.dwc_terms.keys()
dataframe_columns=dataframe.columns.tolist()
darwinizer_list=[] #generar una lista que contenga tuplas verbatimFieldName,stdFieldName
#iterador para encontrar tuplas verbatimFieldName,stdFieldName
for verbatimFieldName in dataframe_columns:
for stdFieldName in dwc_terms_keys:
if verbatimFieldName in self.dwc_terms.get(stdFieldName):
darwinizer_list.append((verbatimFieldName,stdFieldName)) #tupla del match
return dataframe,darwinizer_list
def set_df_index(self,data):
columns_df=data.columns.tolist()
msg="Seleccione una columna para ser el indice de la base de datos\n Este debe ser un valor unico para cada especimen"
title="Seleccion"
indexo=eg.choicebox(msg,title,columns_df)
data=data.set_index(indexo, drop = True)
return data
def dataframe_label_transformer(self,data,listWidget,darwinizer_list):
column_dict=defaultdict()
selected_indexes=[x.row() for x in listWidget.selectedIndexes()]
if not selected_indexes:
column_dict=dict(darwinizer_list)
else:
i=0
while i<=len(darwinizer_list)-1:
if i not in selected_indexes:
column_dict[darwinizer_list[i][0]]=darwinizer_list[i][1] #Fix this method not proud of it
else: pass
i=i+1
data=data.rename(columns=column_dict)
os.makedirs(os.path.dirname(f"{self.route_destiny_label}\dwc_terms\df_column_dict_rename.pkl"), exist_ok=True)
f = open(f"{self.route_destiny_label}\dwc_terms\df_column_dict_rename.pkl","wb")
pickle.dump(column_dict,f)
f.close()
os.makedirs(os.path.dirname(f"{self.route_destiny_label}\dwc_terms\df_columns_renamed.pkl"), exist_ok=True)
f = open(f"{self.route_destiny_label}\dwc_terms\df_columns_renamed.pkl","wb")
pickle.dump(data.columns.tolist(),f)
f.close()
listWidget.clear()
#return data
def dwc_label_checker(self,listWidget):
with open(f"{self.route_destiny_label}\dwc_terms\df_columns_renamed.pkl", 'rb') as f:
df_columns = pickle.load(f)
not_recommended_labels=[]
for labels in df_columns:
if labels not in self.dwc_terms.keys():
not_recommended_labels.append(labels)
listWidget.addItems(df_columns)
for i in not_recommended_labels:
matching_items = listWidget.findItems(i, qtc.Qt.MatchExactly)
for item in matching_items:
item.setSelected(True)
return df_columns
def dwc_label_transformer(self,listWidget,df_columns):
selected_indexes=[x.row() for x in listWidget.selectedIndexes()]
df_selected_dwc_labels=[]
i=0
while i <= len(df_columns)-1:
if i not in selected_indexes:
df_selected_dwc_labels.append(df_columns[i])
i=i+1
os.makedirs(os.path.dirname(f"{self.route_destiny_label}\dwc_terms\df_selected_dwc_labels.pkl"), exist_ok=True)
f = open(f"{self.route_destiny_label}\dwc_terms\df_selected_dwc_labels.pkl","wb")
pickle.dump(df_selected_dwc_labels,f)
f.close()
listWidget.clear()
def visitors_label_filler(self,listWidget):
with open(f"{self.route_destiny_label}\dwc_terms\df_columns_renamed.pkl", 'rb') as f:
df_columns = pickle.load(f)
listWidget.addItems(df_columns)
return df_columns
def visitors_label_transformer(self,listWidget,df_columns):
selected_indexes=[x.row() for x in listWidget.selectedIndexes()]
df_selected_visitors_labels=[]
i=0
while i <= len(df_columns)-1:
if i not in selected_indexes:
df_selected_visitors_labels.append(df_columns[i])
i=i+1
os.makedirs(os.path.dirname(f"{self.route_destiny_label}\dwc_terms\df_selected_visitors_labels.pkl"), exist_ok=True)
f = open(f"{self.route_destiny_label}\dwc_terms\df_selected_visitors_labels.pkl","wb")
pickle.dump(df_selected_visitors_labels,f)
f.close()
listWidget.clear()
def sensitive_data(self):
pass
#Visitors va a seguir quedando en script refresh data base
#leer columnas de lista de dwc_file, visitors_file, estas se deben leer para refresh data base igual, esto queda para refresh data base
#Comparar columnas dwcorear
#quiero guardar una lista con columnas de dwc del archivo y otras para visitor
# así ya esta predefinido cuando se abra nuevamente
#Esta función debe ir en refresh data base o en darwinizer,
# mejor no ya que al guarar listas de columnas solo debo hacer esto una vez
#Borar dynamic links user info y todos los dwc_terms
#Herramienta para normalizar horas
#Georreferenciación
#Guardar df como csv y ese pasarlo para refreshdatabase | [
"pickle.dump",
"easygui.choicebox",
"pandas.read_csv",
"os.path.dirname",
"collections.defaultdict",
"pandas.read_excel",
"pickle.load"
] | [((2245, 2281), 'easygui.choicebox', 'eg.choicebox', (['msg', 'title', 'columns_df'], {}), '(msg, title, columns_df)\n', (2257, 2281), True, 'import easygui as eg\n'), ((2446, 2459), 'collections.defaultdict', 'defaultdict', ([], {}), '()\n', (2457, 2459), False, 'from collections import defaultdict\n'), ((3165, 3192), 'pickle.dump', 'pickle.dump', (['column_dict', 'f'], {}), '(column_dict, f)\n', (3176, 3192), False, 'import pickle\n'), ((4697, 4735), 'pickle.dump', 'pickle.dump', (['df_selected_dwc_labels', 'f'], {}), '(df_selected_dwc_labels, f)\n', (4708, 4735), False, 'import pickle\n'), ((5614, 5657), 'pickle.dump', 'pickle.dump', (['df_selected_visitors_labels', 'f'], {}), '(df_selected_visitors_labels, f)\n', (5625, 5657), False, 'import pickle\n'), ((611, 633), 'pickle.load', 'pickle.load', (['dict_file'], {}), '(dict_file)\n', (622, 633), False, 'import pickle\n'), ((2969, 3058), 'os.path.dirname', 'os.path.dirname', (['f"""{self.route_destiny_label}\\\\dwc_terms\\\\df_column_dict_rename.pkl"""'], {}), "(\n f'{self.route_destiny_label}\\\\dwc_terms\\\\df_column_dict_rename.pkl')\n", (2984, 3058), False, 'import os\n'), ((3230, 3316), 'os.path.dirname', 'os.path.dirname', (['f"""{self.route_destiny_label}\\\\dwc_terms\\\\df_columns_renamed.pkl"""'], {}), "(\n f'{self.route_destiny_label}\\\\dwc_terms\\\\df_columns_renamed.pkl')\n", (3245, 3316), False, 'import os\n'), ((3687, 3701), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3698, 3701), False, 'import pickle\n'), ((4499, 4589), 'os.path.dirname', 'os.path.dirname', (['f"""{self.route_destiny_label}\\\\dwc_terms\\\\df_selected_dwc_labels.pkl"""'], {}), "(\n f'{self.route_destiny_label}\\\\dwc_terms\\\\df_selected_dwc_labels.pkl')\n", (4514, 4589), False, 'import os\n'), ((4952, 4966), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4963, 4966), False, 'import pickle\n'), ((5406, 5501), 'os.path.dirname', 'os.path.dirname', (['f"""{self.route_destiny_label}\\\\dwc_terms\\\\df_selected_visitors_labels.pkl"""'], {}), "(\n f'{self.route_destiny_label}\\\\dwc_terms\\\\df_selected_visitors_labels.pkl')\n", (5421, 5501), False, 'import os\n'), ((893, 927), 'pandas.read_excel', 'pd.read_excel', (['file_path'], {'header': '(0)'}), '(file_path, header=0)\n', (906, 927), True, 'import pandas as pd\n'), ((993, 1034), 'pandas.read_csv', 'pd.read_csv', (['file_path'], {'header': '(0)', 'sep': '""";"""'}), "(file_path, header=0, sep=';')\n", (1004, 1034), True, 'import pandas as pd\n')] |
#!/usr/bin/env python3
# Copyright 2014-2018 PUNCH Cyber Analytics Group
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Overview
========
Scan payloads using Falcon Sandbox
"""
import requests
from time import sleep
from json import JSONDecodeError
from configparser import ConfigParser
from typing import Dict, Optional, Union, Tuple, List
from stoq import helpers
from stoq.plugins import WorkerPlugin
from stoq.exceptions import StoqPluginException
from stoq import Payload, RequestMeta, WorkerResponse
class FalconSandboxPlugin(WorkerPlugin):
def __init__(self, config: ConfigParser, plugin_opts: Optional[Dict]) -> None:
super().__init__(config, plugin_opts)
self.sandbox_url = None
self.apikey = None
self.delay = 30
self.max_attempts = 10
self.useragent = 'Falcon Sandbox'
# Available environments ID:
# 300: 'Linux (Ubuntu 16.04, 64 bit)',
# 200: 'Android Static Analysis’,
# 160: 'Windows 10 64 bit’,
# 110: 'Windows 7 64 bit’,
# 100: ‘Windows 7 32 bit’
self.environment_id = 160
self.wait_for_results = True
if plugin_opts and 'sandbox_url' in plugin_opts:
self.sandbox_url = plugin_opts['sandbox_url']
elif config.has_option('options', 'sandbox_url'):
self.sandbox_url = config.get('options', 'sandbox_url')
if plugin_opts and 'apikey' in plugin_opts:
self.apikey = plugin_opts['apikey']
elif config.has_option('options', 'apikey'):
self.apikey = config.get('options', 'apikey')
if plugin_opts and 'delay' in plugin_opts:
self.delay = int(plugin_opts['delay'])
elif config.has_option('options', 'delay'):
self.delay = int(config.get('options', 'delay'))
if plugin_opts and 'max_attempts' in plugin_opts:
self.max_attempts = int(plugin_opts['max_attempts'])
elif config.has_option('options', 'max_attempts'):
self.max_attempts = config.getint('options', 'max_attempts')
if plugin_opts and 'useragent' in plugin_opts:
self.useragent = plugin_opts['useragent']
elif config.has_option('options', 'useragent'):
self.useragent = config.get('options', 'useragent')
if plugin_opts and 'environment_id' in plugin_opts:
self.environment_id = int(plugin_opts['environment_id'])
elif config.has_option('options', 'environment_id'):
self.environment_id = config.getint('options', 'environment_id')
if plugin_opts and 'wait_for_results' in plugin_opts:
self.wait_for_results = plugin_opts['wait_for_results']
elif config.has_option('options', 'wait_for_results'):
self.wait_for_results = config.getboolean('options', 'wait_for_results')
if not self.sandbox_url:
raise StoqPluginException("Falcon Sandbox URL was not provided")
if not self.apikey:
raise StoqPluginException("Falcon Sandbox API Key was not provided")
def scan(self, payload: Payload, request_meta: RequestMeta) -> WorkerResponse:
"""
Scan payloads using Falcon Sandbox
"""
errors = None
url = f'{self.sandbox_url}/submit/file'
headers = {'api-key': self.apikey, 'user-agent': self.useragent}
filename = payload.payload_meta.extra_data.get(
'filename', helpers.get_sha1(payload.content)
)
if isinstance(filename, bytes):
filename = filename.decode()
files = {'file': (filename, payload.content)}
data = {'environment_id': self.environment_id}
response = requests.post(url, data=data, files=files, headers=headers)
response.raise_for_status()
results = response.json()
if self.wait_for_results:
results, errors = self._parse_results(results['job_id'])
return WorkerResponse(results, errors=errors)
def _parse_results(
self, job_id: str
) -> Tuple[Union[Dict, None], Union[List[str], None]]:
"""
Wait for a scan to complete and then parse the results
"""
count = 0
err = None
while count < self.max_attempts:
sleep(self.delay)
try:
url = f'{self.sandbox_url}/report/{job_id}/summary'
headers = {'api-key': self.apikey, 'user-agent': self.useragent}
response = requests.get(url, headers=headers)
response.raise_for_status()
result = response.json()
if result['state'] not in ('IN_QUEUE', 'IN_PROGRESS'):
return result, None
except (JSONDecodeError, KeyError) as err:
err = str(err)
finally:
count += 1
if count >= self.max_attempts:
err = f'Scan did not complete in time -- attempts: {count}'
return None, [err]
| [
"stoq.WorkerResponse",
"stoq.helpers.get_sha1",
"time.sleep",
"requests.get",
"stoq.exceptions.StoqPluginException",
"requests.post"
] | [((4238, 4297), 'requests.post', 'requests.post', (['url'], {'data': 'data', 'files': 'files', 'headers': 'headers'}), '(url, data=data, files=files, headers=headers)\n', (4251, 4297), False, 'import requests\n'), ((4486, 4524), 'stoq.WorkerResponse', 'WorkerResponse', (['results'], {'errors': 'errors'}), '(results, errors=errors)\n', (4500, 4524), False, 'from stoq import Payload, RequestMeta, WorkerResponse\n'), ((3440, 3498), 'stoq.exceptions.StoqPluginException', 'StoqPluginException', (['"""Falcon Sandbox URL was not provided"""'], {}), "('Falcon Sandbox URL was not provided')\n", (3459, 3498), False, 'from stoq.exceptions import StoqPluginException\n'), ((3546, 3608), 'stoq.exceptions.StoqPluginException', 'StoqPluginException', (['"""Falcon Sandbox API Key was not provided"""'], {}), "('Falcon Sandbox API Key was not provided')\n", (3565, 3608), False, 'from stoq.exceptions import StoqPluginException\n'), ((3985, 4018), 'stoq.helpers.get_sha1', 'helpers.get_sha1', (['payload.content'], {}), '(payload.content)\n', (4001, 4018), False, 'from stoq import helpers\n'), ((4813, 4830), 'time.sleep', 'sleep', (['self.delay'], {}), '(self.delay)\n', (4818, 4830), False, 'from time import sleep\n'), ((5024, 5058), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (5036, 5058), False, 'import requests\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.