code | apis | extract_api
---|---|---
"""SentencePiece Tokenization for Wiki Dataset
Example:
* python scripts/wiki_sp_tokenize_json.py --word --unigram
"""
import gzip
import json
import subprocess
from pathlib import Path
import sentencepiece as spm
import joblib
import numpy as np
import click
from tqdm import tqdm
from opencc import OpenCC
from wiki_tokenize_json import clean_text, filter_texts, SECTION_BLACKLIST
DATAPATH = "/mnt/Intel/zhwiki.json.gz"
TMPPATH = "/mnt/Intel/tmp_texts.txt"
TMPPATH_WORD = "/mnt/Intel/tmp_words.txt"
MODEL_PREFIX = "data/{algorithm}_{seg_word}_model"
CC = OpenCC('t2s')
VOC_SIZE = 7500
PAD = 1
UNK = 0
def json_to_txt():
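"""Read the gzipped wiki JSON-lines dump, drop blacklisted titles/sections, and write one cleaned, "。"-terminated sentence per line to TMPPATH."""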
with gzip.open(DATAPATH) as f:
with open(TMPPATH, "w") as fw:
for _, line in tqdm(enumerate(f.readlines())):
article = json.loads(line)
if "年表" in article["title"] or "列表" in article["title"]:
continue
for title, section in zip(article["section_titles"], article["section_texts"]):
title = CC.convert(title)
if title in SECTION_BLACKLIST:
continue
for paragraph in [x for x in section.split("\n") if len(x) > 50]:
paragraph = clean_text(paragraph)
if len(paragraph) < 200 or filter_texts(paragraph):
continue
for sentence in [x for x in paragraph.split("。") if len(x) > 10]:
fw.write(sentence + "。\n")
def fit_model(seg_word=True, algorithm="bpe"):
if not Path(TMPPATH).exists():
json_to_txt()
if seg_word:
print("Performing word segmentation...")
res = subprocess.run([
"thulac", "-model_dir", "/mnt/SSD_Data/openai_nlp/THULAC/models/",
"-seg_only", "-input", TMPPATH, "-output", TMPPATH_WORD
], stdout=subprocess.PIPE)
print(res)
# Train Model
print("Training model...")
spm.SentencePieceTrainer.Train(
'--input={} --model_prefix={} --vocab_size={} '
'--input_sentence_size=20000000 '
'--character_coverage=0.995 --model_type={algorithm}'.format(
TMPPATH_WORD if seg_word else TMPPATH,
MODEL_PREFIX.format(algorithm=algorithm, seg_word=seg_word),
VOC_SIZE, algorithm=algorithm  # honor the requested model type instead of hard-coding "unigram"
)
)
def tokenize(seg_word=True, algorithm="bpe"):
print("Tokenizing...")
sp = spm.SentencePieceProcessor()
sp.Load(MODEL_PREFIX.format(
algorithm=algorithm, seg_word=seg_word) + ".model")
tokens = []
with open(TMPPATH_WORD if seg_word else TMPPATH) as f:
for _, sentence in tqdm(enumerate(f.readlines())):
tokens.append(
np.array(sp.EncodeAsIds(sentence))
)
joblib.dump(np.array(tokens), f"data/tokens_{algorithm}_{seg_word}.pkl")
@click.command()
@click.option("--word", is_flag=True)
@click.option("--bpe/--unigram", default=True)
def main(word, bpe):
seg_word = bool(word)
algorithm = "bpe" if bpe else "unigram"
# fit_model(seg_word, algorithm)
tokenize(seg_word, algorithm)
if __name__ == "__main__":
# pylint: disable=no-value-for-parameter
main()
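# Example (sketch, not part of the original script): after a `--word --unigram` run,
# the dumped token ids can be reloaded for downstream use, e.g.:
#   tokens = joblib.load("data/tokens_unigram_True.pkl")
#   print(tokens[0])  # SentencePiece ids of the first sentence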
| [
"subprocess.run",
"gzip.open",
"sentencepiece.SentencePieceProcessor",
"json.loads",
"wiki_tokenize_json.clean_text",
"click.option",
"click.command",
"opencc.OpenCC",
"pathlib.Path",
"numpy.array",
"wiki_tokenize_json.filter_texts"
] | [((564, 577), 'opencc.OpenCC', 'OpenCC', (['"""t2s"""'], {}), "('t2s')\n", (570, 577), False, 'from opencc import OpenCC\n'), ((2890, 2905), 'click.command', 'click.command', ([], {}), '()\n', (2903, 2905), False, 'import click\n'), ((2907, 2943), 'click.option', 'click.option', (['"""--word"""'], {'is_flag': '(True)'}), "('--word', is_flag=True)\n", (2919, 2943), False, 'import click\n'), ((2945, 2990), 'click.option', 'click.option', (['"""--bpe/--unigram"""'], {'default': '(True)'}), "('--bpe/--unigram', default=True)\n", (2957, 2990), False, 'import click\n'), ((2462, 2490), 'sentencepiece.SentencePieceProcessor', 'spm.SentencePieceProcessor', ([], {}), '()\n', (2488, 2490), True, 'import sentencepiece as spm\n'), ((640, 659), 'gzip.open', 'gzip.open', (['DATAPATH'], {}), '(DATAPATH)\n', (649, 659), False, 'import gzip\n'), ((1724, 1896), 'subprocess.run', 'subprocess.run', (["['thulac', '-model_dir', '/mnt/SSD_Data/openai_nlp/THULAC/models/',\n '-seg_only', '-input', TMPPATH, '-output', TMPPATH_WORD]"], {'stdout': 'subprocess.PIPE'}), "(['thulac', '-model_dir',\n '/mnt/SSD_Data/openai_nlp/THULAC/models/', '-seg_only', '-input',\n TMPPATH, '-output', TMPPATH_WORD], stdout=subprocess.PIPE)\n", (1738, 1896), False, 'import subprocess\n'), ((2826, 2842), 'numpy.array', 'np.array', (['tokens'], {}), '(tokens)\n', (2834, 2842), True, 'import numpy as np\n'), ((790, 806), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (800, 806), False, 'import json\n'), ((1597, 1610), 'pathlib.Path', 'Path', (['TMPPATH'], {}), '(TMPPATH)\n', (1601, 1610), False, 'from pathlib import Path\n'), ((1257, 1278), 'wiki_tokenize_json.clean_text', 'clean_text', (['paragraph'], {}), '(paragraph)\n', (1267, 1278), False, 'from wiki_tokenize_json import clean_text, filter_texts, SECTION_BLACKLIST\n'), ((1330, 1353), 'wiki_tokenize_json.filter_texts', 'filter_texts', (['paragraph'], {}), '(paragraph)\n', (1342, 1353), False, 'from wiki_tokenize_json import clean_text, filter_texts, SECTION_BLACKLIST\n')] |
from django.conf.urls import url, include
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('server.urls'), name='server'),
]
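# Note: url() from django.conf.urls is the pre-2.0 API; on newer Django versions
# the equivalent would be re_path()/path() from django.urls (left as-is here).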
| [
"django.conf.urls.include",
"django.conf.urls.url"
] | [((143, 174), 'django.conf.urls.url', 'url', (['"""^admin/"""', 'admin.site.urls'], {}), "('^admin/', admin.site.urls)\n", (146, 174), False, 'from django.conf.urls import url, include\n'), ((191, 213), 'django.conf.urls.include', 'include', (['"""server.urls"""'], {}), "('server.urls')\n", (198, 213), False, 'from django.conf.urls import url, include\n')] |
# Adapted for numpy/ma/cdms2 by convertcdms.py
# Import the modules needed for the tuturial
import vcs, cdms2 as cdms, cdutil, time, os, sys
# Open data file:
filepath = os.path.join(vcs.sample_data, 'clt.nc')
cdmsfile = cdms.open( filepath )
# Extract a 3 dimensional data set and get a subset of the time dimension
data = cdmsfile('clt', longitude=(-180, 180), latitude = (-90., 90.))
# Initial VCS:
v = vcs.init()
# Assign the variable "t_asd" to the persistent 'ASD' template.
t_asd = v.gettemplate( 'ASD' )
# Create a new template from the existing 'ASD' template
t2_asd = v.createtemplate( 'new', 'ASD' )
# Plot the data using the above 'ASD' template.
v.plot( data, t_asd )
# Remove picture segments from the page.
t_asd.list( )
t_asd.xlabel2.priority = 0
t_asd.xtic2.priority = 0
t_asd.xtic2.priority = 0
t_asd.legend.priority=0
# save current 'Mean' placement for x and y coordinates
xmean_current = t_asd.mean.x
ymean_current = t_asd.mean.y
# now change the placement
t_asd.mean.x=0.5 # move the "Mean" text to x-axis center
t_asd.mean.y=0.5 # move the "Mean" text to y-axis center
t_asd.data.priority = 0 # remove the data so the "Mean" text is visible.
v.update()
#############################################################################
# Place the colorbar legend vertically and to the right side
#############################################################################
t_asd.data.priority = 1
t_asd.legend.priority = 1
t_asd.legend.list() # list the legend members
v.mode=0 # turn the automatic update off
# move 'Mean' text back where it was
t_asd.mean.x = xmean_current
t_asd.mean.y = ymean_current
# move the right side of a plot to the left to make space for the legend
# first move the inner plot
t_asd.data.x2 = 0.87
# then move the surrounding box - the right y-axis
t_asd.box1.x2 = 0.87
# set the top x-axis (second x axis) to be blank
t_asd.xlabel2.priority = 0
t_asd.xtic2.priority = 0
# set the right y-axis (second y axis) to be blank (priority=0)
t_asd.ylabel2.priority = 0
t_asd.ytic2.priority = 0
# move the colorbar legend position, to be vertical and to the right
t_asd.legend.x1=0.9
t_asd.legend.y1=0.82
t_asd.legend.x2=0.95
t_asd.legend.y2=0.3
# clear the canvas and plot the template again
v.clear()
v.plot( data, t_asd )
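# Optionally persist the final canvas to disk (sketch; assumes the usual VCS
# canvas API is available in this build):
# v.png('asd_plot')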
| [
"cdms2.open",
"os.path.join",
"vcs.init"
] | [((171, 210), 'os.path.join', 'os.path.join', (['vcs.sample_data', '"""clt.nc"""'], {}), "(vcs.sample_data, 'clt.nc')\n", (183, 210), False, 'import vcs, cdms2 as cdms, cdutil, time, os, sys\n'), ((222, 241), 'cdms2.open', 'cdms.open', (['filepath'], {}), '(filepath)\n', (231, 241), True, 'import vcs, cdms2 as cdms, cdutil, time, os, sys\n'), ((409, 419), 'vcs.init', 'vcs.init', ([], {}), '()\n', (417, 419), False, 'import vcs, cdms2 as cdms, cdutil, time, os, sys\n')] |
"""Install or upgrade a bcbio-nextgen installation.
"""
from __future__ import print_function
import os
import subprocess
import sys
import yaml
from bcbiovm.docker import manage, mounts
DEFAULT_IMAGE = "chapmanb/bcbio-nextgen-devel"
def full(args, dockerconf):
"""Full installaction of docker image and data.
"""
updates = []
args = add_install_defaults(args)
if args.wrapper:
updates.append("wrapper scripts")
upgrade_bcbio_vm()
dmounts = mounts.prepare_system(args.datadir, dockerconf["biodata_dir"])
if args.install_tools:
updates.append("bcbio-nextgen code and third party tools")
pull(args, dockerconf)
_check_docker_image(args)
# Ensure external galaxy configuration in sync when doing tool upgrade
manage.run_bcbio_cmd(args.image, dmounts, ["upgrade"])
if args.install_data:
if len(args.genomes) == 0:
print("Data not installed, no genomes provided with `--genomes` flag")
sys.exit(1)
elif len(args.aligners) == 0:
print("Data not installed, no aligners provided with `--aligners` flag")
sys.exit(1)
else:
updates.append("biological data")
_check_docker_image(args)
manage.run_bcbio_cmd(args.image, dmounts, _get_cl(args))
_save_install_defaults(args)
if updates:
print("\nbcbio-nextgen-vm updated with latest %s" % " and ".join(updates))
else:
print("\nNo update targets specified, need '--wrapper', '--tools' or '--data'\n"
"See 'bcbio_vm.py upgrade -h' for more details.")
def _get_cl(args):
clargs = ["upgrade"]
if args.install_data:
clargs.append("--data")
for g in args.genomes:
clargs.extend(["--genomes", g])
for a in args.aligners:
clargs.extend(["--aligners", a])
return clargs
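# e.g. with install_data=True, genomes=["GRCh37"] and aligners=["bwa"] (illustrative
# values) this yields ["upgrade", "--data", "--genomes", "GRCh37", "--aligners", "bwa"]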
def upgrade_bcbio_vm():
"""Upgrade bcbio-nextgen-vm wrapper code.
"""
conda_bin = os.path.join(os.path.dirname(os.path.realpath(sys.executable)), "conda")
if not os.path.exists(conda_bin):
print("Cannot update bcbio-nextgen-vm; not installed with conda")
else:
subprocess.check_call([conda_bin, "install", "--yes",
"-c", "https://conda.binstar.org/bcbio",
"bcbio-nextgen-vm"])
def pull(args, dockerconf):
"""Pull down latest docker image, using export uploaded to S3 bucket.
Long term plan is to use the docker index server but upload size is
currently smaller with an exported gzipped image.
"""
print("Retrieving bcbio-nextgen docker image with code and tools")
# subprocess.check_call(["docker", "pull", image])
assert args.image, "Unspecified image name for docker import"
subprocess.check_call(["docker", "import", dockerconf["image_url"], args.image])
def _save_install_defaults(args):
"""Save arguments passed to installation to be used on subsequent upgrades.
Avoids needing to re-include genomes and aligners on command line.
"""
install_config = _get_config_file(args)
if install_config is None:
return
if os.path.exists(install_config) and os.path.getsize(install_config) > 0:
with open(install_config) as in_handle:
cur_config = yaml.load(in_handle)
else:
cur_config = {}
for attr in ["genomes", "aligners"]:
if not cur_config.get(attr):
cur_config[attr] = []
for x in getattr(args, attr):
if x not in cur_config[attr]:
cur_config[attr].append(str(x))
if args.image != DEFAULT_IMAGE and args.image:
cur_config["image"] = args.image
with open(install_config, "w") as out_handle:
yaml.dump(cur_config, out_handle, default_flow_style=False, allow_unicode=False)
def _get_install_defaults(args):
install_config = _get_config_file(args)
if install_config and os.path.exists(install_config) and os.path.getsize(install_config) > 0:
with open(install_config) as in_handle:
return yaml.load(in_handle)
return {}
def _add_docker_defaults(args, default_args):
if not hasattr(args, "image") or not args.image:
if default_args.get("image") and not default_args.get("images") == "None":
args.image = default_args["image"]
else:
args.image = DEFAULT_IMAGE
return args
def add_install_defaults(args):
"""Add previously saved installation defaults to command line arguments.
"""
default_args = _get_install_defaults(args)
for attr in ["genomes", "aligners"]:
for x in default_args.get(attr, []):
new_val = getattr(args, attr)
if x not in getattr(args, attr):
new_val.append(x)
setattr(args, attr, new_val)
args = _add_docker_defaults(args, default_args)
return args
def _check_docker_image(args):
"""Ensure docker image exists.
"""
for image in subprocess.check_output(["docker", "images"]).split("\n"):
parts = image.split()
if len(parts) > 1 and parts[0] == args.image:
return
raise ValueError("Could not find docker image %s in local repository" % args.image)
def docker_image_arg(args):
if not hasattr(args, "image") or not args.image:
default_args = _get_install_defaults(args)
args = _add_docker_defaults(args, default_args)
_check_docker_image(args)
return args
def _get_config_file(args):
config_dir = os.path.join(args.datadir, "config")
if not os.path.exists(config_dir):
os.makedirs(config_dir)
return os.path.join(config_dir, "install-params.yaml")
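# For reference, the install-params.yaml written by _save_install_defaults()
# looks roughly like (sketch, illustrative values):
#   aligners:
#   - bwa
#   genomes:
#   - GRCh37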
| [
"yaml.load",
"os.makedirs",
"os.path.getsize",
"bcbiovm.docker.manage.run_bcbio_cmd",
"yaml.dump",
"os.path.exists",
"os.path.realpath",
"subprocess.check_output",
"bcbiovm.docker.mounts.prepare_system",
"sys.exit",
"os.path.join",
"subprocess.check_call"
] | [((486, 548), 'bcbiovm.docker.mounts.prepare_system', 'mounts.prepare_system', (['args.datadir', "dockerconf['biodata_dir']"], {}), "(args.datadir, dockerconf['biodata_dir'])\n", (507, 548), False, 'from bcbiovm.docker import manage, mounts\n'), ((2802, 2887), 'subprocess.check_call', 'subprocess.check_call', (["['docker', 'import', dockerconf['image_url'], args.image]"], {}), "(['docker', 'import', dockerconf['image_url'], args.image]\n )\n", (2823, 2887), False, 'import subprocess\n'), ((5526, 5562), 'os.path.join', 'os.path.join', (['args.datadir', '"""config"""'], {}), "(args.datadir, 'config')\n", (5538, 5562), False, 'import os\n'), ((5645, 5692), 'os.path.join', 'os.path.join', (['config_dir', '"""install-params.yaml"""'], {}), "(config_dir, 'install-params.yaml')\n", (5657, 5692), False, 'import os\n'), ((795, 849), 'bcbiovm.docker.manage.run_bcbio_cmd', 'manage.run_bcbio_cmd', (['args.image', 'dmounts', "['upgrade']"], {}), "(args.image, dmounts, ['upgrade'])\n", (815, 849), False, 'from bcbiovm.docker import manage, mounts\n'), ((2071, 2096), 'os.path.exists', 'os.path.exists', (['conda_bin'], {}), '(conda_bin)\n', (2085, 2096), False, 'import os\n'), ((2190, 2309), 'subprocess.check_call', 'subprocess.check_call', (["[conda_bin, 'install', '--yes', '-c', 'https://conda.binstar.org/bcbio',\n 'bcbio-nextgen-vm']"], {}), "([conda_bin, 'install', '--yes', '-c',\n 'https://conda.binstar.org/bcbio', 'bcbio-nextgen-vm'])\n", (2211, 2309), False, 'import subprocess\n'), ((3174, 3204), 'os.path.exists', 'os.path.exists', (['install_config'], {}), '(install_config)\n', (3188, 3204), False, 'import os\n'), ((3764, 3849), 'yaml.dump', 'yaml.dump', (['cur_config', 'out_handle'], {'default_flow_style': '(False)', 'allow_unicode': '(False)'}), '(cur_config, out_handle, default_flow_style=False, allow_unicode=False\n )\n', (3773, 3849), False, 'import yaml\n'), ((3949, 3979), 'os.path.exists', 'os.path.exists', (['install_config'], {}), '(install_config)\n', (3963, 3979), False, 'import os\n'), ((5574, 5600), 'os.path.exists', 'os.path.exists', (['config_dir'], {}), '(config_dir)\n', (5588, 5600), False, 'import os\n'), ((5610, 5633), 'os.makedirs', 'os.makedirs', (['config_dir'], {}), '(config_dir)\n', (5621, 5633), False, 'import os\n'), ((1006, 1017), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1014, 1017), False, 'import sys\n'), ((2016, 2048), 'os.path.realpath', 'os.path.realpath', (['sys.executable'], {}), '(sys.executable)\n', (2032, 2048), False, 'import os\n'), ((3209, 3240), 'os.path.getsize', 'os.path.getsize', (['install_config'], {}), '(install_config)\n', (3224, 3240), False, 'import os\n'), ((3319, 3339), 'yaml.load', 'yaml.load', (['in_handle'], {}), '(in_handle)\n', (3328, 3339), False, 'import yaml\n'), ((3984, 4015), 'os.path.getsize', 'os.path.getsize', (['install_config'], {}), '(install_config)\n', (3999, 4015), False, 'import os\n'), ((4088, 4108), 'yaml.load', 'yaml.load', (['in_handle'], {}), '(in_handle)\n', (4097, 4108), False, 'import yaml\n'), ((4995, 5040), 'subprocess.check_output', 'subprocess.check_output', (["['docker', 'images']"], {}), "(['docker', 'images'])\n", (5018, 5040), False, 'import subprocess\n'), ((1153, 1164), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1161, 1164), False, 'import sys\n')] |
import enum
from pandas.io.pytables import DuplicateWarning
from py2neo import Node, Relationship, Graph, NodeMatcher
import pandas as pd
from operator import itemgetter
from typing import List, Dict
import random
graph = Graph("http://localhost:7474", username="neo4j", password='<PASSWORD>')
main_ingr = set(['apple', 'banana', 'bell pepper', 'broccoli', 'cabbage', 'carrot', 'cheese', 'coconut', 'cucumber', 'egg', 'fish', 'grapes', 'lemon', 'mango', 'milk', 'mushroom', 'oranges', 'peach', 'pear', 'pineapple', 'potatoes', 'pumpkin', 'seafood', 'shrimp', 'strawberry', 'tomatoes', 'watermelon', 'winter melon', 'garlic', 'corn', 'eggplant', 'lettuce', 'onion', 'scallion', 'chicken', 'beef', 'lamb', 'pork', 'sauce', 'duck', 'meatball', 'wine', 'berries', 'crabmeat', 'kiwi', 'bitter melon', 'pepper', 'peas', 'ginger', 'shells', 'chili', 'ham', 'sausage', 'butter', 'bread', 'rice', 'vanilla'])
def getRecipes(
ingr: List[str],
topk: int = 10,
dietaryList: List[str] = None,
cuisine: str = None,
skip: int = 0) -> List[Dict]:
n = len(ingr)
if (n == 0): return [{}]
ingr_type = {}
for it in ingr:
it = it.lower()
if it in main_ingr:
ingr_type[it] = (it.upper(), 'Has_Main_Ingredient', 'main_ingredient')
print(it, ' is main ingredient')
else:
ingr_type[it] = (it.lower(), 'Has_Ingredient', 'ingredient')
cand = {name: 0 for name in ingr}
query_indegree = "WITH "
for i in range(n):
query_indegree += "size((:recipe)-[:{2}]->(:{3}{{Name:'{0}'}})) as a{1},".format(ingr_type[ingr[i]][0], str(i), ingr_type[ingr[i]][1], ingr_type[ingr[i]][2])
query_indegree = query_indegree[:-1] + " RETURN "
for i in range(n):
query_indegree += "a{0},".format(str(i))
query_indegree = query_indegree[:-1]
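# e.g. for ingr=['apple'] the generated in-degree query is (sketch):
# WITH size((:recipe)-[:Has_Main_Ingredient]->(:main_ingredient{Name:'APPLE'})) as a0 RETURN a0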
res = graph.run(query_indegree)
indegrees = pd.DataFrame(res)
for i, name in enumerate(ingr):
cand[name] = indegrees.iloc[[0],[i]].values[0][0]
sorted_ingr = sorted(cand, key=cand.get)  # sort ingredient names by their recipe in-degree
query = ''
for i in range(n):
query += "OPTIONAL MATCH ((rep:recipe)-[r{0}:{3}]->(i{1}:{4}{{Name: '{2}'}})) ".format(str(i), str(i), ingr_type[sorted_ingr[i]][0], ingr_type[sorted_ingr[i]][1], ingr_type[sorted_ingr[i]][2])
if dietaryList is not None:
for dietary in dietaryList:
if dietary == 'halal':
query += "MATCH (rep) WHERE rep.halal is null "
elif dietary == 'vegetarian':
vegan = 'vegan'
query += "MATCH (rep)-[rs:Has_Meal_Type]->(:meal_type{{Name: '{0}'}}) WHERE (rep)-[:Has_Meal_Type]->(:meal_type{{Name: '{1}'}}) ".format(vegan, vegan)
elif dietary == 'fruitarian':
query += "MATCH (rep)-[:Has_Inferred_Meal_Type]->(:meal_type{Name:'fruit'}) "
elif dietary == 'eggetarian':
query += "MATCH (rep)-[:Has_Main_Ingredient]->(:main_ingredient{Name:'EGG'}) "
if cuisine is not None:
query += "MATCH (rep)-[:Has_Cuisine_Type]->(:cuisine_type{{Name: '{0}'}}) ".format(cuisine)
query += "WITH rep, "
for i in range(n):
query += "r{0}, i{1}, ".format(str(i), str(i))
query += "(size((rep:recipe)-[:Has_Ingredient]->(:ingredient)) + size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient))) as degree, "
for i in range(n):
query += "size((rep:recipe)-[:{2}]->(:{3}{{Name: '{0}'}})) as minus_degree{1},".format(ingr_type[sorted_ingr[i]][0], str(i), ingr_type[sorted_ingr[i]][1], ingr_type[sorted_ingr[i]][2])
query = query[:-1] + ' '
query += "RETURN rep, "
for i in range(n):
query += "r{0}, i{1}, minus_degree{2},".format(str(i), str(i), str(i))
query += "degree ORDER BY degree"
for i in range(n):
query += "-minus_degree{0} * 2".format(str(i))
query += ","
for i in range(n):
query += "(case when minus_degree{0}>=1 then 1 else 0 end)+".format(str(i))
query = query[:-1] + " desc"
query += ",degree SKIP {0} LIMIT 25;".format(skip * topk)
print(query)
res = graph.run(query)
res = pd.DataFrame(res)
# print(res)
recipes = []
for i in range(min(topk, res.shape[0])):
recipes.append(res.iloc[i,0])
return recipes
########################################
# Unit Test 1
########################################
# res = getRecipes(['apple','banana', 'strawberry'], dietaryList=['vegetarian'], cuisine='chinese')
# print(type(res[0]))
# Sample query
# query =
# '''
# OPTIONAL MATCH ((rep:recipe)-[r0:Has_Main_Ingredient]->(i0:main_ingredient{Name: 'BANANA'}))
# OPTIONAL MATCH ((rep:recipe)-[r1:Has_Main_Ingredient]->(i1:main_ingredient{Name: 'APPLE'}))
# OPTIONAL MATCH ((rep:recipe)-[r2:Has_Main_Ingredient]->(i2:main_ingredient{Name: 'STRAWBERRY'}))
# MATCH (rep)-[rs:Has_Meal_Type]->(:meal_type{Name: 'vegan'})
# WHERE (rep)-[:Has_Meal_Type]->(:meal_type{Name: 'vegan'})
# WITH rep, r0, i0, r1, i1, r2, i2, rs,
# (size((rep:recipe)-[:Has_Ingredient]->(:ingredient)) + size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient))) as degree, size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient{Name: 'BANANA'})) as minus_degree0,
# size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient{Name: 'APPLE'})) as minus_degree1,
# size((rep:recipe)-[:Has_Main_Ingredient]->(:main_ingredient{Name: 'STRAWBERRY'})) as minus_degree2
# RETURN rep, r0, i0, minus_degree0,r1, i1, rs, minus_degree1,r2, i2, minus_degree2,degree
# ORDER BY degree-minus_degree0 * 2-minus_degree1 * 2-minus_degree2 * 2,
# (case when minus_degree0>=1 then 1 else 0 end)+(case when minus_degree1>=1 then 1 else 0 end)+(case when minus_degree2>=1 then 1 else 0 end) desc,degree LIMIT 25;
# '''
def getRecipeByName(rep: str) -> Dict:
query = "MATCH (rep:recipe) WHERE rep.Name=~'(?i){0}' RETURN rep".format(rep)
res = graph.run(query)
res = pd.DataFrame(res)
if res.empty:
return None
return res.iloc[0,0]
########################################
# Unit Test 2
########################################
# rep = 'super Fruity Smoothie'
# print(getRecipeByName(rep))
# Sample query
# MATCH (rep:recipe)
# WHERE rep.Name=~'(?i)super Fruity Smoothie'
# RETURN rep
def getIngredient(id: str, rep: str) -> List[str]:
query = "MATCH (rep:recipe)-[:Has_Ingredient]->(a:ingredient) WHERE rep.Name=~'(?i){0}' AND rep.RecipeId='{1}' RETURN a".format(rep, id)
res = graph.run(query)
res = pd.DataFrame(res)
ingrs = []
for i in range(res.shape[0]):
ingrs.append(res.iloc[i,0]['Name'])
return ingrs
########################################
# Unit Test 3
########################################
# rep = 'super Fruity Smoothie'
# print(getIngredient(rep))
# Sample query
# MATCH (rep:recipe)-[:Has_Ingredient]->(a:ingredient)
# WHERE rep.Name=~'(?i)super Fruity Smoothie'
# RETURN a
# def random_init(length = 50):
# query = "MATCH (n:recipe) RETURN n LIMIT {0}".format(str(length))
# res = graph.run(query)
# res = pd.DataFrame(res)
# for i in range(res.shape[0]):
# random_set[i] = res.iloc[i,0]
def browser(topk: int = 10,
dietaryList: List[str] = None,
cuisine: str = None) -> List[Dict]:
query = "MATCH (a:recipe) WITH rand() as r, a "
if dietaryList is not None:
for dietary in dietaryList:
if dietary == 'halal':
query += "MATCH (a) WHERE a.halal is null "
elif dietary == 'vegetarian':
vegan = 'vegan'
query += "MATCH (a)-[rs:Has_Meal_Type]->(:meal_type{{Name: '{0}'}}) WHERE (a)-[:Has_Meal_Type]->(:meal_type{{Name: '{1}'}}) ".format(vegan, vegan)
elif dietary == 'fruitarian':
query += "MATCH (a)-[:Has_Inferred_Meal_Type]->(:meal_type{Name:'fruit'}) "
elif dietary == 'eggetarian':
query += "MATCH (a)-[:Has_Main_Ingredient]->(:main_ingredient{Name:'EGG'}) "
if cuisine is not None:
query += "MATCH (a)-[:Has_Cuisine_Type]->(:cuisine_type{{Name: '{0}'}}) ".format(cuisine)
query += "RETURN a ORDER BY r LIMIT {0};".format(topk)
print(query)
res = graph.run(query)
res = pd.DataFrame(res)
recipes = []
for i in range(res.shape[0]):
recipes.append(res.iloc[i,0])
return recipes
########################################
# Unit Test 3
########################################
# print(browser(dietaryList=['halal','fruitarian'], cuisine='chinese'))
| [
"py2neo.Graph",
"pandas.DataFrame"
] | [((223, 294), 'py2neo.Graph', 'Graph', (['"""http://localhost:7474"""'], {'username': '"""neo4j"""', 'password': '"""<PASSWORD>"""'}), "('http://localhost:7474', username='neo4j', password='<PASSWORD>')\n", (228, 294), False, 'from py2neo import Node, Relationship, Graph, NodeMatcher\n'), ((1890, 1907), 'pandas.DataFrame', 'pd.DataFrame', (['res'], {}), '(res)\n', (1902, 1907), True, 'import pandas as pd\n'), ((4121, 4138), 'pandas.DataFrame', 'pd.DataFrame', (['res'], {}), '(res)\n', (4133, 4138), True, 'import pandas as pd\n'), ((5904, 5921), 'pandas.DataFrame', 'pd.DataFrame', (['res'], {}), '(res)\n', (5916, 5921), True, 'import pandas as pd\n'), ((6473, 6490), 'pandas.DataFrame', 'pd.DataFrame', (['res'], {}), '(res)\n', (6485, 6490), True, 'import pandas as pd\n'), ((8211, 8228), 'pandas.DataFrame', 'pd.DataFrame', (['res'], {}), '(res)\n', (8223, 8228), True, 'import pandas as pd\n')] |
import cwgen
import os
import sys
import PySimpleGUI as sg
class CwGenUI:
# General
# GUI - window config
WINDOW_DESCRIPTION = 'CW training material generator by SP6HFE'
# GUI - text config
E2CW_VER_LOCAL_KEY = '-E2CW VER LOCAL-'
E2CW_VER_ONLINE_KEY = '-E2CW VER ONLINE-'
# GUI - button config
FILE_BROWSE_KEY = '-ADD FILE-'
FILE_REMOVE_KEY = '-REMOVE FILE-'
E2CW_DOWNLOAD_KEY = '-E2CW DOWNLOAD-'
E2CW_GENERATE_KEY = '-E2CW GENERATE-'
# GUI - input config
FILE_PATH_INPUT_KEY = '-FILE PATH-'
# GUI - table config
FILES_DATA_TABLE_KEY = '-FILES DATA-'
WORDS_FILTERED_TABLE_KEY = '-WORDS FILTERED-'
WORDS_TO_GEN_TABLE_KEY = '-WORDS TO GEN-'
# GUI - sliders config
H_SLIDER_WIDTH = 21
H_SLIDER_HEIGHT = 10
LETTERS_MIN_KEY = '-LETTERS MIN-'
LETTERS_MAX_KEY = '-LETTERS MAX-'
LETTERS_MIN_RANGE_START_KEY = '-LETTERS MIN RANGE START-'
LETTERS_MIN_RANGE_STOP_KEY = '-LETTERS MIN RANGE STOP-'
LETTERS_MAX_RANGE_START_KEY = '-LETTERS MAX RANGE START-'
LETTERS_MAX_RANGE_STOP_KEY = '-LETTERS MAX RANGE STOP-'
WORDS_TO_TRAIN_KEY = '-WORDS TO TRAIN-'
WORDS_TO_TRAIN_RANGE_START_KEY = 'WORDS TO TRAIN RANGE START-'
WORDS_TO_TRAIN_RANGE_STOP_KEY = 'WORDS TO TRAIN RANGE STOP-'
E2CW_WPM_KEY = '-E2CW WPM-'
E2CW_WPM_RANGE_START_KEY = '-E2CW WPM RANGE START-'
E2CW_WPM_RANGE_STOP_KEY = '-E2CW WPM RANGE STOP-'
E2CW_FARNS_KEY = '-E2CW FARNS-'
E2CW_FARNS_RANGE_START_KEY = '-E2CW FARNS RANGE START-'
E2CW_FARNS_RANGE_STOP_KEY = '-E2CW FARNS RANGE STOP-'
E2CW_PITCH_KEY = '-E2CW PITCH-'
E2CW_PITCH_RANGE_START_KEY = '-E2CW PITCH RANGE START-'
E2CW_PITCH_RANGE_STOP_KEY = '-E2CW PITCH RANGE STOP-'
# GUI - combo config
COMBO_LETTERS_SET_KEY = '-LETTERS SET-'
COMBO_MATERIAL_GENERATION_KEY = '-MATERIAL GENERATION-'
def __init__(self):
"""Class initialization"""
# Members
self.files_table_idx = -1
self.cw_gen = cwgen.CwGen()
self.letters_sets = self.cw_gen.get_letters_sets()
self.training_generator_schemes = self.cw_gen.get_training_generator_schemes()
ebook2cw_version_local = self.cw_gen.get_ebook2cw_version_local()
ebook2cw_version_online = self.cw_gen.get_ebook2cw_version_online()
# GUI - header columns -> name, column size, visible?
files_data_header = [
("UUID", 0, False),
("File name", 14, True),
("Words", 6, True),
("Min len", 7, True),
("Max len", 7, True)
]
words_filtered_header = [
("Word length", 15, True),
("Count", 15, True)
]
words_to_gen_header = [
("Word length", 15, True),
("Count", 15, True)
]
# GUI - tables
files_data_table = [sg.Table(values=[],
headings=[name for name, _size,
_visible in files_data_header],
col_widths=[size for _name, size,
_visible in files_data_header],
visible_column_map=[
visible for _name, _size, visible in files_data_header],
num_rows=5,
justification='left',
auto_size_columns=False,
enable_events=True,
key=self.FILES_DATA_TABLE_KEY
)]
words_filtered_table = [sg.Table(values=[],
headings=[
name for name, _size, _visible in words_filtered_header],
col_widths=[
size for _name, size, _visible in words_filtered_header],
num_rows=5,
justification='left',
auto_size_columns=False,
key=self.WORDS_FILTERED_TABLE_KEY)]
words_to_gen_table = [sg.Table(values=[],
headings=[
name for name, _size, _visible in words_to_gen_header],
col_widths=[
size for _name, size, _visible in words_to_gen_header],
num_rows=5,
justification='left',
auto_size_columns=False,
key=self.WORDS_TO_GEN_TABLE_KEY)]
# GUI - rows
files_operation = [sg.Input(enable_events=True, visible=False, key=self.FILE_PATH_INPUT_KEY),
sg.FileBrowse(button_text="Add", file_types=(
("ALL Files", "*.*"), ("CWOPS sessions", "*.cwo")), target=self.FILE_PATH_INPUT_KEY, key=self.FILE_BROWSE_KEY),
sg.Button(button_text="Remove selected", key=self.FILE_REMOVE_KEY)]
letters_min = [sg.Text("MIN:", size=(4, 1)),
sg.Text("0", size=(2, 1),
key=self.LETTERS_MIN_RANGE_START_KEY),
sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT),
orientation='h', enable_events=True, key=self.LETTERS_MIN_KEY),
sg.Text("0", size=(2, 1), key=self.LETTERS_MIN_RANGE_STOP_KEY)]
letters_max = [sg.Text("MAX:", size=(4, 1)),
sg.Text("0", size=(2, 1),
key=self.LETTERS_MAX_RANGE_START_KEY),
sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT),
orientation='h', enable_events=True, key=self.LETTERS_MAX_KEY),
sg.Text("0", size=(2, 1), key=self.LETTERS_MAX_RANGE_STOP_KEY)]
letters_set = [sg.Text('From set:'),
sg.Combo(values=([data['description'] for _id, data in self.letters_sets.items()]),
default_value=list(
self.letters_sets.items())[0][1]['description'],
size=(max(len(data['description'])
for _id, data in self.letters_sets.items()), 1),
readonly=True,
enable_events=True,
key=self.COMBO_LETTERS_SET_KEY)]
generator_scheme = [sg.Text('Using scheme:'),
sg.Combo(values=([name for _id, name in self.training_generator_schemes.items()]),
default_value=list(
self.training_generator_schemes.items())[0][1],
size=(
max(len(name) for _id, name in self.training_generator_schemes.items()), 1),
readonly=True,
enable_events=True,
key=self.COMBO_MATERIAL_GENERATION_KEY)]
words_to_train = [sg.Text("SIZE:", size=(6, 1)),
sg.Text("0", size=(2, 1),
key=self.WORDS_TO_TRAIN_KEY),
sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT),
orientation='h', enable_events=True, key=self.WORDS_TO_TRAIN_RANGE_START_KEY),
sg.Text("0", size=(2, 1), key=self.WORDS_TO_TRAIN_RANGE_STOP_KEY)]
e2cw_version = [sg.Text('Local version:', size=(15, 1)), sg.Text(ebook2cw_version_local, key=self.E2CW_VER_LOCAL_KEY),
sg.Text('Online version:', size=(15, 1)), sg.Text(ebook2cw_version_online, key=self.E2CW_VER_ONLINE_KEY)]
e2cw_buttons = [sg.Button('Download / Update Ebook2CW', key=self.E2CW_DOWNLOAD_KEY),
sg.Button('Generate training files', key=self.E2CW_GENERATE_KEY)]
e2cw_wpm = [sg.Text("WPM:", size=(6, 1)),
sg.Text("0", size=(2, 1),
key=self.E2CW_WPM_RANGE_START_KEY),
sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT),
orientation='h', enable_events=True, key=self.E2CW_WPM_KEY),
sg.Text("0", size=(2, 1), key=self.E2CW_WPM_RANGE_STOP_KEY)]
e2cw_farns = [sg.Text("FARNS:", size=(6, 1)),
sg.Text("0", size=(2, 1),
key=self.E2CW_FARNS_RANGE_START_KEY),
sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT),
orientation='h', enable_events=True, key=self.E2CW_FARNS_KEY),
sg.Text("0", size=(2, 1), key=self.E2CW_FARNS_RANGE_STOP_KEY)]
e2cw_pitch = [sg.Text("PITCH:", size=(6, 1)),
sg.Text("0", size=(2, 1),
key=self.E2CW_PITCH_RANGE_START_KEY),
sg.Slider(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT),
orientation='h', enable_events=True, key=self.E2CW_PITCH_KEY),
sg.Text("0", size=(2, 1), key=self.E2CW_PITCH_RANGE_STOP_KEY)]
# GUI - columns
left_col = [
[sg.Frame('Dictionaries', [files_operation, files_data_table])],
[sg.Frame('Letters selection', [letters_set])],
[sg.Frame('Words length', [letters_min, letters_max])],
[sg.Frame('Training input', [words_filtered_table])]]
right_col = [
[sg.Frame('Training generator', [generator_scheme])],
[sg.Frame('Training set size', [words_to_train])],
[sg.Frame('Training output', [words_to_gen_table])],
[sg.Frame('Audible parameters', [e2cw_wpm, e2cw_farns, e2cw_pitch])],
[sg.Frame('Ebook2CW', [e2cw_version, e2cw_buttons])]]
# App layout
layout = [[sg.Column(left_col), sg.VSeparator(), sg.Column(right_col)]]
# Configure and create the window
self.window = sg.Window(self.WINDOW_DESCRIPTION, layout)
def _get_dictionary_key_by_value(self, dictionary, lookup_value, nested_key=None):
'''Retrieves a key based on a provided string value,
keeping insertion order: if the dictionary contains
several keys with the exact same value, the first key
(in insertion order) is returned.
Args:
dictionary (dict): dictionary to search for a key
lookup_value (str): value for which key should be found
nested_key (str): key in nested dictionary where lookup_value is
Returns:
result (str): key or None if lookup_value not found
'''
result = None
for key, value in dictionary.items():
if nested_key is not None:
data = value[nested_key]
else:
data = value
if data == lookup_value:
result = key
break
return result
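# e.g. for dictionary={'a': {'description': 'x'}}, lookup_value='x', nested_key='description'
# this returns 'a'; when no value matches it returns None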
def _update_ui_on_dictionary_set_change(self, values):
"""Updates relevant UI elements according to change
in dictionary set.
Args:
values (dict): Dictionary containing GUI elements values
Returns:
None
"""
table_data = []
sliders_range = (0, 0)
# get information related to already loaded data
dictionaries_info = self.cw_gen.get_dictionaries_info()
words_info = self.cw_gen.get_words_stat()
# generate updated data for UI elements
if len(dictionaries_info) > 0:
for dictionary_data in dictionaries_info:
row = [dictionary_data['uuid'],
dictionary_data['name'],
dictionary_data['stat']['words_count'],
dictionary_data['stat']['min_length'],
dictionary_data['stat']['max_length']]
table_data.append(row)
if len(words_info) > 0:
sliders_range = (words_info['min_length'],
words_info['max_length'])
# update UI
self.window[self.FILES_DATA_TABLE_KEY].update(
values=table_data)
words_min_length, words_max_length = self.update_words_length_sliders_config(
values, (sliders_range))
self._update_ui_on_words_filtering_change(
values, words_min_length, words_max_length)
def _update_ui_on_words_filtering_change(self, values, min_length=None, max_length=None):
'''Updates the words stat with the filtered result,
which allows the user to see the data from which
training material could be generated.
Args:
values (dict): Dictionary containing GUI elements values
min_length (int): Minimal words length,
passed in because the value read from self.window is not yet updated
(window handling has not advanced to the next loop yet)
max_length (int): Maximal words length,
passed in because the value read from self.window is not yet updated
(window handling has not advanced to the next loop yet)
Returns:
None
'''
words_min_length = int(values[self.LETTERS_MIN_KEY])
words_max_length = int(values[self.LETTERS_MAX_KEY])
letters_set = self.window[self.COMBO_LETTERS_SET_KEY].get()
generator_scheme = self.window[self.COMBO_MATERIAL_GENERATION_KEY].get(
)
if min_length is not None:
words_min_length = min_length
if max_length is not None:
words_max_length = max_length
# get filtered words stat
words_stat_filtered = self.cw_gen.get_words_stat_filtered(
words_min_length, words_max_length,
self._get_dictionary_key_by_value(
self.letters_sets, letters_set, 'description'),
self._get_dictionary_key_by_value(self.training_generator_schemes, generator_scheme))
# assemble words stat table (sorted by word length)
stat = []
if words_stat_filtered:
for word_length in sorted(words_stat_filtered['words_stat'].keys()):
stat.append(
[word_length, words_stat_filtered['words_stat'][word_length]])
# update UI
self.window[self.WORDS_FILTERED_TABLE_KEY].update(values=stat)
def handle_dictionary_add(self, values):
"""Handle new dictionary addition
by passing file path to cwgen. UI gets updated.
Args:
values (dict): Dictionary containing GUI elements values
Returns:
None
"""
# on file selection cancel values[FILE_PATH_INPUT_KEY] is empty
if len(values[self.FILE_PATH_INPUT_KEY]) > 0:
file_path = os.path.normpath(values[self.FILE_PATH_INPUT_KEY])
if os.path.isfile(file_path):
if self.cw_gen.add_dictionary(file_path):
self._update_ui_on_dictionary_set_change(values)
# clear file path storage to properly handle CANCEL situation
self.window[self.FILE_PATH_INPUT_KEY].update(value="")
def handle_dictionary_delete(self, values):
"""Handle dictionary deletion
by passing its generated UUID to cwgen. UI gets updated.
Args:
values (dict): Dictionary containing GUI elements values
Returns:
None
"""
# self.files_table_idx == -1 when no dictionary in the table is selected
if self.files_table_idx >= 0:
table_data = self.window[self.FILES_DATA_TABLE_KEY].get()
selected_dictionary_uuid = table_data[self.files_table_idx][0]
if self.cw_gen.remove_dictionary(selected_dictionary_uuid):
self._update_ui_on_dictionary_set_change(values)
# set table index to negative to properly handle dictionary remove button click
self.files_table_idx = -1
def handle_words_length_sliders(self, event, values):
"""Handle words length sliders movement
so that the MIN value never exceeds the MAX value.
Args:
event (str): GUI event name
values (dict): Dictionary containing GUI elements values
Returns:
None
"""
# get current positions
slider_min_val = int(values[self.LETTERS_MIN_KEY])
slider_max_val = int(values[self.LETTERS_MAX_KEY])
# update them if needed
if event == self.LETTERS_MIN_KEY:
if slider_min_val > slider_max_val:
slider_max_val = slider_min_val
self.window[self.LETTERS_MAX_KEY].update(
value=slider_max_val)
if event == self.LETTERS_MAX_KEY:
if slider_max_val < slider_min_val:
slider_min_val = slider_max_val
self.window[self.LETTERS_MIN_KEY].update(
value=slider_min_val)
return (slider_min_val, slider_max_val)
def update_words_length_sliders_config(self, values, new_range):
"""Updates UI part related to words length sliders change their range
assuring that sliders values gets updated when needed
Args:
values (dict): Dictionary containing GUI elements values
new_range (tuple): New value range
Returns:
new_min_val, new_max_val (tuple): Updated words length sliders values
"""
current_range_min, current_range_max = self.window[self.LETTERS_MIN_KEY].Range
current_min_val = int(values[self.LETTERS_MIN_KEY])
current_max_val = int(values[self.LETTERS_MAX_KEY])
new_range_min, new_range_max = new_range
new_min_val = current_min_val
new_max_val = current_max_val
# range min value may affect sliders position
if new_range_min > current_range_min:
if new_range_min > current_min_val:
new_min_val = new_range_min
if new_min_val > current_max_val:
new_max_val = new_min_val
# range max value may affect sliders position
if new_range_max < current_range_max:
if new_range_max < current_max_val:
new_max_val = new_range_max
if new_max_val < current_min_val:
new_min_val = new_max_val
self.window[self.LETTERS_MIN_KEY].update(
range=new_range, value=new_min_val)
self.window[self.LETTERS_MAX_KEY].update(
range=new_range, value=new_max_val)
self.window[self.LETTERS_MIN_RANGE_START_KEY].update(
value=new_range_min)
self.window[self.LETTERS_MIN_RANGE_STOP_KEY].update(
value=new_range_max)
self.window[self.LETTERS_MAX_RANGE_START_KEY].update(
value=new_range_min)
self.window[self.LETTERS_MAX_RANGE_STOP_KEY].update(
value=new_range_max)
return (new_min_val, new_max_val)
def handleGui(self):
"""GUI main loop
where all events gets dispatched for handling
Args:
None
Returns:
None
"""
event, values = self.window.read()
# See if user wants to quit or window was closed
if event == sg.WINDOW_CLOSED:
self.window.close()
return False
# Remember index of selected table row
if event == self.FILES_DATA_TABLE_KEY:
self.files_table_idx = values[self.FILES_DATA_TABLE_KEY][0]
# Add a dictionary to the list
if event == self.FILE_PATH_INPUT_KEY:
self.handle_dictionary_add(values)
# remove dictionary from the list
if event == self.FILE_REMOVE_KEY:
self.handle_dictionary_delete(values)
# handle words length change
if (event == self.LETTERS_MIN_KEY) or (event == self.LETTERS_MAX_KEY):
words_min_length, words_max_length = self.handle_words_length_sliders(
event, values)
self._update_ui_on_words_filtering_change(
values, words_min_length, words_max_length)
# handle letters set and generator scheme change
if (event == self.COMBO_LETTERS_SET_KEY) or (event == self.COMBO_MATERIAL_GENERATION_KEY):
self._update_ui_on_words_filtering_change(values)
return True
# UI theming
sg.theme('Default1')
# Start the GUI
ui = CwGenUI()
# Display and interact with the GUI using an Event Loop
while ui.handleGui():
pass
# Game over
del ui
| [
"PySimpleGUI.Button",
"PySimpleGUI.Input",
"PySimpleGUI.theme",
"cwgen.CwGen",
"PySimpleGUI.Frame",
"PySimpleGUI.Slider",
"PySimpleGUI.FileBrowse",
"PySimpleGUI.Table",
"PySimpleGUI.Text",
"PySimpleGUI.Window",
"os.path.normpath",
"os.path.isfile",
"PySimpleGUI.Column",
"PySimpleGUI.VSeparator"
] | [((21381, 21401), 'PySimpleGUI.theme', 'sg.theme', (['"""Default1"""'], {}), "('Default1')\n", (21389, 21401), True, 'import PySimpleGUI as sg\n'), ((2063, 2076), 'cwgen.CwGen', 'cwgen.CwGen', ([], {}), '()\n', (2074, 2076), False, 'import cwgen\n'), ((10656, 10698), 'PySimpleGUI.Window', 'sg.Window', (['self.WINDOW_DESCRIPTION', 'layout'], {}), '(self.WINDOW_DESCRIPTION, layout)\n', (10665, 10698), True, 'import PySimpleGUI as sg\n'), ((2983, 3335), 'PySimpleGUI.Table', 'sg.Table', ([], {'values': '[]', 'headings': '[name for name, _size, _visible in files_data_header]', 'col_widths': '[size for _name, size, _visible in files_data_header]', 'visible_column_map': '[visible for _name, _size, visible in files_data_header]', 'num_rows': '(5)', 'justification': '"""left"""', 'auto_size_columns': '(False)', 'enable_events': '(True)', 'key': 'self.FILES_DATA_TABLE_KEY'}), "(values=[], headings=[name for name, _size, _visible in\n files_data_header], col_widths=[size for _name, size, _visible in\n files_data_header], visible_column_map=[visible for _name, _size,\n visible in files_data_header], num_rows=5, justification='left',\n auto_size_columns=False, enable_events=True, key=self.FILES_DATA_TABLE_KEY)\n", (2991, 3335), True, 'import PySimpleGUI as sg\n'), ((3840, 4103), 'PySimpleGUI.Table', 'sg.Table', ([], {'values': '[]', 'headings': '[name for name, _size, _visible in words_filtered_header]', 'col_widths': '[size for _name, size, _visible in words_filtered_header]', 'num_rows': '(5)', 'justification': '"""left"""', 'auto_size_columns': '(False)', 'key': 'self.WORDS_FILTERED_TABLE_KEY'}), "(values=[], headings=[name for name, _size, _visible in\n words_filtered_header], col_widths=[size for _name, size, _visible in\n words_filtered_header], num_rows=5, justification='left',\n auto_size_columns=False, key=self.WORDS_FILTERED_TABLE_KEY)\n", (3848, 4103), True, 'import PySimpleGUI as sg\n'), ((4472, 4729), 'PySimpleGUI.Table', 'sg.Table', ([], {'values': '[]', 'headings': '[name for name, _size, _visible in words_to_gen_header]', 'col_widths': '[size for _name, size, _visible in words_to_gen_header]', 'num_rows': '(5)', 'justification': '"""left"""', 'auto_size_columns': '(False)', 'key': 'self.WORDS_TO_GEN_TABLE_KEY'}), "(values=[], headings=[name for name, _size, _visible in\n words_to_gen_header], col_widths=[size for _name, size, _visible in\n words_to_gen_header], num_rows=5, justification='left',\n auto_size_columns=False, key=self.WORDS_TO_GEN_TABLE_KEY)\n", (4480, 4729), True, 'import PySimpleGUI as sg\n'), ((4904, 4977), 'PySimpleGUI.Input', 'sg.Input', ([], {'enable_events': '(True)', 'visible': '(False)', 'key': 'self.FILE_PATH_INPUT_KEY'}), '(enable_events=True, visible=False, key=self.FILE_PATH_INPUT_KEY)\n', (4912, 4977), True, 'import PySimpleGUI as sg\n'), ((5007, 5172), 'PySimpleGUI.FileBrowse', 'sg.FileBrowse', ([], {'button_text': '"""Add"""', 'file_types': "(('ALL Files', '*.*'), ('CWOPS sessions', '*.cwo'))", 'target': 'self.FILE_PATH_INPUT_KEY', 'key': 'self.FILE_BROWSE_KEY'}), "(button_text='Add', file_types=(('ALL Files', '*.*'), (\n 'CWOPS sessions', '*.cwo')), target=self.FILE_PATH_INPUT_KEY, key=self.\n FILE_BROWSE_KEY)\n", (5020, 5172), True, 'import PySimpleGUI as sg\n'), ((5225, 5291), 'PySimpleGUI.Button', 'sg.Button', ([], {'button_text': '"""Remove selected"""', 'key': 'self.FILE_REMOVE_KEY'}), "(button_text='Remove selected', key=self.FILE_REMOVE_KEY)\n", (5234, 5291), True, 'import PySimpleGUI as sg\n'), ((5319, 5347), 'PySimpleGUI.Text', 'sg.Text', 
(['"""MIN:"""'], {'size': '(4, 1)'}), "('MIN:', size=(4, 1))\n", (5326, 5347), True, 'import PySimpleGUI as sg\n'), ((5373, 5436), 'PySimpleGUI.Text', 'sg.Text', (['"""0"""'], {'size': '(2, 1)', 'key': 'self.LETTERS_MIN_RANGE_START_KEY'}), "('0', size=(2, 1), key=self.LETTERS_MIN_RANGE_START_KEY)\n", (5380, 5436), True, 'import PySimpleGUI as sg\n'), ((5494, 5634), 'PySimpleGUI.Slider', 'sg.Slider', ([], {'range': '(0, 0)', 'size': '(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT)', 'orientation': '"""h"""', 'enable_events': '(True)', 'key': 'self.LETTERS_MIN_KEY'}), "(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT),\n orientation='h', enable_events=True, key=self.LETTERS_MIN_KEY)\n", (5503, 5634), True, 'import PySimpleGUI as sg\n'), ((5690, 5752), 'PySimpleGUI.Text', 'sg.Text', (['"""0"""'], {'size': '(2, 1)', 'key': 'self.LETTERS_MIN_RANGE_STOP_KEY'}), "('0', size=(2, 1), key=self.LETTERS_MIN_RANGE_STOP_KEY)\n", (5697, 5752), True, 'import PySimpleGUI as sg\n'), ((5780, 5808), 'PySimpleGUI.Text', 'sg.Text', (['"""MAX:"""'], {'size': '(4, 1)'}), "('MAX:', size=(4, 1))\n", (5787, 5808), True, 'import PySimpleGUI as sg\n'), ((5834, 5897), 'PySimpleGUI.Text', 'sg.Text', (['"""0"""'], {'size': '(2, 1)', 'key': 'self.LETTERS_MAX_RANGE_START_KEY'}), "('0', size=(2, 1), key=self.LETTERS_MAX_RANGE_START_KEY)\n", (5841, 5897), True, 'import PySimpleGUI as sg\n'), ((5955, 6095), 'PySimpleGUI.Slider', 'sg.Slider', ([], {'range': '(0, 0)', 'size': '(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT)', 'orientation': '"""h"""', 'enable_events': '(True)', 'key': 'self.LETTERS_MAX_KEY'}), "(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT),\n orientation='h', enable_events=True, key=self.LETTERS_MAX_KEY)\n", (5964, 6095), True, 'import PySimpleGUI as sg\n'), ((6151, 6213), 'PySimpleGUI.Text', 'sg.Text', (['"""0"""'], {'size': '(2, 1)', 'key': 'self.LETTERS_MAX_RANGE_STOP_KEY'}), "('0', size=(2, 1), key=self.LETTERS_MAX_RANGE_STOP_KEY)\n", (6158, 6213), True, 'import PySimpleGUI as sg\n'), ((6241, 6261), 'PySimpleGUI.Text', 'sg.Text', (['"""From set:"""'], {}), "('From set:')\n", (6248, 6261), True, 'import PySimpleGUI as sg\n'), ((6868, 6892), 'PySimpleGUI.Text', 'sg.Text', (['"""Using scheme:"""'], {}), "('Using scheme:')\n", (6875, 6892), True, 'import PySimpleGUI as sg\n'), ((7537, 7566), 'PySimpleGUI.Text', 'sg.Text', (['"""SIZE:"""'], {'size': '(6, 1)'}), "('SIZE:', size=(6, 1))\n", (7544, 7566), True, 'import PySimpleGUI as sg\n'), ((7595, 7649), 'PySimpleGUI.Text', 'sg.Text', (['"""0"""'], {'size': '(2, 1)', 'key': 'self.WORDS_TO_TRAIN_KEY'}), "('0', size=(2, 1), key=self.WORDS_TO_TRAIN_KEY)\n", (7602, 7649), True, 'import PySimpleGUI as sg\n'), ((7713, 7873), 'PySimpleGUI.Slider', 'sg.Slider', ([], {'range': '(0, 0)', 'size': '(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT)', 'orientation': '"""h"""', 'enable_events': '(True)', 'key': 'self.WORDS_TO_TRAIN_RANGE_START_KEY'}), "(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT),\n orientation='h', enable_events=True, key=self.\n WORDS_TO_TRAIN_RANGE_START_KEY)\n", (7722, 7873), True, 'import PySimpleGUI as sg\n'), ((7930, 7995), 'PySimpleGUI.Text', 'sg.Text', (['"""0"""'], {'size': '(2, 1)', 'key': 'self.WORDS_TO_TRAIN_RANGE_STOP_KEY'}), "('0', size=(2, 1), key=self.WORDS_TO_TRAIN_RANGE_STOP_KEY)\n", (7937, 7995), True, 'import PySimpleGUI as sg\n'), ((8024, 8063), 'PySimpleGUI.Text', 'sg.Text', (['"""Local version:"""'], {'size': '(15, 1)'}), "('Local version:', size=(15, 1))\n", (8031, 8063), True, 'import PySimpleGUI as 
sg\n'), ((8065, 8125), 'PySimpleGUI.Text', 'sg.Text', (['ebook2cw_version_local'], {'key': 'self.E2CW_VER_LOCAL_KEY'}), '(ebook2cw_version_local, key=self.E2CW_VER_LOCAL_KEY)\n', (8072, 8125), True, 'import PySimpleGUI as sg\n'), ((8152, 8192), 'PySimpleGUI.Text', 'sg.Text', (['"""Online version:"""'], {'size': '(15, 1)'}), "('Online version:', size=(15, 1))\n", (8159, 8192), True, 'import PySimpleGUI as sg\n'), ((8194, 8256), 'PySimpleGUI.Text', 'sg.Text', (['ebook2cw_version_online'], {'key': 'self.E2CW_VER_ONLINE_KEY'}), '(ebook2cw_version_online, key=self.E2CW_VER_ONLINE_KEY)\n', (8201, 8256), True, 'import PySimpleGUI as sg\n'), ((8285, 8352), 'PySimpleGUI.Button', 'sg.Button', (['"""Download / Update Ebook2CW"""'], {'key': 'self.E2CW_DOWNLOAD_KEY'}), "('Download / Update Ebook2CW', key=self.E2CW_DOWNLOAD_KEY)\n", (8294, 8352), True, 'import PySimpleGUI as sg\n'), ((8379, 8443), 'PySimpleGUI.Button', 'sg.Button', (['"""Generate training files"""'], {'key': 'self.E2CW_GENERATE_KEY'}), "('Generate training files', key=self.E2CW_GENERATE_KEY)\n", (8388, 8443), True, 'import PySimpleGUI as sg\n'), ((8468, 8496), 'PySimpleGUI.Text', 'sg.Text', (['"""WPM:"""'], {'size': '(6, 1)'}), "('WPM:', size=(6, 1))\n", (8475, 8496), True, 'import PySimpleGUI as sg\n'), ((8519, 8579), 'PySimpleGUI.Text', 'sg.Text', (['"""0"""'], {'size': '(2, 1)', 'key': 'self.E2CW_WPM_RANGE_START_KEY'}), "('0', size=(2, 1), key=self.E2CW_WPM_RANGE_START_KEY)\n", (8526, 8579), True, 'import PySimpleGUI as sg\n'), ((8631, 8768), 'PySimpleGUI.Slider', 'sg.Slider', ([], {'range': '(0, 0)', 'size': '(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT)', 'orientation': '"""h"""', 'enable_events': '(True)', 'key': 'self.E2CW_WPM_KEY'}), "(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT),\n orientation='h', enable_events=True, key=self.E2CW_WPM_KEY)\n", (8640, 8768), True, 'import PySimpleGUI as sg\n'), ((8818, 8877), 'PySimpleGUI.Text', 'sg.Text', (['"""0"""'], {'size': '(2, 1)', 'key': 'self.E2CW_WPM_RANGE_STOP_KEY'}), "('0', size=(2, 1), key=self.E2CW_WPM_RANGE_STOP_KEY)\n", (8825, 8877), True, 'import PySimpleGUI as sg\n'), ((8904, 8934), 'PySimpleGUI.Text', 'sg.Text', (['"""FARNS:"""'], {'size': '(6, 1)'}), "('FARNS:', size=(6, 1))\n", (8911, 8934), True, 'import PySimpleGUI as sg\n'), ((8959, 9021), 'PySimpleGUI.Text', 'sg.Text', (['"""0"""'], {'size': '(2, 1)', 'key': 'self.E2CW_FARNS_RANGE_START_KEY'}), "('0', size=(2, 1), key=self.E2CW_FARNS_RANGE_START_KEY)\n", (8966, 9021), True, 'import PySimpleGUI as sg\n'), ((9077, 9216), 'PySimpleGUI.Slider', 'sg.Slider', ([], {'range': '(0, 0)', 'size': '(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT)', 'orientation': '"""h"""', 'enable_events': '(True)', 'key': 'self.E2CW_FARNS_KEY'}), "(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT),\n orientation='h', enable_events=True, key=self.E2CW_FARNS_KEY)\n", (9086, 9216), True, 'import PySimpleGUI as sg\n'), ((9270, 9331), 'PySimpleGUI.Text', 'sg.Text', (['"""0"""'], {'size': '(2, 1)', 'key': 'self.E2CW_FARNS_RANGE_STOP_KEY'}), "('0', size=(2, 1), key=self.E2CW_FARNS_RANGE_STOP_KEY)\n", (9277, 9331), True, 'import PySimpleGUI as sg\n'), ((9358, 9388), 'PySimpleGUI.Text', 'sg.Text', (['"""PITCH:"""'], {'size': '(6, 1)'}), "('PITCH:', size=(6, 1))\n", (9365, 9388), True, 'import PySimpleGUI as sg\n'), ((9413, 9475), 'PySimpleGUI.Text', 'sg.Text', (['"""0"""'], {'size': '(2, 1)', 'key': 'self.E2CW_PITCH_RANGE_START_KEY'}), "('0', size=(2, 1), key=self.E2CW_PITCH_RANGE_START_KEY)\n", (9420, 9475), True, 'import 
PySimpleGUI as sg\n'), ((9531, 9670), 'PySimpleGUI.Slider', 'sg.Slider', ([], {'range': '(0, 0)', 'size': '(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT)', 'orientation': '"""h"""', 'enable_events': '(True)', 'key': 'self.E2CW_PITCH_KEY'}), "(range=(0, 0), size=(self.H_SLIDER_WIDTH, self.H_SLIDER_HEIGHT),\n orientation='h', enable_events=True, key=self.E2CW_PITCH_KEY)\n", (9540, 9670), True, 'import PySimpleGUI as sg\n'), ((9724, 9785), 'PySimpleGUI.Text', 'sg.Text', (['"""0"""'], {'size': '(2, 1)', 'key': 'self.E2CW_PITCH_RANGE_STOP_KEY'}), "('0', size=(2, 1), key=self.E2CW_PITCH_RANGE_STOP_KEY)\n", (9731, 9785), True, 'import PySimpleGUI as sg\n'), ((15618, 15668), 'os.path.normpath', 'os.path.normpath', (['values[self.FILE_PATH_INPUT_KEY]'], {}), '(values[self.FILE_PATH_INPUT_KEY])\n', (15634, 15668), False, 'import os\n'), ((15685, 15710), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (15699, 15710), False, 'import os\n'), ((9850, 9911), 'PySimpleGUI.Frame', 'sg.Frame', (['"""Dictionaries"""', '[files_operation, files_data_table]'], {}), "('Dictionaries', [files_operation, files_data_table])\n", (9858, 9911), True, 'import PySimpleGUI as sg\n'), ((9928, 9972), 'PySimpleGUI.Frame', 'sg.Frame', (['"""Letters selection"""', '[letters_set]'], {}), "('Letters selection', [letters_set])\n", (9936, 9972), True, 'import PySimpleGUI as sg\n'), ((9989, 10041), 'PySimpleGUI.Frame', 'sg.Frame', (['"""Words length"""', '[letters_min, letters_max]'], {}), "('Words length', [letters_min, letters_max])\n", (9997, 10041), True, 'import PySimpleGUI as sg\n'), ((10058, 10108), 'PySimpleGUI.Frame', 'sg.Frame', (['"""Training input"""', '[words_filtered_table]'], {}), "('Training input', [words_filtered_table])\n", (10066, 10108), True, 'import PySimpleGUI as sg\n'), ((10150, 10200), 'PySimpleGUI.Frame', 'sg.Frame', (['"""Training generator"""', '[generator_scheme]'], {}), "('Training generator', [generator_scheme])\n", (10158, 10200), True, 'import PySimpleGUI as sg\n'), ((10217, 10264), 'PySimpleGUI.Frame', 'sg.Frame', (['"""Training set size"""', '[words_to_train]'], {}), "('Training set size', [words_to_train])\n", (10225, 10264), True, 'import PySimpleGUI as sg\n'), ((10281, 10330), 'PySimpleGUI.Frame', 'sg.Frame', (['"""Training output"""', '[words_to_gen_table]'], {}), "('Training output', [words_to_gen_table])\n", (10289, 10330), True, 'import PySimpleGUI as sg\n'), ((10347, 10413), 'PySimpleGUI.Frame', 'sg.Frame', (['"""Audible parameters"""', '[e2cw_wpm, e2cw_farns, e2cw_pitch]'], {}), "('Audible parameters', [e2cw_wpm, e2cw_farns, e2cw_pitch])\n", (10355, 10413), True, 'import PySimpleGUI as sg\n'), ((10430, 10480), 'PySimpleGUI.Frame', 'sg.Frame', (['"""Ebook2CW"""', '[e2cw_version, e2cw_buttons]'], {}), "('Ebook2CW', [e2cw_version, e2cw_buttons])\n", (10438, 10480), True, 'import PySimpleGUI as sg\n'), ((10527, 10546), 'PySimpleGUI.Column', 'sg.Column', (['left_col'], {}), '(left_col)\n', (10536, 10546), True, 'import PySimpleGUI as sg\n'), ((10548, 10563), 'PySimpleGUI.VSeparator', 'sg.VSeparator', ([], {}), '()\n', (10561, 10563), True, 'import PySimpleGUI as sg\n'), ((10565, 10585), 'PySimpleGUI.Column', 'sg.Column', (['right_col'], {}), '(right_col)\n', (10574, 10585), True, 'import PySimpleGUI as sg\n')] |
# TODO: use sliding windows instead of chunking.
# the chunking method fails when part of a dupe string
# crosses the border between chunks
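# e.g. with chunk_length 5, a 5-word duplicate split 2/3 across a chunk
# boundary never appears inside a single chunk and is therefore missed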
import colorama
from colorama import Fore
from tqdm import trange, tqdm
import os
colorama.init(autoreset=True)
DEFAULT_MIN = 5
DEFAULT_MAX = 10
def striplist(l):
# clean out some unneeded chars
return [x.strip(" \t\n\r") for x in l]
def compare(input):
# main comparison function
test = input[0]
data = input[1]
chunk_length = input[2]
# print("data", data)
found = data.find(test)
if found != -1:
words = test.split()
# don't return matched chunks shorter than the current chunk
# length, even if they are rounding remainder orphans from the
# chunking process
if len(words) >= chunk_length:
return test
def make_chunks(lst, n):
# Yield successive n-sized chunks from lst.
for i in range(0, len(lst), n):
yield lst[i : i + n]
def chunkit(str, chunk_length):
# make chunks of strings the way we like
chunks = []
list = str.split()
list = striplist(list)
wordLists = make_chunks(list, chunk_length)
for chunk in wordLists:
if chunk != "":
chunk = " ".join(chunk)
chunks.append(chunk)
return chunks
def run(chunk_length, text, dataset):
dataset = dataset.replace(os.linesep, " ")
testChunks = chunkit(text, chunk_length)
# remove empty lines
testChunks = list(filter(None, testChunks))
results = []
for testLine in testChunks:
found = compare([testLine, dataset, chunk_length])
if found is not None:
print("found", found)
results.append(found)
return results
def dupecheck(min=DEFAULT_MIN, max=DEFAULT_MAX, text=None, dataset=None, verbose=False):
assert text is not None
assert dataset is not None
text = text.replace(os.linesep, "")
matches = []
for i in trange(max, min, -1):
# print('text, i',text, i)
res = run(i, text, dataset)
if len(res) > 0:
for r in res:
if r not in matches:
matches.append(r)
return matches
| [
"colorama.init",
"tqdm.trange"
] | [((224, 253), 'colorama.init', 'colorama.init', ([], {'autoreset': '(True)'}), '(autoreset=True)\n', (237, 253), False, 'import colorama\n'), ((1951, 1971), 'tqdm.trange', 'trange', (['max', 'min', '(-1)'], {}), '(max, min, -1)\n', (1957, 1971), False, 'from tqdm import trange, tqdm\n')] |
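The TODO at the top of the dupecheck module above notes that fixed-size chunking misses duplicates that straddle a chunk boundary. A hedged sketch of the sliding-window alternative it alludes to (not part of the original module; the function name is illustrative):
def sliding_chunks(text, n):
    # Emit every contiguous n-word window so a duplicated phrase can never be
    # split across a boundary the way disjoint chunks can.
    words = striplist(text.split())
    if len(words) <= n:
        return [" ".join(words)] if words else []
    return [" ".join(words[i:i + n]) for i in range(len(words) - n + 1)]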
import boto3
from botocore.exceptions import ClientError
import json
import os
import time
import datetime
from dateutil import tz
from lib.account import *
from lib.common import *
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('boto3').setLevel(logging.WARNING)
RESOURCE_PATH = "s3/bucket"
def lambda_handler(event, context):
logger.debug("Received event: " + json.dumps(event, sort_keys=True))
message = json.loads(event['Records'][0]['Sns']['Message'])
logger.info("Received message: " + json.dumps(message, sort_keys=True))
try:
target_account = AWSAccount(message['account_id'])
discover_buckets(target_account)
except AssumeRoleError as e:
logger.error("Unable to assume role into account {}({})".format(target_account.account_name, target_account.account_id))
return()
except ClientError as e:
logger.error("AWS Error getting info for {}: {}".format(target_account.account_name, e))
return()
except Exception as e:
logger.error("{}\nMessage: {}\nContext: {}".format(e, message, vars(context)))
raise
def discover_buckets(account):
'''
Gathers all the S3 Buckets and various details about them
'''
bucket_list = []
# Not all Public IPs are attached to instances. So we use ec2 describe_network_interfaces()
    # All results are saved to S3. Public IPs and metadata go to DDB (based on the presence of PublicIp in the Association)
s3_client = account.get_client('s3')
    response = s3_client.list_buckets() # This API call doesn't paginate. Go fig...
bucket_list += response['Buckets']
for b in bucket_list:
bucket_name = b['Name']
# Decorate with the account info
b['account_id'] = account.account_id
b['account_name'] = account.account_name
b['resource_type'] = "s3-bucket"
b['last_seen'] = str(datetime.datetime.now(tz.gettz('US/Eastern')))
b['errors'] = {}
# Go through a bunch of API calls to get details on this bucket
try:
response = s3_client.get_bucket_encryption(Bucket=bucket_name)
if 'ServerSideEncryptionConfiguration' in response:
b['ServerSideEncryptionConfiguration'] = response['ServerSideEncryptionConfiguration']
except ClientError as e:
if e.response['Error']['Code'] != 'ServerSideEncryptionConfigurationNotFoundError':
b['errors']['ServerSideEncryptionConfiguration'] = e
try:
response = s3_client.get_bucket_acl(Bucket=bucket_name)
if 'Grants' in response:
b['Grants'] = response['Grants']
except ClientError as e:
b['errors']['Grants'] = e
try:
response = s3_client.get_bucket_location(Bucket=bucket_name)
if 'LocationConstraint' in response:
if response['LocationConstraint'] is None:
b['Location'] = "us-east-1"
else:
b['Location'] = response['LocationConstraint']
except ClientError as e:
b['errors']['Location'] = e
try:
response = s3_client.get_bucket_policy(Bucket=bucket_name)
if 'Policy' in response:
b['BucketPolicy'] = json.loads(response['Policy'])
except ClientError as e:
if e.response['Error']['Code'] != 'NoSuchBucketPolicy':
b['errors']['BucketPolicy'] = e
try:
response = s3_client.get_bucket_tagging(Bucket=bucket_name)
if 'TagSet' in response:
b['TagSet'] = response['TagSet']
except ClientError as e:
if e.response['Error']['Code'] != 'NoSuchTagSet':
b['errors']['TagSet'] = e
try:
response = s3_client.get_bucket_versioning(Bucket=bucket_name)
del response['ResponseMetadata']
b['Versioning'] = response
except ClientError as e:
b['errors']['Versioning'] = e
try:
response = s3_client.get_bucket_request_payment(Bucket=bucket_name)
del response['ResponseMetadata']
b['RequestPayer'] = response
except ClientError as e:
b['errors']['RequestPayer'] = e
try:
response = s3_client.get_bucket_website(Bucket=bucket_name)
del response['ResponseMetadata']
b['Website'] = response
except ClientError as e:
if e.response['Error']['Code'] != 'NoSuchWebsiteConfiguration':
b['errors']['Website'] = e
try:
response = s3_client.get_bucket_logging(Bucket=bucket_name)
if 'LoggingEnabled' in response:
b['Logging'] = response['LoggingEnabled']
except ClientError as e:
b['errors']['Logging'] = e
try:
response = s3_client.get_bucket_cors(Bucket=bucket_name)
if 'CORSRules' in response:
b['CORSRules'] = response['CORSRules']
except ClientError as e:
if e.response['Error']['Code'] != 'NoSuchCORSConfiguration':
b['errors']['CORSRules'] = e
save_resource_to_s3(RESOURCE_PATH, bucket_name, b)
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
    if isinstance(obj, (datetime.datetime, datetime.date)):
return obj.isoformat()
raise TypeError ("Type %s not serializable" % type(obj)) | [
"dateutil.tz.gettz",
"json.loads",
"logging.getLogger",
"json.dumps"
] | [((210, 229), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (227, 229), False, 'import logging\n'), ((523, 572), 'json.loads', 'json.loads', (["event['Records'][0]['Sns']['Message']"], {}), "(event['Records'][0]['Sns']['Message'])\n", (533, 572), False, 'import json\n'), ((261, 290), 'logging.getLogger', 'logging.getLogger', (['"""botocore"""'], {}), "('botocore')\n", (278, 290), False, 'import logging\n'), ((317, 343), 'logging.getLogger', 'logging.getLogger', (['"""boto3"""'], {}), "('boto3')\n", (334, 343), False, 'import logging\n'), ((474, 507), 'json.dumps', 'json.dumps', (['event'], {'sort_keys': '(True)'}), '(event, sort_keys=True)\n', (484, 507), False, 'import json\n'), ((612, 647), 'json.dumps', 'json.dumps', (['message'], {'sort_keys': '(True)'}), '(message, sort_keys=True)\n', (622, 647), False, 'import json\n'), ((2039, 2061), 'dateutil.tz.gettz', 'tz.gettz', (['"""US/Eastern"""'], {}), "('US/Eastern')\n", (2047, 2061), False, 'from dateutil import tz\n'), ((3418, 3448), 'json.loads', 'json.loads', (["response['Policy']"], {}), "(response['Policy'])\n", (3428, 3448), False, 'import json\n')] |
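lambda_handler above reads only message['account_id'] from the SNS payload, so a local smoke test can be driven with a hand-built event. A hedged sketch (the account id is a placeholder; an actual run still needs AWS credentials plus the lib.account and lib.common helpers):
import json

fake_event = {
    "Records": [
        {"Sns": {"Message": json.dumps({"account_id": "123456789012"})}}
    ]
}
# lambda_handler(fake_event, context=None)  # uncomment once credentials and helpers are in place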
#!/usr/bin/python3
from PyQt5.QtGui import QBrush, QColor, QPainter
from PyQt5.QtChart import QChartView, QChart, QPieSeries
from portfolio.utils import confighandler
from portfolio.db.fdbhandler import balances
from portfolio.db.cdbhandler import cbalances
from portfolio.gui.ui_components.fonts import ChartTitleFont
class DistributionPieChart(QChartView):
"""
Pie chart that shows the distribution of capital according
to several criteria
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Chart
self.chart = QChart()
self.chart.setTheme(QChart.ChartThemeDark)
self.chart.legend().hide()
self.chart.createDefaultAxes()
self.chart.setAnimationOptions(QChart.SeriesAnimations)
self.chart.setBackgroundVisible(False)
self.chart.setTitle(" ")
self.chart.setTitleBrush(QBrush(QColor('white')))
self.setChart(self.chart)
self.setRenderHint(QPainter.Antialiasing)
self.setStyleSheet("border: 0px; background-color: rgba(0,0,0,0)")
self.setupSeries() # Initialize to all mode
def setupSeries(self, mode="all"):
"""
Chart gets updated displaying the new data.
Modes:
- all : distribution between all accounts
            - accounts : distribution between portfolio (fiat) accounts
            - crypto : distribution between crypto accounts
            - currency : distribution between total fiat and total crypto holdings
"""
# Series
self.chart.removeAllSeries() # Remove any previous series
self.series = QPieSeries()
# Get data
if mode == "all":
data = balances.get_all_accounts(
) + cbalances.get_all_accounts_with_amount_fiat()
elif mode == "accounts":
data = balances.get_all_accounts()
elif mode == "crypto":
data = cbalances.get_all_accounts_with_amount_fiat()
elif mode == "currency":
data = [(confighandler.get_fiat_currency().upper(), balances.get_total_balance_all_accounts(
)), ("BTC", cbalances.get_total_balance_all_accounts_fiat())]
data.sort(key=lambda x: x[1]) # Sort
# Set Chart Title
self.total = sum([i[1] for i in data])
self.setDefaultTitle()
# Add to series
for d in data:
self.series.append(d[0], d[1])
# Hide little slices' labels
self.series.setLabelsVisible(True)
for slc in self.series.slices():
if slc.angleSpan() < 5:
slc.setLabelVisible(False)
slc.setLabelArmLengthFactor(0.05)
self.chart.addSeries(self.series)
# Signals and functionality
self.series.hovered.connect(self.selectSlice)
def selectSlice(self, _slice, state):
""" Highlight selected slice """
font = ChartTitleFont()
if state:
font.setPointSize(20)
_slice.setLabelVisible(True)
self.chart.setTitle(
f"{int(_slice.value())} {confighandler.get_fiat_currency().upper()} {round(_slice.percentage()*100,1)}%")
else:
font.setBold(False)
if _slice.angleSpan() < 5:
_slice.setLabelVisible(False)
_slice.setExploded(False)
self.setDefaultTitle()
_slice.setLabelFont(font)
def setDefaultTitle(self):
""" Sets title as total balance from all pie slices """
self.chart.setTitle(
f"{int(self.total)} {confighandler.get_fiat_currency().upper()}")
font = ChartTitleFont(fontsize=20)
self.chart.setTitleFont(font)
| [
"PyQt5.QtGui.QColor",
"portfolio.db.fdbhandler.balances.get_total_balance_all_accounts",
"PyQt5.QtChart.QPieSeries",
"portfolio.db.cdbhandler.cbalances.get_all_accounts_with_amount_fiat",
"portfolio.gui.ui_components.fonts.ChartTitleFont",
"portfolio.db.cdbhandler.cbalances.get_total_balance_all_accounts_fiat",
"portfolio.db.fdbhandler.balances.get_all_accounts",
"portfolio.utils.confighandler.get_fiat_currency",
"PyQt5.QtChart.QChart"
] | [((590, 598), 'PyQt5.QtChart.QChart', 'QChart', ([], {}), '()\n', (596, 598), False, 'from PyQt5.QtChart import QChartView, QChart, QPieSeries\n'), ((1618, 1630), 'PyQt5.QtChart.QPieSeries', 'QPieSeries', ([], {}), '()\n', (1628, 1630), False, 'from PyQt5.QtChart import QChartView, QChart, QPieSeries\n'), ((2903, 2919), 'portfolio.gui.ui_components.fonts.ChartTitleFont', 'ChartTitleFont', ([], {}), '()\n', (2917, 2919), False, 'from portfolio.gui.ui_components.fonts import ChartTitleFont\n'), ((3624, 3651), 'portfolio.gui.ui_components.fonts.ChartTitleFont', 'ChartTitleFont', ([], {'fontsize': '(20)'}), '(fontsize=20)\n', (3638, 3651), False, 'from portfolio.gui.ui_components.fonts import ChartTitleFont\n'), ((910, 925), 'PyQt5.QtGui.QColor', 'QColor', (['"""white"""'], {}), "('white')\n", (916, 925), False, 'from PyQt5.QtGui import QBrush, QColor, QPainter\n'), ((1696, 1723), 'portfolio.db.fdbhandler.balances.get_all_accounts', 'balances.get_all_accounts', ([], {}), '()\n', (1721, 1723), False, 'from portfolio.db.fdbhandler import balances\n'), ((1739, 1784), 'portfolio.db.cdbhandler.cbalances.get_all_accounts_with_amount_fiat', 'cbalances.get_all_accounts_with_amount_fiat', ([], {}), '()\n', (1782, 1784), False, 'from portfolio.db.cdbhandler import cbalances\n'), ((1838, 1865), 'portfolio.db.fdbhandler.balances.get_all_accounts', 'balances.get_all_accounts', ([], {}), '()\n', (1863, 1865), False, 'from portfolio.db.fdbhandler import balances\n'), ((1917, 1962), 'portfolio.db.cdbhandler.cbalances.get_all_accounts_with_amount_fiat', 'cbalances.get_all_accounts_with_amount_fiat', ([], {}), '()\n', (1960, 1962), False, 'from portfolio.db.cdbhandler import cbalances\n'), ((3564, 3597), 'portfolio.utils.confighandler.get_fiat_currency', 'confighandler.get_fiat_currency', ([], {}), '()\n', (3595, 3597), False, 'from portfolio.utils import confighandler\n'), ((2061, 2102), 'portfolio.db.fdbhandler.balances.get_total_balance_all_accounts', 'balances.get_total_balance_all_accounts', ([], {}), '()\n', (2100, 2102), False, 'from portfolio.db.fdbhandler import balances\n'), ((2127, 2174), 'portfolio.db.cdbhandler.cbalances.get_total_balance_all_accounts_fiat', 'cbalances.get_total_balance_all_accounts_fiat', ([], {}), '()\n', (2172, 2174), False, 'from portfolio.db.cdbhandler import cbalances\n'), ((3087, 3120), 'portfolio.utils.confighandler.get_fiat_currency', 'confighandler.get_fiat_currency', ([], {}), '()\n', (3118, 3120), False, 'from portfolio.utils import confighandler\n'), ((2018, 2051), 'portfolio.utils.confighandler.get_fiat_currency', 'confighandler.get_fiat_currency', ([], {}), '()\n', (2049, 2051), False, 'from portfolio.utils import confighandler\n')] |
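A hedged usage sketch for the pie chart above: it is a regular QChartView, so it can be dropped into any Qt layout. This assumes a running QApplication and an initialised portfolio database, since the constructor queries balances immediately.
from PyQt5.QtWidgets import QApplication, QMainWindow

app = QApplication([])
window = QMainWindow()
chart = DistributionPieChart()
chart.setupSeries(mode="crypto")  # or "all", "accounts", "currency"
window.setCentralWidget(chart)
window.show()
app.exec_()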
#coding=utf8
import re
import itchat
from itchat.content import *
'''
Version 0.0.1
Features:
    1. Match the group-chat keyword 说 ("say"), then reply with the received message
'''
# Group chat monitoring
@itchat.msg_register(TEXT, isGroupChat = True)
def groupchat_reply(msg):
room_name = itchat.search_chatrooms(userName=msg[u'FromUserName'])
print(u"来自-%s-群聊消息|%s:%s"%(room_name[ u'NickName'],msg['ActualNickName'],msg['Text']))
    # Match the 说 ("say") keyword
if(re.match(u'^说', msg['Text'])):
itchat.send_msg(msg['Text'].replace(u'说', ''),msg[u'FromUserName'])
if(re.match(u'^搜', msg['Text'])):
itchat.send_msg(u'电影名xxx',msg[u'FromUserName'])
itchat.auto_login(hotReload=True,enableCmdQR=True)
itchat.run(debug=True)
| [
"itchat.msg_register",
"re.match",
"itchat.search_chatrooms",
"itchat.send_msg",
"itchat.auto_login",
"itchat.run"
] | [((120, 163), 'itchat.msg_register', 'itchat.msg_register', (['TEXT'], {'isGroupChat': '(True)'}), '(TEXT, isGroupChat=True)\n', (139, 163), False, 'import itchat\n'), ((578, 629), 'itchat.auto_login', 'itchat.auto_login', ([], {'hotReload': '(True)', 'enableCmdQR': '(True)'}), '(hotReload=True, enableCmdQR=True)\n', (595, 629), False, 'import itchat\n'), ((630, 652), 'itchat.run', 'itchat.run', ([], {'debug': '(True)'}), '(debug=True)\n', (640, 652), False, 'import itchat\n'), ((208, 262), 'itchat.search_chatrooms', 'itchat.search_chatrooms', ([], {'userName': "msg[u'FromUserName']"}), "(userName=msg[u'FromUserName'])\n", (231, 262), False, 'import itchat\n'), ((375, 403), 're.match', 're.match', (['u"""^说"""', "msg['Text']"], {}), "(u'^说', msg['Text'])\n", (383, 403), False, 'import re\n'), ((490, 518), 're.match', 're.match', (['u"""^搜"""', "msg['Text']"], {}), "(u'^搜', msg['Text'])\n", (498, 518), False, 'import re\n'), ((529, 577), 'itchat.send_msg', 'itchat.send_msg', (['u"""电影名xxx"""', "msg[u'FromUserName']"], {}), "(u'电影名xxx', msg[u'FromUserName'])\n", (544, 577), False, 'import itchat\n')] |
#! /usr/bin/python3
# -*- coding: utf-8 -*-
####################################################################################################
import io
####################################################################################################
def TailFile(p_FileName, p_BufferSize=io.DEFAULT_BUFFER_SIZE, p_Encoding='utf8', p_Separator = '\n', p_KeepSeparator=True):
'''
Iterator used to read a file starting with the end, and proceeding backwards.
p_FileName : the full path to the file to be read backwards
p_BufferSize : the size of the file chunk to read into memory for processing
p_Encoding : the encoding of the file, default is utf-8
p_Separator : the character(s) used to separate the stream. Usually either newline or space.
    p_KeepSeparator : keep the separator at the end of each returned fragment (to be compatible with readline() )
'''
l_Separator = bytes(p_Separator, p_Encoding)
l_KeepSeparator = l_Separator if p_KeepSeparator else b''
l_Fragment = bytearray()
with open(p_FileName, 'rb') as l_File:
l_File.seek(0, io.SEEK_END)
l_Blocks = l_File.tell() // p_BufferSize
while l_Blocks >= 0:
l_File.seek(l_Blocks * p_BufferSize, io.SEEK_SET)
l_BufferContent = l_File.read(p_BufferSize) # might overshoot at first read
l_Blocks -= 1
if not l_Separator in l_BufferContent:
l_Fragment = l_BufferContent + l_Fragment
else:
l_BufferFragments = l_BufferContent.split(l_Separator)
yield str(l_BufferFragments[-1] + l_Fragment + l_KeepSeparator, p_Encoding)
for l_BufferFragment in reversed(l_BufferFragments[1:-1]):
yield str(l_BufferFragment + l_KeepSeparator, p_Encoding)
l_Fragment = bytearray(l_BufferFragments[0])
yield str(l_Fragment, p_Encoding)
####################################################################################################
if __name__ == '__main__':
import os
import sys
import time
C_TestFileName = 'tmp.txt'
C_TestBufferSize = 9182
if len(sys.argv) != 2:
print ('Usage: python3 tailfile.py <testfile>')
sys.exit(0)
if True: # benchmark
l_Moment1 = time.time()
l_Count1 = 0
with open(sys.argv[1], 'r') as l_File:
for l_Line in l_File:
l_Count1 += 1
l_Moment2 = time.time()
l_Count2 = 0
for l_Line in TailFile(sys.argv[1], p_BufferSize=C_TestBufferSize):
l_Count2 += 1
l_Moment3 = time.time()
print ('{}: {} {}'.format(l_Count1, (l_Moment2 - l_Moment1), (l_Moment3 - l_Moment2)))
else: # test algorithm
# write reversed content to tmp file
with open(C_TestFileName, 'w') as l_TempFile:
for l_Line in TailFile(sys.argv[1], C_TestBufferSize, p_Separator='\n'):
l_TempFile.write(l_Line)
# print (l_Line, end='')
# read and compare original file to reversed tmp file, should be identical
for l_Line, l_Copy in zip(open(sys.argv[1], 'r'), TailFile(C_TestFileName, C_TestBufferSize)):
if l_Line != l_Copy:
print ('|'+l_Line+'|\n---\n|'+l_Copy+'|')
break
os.remove(C_TestFileName) | [
"os.remove",
"sys.exit",
"time.time"
] | [((2399, 2410), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2407, 2410), False, 'import sys\n'), ((2461, 2472), 'time.time', 'time.time', ([], {}), '()\n', (2470, 2472), False, 'import time\n'), ((2631, 2642), 'time.time', 'time.time', ([], {}), '()\n', (2640, 2642), False, 'import time\n'), ((2796, 2807), 'time.time', 'time.time', ([], {}), '()\n', (2805, 2807), False, 'import time\n'), ((3550, 3575), 'os.remove', 'os.remove', (['C_TestFileName'], {}), '(C_TestFileName)\n', (3559, 3575), False, 'import os\n')] |
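A minimal usage sketch of the iterator above, assuming an ordinary text log at a hypothetical path: print the last ten lines without reading the whole file into memory.
from itertools import islice

for line in islice(TailFile("/var/log/app.log"), 10):
    print(line, end="")  # the separator is kept by default, so no extra newline is added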
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import textwrap
from typing import Dict, Optional
import libcst as cst
from libcst.codemod import CodemodContext, VisitorBasedCodemodCommand
presets_per_formatter: Dict[str, Dict[str, int]] = {
"black": {
"parameter_count": 1,
"argument_count": 2,
},
"yapf": {
"parameter_count": 2,
"argument_count": 2,
},
}
class AddTrailingCommas(VisitorBasedCodemodCommand):
DESCRIPTION: str = textwrap.dedent(
"""
Codemod that adds trailing commas to arguments in function
headers and function calls.
The idea is that both the black and yapf autoformatters will
tend to split headers and function calls so that there
is one parameter / argument per line if there is a trailing
comma:
- Black will always separate them by line
- Yapf appears to do so whenever there are at least two arguments
Applying this codemod (and then an autoformatter) may make
it easier to read function definitions and calls
"""
)
def __init__(
self,
context: CodemodContext,
formatter: str = "black",
parameter_count: Optional[int] = None,
argument_count: Optional[int] = None,
) -> None:
super().__init__(context)
presets = presets_per_formatter.get(formatter)
if presets is None:
raise ValueError(
f"Unknown formatter {formatter!r}. Presets exist for "
+ ", ".join(presets_per_formatter.keys())
)
self.parameter_count: int = parameter_count or presets["parameter_count"]
self.argument_count: int = argument_count or presets["argument_count"]
@staticmethod
def add_args(arg_parser: argparse.ArgumentParser) -> None:
arg_parser.add_argument(
"--formatter",
dest="formatter",
metavar="FORMATTER",
help="Formatter to target (e.g. yapf or black)",
type=str,
default="black",
)
        arg_parser.add_argument(
            "--parameter-count",
dest="parameter_count",
metavar="PARAMETER_COUNT",
help="Minimal number of parameters for us to add trailing comma",
type=int,
default=None,
)
arg_parser.add_argument(
"--argument-count",
dest="argument_count",
metavar="ARGUMENT_COUNT",
help="Minimal number of arguments for us to add trailing comma",
type=int,
default=None,
)
def leave_Parameters(
self,
original_node: cst.Parameters,
updated_node: cst.Parameters,
) -> cst.Parameters:
skip = (
#
self.parameter_count is None
or len(updated_node.params) < self.parameter_count
or (
len(updated_node.params) == 1
and updated_node.params[0].name.value in {"self", "cls"}
)
)
if skip:
return updated_node
else:
last_param = updated_node.params[-1]
return updated_node.with_changes(
params=(
*updated_node.params[:-1],
last_param.with_changes(comma=cst.Comma()),
),
)
def leave_Call(
self,
original_node: cst.Call,
updated_node: cst.Call,
) -> cst.Call:
if len(updated_node.args) < self.argument_count:
return updated_node
else:
last_arg = updated_node.args[-1]
return updated_node.with_changes(
args=(
*updated_node.args[:-1],
last_arg.with_changes(comma=cst.Comma()),
),
)
| [
"textwrap.dedent",
"libcst.Comma"
] | [((634, 1244), 'textwrap.dedent', 'textwrap.dedent', (['"""\n Codemod that adds trailing commas to arguments in function\n headers and function calls.\n\n The idea is that both the black and yapf autoformatters will\n tend to split headers and function calls so that there\n is one parameter / argument per line if there is a trailing\n comma:\n - Black will always separate them by line\n - Yapf appears to do so whenever there are at least two arguments\n\n Applying this codemod (and then an autoformatter) may make\n it easier to read function definitions and calls\n """'], {}), '(\n """\n Codemod that adds trailing commas to arguments in function\n headers and function calls.\n\n The idea is that both the black and yapf autoformatters will\n tend to split headers and function calls so that there\n is one parameter / argument per line if there is a trailing\n comma:\n - Black will always separate them by line\n - Yapf appears to do so whenever there are at least two arguments\n\n Applying this codemod (and then an autoformatter) may make\n it easier to read function definitions and calls\n """\n )\n', (649, 1244), False, 'import textwrap\n'), ((3502, 3513), 'libcst.Comma', 'cst.Comma', ([], {}), '()\n', (3511, 3513), True, 'import libcst as cst\n'), ((3978, 3989), 'libcst.Comma', 'cst.Comma', ([], {}), '()\n', (3987, 3989), True, 'import libcst as cst\n')] |
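A hedged sketch of driving the codemod above programmatically instead of through the libcst codemod CLI (the source string is illustrative):
import libcst as cst
from libcst.codemod import CodemodContext

source = "def add(a, b):\n    return total(a, b, 0)\n"
command = AddTrailingCommas(CodemodContext(), formatter="black")
rewritten = command.transform_module(cst.parse_module(source))
print(rewritten.code)  # both the parameter list and the call gain a trailing comma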
"""
Tests with the Izhikevich neuron model.
"""
import numpy as np
import matplotlib.pyplot as plt
import pyNN.nest as sim
from pyNN.utility.plotting import Figure, Panel
# === Configure the simulator ================================================
duration = 100
dt = 0.01
sim.setup(timestep=dt, min_delay=0.1)
# === Build and instrument the network =======================================
phasic_spiking = {'a': 0.02, 'b': 0.25, 'c': -65, 'd': 6}
class_2 = {'a': 0.2, 'b': 0.26, 'c': -65, 'd': 0}
params = class_2
n = 100
v_init = -64
input_currents = 0.0005 * np.logspace(-4, 6, n, base=np.e)
neurons = sim.Population(n, sim.Izhikevich(i_offset=input_currents, **params))
neurons.record(['v', 'u', 'spikes'])
neurons.initialize(v=v_init, u=-params['b']*v_init)
# === Run the simulation =====================================================
sim.run(duration)
# === Save the results, optionally plot a figure =============================
data = neurons.get_data().segments[0]
first_spiketimes = []
rates = []
for spiketrain in data.spiketrains:
if len(spiketrain) == 0:
first_spiketimes.append(np.infty)
else:
first_spiketimes.append(spiketrain[0])
rates.append(np.count_nonzero(spiketrain) / duration)
plt.scatter(input_currents, 1 / np.array(first_spiketimes),
label='inverse ttfs')
plt.scatter(input_currents, rates, label='avg spikerate')
plt.legend()
plt.savefig('FI')
v = data.filter(name="v")[0]
u = data.filter(name="u")[0]
Figure(Panel(v, ylabel="Membrane potential (mV)", xticks=True,
xlabel="Time (ms)", yticks=True),
Panel(u, ylabel="u variable (units?)")).save('mem')
# === Clean up and quit ========================================================
sim.end()
| [
"pyNN.nest.run",
"numpy.count_nonzero",
"pyNN.nest.setup",
"pyNN.nest.end",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"numpy.logspace",
"numpy.array",
"pyNN.utility.plotting.Panel",
"matplotlib.pyplot.savefig",
"pyNN.nest.Izhikevich"
] | [((281, 318), 'pyNN.nest.setup', 'sim.setup', ([], {'timestep': 'dt', 'min_delay': '(0.1)'}), '(timestep=dt, min_delay=0.1)\n', (290, 318), True, 'import pyNN.nest as sim\n'), ((857, 874), 'pyNN.nest.run', 'sim.run', (['duration'], {}), '(duration)\n', (864, 874), True, 'import pyNN.nest as sim\n'), ((1345, 1402), 'matplotlib.pyplot.scatter', 'plt.scatter', (['input_currents', 'rates'], {'label': '"""avg spikerate"""'}), "(input_currents, rates, label='avg spikerate')\n", (1356, 1402), True, 'import matplotlib.pyplot as plt\n'), ((1403, 1415), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1413, 1415), True, 'import matplotlib.pyplot as plt\n'), ((1416, 1433), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""FI"""'], {}), "('FI')\n", (1427, 1433), True, 'import matplotlib.pyplot as plt\n'), ((1745, 1754), 'pyNN.nest.end', 'sim.end', ([], {}), '()\n', (1752, 1754), True, 'import pyNN.nest as sim\n'), ((574, 606), 'numpy.logspace', 'np.logspace', (['(-4)', '(6)', 'n'], {'base': 'np.e'}), '(-4, 6, n, base=np.e)\n', (585, 606), True, 'import numpy as np\n'), ((635, 684), 'pyNN.nest.Izhikevich', 'sim.Izhikevich', ([], {'i_offset': 'input_currents'}), '(i_offset=input_currents, **params)\n', (649, 684), True, 'import pyNN.nest as sim\n'), ((1283, 1309), 'numpy.array', 'np.array', (['first_spiketimes'], {}), '(first_spiketimes)\n', (1291, 1309), True, 'import numpy as np\n'), ((1209, 1237), 'numpy.count_nonzero', 'np.count_nonzero', (['spiketrain'], {}), '(spiketrain)\n', (1225, 1237), True, 'import numpy as np\n'), ((1500, 1592), 'pyNN.utility.plotting.Panel', 'Panel', (['v'], {'ylabel': '"""Membrane potential (mV)"""', 'xticks': '(True)', 'xlabel': '"""Time (ms)"""', 'yticks': '(True)'}), "(v, ylabel='Membrane potential (mV)', xticks=True, xlabel='Time (ms)',\n yticks=True)\n", (1505, 1592), False, 'from pyNN.utility.plotting import Figure, Panel\n'), ((1610, 1648), 'pyNN.utility.plotting.Panel', 'Panel', (['u'], {'ylabel': '"""u variable (units?)"""'}), "(u, ylabel='u variable (units?)')\n", (1615, 1648), False, 'from pyNN.utility.plotting import Figure, Panel\n')] |
import sys
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('--indexes', dest='inFile', required=True,
help='Path to indexes file, one per line.')
parser.add_argument('--config', dest='config', required=True)
o=parser.parse_args()
outFile = open(f"{o.config}.summary.csv",'w')
indexes = []
indexFile = open(o.inFile, 'r')
for line in indexFile:
indexes.append(line.strip())
indexFile.close()
outFile.write("RunID,Index,Raw reads,SSCS reads,Mapped SSCS,DCS reads,Mapped DCS,% mapped SSCS,% mapped DCS,Raw/SSCS,SSCS/DCS,Peak Family Size,Max Family Size,Mean Insert Size,SSCS On Target,DCS On Target,DCS Mean Depth,DCS Max Depth,DCS Uncovered Target,Nucleotides Sequenced,A's Sequenced,T's Sequenced,C's Sequenced,G's Sequenced,Mutations,Mutation Frequency,A>T,A>C,A>G,T>A,T>C,T>G,C>A,C>T,C>G,G>A,G>T,G>C,ins,dels\n")
for index in indexes:
print(f"Index {index}")
print("Reading config")
# Get the run ID from the config file
runID=""
c=""
C=""
d=""
configFile = open(f"{index}/{index}_config.sh", 'r')
for line in configFile:
if "RUN_ID=" in line:
runID = line.strip().split('=')[1].strip('"')
elif "minClonal=" in line:
c=line.strip().split('=')[1].split()[0]
elif "maxClonal=" in line:
C=line.strip().split('=')[1].split()[0]
elif "minDepth=" in line:
d=line.strip().split('=')[1].split()[0]
configFile.close()
print("Getting read counts")
# get read counts
# Read tagstats files:
rawFlagstats = open(f"{index}/Stats/data/{runID}.temp.sort.flagstats.txt", 'r').readlines()
sscsFlagstats = open(f"{index}/Stats/data/{runID}_mem.sscs.sort.flagstats.txt", 'r').readlines()
dcsFlagstats = open(f"{index}/Stats/data/{runID}_mem.dcs.sort.flagstats.txt", 'r').readlines()
rawReads = float(rawFlagstats[0].split()[0])
#rawReads = float(pysam.flagstat(f"{index}/{runID}.temp.sort.bam").split('\n')[0].split()[0])
#sscsFlagstat=pysam.flagstat(f"{index}/{runID}_mem.sscs.sort.bam").split('\n')
sscsReads=float(sscsFlagstats[0].split()[0])
mappedSscs=float(sscsFlagstats[4].split()[0])
# ~ dcsFlagstat=pysam.flagstat(f"{index}/{runID}_mem.dcs.sort.bam").split('\n')
dcsReads=float(dcsFlagstats[0].split()[0])
mappedDcs=float(dcsFlagstats[4].split()[0])
print("Processing Tagstats")
# get tagstats numbers
tagstatsFile = open(f"{index}/Stats/data/{runID}.tagstats.txt", 'r')
lastProportion=1
peakProportion = 0
peakSize = 1
maxSize=0
for line in tagstatsFile:
if float(line.split()[2]) <= lastProportion:
lastProportion = float(line.split()[2])
elif float(line.split()[2]) >= peakProportion:
lastProportion = 0
peakSize = line.split()[0]
peakProportion = float(line.split()[2])
maxSize = line.split()[0]
tagstatsFile.close()
sscsOnTarget="NA"
# read depth file:
print("Processing Depth")
depthFile = open(f"{index}/Stats/data/{runID}.dcs.region.mutpos.vcf_depth.txt", 'r')
totDepth = 0
numLocs = 0
dcsMaxDepth = 0
for line in depthFile:
if "#" not in line:
totDepth += int(line.split('\t')[3])
numLocs += 1
dcsMaxDepth = max(dcsMaxDepth, int(line.split('\t')[3]))
dcsOnTarget="NA"
if numLocs != 0:
dcsMeanDepth=totDepth / numLocs
else:
dcsMeanDepth=0
dcsUncovered="NA"
depthFile.close()
# insert size file
print("Processing Insert Size")
insertSizeFile = open(f"{index}/Stats/data/{runID}.dcs.iSize_Metrics.txt", 'r')
totInsertSize = 0
numInsertReads = 0
line = next(insertSizeFile)
while "## HISTOGRAM" not in line:
line = next(insertSizeFile)
contIter = True
line = next(insertSizeFile)
while contIter:
try:
line = next(insertSizeFile)
if line.strip() != "":
linebins = [int(x) for x in line.strip().split('\t')]
totInsertSize += linebins[0] * linebins[1]
numInsertReads += linebins[1]
except StopIteration:
contIter = False
if numInsertReads == 0:
meanInsertSize = "N/A"
else:
meanInsertSize = totInsertSize / numInsertReads
print("Processing countmuts")
# get countmuts data
sys.stderr.write(f"{index}/{runID}.dcs.filt.no_overlap.region.c{c}-{C}.d{d}.unique.countmuts.txt\n")
cmFile = open(f"{index}/Final/dcs/{runID}.dcs.countmuts.csv", 'r')
AsSeq=""
AtoT=""
AtoC=""
AtoG=""
TsSeq=""
TtoA=""
TtoC=""
TtoG=""
CsSeq=""
CtoA=""
CtoT=""
CtoG=""
GsSeq=""
GtoA=""
GtoT=""
GtoC=""
totalNt=""
totalMuts=""
ins=""
dels=""
for line in cmFile:
if "##" not in line and "OVERALL" in line:
linebins = line.strip().split(',')
if "A>T" in line:
AtoT=linebins[4]
AsSeq=linebins[5]
elif "A>C" in line:
AtoC=linebins[4]
elif "A>G" in line:
AtoG=linebins[4]
elif "T>A" in line:
TtoA=linebins[4]
TsSeq=linebins[5]
elif "T>C" in line:
TtoC=linebins[4]
elif "T>G" in line:
TtoG=linebins[4]
elif "C>A" in line:
CtoA=linebins[4]
CsSeq=linebins[5]
elif "C>T" in line:
CtoT=linebins[4]
elif "C>G" in line:
CtoG=linebins[4]
elif "G>A" in line:
GtoA=linebins[4]
GsSeq=linebins[5]
elif "G>T" in line:
GtoT=linebins[4]
elif "G>C" in line:
GtoC=linebins[4]
elif "Total" in line and "SNV" in line:
totalNt = float(linebins[5])
totalMuts = float(linebins[4])
elif "Total" in line and "INS" in line:
ins=linebins[4]
elif "Total" in line and "DEL" in line:
dels=linebins[4]
cmFile.close()
if sscsReads > 0:
percentMappedSSCS = mappedSscs/sscsReads
rawPerSSCS = rawReads/sscsReads
else:
percentMappedSSCS = 0
rawPerSSCS = 0
if dcsReads > 0:
percentMappedDCS = mappedDcs/dcsReads
sscsPerDCS = sscsReads/dcsReads
else:
percentMappedDCS = 0
sscsPerDCS = 0
if totalNt > 0:
mutFreq = totalMuts/totalNt
else:
mutFreq = 0
outFile.write(
f"{runID},"
f"{index},{rawReads},{sscsReads},{mappedSscs},{dcsReads},{mappedDcs},"
f"{percentMappedSSCS},{percentMappedDCS},{rawPerSSCS},{sscsPerDCS},"
f"{peakSize},{maxSize},{meanInsertSize},{sscsOnTarget},{dcsOnTarget},{dcsMeanDepth},"
f"{dcsMaxDepth},{dcsUncovered},{totalNt},{AsSeq},{TsSeq},{CsSeq},{GsSeq},{totalMuts},{mutFreq},"
f"{AtoT},{AtoC},{AtoG},{TtoA},{TtoC},{TtoG},{CtoA},{CtoT},{CtoG},{GtoA},"
f"{GtoT},{GtoC},{ins},{dels}\n"
)
outFile.close()
| [
"sys.stderr.write",
"argparse.ArgumentParser"
] | [((57, 73), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (71, 73), False, 'from argparse import ArgumentParser\n'), ((4423, 4533), 'sys.stderr.write', 'sys.stderr.write', (['f"""{index}/{runID}.dcs.filt.no_overlap.region.c{c}-{C}.d{d}.unique.countmuts.txt\n"""'], {}), "(\n f'{index}/{runID}.dcs.filt.no_overlap.region.c{c}-{C}.d{d}.unique.countmuts.txt\\n'\n )\n", (4439, 4533), False, 'import sys\n')] |
from pathlib import Path
from setuptools import find_packages, setup
long_description: str = (Path(__file__).parent.resolve() / "README.md").read_text(
encoding="utf-8"
)
setup(
name="crawler",
version="0.0.0",
description="A Web Crawler",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/darkslab/Crawler",
author="<NAME>",
author_email="<EMAIL>",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: Software Development :: Build Tools",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.8",
],
keywords="crawler",
package_dir={"": "src"},
packages=find_packages(where="src"),
python_requires=">=3.8, <4",
install_requires=[
"click==7.1.2",
"aiohttp==3.8.1",
"yarl==1.7.2",
],
entry_points={
"console_scripts": [
"crawler=crawler:cli",
],
},
)
| [
"pathlib.Path",
"setuptools.find_packages"
] | [((831, 857), 'setuptools.find_packages', 'find_packages', ([], {'where': '"""src"""'}), "(where='src')\n", (844, 857), False, 'from setuptools import find_packages, setup\n'), ((96, 110), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (100, 110), False, 'from pathlib import Path\n')] |
import json
import os
import requests
data_file = './description.json'
with open(data_file) as f:
data = json.load(f)
home_page = data['homepage']
seqs = data['sequences']
for v in seqs:
link = '%s%s' % (home_page,v['annotations']['url'])
print('download %s' % link)
os.system('wget %s -O %s_ann.zip' % (link, v['name']))
link = '%s%s' % (home_page,v['channels']['color']['url'])
print('download %s' % link)
os.system('wget %s -O %s_chn.zip' % (link, v['name']))
| [
"json.load",
"os.system"
] | [((109, 121), 'json.load', 'json.load', (['f'], {}), '(f)\n', (118, 121), False, 'import json\n'), ((285, 339), 'os.system', 'os.system', (["('wget %s -O %s_ann.zip' % (link, v['name']))"], {}), "('wget %s -O %s_ann.zip' % (link, v['name']))\n", (294, 339), False, 'import os\n'), ((438, 492), 'os.system', 'os.system', (["('wget %s -O %s_chn.zip' % (link, v['name']))"], {}), "('wget %s -O %s_chn.zip' % (link, v['name']))\n", (447, 492), False, 'import os\n')] |
import hyperion
import time
import colorsys
# Get the parameters
rotationTime = float(hyperion.args.get('rotation-time', 2.0))
colorOne = hyperion.args.get('color_one', (255,0,0))
colorTwo = hyperion.args.get('color_two', (0,0,255))
colorsCount = hyperion.args.get('colors_count', hyperion.ledCount/2)
reverse = bool(hyperion.args.get('reverse', False))
# Check parameters
rotationTime = max(0.1, rotationTime)
colorsCount = min(hyperion.ledCount/2, colorsCount)
# Initialize the led data
hsv1 = colorsys.rgb_to_hsv(colorOne[0]/255.0, colorOne[1]/255.0, colorOne[2]/255.0)
hsv2 = colorsys.rgb_to_hsv(colorTwo[0]/255.0, colorTwo[1]/255.0, colorTwo[2]/255.0)
colorBlack = (0,0,0)
ledData = bytearray()
for i in range(hyperion.ledCount):
if i <= colorsCount:
rgb = colorsys.hsv_to_rgb(hsv1[0], hsv1[1], hsv1[2])
elif (i >= hyperion.ledCount/2-1) & (i < (hyperion.ledCount/2) + colorsCount):
rgb = colorsys.hsv_to_rgb(hsv2[0], hsv2[1], hsv2[2])
else:
rgb = colorBlack
ledData += bytearray((int(255*rgb[0]), int(255*rgb[1]), int(255*rgb[2])))
# Calculate the sleep time and rotation increment
increment = 3
sleepTime = rotationTime / hyperion.ledCount
while sleepTime < 0.05:
increment *= 2
sleepTime *= 2
increment %= hyperion.ledCount
# Switch direction if needed
if reverse:
increment = -increment
# Start the write data loop
while not hyperion.abort():
hyperion.setColor(ledData)
ledData = ledData[-increment:] + ledData[:-increment]
time.sleep(sleepTime)
| [
"colorsys.rgb_to_hsv",
"colorsys.hsv_to_rgb",
"hyperion.abort",
"hyperion.setColor",
"time.sleep",
"hyperion.args.get"
] | [((139, 182), 'hyperion.args.get', 'hyperion.args.get', (['"""color_one"""', '(255, 0, 0)'], {}), "('color_one', (255, 0, 0))\n", (156, 182), False, 'import hyperion\n'), ((192, 235), 'hyperion.args.get', 'hyperion.args.get', (['"""color_two"""', '(0, 0, 255)'], {}), "('color_two', (0, 0, 255))\n", (209, 235), False, 'import hyperion\n'), ((248, 304), 'hyperion.args.get', 'hyperion.args.get', (['"""colors_count"""', '(hyperion.ledCount / 2)'], {}), "('colors_count', hyperion.ledCount / 2)\n", (265, 304), False, 'import hyperion\n'), ((499, 585), 'colorsys.rgb_to_hsv', 'colorsys.rgb_to_hsv', (['(colorOne[0] / 255.0)', '(colorOne[1] / 255.0)', '(colorOne[2] / 255.0)'], {}), '(colorOne[0] / 255.0, colorOne[1] / 255.0, colorOne[2] /\n 255.0)\n', (518, 585), False, 'import colorsys\n'), ((583, 669), 'colorsys.rgb_to_hsv', 'colorsys.rgb_to_hsv', (['(colorTwo[0] / 255.0)', '(colorTwo[1] / 255.0)', '(colorTwo[2] / 255.0)'], {}), '(colorTwo[0] / 255.0, colorTwo[1] / 255.0, colorTwo[2] /\n 255.0)\n', (602, 669), False, 'import colorsys\n'), ((87, 126), 'hyperion.args.get', 'hyperion.args.get', (['"""rotation-time"""', '(2.0)'], {}), "('rotation-time', 2.0)\n", (104, 126), False, 'import hyperion\n'), ((318, 353), 'hyperion.args.get', 'hyperion.args.get', (['"""reverse"""', '(False)'], {}), "('reverse', False)\n", (335, 353), False, 'import hyperion\n'), ((1354, 1370), 'hyperion.abort', 'hyperion.abort', ([], {}), '()\n', (1368, 1370), False, 'import hyperion\n'), ((1373, 1399), 'hyperion.setColor', 'hyperion.setColor', (['ledData'], {}), '(ledData)\n', (1390, 1399), False, 'import hyperion\n'), ((1456, 1477), 'time.sleep', 'time.sleep', (['sleepTime'], {}), '(sleepTime)\n', (1466, 1477), False, 'import time\n'), ((768, 814), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['hsv1[0]', 'hsv1[1]', 'hsv1[2]'], {}), '(hsv1[0], hsv1[1], hsv1[2])\n', (787, 814), False, 'import colorsys\n'), ((903, 949), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['hsv2[0]', 'hsv2[1]', 'hsv2[2]'], {}), '(hsv2[0], hsv2[1], hsv2[2])\n', (922, 949), False, 'import colorsys\n')] |
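A worked example of the sleep/increment adjustment above, assuming the default rotation-time of 2.0 s and a hypothetical strip of 100 LEDs:
# rotationTime = 2.0, hyperion.ledCount = 100  (illustrative values)
# sleepTime = 2.0 / 100 = 0.02      -> below the 0.05 floor
# pass 1:  increment = 6,  sleepTime = 0.04  -> still below 0.05
# pass 2:  increment = 12, sleepTime = 0.08  -> loop exits
# increment %= 100 leaves 12, so each write shifts the pattern 12 LEDs every 80 ms.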
"""
---
title: Attention with Linear Biases (ALiBi) Experiment
summary: This experiment trains an Attention with Linear Biases (ALiBi) based model on Tiny Shakespeare dataset.
---
# [Attention with Linear Biases (ALiBi)](index.html) Experiment
This is an annotated PyTorch experiment to train a [ALiBi model](index.html).
This is based on
[our GPT model](../gpt/index.html).
[](https://app.labml.ai/run/e87bec2a074911ec82cdd1759f10c925)
"""
import torch
from torch.utils.data import DataLoader
from labml import experiment, tracker
from labml.configs import option, calculate
from labml_helpers.datasets.text import SequentialUnBatchedDataset
from labml_nn.transformers.alibi import AlibiMultiHeadAttention
from labml_nn.experiments.nlp_autoregression import transpose_batch
from labml_nn.transformers import TransformerConfigs
from labml_nn.transformers.gpt import Configs as GPTConfigs
class Configs(GPTConfigs):
"""
## Configurations
We extend [GPT configurations](../gpt/index.html) and change the attention mechanism.
"""
# ALiBi based transformer (defined below)
transformer: TransformerConfigs = 'GPT_ALiBi'
# Longer validation set
valid_seq_len: int = 128
valid_loader = 'shuffled_longer_valid_loader'
def other_metrics(self, output: torch.Tensor, target: torch.Tensor):
"""
Log losses at the initial and final tokens
"""
        # If there are more tokens than the training sequence length (during validation),
if self.seq_len < output.shape[0]:
# Log the loss at training sequence length
tracker.add(f'loss.{self.seq_len - 1}.', self.loss_func(output[self.seq_len - 1], target[self.seq_len - 1]))
# Log the loss at the first token
tracker.add(f'loss.0.', self.loss_func(output[0], target[0]))
# Log the loss at the final token
tracker.add(f'loss.{int(output.shape[0]) - 1}.', self.loss_func(output[-1], target[-1]))
def _alibi_mha(c: TransformerConfigs):
"""
Create an ALiBi attention module
"""
return AlibiMultiHeadAttention(c.n_heads, c.d_model, dropout_prob=c.dropout)
# Set all attention mechanisms to ALiBi
calculate(TransformerConfigs.encoder_attn, 'alibi_mha', _alibi_mha)
calculate(TransformerConfigs.decoder_attn, 'alibi_mha', _alibi_mha)
calculate(TransformerConfigs.decoder_mem_attn, 'alibi_mha', _alibi_mha)
@option(Configs.valid_loader)
def shuffled_longer_valid_loader(c: Configs):
"""
Shuffled validation data loader with `valid_seq_len` sequence length
"""
return DataLoader(SequentialUnBatchedDataset(text=c.text.valid,
dataset=c.text,
seq_len=c.valid_seq_len),
batch_size=c.batch_size,
collate_fn=transpose_batch,
shuffle=True)
@option(Configs.transformer, 'GPT_ALiBi')
def _transformer_configs(c: Configs):
"""
### ALiBi based Transformer configurations
"""
# We use our
# [configurable transformer implementation](../configs.html#TransformerConfigs)
conf = TransformerConfigs()
# Set the vocabulary sizes for embeddings and generating logits
conf.n_src_vocab = c.n_tokens
conf.n_tgt_vocab = c.n_tokens
# GPT uses GELU activation for position wise feedforward
conf.ffn.activation = 'GELU'
# ALiBi doesn't use positional embeddings
conf.src_embed = 'no_pos'
conf.tgt_embed = 'no_pos'
# Set all attention mechanisms to ALiBi
conf.encoder_attn = 'alibi_mha'
conf.decoder_attn = 'alibi_mha'
conf.decoder_mem_attn = 'alibi_mha'
#
return conf
def main():
# Create experiment
experiment.create(name="gpt_alibi")
# Create configs
conf = Configs()
# Override configurations
experiment.configs(conf, {
# Use character level tokenizer
'tokenizer': 'character',
# Prompt separator is blank
'prompt_separator': '',
# Starting prompt for sampling
'prompt': 'It is ',
# Use Tiny Shakespeare dataset
'text': 'tiny_shakespeare',
# 'text': 'tiny_shakespeare_no_split',
        # Use a training context size of $64$
        'seq_len': 64,
        # Use a validation context size of $80$
        'valid_seq_len': 80,
        # Train for $128$ epochs
        'epochs': 128,
# Batch size $128$
'batch_size': 128,
# Switch between training and validation for $10$ times
# per epoch
'inner_iterations': 10,
# Transformer configurations
'transformer.d_model': 128,
'transformer.ffn.d_ff': 512,
'transformer.n_heads': 8,
'transformer.n_layers': 4,
'transformer.dropout': 0.1,
})
# Set models for saving and loading
experiment.add_pytorch_models({'model': conf.model})
# Start the experiment
with experiment.start():
# Run training
conf.run()
#
if __name__ == '__main__':
main()
| [
"labml.configs.option",
"labml_nn.transformers.TransformerConfigs",
"labml_nn.transformers.alibi.AlibiMultiHeadAttention",
"labml.experiment.create",
"labml.experiment.configs",
"labml.experiment.add_pytorch_models",
"labml_helpers.datasets.text.SequentialUnBatchedDataset",
"labml.experiment.start",
"labml.configs.calculate"
] | [((2260, 2327), 'labml.configs.calculate', 'calculate', (['TransformerConfigs.encoder_attn', '"""alibi_mha"""', '_alibi_mha'], {}), "(TransformerConfigs.encoder_attn, 'alibi_mha', _alibi_mha)\n", (2269, 2327), False, 'from labml.configs import option, calculate\n'), ((2328, 2395), 'labml.configs.calculate', 'calculate', (['TransformerConfigs.decoder_attn', '"""alibi_mha"""', '_alibi_mha'], {}), "(TransformerConfigs.decoder_attn, 'alibi_mha', _alibi_mha)\n", (2337, 2395), False, 'from labml.configs import option, calculate\n'), ((2396, 2467), 'labml.configs.calculate', 'calculate', (['TransformerConfigs.decoder_mem_attn', '"""alibi_mha"""', '_alibi_mha'], {}), "(TransformerConfigs.decoder_mem_attn, 'alibi_mha', _alibi_mha)\n", (2405, 2467), False, 'from labml.configs import option, calculate\n'), ((2471, 2499), 'labml.configs.option', 'option', (['Configs.valid_loader'], {}), '(Configs.valid_loader)\n', (2477, 2499), False, 'from labml.configs import option, calculate\n'), ((2979, 3019), 'labml.configs.option', 'option', (['Configs.transformer', '"""GPT_ALiBi"""'], {}), "(Configs.transformer, 'GPT_ALiBi')\n", (2985, 3019), False, 'from labml.configs import option, calculate\n'), ((2148, 2217), 'labml_nn.transformers.alibi.AlibiMultiHeadAttention', 'AlibiMultiHeadAttention', (['c.n_heads', 'c.d_model'], {'dropout_prob': 'c.dropout'}), '(c.n_heads, c.d_model, dropout_prob=c.dropout)\n', (2171, 2217), False, 'from labml_nn.transformers.alibi import AlibiMultiHeadAttention\n'), ((3234, 3254), 'labml_nn.transformers.TransformerConfigs', 'TransformerConfigs', ([], {}), '()\n', (3252, 3254), False, 'from labml_nn.transformers import TransformerConfigs\n'), ((3814, 3849), 'labml.experiment.create', 'experiment.create', ([], {'name': '"""gpt_alibi"""'}), "(name='gpt_alibi')\n", (3831, 3849), False, 'from labml import experiment, tracker\n'), ((3926, 4302), 'labml.experiment.configs', 'experiment.configs', (['conf', "{'tokenizer': 'character', 'prompt_separator': '', 'prompt': 'It is ',\n 'text': 'tiny_shakespeare', 'seq_len': 64, 'valid_seq_len': 80,\n 'epochs': 128, 'batch_size': 128, 'inner_iterations': 10,\n 'transformer.d_model': 128, 'transformer.ffn.d_ff': 512,\n 'transformer.n_heads': 8, 'transformer.n_layers': 4,\n 'transformer.dropout': 0.1}"], {}), "(conf, {'tokenizer': 'character', 'prompt_separator': '',\n 'prompt': 'It is ', 'text': 'tiny_shakespeare', 'seq_len': 64,\n 'valid_seq_len': 80, 'epochs': 128, 'batch_size': 128,\n 'inner_iterations': 10, 'transformer.d_model': 128,\n 'transformer.ffn.d_ff': 512, 'transformer.n_heads': 8,\n 'transformer.n_layers': 4, 'transformer.dropout': 0.1})\n", (3944, 4302), False, 'from labml import experiment, tracker\n'), ((4906, 4958), 'labml.experiment.add_pytorch_models', 'experiment.add_pytorch_models', (["{'model': conf.model}"], {}), "({'model': conf.model})\n", (4935, 4958), False, 'from labml import experiment, tracker\n'), ((2657, 2748), 'labml_helpers.datasets.text.SequentialUnBatchedDataset', 'SequentialUnBatchedDataset', ([], {'text': 'c.text.valid', 'dataset': 'c.text', 'seq_len': 'c.valid_seq_len'}), '(text=c.text.valid, dataset=c.text, seq_len=c.\n valid_seq_len)\n', (2683, 2748), False, 'from labml_helpers.datasets.text import SequentialUnBatchedDataset\n'), ((4996, 5014), 'labml.experiment.start', 'experiment.start', ([], {}), '()\n', (5012, 5014), False, 'from labml import experiment, tracker\n')] |
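The configuration above routes every attention block through AlibiMultiHeadAttention but never shows the bias itself. A hedged sketch of the per-head slope schedule described in the ALiBi paper for power-of-two head counts (the labml implementation may derive it differently internally):
def alibi_slopes(n_heads: int):
    # Geometric sequence starting at 2**(-8 / n_heads); each head gets a fixed,
    # non-learned slope that scales a linear distance penalty on attention scores.
    start = 2.0 ** (-8.0 / n_heads)
    return [start ** (i + 1) for i in range(n_heads)]

# alibi_slopes(8) -> [0.5, 0.25, 0.125, ..., 0.00390625]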
#coding=utf-8
'''
Created on 2015-9-24
@author: Devuser
'''
from doraemon.ci.pagefactory.ci_pageworker import CIPageWorker
from doraemon.ci.viewmodels.ci_left_nav_bar import CIServiceLeftNavBar
from doraemon.ci.viewmodels.ci_sub_nav_bar import CIServiceSubNavBar
from doraemon.ci.viewmodels.vm_ci_deploy_service import VM_CIDeployService
from doraemon.ci.pagefactory.ci_template_path import CIServicePath
from doraemon.project.pagefactory.project_common_pageworker import ProjectCommonControllPageWorker
from business.ci.ci_service import CIService
from doraemon.ci.models import CIDeployService
class CIServicePageWorker(CIPageWorker):
'''
    Project page generator
'''
def __init__(self, request):
'''
Constructor
'''
CIPageWorker.__init__(self, request)
self.pagemodel = CIServiceLeftNavBar
self.subpage_model = CIServiceSubNavBar
def get_ci_service_fullpage(self, request,sub_nav_action):
dm_products = CIService.get_products_include_me(request)
left_nav_bar = self.get_service_left_bar(request)
sub_nav_bar = self.get_service_sub_navbar(request, dm_products, sub_nav_action)
ci_service_webpart = self.get_ci_service_list_webpart(request,sub_nav_action)
page_fileds = {"left_nav_bar":left_nav_bar, "sub_nav_bar":sub_nav_bar, "ci_service_webpart":ci_service_webpart}
return self.get_page(page_fileds,CIServicePath.service_index_path, request)
def get_ci_service_config_page(self, request,service_id):
dm_products = CIService.get_products_include_me(request)
left_nav_bar = self.get_service_left_bar(request)
sub_nav_bar = self.get_service_sub_navbar(request, dm_products,0)
ci_service_config_webpart = self.ci_service_config_webpart(request,service_id)
page_fileds = {"left_nav_bar":left_nav_bar, "sub_nav_bar":sub_nav_bar, "ci_service_config":ci_service_config_webpart}
return self.get_page(page_fileds,CIServicePath.service_index_path,request)
def ci_service_config_webpart(self, request,service_id):
service=CIDeployService.objects.get(int(service_id))
vm_service=VM_CIDeployService(service,0)
ci_service_project=ProjectCommonControllPageWorker.get_myproject_dropdown_list(self, request,service.Project)
pagefileds = {"service":vm_service,"ci_service_project":ci_service_project}
return self.get_webpart(pagefileds, CIServicePath.service_config_page)
def get_ci_service_list_webpart(self, request,sub_nav_action):
service_list_controll = self.get_ci_service_list_controll(request, sub_nav_action)
pagefileds = {"ci_service_listcontroll":service_list_controll}
return self.get_webpart(pagefileds, CIServicePath.service_list_webpart)
def get_ci_service_list_controll(self, request,sub_nav_action):
dm_ci_services = CIService.get_product_ci_services(request,sub_nav_action)
ci_services = self.get_ci_services(request, dm_ci_services)
pagefileds = {"ci_services":ci_services}
return self.get_webpart(pagefileds, CIServicePath.service_list_controll)
def get_service_left_bar(self, request):
return self.get_left_nav_bar(request, self.pagemodel, CIServicePath.left_nav_template_path)
def get_service_sub_navbar(self, request, dm_products, sub_nav_action):
return self.get_sub_nav_bar(request, self.subpage_model, CIServicePath.sub_nav_template_path, sub_nav_action=sub_nav_action, products=dm_products)
def get_ci_services(self,request,dm_ci_services):
result=list()
for service in dm_ci_services:
temp=VM_CIDeployService(service,0)
result.append(temp)
return result
| [
"business.ci.ci_service.CIService.get_products_include_me",
"doraemon.ci.pagefactory.ci_pageworker.CIPageWorker.__init__",
"doraemon.project.pagefactory.project_common_pageworker.ProjectCommonControllPageWorker.get_myproject_dropdown_list",
"business.ci.ci_service.CIService.get_product_ci_services",
"doraemon.ci.viewmodels.vm_ci_deploy_service.VM_CIDeployService"
] | [((754, 790), 'doraemon.ci.pagefactory.ci_pageworker.CIPageWorker.__init__', 'CIPageWorker.__init__', (['self', 'request'], {}), '(self, request)\n', (775, 790), False, 'from doraemon.ci.pagefactory.ci_pageworker import CIPageWorker\n'), ((974, 1016), 'business.ci.ci_service.CIService.get_products_include_me', 'CIService.get_products_include_me', (['request'], {}), '(request)\n', (1007, 1016), False, 'from business.ci.ci_service import CIService\n'), ((1542, 1584), 'business.ci.ci_service.CIService.get_products_include_me', 'CIService.get_products_include_me', (['request'], {}), '(request)\n', (1575, 1584), False, 'from business.ci.ci_service import CIService\n'), ((2164, 2194), 'doraemon.ci.viewmodels.vm_ci_deploy_service.VM_CIDeployService', 'VM_CIDeployService', (['service', '(0)'], {}), '(service, 0)\n', (2182, 2194), False, 'from doraemon.ci.viewmodels.vm_ci_deploy_service import VM_CIDeployService\n'), ((2221, 2316), 'doraemon.project.pagefactory.project_common_pageworker.ProjectCommonControllPageWorker.get_myproject_dropdown_list', 'ProjectCommonControllPageWorker.get_myproject_dropdown_list', (['self', 'request', 'service.Project'], {}), '(self, request,\n service.Project)\n', (2280, 2316), False, 'from doraemon.project.pagefactory.project_common_pageworker import ProjectCommonControllPageWorker\n'), ((2897, 2955), 'business.ci.ci_service.CIService.get_product_ci_services', 'CIService.get_product_ci_services', (['request', 'sub_nav_action'], {}), '(request, sub_nav_action)\n', (2930, 2955), False, 'from business.ci.ci_service import CIService\n'), ((3681, 3711), 'doraemon.ci.viewmodels.vm_ci_deploy_service.VM_CIDeployService', 'VM_CIDeployService', (['service', '(0)'], {}), '(service, 0)\n', (3699, 3711), False, 'from doraemon.ci.viewmodels.vm_ci_deploy_service import VM_CIDeployService\n')] |
# Code generated by lark_sdk_gen. DO NOT EDIT.
import unittest
import pylark
import pytest
from tests.test_conf import app_all_permission, app_no_permission
from tests.test_helper import mock_get_tenant_access_token_failed
def mock(*args, **kwargs):
raise pylark.PyLarkError(scope="scope", func="func", code=1, msg="mock-failed")
def mock_raw_request(*args, **kwargs):
raise pylark.PyLarkError(
scope="scope", func="func", code=1, msg="mock-raw-request-failed"
)
# mock get token
class TestBitableSampleMockGetTokenFailed(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestBitableSampleMockGetTokenFailed, self).__init__(*args, **kwargs)
self.cli = app_all_permission.ins()
self.cli.auth.get_tenant_access_token = mock_get_tenant_access_token_failed
self.cli.auth.get_app_access_token = mock_get_tenant_access_token_failed
self.module_cli = self.cli.bitable
def test_mock_get_token_get_bitable_view_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_view_list(pylark.GetBitableViewListReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_create_bitable_view(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_bitable_view(pylark.CreateBitableViewReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_delete_bitable_view(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_bitable_view(pylark.DeleteBitableViewReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_bitable_record_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_record_list(pylark.GetBitableRecordListReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_record(pylark.GetBitableRecordReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_create_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_bitable_record(pylark.CreateBitableRecordReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_batch_create_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_create_bitable_record(
pylark.BatchCreateBitableRecordReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_update_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_bitable_record(pylark.UpdateBitableRecordReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_batch_update_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_update_bitable_record(
pylark.BatchUpdateBitableRecordReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_delete_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_bitable_record(pylark.DeleteBitableRecordReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_batch_delete_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_delete_bitable_record(
pylark.BatchDeleteBitableRecordReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_bitable_field_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_field_list(pylark.GetBitableFieldListReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_create_bitable_field(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_bitable_field(pylark.CreateBitableFieldReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_update_bitable_field(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_bitable_field(pylark.UpdateBitableFieldReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_delete_bitable_field(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_bitable_field(pylark.DeleteBitableFieldReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_bitable_table_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_table_list(pylark.GetBitableTableListReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_create_bitable_table(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_bitable_table(pylark.CreateBitableTableReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_batch_create_bitable_table(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_create_bitable_table(
pylark.BatchCreateBitableTableReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_delete_bitable_table(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_bitable_table(pylark.DeleteBitableTableReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_batch_delete_bitable_table(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_delete_bitable_table(
pylark.BatchDeleteBitableTableReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_bitable_meta(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_meta(pylark.GetBitableMetaReq())
assert "msg=failed" in f"{e}"
# mock self func
class TestBitableSampleMockSelfFuncFailed(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestBitableSampleMockSelfFuncFailed, self).__init__(*args, **kwargs)
self.cli = app_all_permission.ins()
self.module_cli = self.cli.bitable
def test_mock_self_func_get_bitable_view_list(self):
origin_func = self.module_cli.get_bitable_view_list
self.module_cli.get_bitable_view_list = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_view_list(pylark.GetBitableViewListReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_bitable_view_list = origin_func
def test_mock_self_func_create_bitable_view(self):
origin_func = self.module_cli.create_bitable_view
self.module_cli.create_bitable_view = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_bitable_view(pylark.CreateBitableViewReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.create_bitable_view = origin_func
def test_mock_self_func_delete_bitable_view(self):
origin_func = self.module_cli.delete_bitable_view
self.module_cli.delete_bitable_view = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_bitable_view(pylark.DeleteBitableViewReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.delete_bitable_view = origin_func
def test_mock_self_func_get_bitable_record_list(self):
origin_func = self.module_cli.get_bitable_record_list
self.module_cli.get_bitable_record_list = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_record_list(pylark.GetBitableRecordListReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_bitable_record_list = origin_func
def test_mock_self_func_get_bitable_record(self):
origin_func = self.module_cli.get_bitable_record
self.module_cli.get_bitable_record = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_record(pylark.GetBitableRecordReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_bitable_record = origin_func
def test_mock_self_func_create_bitable_record(self):
origin_func = self.module_cli.create_bitable_record
self.module_cli.create_bitable_record = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_bitable_record(pylark.CreateBitableRecordReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.create_bitable_record = origin_func
def test_mock_self_func_batch_create_bitable_record(self):
origin_func = self.module_cli.batch_create_bitable_record
self.module_cli.batch_create_bitable_record = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_create_bitable_record(
pylark.BatchCreateBitableRecordReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.batch_create_bitable_record = origin_func
def test_mock_self_func_update_bitable_record(self):
origin_func = self.module_cli.update_bitable_record
self.module_cli.update_bitable_record = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_bitable_record(pylark.UpdateBitableRecordReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.update_bitable_record = origin_func
def test_mock_self_func_batch_update_bitable_record(self):
origin_func = self.module_cli.batch_update_bitable_record
self.module_cli.batch_update_bitable_record = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_update_bitable_record(
pylark.BatchUpdateBitableRecordReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.batch_update_bitable_record = origin_func
def test_mock_self_func_delete_bitable_record(self):
origin_func = self.module_cli.delete_bitable_record
self.module_cli.delete_bitable_record = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_bitable_record(pylark.DeleteBitableRecordReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.delete_bitable_record = origin_func
def test_mock_self_func_batch_delete_bitable_record(self):
origin_func = self.module_cli.batch_delete_bitable_record
self.module_cli.batch_delete_bitable_record = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_delete_bitable_record(
pylark.BatchDeleteBitableRecordReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.batch_delete_bitable_record = origin_func
def test_mock_self_func_get_bitable_field_list(self):
origin_func = self.module_cli.get_bitable_field_list
self.module_cli.get_bitable_field_list = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_field_list(pylark.GetBitableFieldListReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_bitable_field_list = origin_func
def test_mock_self_func_create_bitable_field(self):
origin_func = self.module_cli.create_bitable_field
self.module_cli.create_bitable_field = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_bitable_field(pylark.CreateBitableFieldReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.create_bitable_field = origin_func
def test_mock_self_func_update_bitable_field(self):
origin_func = self.module_cli.update_bitable_field
self.module_cli.update_bitable_field = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_bitable_field(pylark.UpdateBitableFieldReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.update_bitable_field = origin_func
def test_mock_self_func_delete_bitable_field(self):
origin_func = self.module_cli.delete_bitable_field
self.module_cli.delete_bitable_field = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_bitable_field(pylark.DeleteBitableFieldReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.delete_bitable_field = origin_func
def test_mock_self_func_get_bitable_table_list(self):
origin_func = self.module_cli.get_bitable_table_list
self.module_cli.get_bitable_table_list = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_table_list(pylark.GetBitableTableListReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_bitable_table_list = origin_func
def test_mock_self_func_create_bitable_table(self):
origin_func = self.module_cli.create_bitable_table
self.module_cli.create_bitable_table = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_bitable_table(pylark.CreateBitableTableReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.create_bitable_table = origin_func
def test_mock_self_func_batch_create_bitable_table(self):
origin_func = self.module_cli.batch_create_bitable_table
self.module_cli.batch_create_bitable_table = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_create_bitable_table(
pylark.BatchCreateBitableTableReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.batch_create_bitable_table = origin_func
def test_mock_self_func_delete_bitable_table(self):
origin_func = self.module_cli.delete_bitable_table
self.module_cli.delete_bitable_table = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_bitable_table(pylark.DeleteBitableTableReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.delete_bitable_table = origin_func
def test_mock_self_func_batch_delete_bitable_table(self):
origin_func = self.module_cli.batch_delete_bitable_table
self.module_cli.batch_delete_bitable_table = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_delete_bitable_table(
pylark.BatchDeleteBitableTableReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.batch_delete_bitable_table = origin_func
def test_mock_self_func_get_bitable_meta(self):
origin_func = self.module_cli.get_bitable_meta
self.module_cli.get_bitable_meta = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_meta(pylark.GetBitableMetaReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_bitable_meta = origin_func
# mock raw request
class TestBitableSampleMockRawRequestFailed(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestBitableSampleMockRawRequestFailed, self).__init__(*args, **kwargs)
self.cli = app_all_permission.ins()
self.module_cli = self.cli.bitable
self.cli.raw_request = mock_raw_request
def test_mock_raw_request_get_bitable_view_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_view_list(
pylark.GetBitableViewListReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_create_bitable_view(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_bitable_view(
pylark.CreateBitableViewReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_delete_bitable_view(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_bitable_view(
pylark.DeleteBitableViewReq(
app_token="x",
table_id="x",
view_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_bitable_record_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_record_list(
pylark.GetBitableRecordListReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_record(
pylark.GetBitableRecordReq(
app_token="x",
table_id="x",
record_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_create_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_bitable_record(
pylark.CreateBitableRecordReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_batch_create_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_create_bitable_record(
pylark.BatchCreateBitableRecordReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_update_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_bitable_record(
pylark.UpdateBitableRecordReq(
app_token="x",
table_id="x",
record_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_batch_update_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_update_bitable_record(
pylark.BatchUpdateBitableRecordReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_delete_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_bitable_record(
pylark.DeleteBitableRecordReq(
app_token="x",
table_id="x",
record_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_batch_delete_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_delete_bitable_record(
pylark.BatchDeleteBitableRecordReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_bitable_field_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_field_list(
pylark.GetBitableFieldListReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_create_bitable_field(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_bitable_field(
pylark.CreateBitableFieldReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_update_bitable_field(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_bitable_field(
pylark.UpdateBitableFieldReq(
app_token="x",
table_id="x",
field_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_delete_bitable_field(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_bitable_field(
pylark.DeleteBitableFieldReq(
app_token="x",
table_id="x",
field_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_bitable_table_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_table_list(
pylark.GetBitableTableListReq(
app_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_create_bitable_table(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_bitable_table(
pylark.CreateBitableTableReq(
app_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_batch_create_bitable_table(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_create_bitable_table(
pylark.BatchCreateBitableTableReq(
app_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_delete_bitable_table(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_bitable_table(
pylark.DeleteBitableTableReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_batch_delete_bitable_table(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_delete_bitable_table(
pylark.BatchDeleteBitableTableReq(
app_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_bitable_meta(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_meta(
pylark.GetBitableMetaReq(
app_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
# real request
class TestBitableSampleRealRequestFailed(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestBitableSampleRealRequestFailed, self).__init__(*args, **kwargs)
self.cli = app_no_permission.ins()
self.module_cli = self.cli.bitable
def test_real_request_get_bitable_view_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_view_list(
pylark.GetBitableViewListReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_create_bitable_view(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_bitable_view(
pylark.CreateBitableViewReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_delete_bitable_view(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_bitable_view(
pylark.DeleteBitableViewReq(
app_token="x",
table_id="x",
view_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_bitable_record_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_record_list(
pylark.GetBitableRecordListReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_record(
pylark.GetBitableRecordReq(
app_token="x",
table_id="x",
record_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_create_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_bitable_record(
pylark.CreateBitableRecordReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_batch_create_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_create_bitable_record(
pylark.BatchCreateBitableRecordReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_update_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_bitable_record(
pylark.UpdateBitableRecordReq(
app_token="x",
table_id="x",
record_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_batch_update_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_update_bitable_record(
pylark.BatchUpdateBitableRecordReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_delete_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_bitable_record(
pylark.DeleteBitableRecordReq(
app_token="x",
table_id="x",
record_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_batch_delete_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_delete_bitable_record(
pylark.BatchDeleteBitableRecordReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_bitable_field_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_field_list(
pylark.GetBitableFieldListReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_create_bitable_field(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_bitable_field(
pylark.CreateBitableFieldReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_update_bitable_field(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_bitable_field(
pylark.UpdateBitableFieldReq(
app_token="x",
table_id="x",
field_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_delete_bitable_field(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_bitable_field(
pylark.DeleteBitableFieldReq(
app_token="x",
table_id="x",
field_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_bitable_table_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_table_list(
pylark.GetBitableTableListReq(
app_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_create_bitable_table(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_bitable_table(
pylark.CreateBitableTableReq(
app_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_batch_create_bitable_table(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_create_bitable_table(
pylark.BatchCreateBitableTableReq(
app_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_delete_bitable_table(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_bitable_table(
pylark.DeleteBitableTableReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_batch_delete_bitable_table(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_delete_bitable_table(
pylark.BatchDeleteBitableTableReq(
app_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_bitable_meta(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_meta(
pylark.GetBitableMetaReq(
app_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
| [
"pylark.GetBitableViewListReq",
"pylark.PyLarkError",
"pylark.CreateBitableViewReq",
"pylark.GetBitableRecordReq",
"pylark.DeleteBitableRecordReq",
"pylark.GetBitableTableListReq",
"pytest.raises",
"pylark.UpdateBitableFieldReq",
"pylark.DeleteBitableFieldReq",
"pylark.GetBitableMetaReq",
"pylark.BatchDeleteBitableTableReq",
"pylark.DeleteBitableViewReq",
"pylark.CreateBitableRecordReq",
"pylark.GetBitableFieldListReq",
"pylark.BatchCreateBitableRecordReq",
"pylark.BatchDeleteBitableRecordReq",
"pylark.BatchCreateBitableTableReq",
"pylark.BatchUpdateBitableRecordReq",
"pylark.UpdateBitableRecordReq",
"tests.test_conf.app_all_permission.ins",
"pylark.GetBitableRecordListReq",
"pylark.CreateBitableTableReq",
"pylark.DeleteBitableTableReq",
"pylark.CreateBitableFieldReq",
"tests.test_conf.app_no_permission.ins"
] | [((263, 336), 'pylark.PyLarkError', 'pylark.PyLarkError', ([], {'scope': '"""scope"""', 'func': '"""func"""', 'code': '(1)', 'msg': '"""mock-failed"""'}), "(scope='scope', func='func', code=1, msg='mock-failed')\n", (281, 336), False, 'import pylark\n'), ((388, 478), 'pylark.PyLarkError', 'pylark.PyLarkError', ([], {'scope': '"""scope"""', 'func': '"""func"""', 'code': '(1)', 'msg': '"""mock-raw-request-failed"""'}), "(scope='scope', func='func', code=1, msg=\n 'mock-raw-request-failed')\n", (406, 478), False, 'import pylark\n'), ((712, 736), 'tests.test_conf.app_all_permission.ins', 'app_all_permission.ins', ([], {}), '()\n', (734, 736), False, 'from tests.test_conf import app_all_permission, app_no_permission\n'), ((6253, 6277), 'tests.test_conf.app_all_permission.ins', 'app_all_permission.ins', ([], {}), '()\n', (6275, 6277), False, 'from tests.test_conf import app_all_permission, app_no_permission\n'), ((15434, 15458), 'tests.test_conf.app_all_permission.ins', 'app_all_permission.ins', ([], {}), '()\n', (15456, 15458), False, 'from tests.test_conf import app_all_permission, app_no_permission\n'), ((25187, 25210), 'tests.test_conf.app_no_permission.ins', 'app_no_permission.ins', ([], {}), '()\n', (25208, 25210), False, 'from tests.test_conf import app_all_permission, app_no_permission\n'), ((1016, 1049), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (1029, 1049), False, 'import pytest\n'), ((1246, 1279), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (1259, 1279), False, 'import pytest\n'), ((1473, 1506), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (1486, 1506), False, 'import pytest\n'), ((1704, 1737), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (1717, 1737), False, 'import pytest\n'), ((1937, 1970), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (1950, 1970), False, 'import pytest\n'), ((2164, 2197), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (2177, 2197), False, 'import pytest\n'), ((2403, 2436), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (2416, 2436), False, 'import pytest\n'), ((2677, 2710), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (2690, 2710), False, 'import pytest\n'), ((2916, 2949), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (2929, 2949), False, 'import pytest\n'), ((3190, 3223), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (3203, 3223), False, 'import pytest\n'), ((3429, 3462), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (3442, 3462), False, 'import pytest\n'), ((3704, 3737), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (3717, 3737), False, 'import pytest\n'), ((3937, 3970), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (3950, 3970), False, 'import pytest\n'), ((4167, 4200), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (4180, 4200), False, 'import pytest\n'), ((4397, 4430), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (4410, 4430), False, 'import pytest\n'), ((4629, 4662), 'pytest.raises', 
'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (4642, 4662), False, 'import pytest\n'), ((4862, 4895), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (4875, 4895), False, 'import pytest\n'), ((5098, 5131), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (5111, 5131), False, 'import pytest\n'), ((5369, 5402), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (5382, 5402), False, 'import pytest\n'), ((5605, 5638), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (5618, 5638), False, 'import pytest\n'), ((5872, 5905), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (5885, 5905), False, 'import pytest\n'), ((6506, 6539), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (6519, 6539), False, 'import pytest\n'), ((6911, 6944), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (6924, 6944), False, 'import pytest\n'), ((7311, 7344), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (7324, 7344), False, 'import pytest\n'), ((7723, 7756), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (7736, 7756), False, 'import pytest\n'), ((8131, 8164), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (8144, 8164), False, 'import pytest\n'), ((8534, 8567), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (8547, 8567), False, 'import pytest\n'), ((8964, 8997), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (8977, 8997), False, 'import pytest\n'), ((9423, 9456), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (9436, 9456), False, 'import pytest\n'), ((9853, 9886), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (9866, 9886), False, 'import pytest\n'), ((10312, 10345), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (10325, 10345), False, 'import pytest\n'), ((10742, 10775), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (10755, 10775), False, 'import pytest\n'), ((11204, 11237), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (11217, 11237), False, 'import pytest\n'), ((11615, 11648), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (11628, 11648), False, 'import pytest\n'), ((12021, 12054), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (12034, 12054), False, 'import pytest\n'), ((12427, 12460), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (12440, 12460), False, 'import pytest\n'), ((12839, 12872), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (12852, 12872), False, 'import pytest\n'), ((13250, 13283), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (13263, 13283), False, 'import pytest\n'), ((13674, 13707), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (13687, 13707), False, 'import pytest\n'), ((14127, 14160), 'pytest.raises', 
'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (14140, 14160), False, 'import pytest\n'), ((14551, 14584), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (14564, 14584), False, 'import pytest\n'), ((14992, 15025), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (15005, 15025), False, 'import pytest\n'), ((15623, 15656), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (15636, 15656), False, 'import pytest\n'), ((16065, 16098), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (16078, 16098), False, 'import pytest\n'), ((16504, 16537), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (16517, 16537), False, 'import pytest\n'), ((16980, 17013), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (16993, 17013), False, 'import pytest\n'), ((17425, 17458), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (17438, 17458), False, 'import pytest\n'), ((17899, 17932), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (17912, 17932), False, 'import pytest\n'), ((18350, 18383), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (18363, 18383), False, 'import pytest\n'), ((18806, 18839), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (18819, 18839), False, 'import pytest\n'), ((19292, 19325), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (19305, 19325), False, 'import pytest\n'), ((19748, 19781), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (19761, 19781), False, 'import pytest\n'), ((20234, 20267), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (20247, 20267), False, 'import pytest\n'), ((20691, 20724), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (20704, 20724), False, 'import pytest\n'), ((21136, 21169), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (21149, 21169), False, 'import pytest\n'), ((21578, 21611), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (21591, 21611), False, 'import pytest\n'), ((22054, 22087), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (22067, 22087), False, 'import pytest\n'), ((22532, 22565), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (22545, 22565), False, 'import pytest\n'), ((22943, 22976), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (22956, 22976), False, 'import pytest\n'), ((23357, 23390), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (23370, 23390), False, 'import pytest\n'), ((23776, 23809), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (23789, 23809), False, 'import pytest\n'), ((24224, 24257), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (24237, 24257), False, 'import pytest\n'), ((24639, 24672), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (24652, 24672), False, 'import 
pytest\n'), ((25323, 25356), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (25336, 25356), False, 'import pytest\n'), ((25705, 25738), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (25718, 25738), False, 'import pytest\n'), ((26084, 26117), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (26097, 26117), False, 'import pytest\n'), ((26500, 26533), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (26513, 26533), False, 'import pytest\n'), ((26885, 26918), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (26898, 26918), False, 'import pytest\n'), ((27299, 27332), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (27312, 27332), False, 'import pytest\n'), ((27690, 27723), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (27703, 27723), False, 'import pytest\n'), ((28086, 28119), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (28099, 28119), False, 'import pytest\n'), ((28512, 28545), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (28525, 28545), False, 'import pytest\n'), ((28908, 28941), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (28921, 28941), False, 'import pytest\n'), ((29334, 29367), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (29347, 29367), False, 'import pytest\n'), ((29731, 29764), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (29744, 29764), False, 'import pytest\n'), ((30116, 30149), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (30129, 30149), False, 'import pytest\n'), ((30498, 30531), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (30511, 30531), False, 'import pytest\n'), ((30914, 30947), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (30927, 30947), False, 'import pytest\n'), ((31332, 31365), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (31345, 31365), False, 'import pytest\n'), ((31683, 31716), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (31696, 31716), False, 'import pytest\n'), ((32037, 32070), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (32050, 32070), False, 'import pytest\n'), ((32396, 32429), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (32409, 32429), False, 'import pytest\n'), ((32784, 32817), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (32797, 32817), False, 'import pytest\n'), ((33139, 33172), 'pytest.raises', 'pytest.raises', (['pylark.PyLarkError'], {}), '(pylark.PyLarkError)\n', (33152, 33172), False, 'import pytest\n'), ((1106, 1136), 'pylark.GetBitableViewListReq', 'pylark.GetBitableViewListReq', ([], {}), '()\n', (1134, 1136), False, 'import pylark\n'), ((1334, 1363), 'pylark.CreateBitableViewReq', 'pylark.CreateBitableViewReq', ([], {}), '()\n', (1361, 1363), False, 'import pylark\n'), ((1561, 1590), 'pylark.DeleteBitableViewReq', 'pylark.DeleteBitableViewReq', ([], {}), '()\n', (1588, 1590), False, 
'import pylark\n'), ((1796, 1828), 'pylark.GetBitableRecordListReq', 'pylark.GetBitableRecordListReq', ([], {}), '()\n', (1826, 1828), False, 'import pylark\n'), ((2024, 2052), 'pylark.GetBitableRecordReq', 'pylark.GetBitableRecordReq', ([], {}), '()\n', (2050, 2052), False, 'import pylark\n'), ((2254, 2285), 'pylark.CreateBitableRecordReq', 'pylark.CreateBitableRecordReq', ([], {}), '()\n', (2283, 2285), False, 'import pylark\n'), ((2516, 2552), 'pylark.BatchCreateBitableRecordReq', 'pylark.BatchCreateBitableRecordReq', ([], {}), '()\n', (2550, 2552), False, 'import pylark\n'), ((2767, 2798), 'pylark.UpdateBitableRecordReq', 'pylark.UpdateBitableRecordReq', ([], {}), '()\n', (2796, 2798), False, 'import pylark\n'), ((3029, 3065), 'pylark.BatchUpdateBitableRecordReq', 'pylark.BatchUpdateBitableRecordReq', ([], {}), '()\n', (3063, 3065), False, 'import pylark\n'), ((3280, 3311), 'pylark.DeleteBitableRecordReq', 'pylark.DeleteBitableRecordReq', ([], {}), '()\n', (3309, 3311), False, 'import pylark\n'), ((3542, 3578), 'pylark.BatchDeleteBitableRecordReq', 'pylark.BatchDeleteBitableRecordReq', ([], {}), '()\n', (3576, 3578), False, 'import pylark\n'), ((3795, 3826), 'pylark.GetBitableFieldListReq', 'pylark.GetBitableFieldListReq', ([], {}), '()\n', (3824, 3826), False, 'import pylark\n'), ((4026, 4056), 'pylark.CreateBitableFieldReq', 'pylark.CreateBitableFieldReq', ([], {}), '()\n', (4054, 4056), False, 'import pylark\n'), ((4256, 4286), 'pylark.UpdateBitableFieldReq', 'pylark.UpdateBitableFieldReq', ([], {}), '()\n', (4284, 4286), False, 'import pylark\n'), ((4486, 4516), 'pylark.DeleteBitableFieldReq', 'pylark.DeleteBitableFieldReq', ([], {}), '()\n', (4514, 4516), False, 'import pylark\n'), ((4720, 4751), 'pylark.GetBitableTableListReq', 'pylark.GetBitableTableListReq', ([], {}), '()\n', (4749, 4751), False, 'import pylark\n'), ((4951, 4981), 'pylark.CreateBitableTableReq', 'pylark.CreateBitableTableReq', ([], {}), '()\n', (4979, 4981), False, 'import pylark\n'), ((5210, 5245), 'pylark.BatchCreateBitableTableReq', 'pylark.BatchCreateBitableTableReq', ([], {}), '()\n', (5243, 5245), False, 'import pylark\n'), ((5458, 5488), 'pylark.DeleteBitableTableReq', 'pylark.DeleteBitableTableReq', ([], {}), '()\n', (5486, 5488), False, 'import pylark\n'), ((5717, 5752), 'pylark.BatchDeleteBitableTableReq', 'pylark.BatchDeleteBitableTableReq', ([], {}), '()\n', (5750, 5752), False, 'import pylark\n'), ((5957, 5983), 'pylark.GetBitableMetaReq', 'pylark.GetBitableMetaReq', ([], {}), '()\n', (5981, 5983), False, 'import pylark\n'), ((6596, 6626), 'pylark.GetBitableViewListReq', 'pylark.GetBitableViewListReq', ([], {}), '()\n', (6624, 6626), False, 'import pylark\n'), ((6999, 7028), 'pylark.CreateBitableViewReq', 'pylark.CreateBitableViewReq', ([], {}), '()\n', (7026, 7028), False, 'import pylark\n'), ((7399, 7428), 'pylark.DeleteBitableViewReq', 'pylark.DeleteBitableViewReq', ([], {}), '()\n', (7426, 7428), False, 'import pylark\n'), ((7815, 7847), 'pylark.GetBitableRecordListReq', 'pylark.GetBitableRecordListReq', ([], {}), '()\n', (7845, 7847), False, 'import pylark\n'), ((8218, 8246), 'pylark.GetBitableRecordReq', 'pylark.GetBitableRecordReq', ([], {}), '()\n', (8244, 8246), False, 'import pylark\n'), ((8624, 8655), 'pylark.CreateBitableRecordReq', 'pylark.CreateBitableRecordReq', ([], {}), '()\n', (8653, 8655), False, 'import pylark\n'), ((9077, 9113), 'pylark.BatchCreateBitableRecordReq', 'pylark.BatchCreateBitableRecordReq', ([], {}), '()\n', (9111, 9113), False, 'import pylark\n'), ((9513, 9544), 
'pylark.UpdateBitableRecordReq', 'pylark.UpdateBitableRecordReq', ([], {}), '()\n', (9542, 9544), False, 'import pylark\n'), ((9966, 10002), 'pylark.BatchUpdateBitableRecordReq', 'pylark.BatchUpdateBitableRecordReq', ([], {}), '()\n', (10000, 10002), False, 'import pylark\n'), ((10402, 10433), 'pylark.DeleteBitableRecordReq', 'pylark.DeleteBitableRecordReq', ([], {}), '()\n', (10431, 10433), False, 'import pylark\n'), ((10855, 10891), 'pylark.BatchDeleteBitableRecordReq', 'pylark.BatchDeleteBitableRecordReq', ([], {}), '()\n', (10889, 10891), False, 'import pylark\n'), ((11295, 11326), 'pylark.GetBitableFieldListReq', 'pylark.GetBitableFieldListReq', ([], {}), '()\n', (11324, 11326), False, 'import pylark\n'), ((11704, 11734), 'pylark.CreateBitableFieldReq', 'pylark.CreateBitableFieldReq', ([], {}), '()\n', (11732, 11734), False, 'import pylark\n'), ((12110, 12140), 'pylark.UpdateBitableFieldReq', 'pylark.UpdateBitableFieldReq', ([], {}), '()\n', (12138, 12140), False, 'import pylark\n'), ((12516, 12546), 'pylark.DeleteBitableFieldReq', 'pylark.DeleteBitableFieldReq', ([], {}), '()\n', (12544, 12546), False, 'import pylark\n'), ((12930, 12961), 'pylark.GetBitableTableListReq', 'pylark.GetBitableTableListReq', ([], {}), '()\n', (12959, 12961), False, 'import pylark\n'), ((13339, 13369), 'pylark.CreateBitableTableReq', 'pylark.CreateBitableTableReq', ([], {}), '()\n', (13367, 13369), False, 'import pylark\n'), ((13786, 13821), 'pylark.BatchCreateBitableTableReq', 'pylark.BatchCreateBitableTableReq', ([], {}), '()\n', (13819, 13821), False, 'import pylark\n'), ((14216, 14246), 'pylark.DeleteBitableTableReq', 'pylark.DeleteBitableTableReq', ([], {}), '()\n', (14244, 14246), False, 'import pylark\n'), ((14663, 14698), 'pylark.BatchDeleteBitableTableReq', 'pylark.BatchDeleteBitableTableReq', ([], {}), '()\n', (14696, 14698), False, 'import pylark\n'), ((15077, 15103), 'pylark.GetBitableMetaReq', 'pylark.GetBitableMetaReq', ([], {}), '()\n', (15101, 15103), False, 'import pylark\n'), ((15730, 15787), 'pylark.GetBitableViewListReq', 'pylark.GetBitableViewListReq', ([], {'app_token': '"""x"""', 'table_id': '"""x"""'}), "(app_token='x', table_id='x')\n", (15758, 15787), False, 'import pylark\n'), ((16170, 16226), 'pylark.CreateBitableViewReq', 'pylark.CreateBitableViewReq', ([], {'app_token': '"""x"""', 'table_id': '"""x"""'}), "(app_token='x', table_id='x')\n", (16197, 16226), False, 'import pylark\n'), ((16609, 16678), 'pylark.DeleteBitableViewReq', 'pylark.DeleteBitableViewReq', ([], {'app_token': '"""x"""', 'table_id': '"""x"""', 'view_id': '"""x"""'}), "(app_token='x', table_id='x', view_id='x')\n", (16636, 16678), False, 'import pylark\n'), ((17089, 17148), 'pylark.GetBitableRecordListReq', 'pylark.GetBitableRecordListReq', ([], {'app_token': '"""x"""', 'table_id': '"""x"""'}), "(app_token='x', table_id='x')\n", (17119, 17148), False, 'import pylark\n'), ((17529, 17599), 'pylark.GetBitableRecordReq', 'pylark.GetBitableRecordReq', ([], {'app_token': '"""x"""', 'table_id': '"""x"""', 'record_id': '"""x"""'}), "(app_token='x', table_id='x', record_id='x')\n", (17555, 17599), False, 'import pylark\n'), ((18006, 18064), 'pylark.CreateBitableRecordReq', 'pylark.CreateBitableRecordReq', ([], {'app_token': '"""x"""', 'table_id': '"""x"""'}), "(app_token='x', table_id='x')\n", (18035, 18064), False, 'import pylark\n'), ((18463, 18526), 'pylark.BatchCreateBitableRecordReq', 'pylark.BatchCreateBitableRecordReq', ([], {'app_token': '"""x"""', 'table_id': '"""x"""'}), "(app_token='x', table_id='x')\n", 
(18497, 18526), False, 'import pylark\n'), ((18913, 18986), 'pylark.UpdateBitableRecordReq', 'pylark.UpdateBitableRecordReq', ([], {'app_token': '"""x"""', 'table_id': '"""x"""', 'record_id': '"""x"""'}), "(app_token='x', table_id='x', record_id='x')\n", (18942, 18986), False, 'import pylark\n'), ((19405, 19468), 'pylark.BatchUpdateBitableRecordReq', 'pylark.BatchUpdateBitableRecordReq', ([], {'app_token': '"""x"""', 'table_id': '"""x"""'}), "(app_token='x', table_id='x')\n", (19439, 19468), False, 'import pylark\n'), ((19855, 19928), 'pylark.DeleteBitableRecordReq', 'pylark.DeleteBitableRecordReq', ([], {'app_token': '"""x"""', 'table_id': '"""x"""', 'record_id': '"""x"""'}), "(app_token='x', table_id='x', record_id='x')\n", (19884, 19928), False, 'import pylark\n'), ((20347, 20410), 'pylark.BatchDeleteBitableRecordReq', 'pylark.BatchDeleteBitableRecordReq', ([], {'app_token': '"""x"""', 'table_id': '"""x"""'}), "(app_token='x', table_id='x')\n", (20381, 20410), False, 'import pylark\n'), ((20799, 20857), 'pylark.GetBitableFieldListReq', 'pylark.GetBitableFieldListReq', ([], {'app_token': '"""x"""', 'table_id': '"""x"""'}), "(app_token='x', table_id='x')\n", (20828, 20857), False, 'import pylark\n'), ((21242, 21299), 'pylark.CreateBitableFieldReq', 'pylark.CreateBitableFieldReq', ([], {'app_token': '"""x"""', 'table_id': '"""x"""'}), "(app_token='x', table_id='x')\n", (21270, 21299), False, 'import pylark\n'), ((21684, 21755), 'pylark.UpdateBitableFieldReq', 'pylark.UpdateBitableFieldReq', ([], {'app_token': '"""x"""', 'table_id': '"""x"""', 'field_id': '"""x"""'}), "(app_token='x', table_id='x', field_id='x')\n", (21712, 21755), False, 'import pylark\n'), ((22160, 22231), 'pylark.DeleteBitableFieldReq', 'pylark.DeleteBitableFieldReq', ([], {'app_token': '"""x"""', 'table_id': '"""x"""', 'field_id': '"""x"""'}), "(app_token='x', table_id='x', field_id='x')\n", (22188, 22231), False, 'import pylark\n'), ((22640, 22684), 'pylark.GetBitableTableListReq', 'pylark.GetBitableTableListReq', ([], {'app_token': '"""x"""'}), "(app_token='x')\n", (22669, 22684), False, 'import pylark\n'), ((23049, 23092), 'pylark.CreateBitableTableReq', 'pylark.CreateBitableTableReq', ([], {'app_token': '"""x"""'}), "(app_token='x')\n", (23077, 23092), False, 'import pylark\n'), ((23469, 23517), 'pylark.BatchCreateBitableTableReq', 'pylark.BatchCreateBitableTableReq', ([], {'app_token': '"""x"""'}), "(app_token='x')\n", (23502, 23517), False, 'import pylark\n'), ((23882, 23939), 'pylark.DeleteBitableTableReq', 'pylark.DeleteBitableTableReq', ([], {'app_token': '"""x"""', 'table_id': '"""x"""'}), "(app_token='x', table_id='x')\n", (23910, 23939), False, 'import pylark\n'), ((24336, 24384), 'pylark.BatchDeleteBitableTableReq', 'pylark.BatchDeleteBitableTableReq', ([], {'app_token': '"""x"""'}), "(app_token='x')\n", (24369, 24384), False, 'import pylark\n'), ((24741, 24780), 'pylark.GetBitableMetaReq', 'pylark.GetBitableMetaReq', ([], {'app_token': '"""x"""'}), "(app_token='x')\n", (24765, 24780), False, 'import pylark\n'), ((25430, 25487), 'pylark.GetBitableViewListReq', 'pylark.GetBitableViewListReq', ([], {'app_token': '"""x"""', 'table_id': '"""x"""'}), "(app_token='x', table_id='x')\n", (25458, 25487), False, 'import pylark\n'), ((25810, 25866), 'pylark.CreateBitableViewReq', 'pylark.CreateBitableViewReq', ([], {'app_token': '"""x"""', 'table_id': '"""x"""'}), "(app_token='x', table_id='x')\n", (25837, 25866), False, 'import pylark\n'), ((26189, 26258), 'pylark.DeleteBitableViewReq', 'pylark.DeleteBitableViewReq', 
([], {'app_token': '"""x"""', 'table_id': '"""x"""', 'view_id': '"""x"""'}), "(app_token='x', table_id='x', view_id='x')\n", (26216, 26258), False, 'import pylark\n'), ((26609, 26668), 'pylark.GetBitableRecordListReq', 'pylark.GetBitableRecordListReq', ([], {'app_token': '"""x"""', 'table_id': '"""x"""'}), "(app_token='x', table_id='x')\n", (26639, 26668), False, 'import pylark\n'), ((26989, 27059), 'pylark.GetBitableRecordReq', 'pylark.GetBitableRecordReq', ([], {'app_token': '"""x"""', 'table_id': '"""x"""', 'record_id': '"""x"""'}), "(app_token='x', table_id='x', record_id='x')\n", (27015, 27059), False, 'import pylark\n'), ((27406, 27464), 'pylark.CreateBitableRecordReq', 'pylark.CreateBitableRecordReq', ([], {'app_token': '"""x"""', 'table_id': '"""x"""'}), "(app_token='x', table_id='x')\n", (27435, 27464), False, 'import pylark\n'), ((27803, 27866), 'pylark.BatchCreateBitableRecordReq', 'pylark.BatchCreateBitableRecordReq', ([], {'app_token': '"""x"""', 'table_id': '"""x"""'}), "(app_token='x', table_id='x')\n", (27837, 27866), False, 'import pylark\n'), ((28193, 28266), 'pylark.UpdateBitableRecordReq', 'pylark.UpdateBitableRecordReq', ([], {'app_token': '"""x"""', 'table_id': '"""x"""', 'record_id': '"""x"""'}), "(app_token='x', table_id='x', record_id='x')\n", (28222, 28266), False, 'import pylark\n'), ((28625, 28688), 'pylark.BatchUpdateBitableRecordReq', 'pylark.BatchUpdateBitableRecordReq', ([], {'app_token': '"""x"""', 'table_id': '"""x"""'}), "(app_token='x', table_id='x')\n", (28659, 28688), False, 'import pylark\n'), ((29015, 29088), 'pylark.DeleteBitableRecordReq', 'pylark.DeleteBitableRecordReq', ([], {'app_token': '"""x"""', 'table_id': '"""x"""', 'record_id': '"""x"""'}), "(app_token='x', table_id='x', record_id='x')\n", (29044, 29088), False, 'import pylark\n'), ((29447, 29510), 'pylark.BatchDeleteBitableRecordReq', 'pylark.BatchDeleteBitableRecordReq', ([], {'app_token': '"""x"""', 'table_id': '"""x"""'}), "(app_token='x', table_id='x')\n", (29481, 29510), False, 'import pylark\n'), ((29839, 29897), 'pylark.GetBitableFieldListReq', 'pylark.GetBitableFieldListReq', ([], {'app_token': '"""x"""', 'table_id': '"""x"""'}), "(app_token='x', table_id='x')\n", (29868, 29897), False, 'import pylark\n'), ((30222, 30279), 'pylark.CreateBitableFieldReq', 'pylark.CreateBitableFieldReq', ([], {'app_token': '"""x"""', 'table_id': '"""x"""'}), "(app_token='x', table_id='x')\n", (30250, 30279), False, 'import pylark\n'), ((30604, 30675), 'pylark.UpdateBitableFieldReq', 'pylark.UpdateBitableFieldReq', ([], {'app_token': '"""x"""', 'table_id': '"""x"""', 'field_id': '"""x"""'}), "(app_token='x', table_id='x', field_id='x')\n", (30632, 30675), False, 'import pylark\n'), ((31020, 31091), 'pylark.DeleteBitableFieldReq', 'pylark.DeleteBitableFieldReq', ([], {'app_token': '"""x"""', 'table_id': '"""x"""', 'field_id': '"""x"""'}), "(app_token='x', table_id='x', field_id='x')\n", (31048, 31091), False, 'import pylark\n'), ((31440, 31484), 'pylark.GetBitableTableListReq', 'pylark.GetBitableTableListReq', ([], {'app_token': '"""x"""'}), "(app_token='x')\n", (31469, 31484), False, 'import pylark\n'), ((31789, 31832), 'pylark.CreateBitableTableReq', 'pylark.CreateBitableTableReq', ([], {'app_token': '"""x"""'}), "(app_token='x')\n", (31817, 31832), False, 'import pylark\n'), ((32149, 32197), 'pylark.BatchCreateBitableTableReq', 'pylark.BatchCreateBitableTableReq', ([], {'app_token': '"""x"""'}), "(app_token='x')\n", (32182, 32197), False, 'import pylark\n'), ((32502, 32559), 
'pylark.DeleteBitableTableReq', 'pylark.DeleteBitableTableReq', ([], {'app_token': '"""x"""', 'table_id': '"""x"""'}), "(app_token='x', table_id='x')\n", (32530, 32559), False, 'import pylark\n'), ((32896, 32944), 'pylark.BatchDeleteBitableTableReq', 'pylark.BatchDeleteBitableTableReq', ([], {'app_token': '"""x"""'}), "(app_token='x')\n", (32929, 32944), False, 'import pylark\n'), ((33241, 33280), 'pylark.GetBitableMetaReq', 'pylark.GetBitableMetaReq', ([], {'app_token': '"""x"""'}), "(app_token='x')\n", (33265, 33280), False, 'import pylark\n')] |
import angr
from taint import is_tainted, taintedUnconstrainedBits
import logging
l = logging.getLogger(__name__)
class IROpHook(angr.SimStatePlugin):
"""
Allows hooking the computation of operations performed in the symbolic execution.
(requires our fork of angr to actually respect the hook)
"""
def do_op(self, state, irop, args):
"""
irop: an angr.vex.engines.SimIROp
args: arguments to irop, which will all be claripy objects (instances of claripy.ast.Base)
return: claripy object to use as the result of the operation;
or None to refrain from hooking the operation, and let angr proceed normally
"""
if any(is_tainted(a) for a in args):
#l.debug("Replacing operation {} on {} with unconstrained secret".format(irop, args))
return taintedUnconstrainedBits(state, "secret", irop._output_size_bits)
return None
@angr.SimStatePlugin.memo
def copy(self, memo):
return IROpHook()
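# Illustrative sketch, not part of the original file: with the forked angr, the hook
# would typically be attached to a state as a plugin, e.g.
#     state.register_plugin('irop_hook', IROpHook())
# after which any IR operation that sees a tainted operand is replaced by fresh
# unconstrained "secret" bits of the operation's output width.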
| [
"taint.taintedUnconstrainedBits",
"taint.is_tainted",
"logging.getLogger"
] | [((87, 114), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (104, 114), False, 'import logging\n'), ((843, 908), 'taint.taintedUnconstrainedBits', 'taintedUnconstrainedBits', (['state', '"""secret"""', 'irop._output_size_bits'], {}), "(state, 'secret', irop._output_size_bits)\n", (867, 908), False, 'from taint import is_tainted, taintedUnconstrainedBits\n'), ((696, 709), 'taint.is_tainted', 'is_tainted', (['a'], {}), '(a)\n', (706, 709), False, 'from taint import is_tainted, taintedUnconstrainedBits\n')] |
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
def get_title_from_index(index):
return df[df.index == index]["title"].values[0]
def get_index_from_title(title):
return df[df.title.str.lower() == title.lower()]["index"].values[0]
def combine_features(row):
    # Join the selected text columns into a single feature string per movie.
    try:
        return row['keywords'] + " " + row['cast'] + " " + row["genres"] + " " + row["director"]
    except Exception:
        print("Error:", row)
def check_movie(title):
    # Case-insensitive membership test against the lower-cased dataset titles.
    return title.lower() in all_movies
def get_recommendations(title, n):
names = []
movie_user_likes = title
movie_index = get_index_from_title(movie_user_likes)
similar_movies = list(enumerate(cosine_sim[movie_index]))
sorted_similar_movies = sorted(similar_movies,key=lambda x:x[1],reverse=True)
for element in sorted_similar_movies[1:n+1]:
names.append(get_title_from_index(element[0]))
return names
df = pd.read_csv("./data/movie_dataset.csv")
features = ['keywords','cast','genres','director']
all_movies = df.original_title.str.lower().tolist()
for feature in features:
df[feature] = df[feature].fillna('')
df["combined_features"] = df.apply(combine_features,axis=1)
cv = CountVectorizer()
count_matrix = cv.fit_transform(df["combined_features"])
cosine_sim = cosine_similarity(count_matrix)
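# Example usage (illustrative, not part of the original script):
#     if check_movie("Avatar"):
#         print(get_recommendations("Avatar", 10))   # ten most similar titles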
| [
"pandas.read_csv",
"sklearn.feature_extraction.text.CountVectorizer",
"sklearn.metrics.pairwise.cosine_similarity"
] | [((990, 1029), 'pandas.read_csv', 'pd.read_csv', (['"""./data/movie_dataset.csv"""'], {}), "('./data/movie_dataset.csv')\n", (1001, 1029), True, 'import pandas as pd\n'), ((1264, 1281), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {}), '()\n', (1279, 1281), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((1352, 1383), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['count_matrix'], {}), '(count_matrix)\n', (1369, 1383), False, 'from sklearn.metrics.pairwise import cosine_similarity\n')] |
import django_filters
from django_filters import CharFilter
from .models import *
class FoodFilter(django_filters.FilterSet):
    food_name = CharFilter(field_name='name', lookup_expr='icontains', label='search food items')
class Meta:
model = Food
fields = ['food_name']
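# Illustrative usage from a view (assumed, not part of this module):
#     filterset = FoodFilter(request.GET, queryset=Food.objects.all())
#     matching_items = filterset.qs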
| [
"django_filters.CharFilter"
] | [((140, 226), 'django_filters.CharFilter', 'CharFilter', ([], {'field_name': '"""name"""', 'lookup_expr': '"""icontains"""', 'label': '"""search food items"""'}), "(field_name='name', lookup_expr='icontains', label=\n 'search food items')\n", (150, 226), False, 'from django_filters import CharFilter\n')] |
# -*- coding: utf-8 -*-
import os
from config import Config
from flask import Flask, send_from_directory
from werkzeug.contrib.fixers import ProxyFix
import logging
from gtts import gTTS
from pydub import AudioSegment
import hashlib
try:
    from urllib.parse import unquote_plus  # Python 3
except ImportError:
    from urllib import unquote_plus  # Python 2 fallback
config = Config()
app = Flask(__name__)
logging.getLogger('flask_tts').setLevel(logging.DEBUG)
STORAGE_DIR = os.environ['STORAGE_DIR']
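# NOTE: paths below are built as STORAGE_DIR + filename, so the STORAGE_DIR
# environment variable is expected to end with a path separator.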
@app.route('/generate/<lang>/<text>')
def generate(lang, text):
lang = lang.lower()
text = unquote_plus(text)
tts = gTTS(text=text, lang=lang)
filename = lang+'_'+hashlib.sha1(text.encode('punycode')).hexdigest()+'.mp3'
if os.path.isfile(STORAGE_DIR+filename):
return send_from_directory(STORAGE_DIR, filename)
tts.save(STORAGE_DIR+filename)
sound = AudioSegment.from_file(STORAGE_DIR+filename, format='mp3')
sound = sound.apply_gain(+8.0)
sound.export(STORAGE_DIR+filename,
format="mp3",
bitrate="48k",
parameters=['-ac','2','-ar', '16000'])
return send_from_directory(STORAGE_DIR, filename)
if __name__ == '__main__':
# Be sure to set config.debug_mode to False in production
port = int(os.environ.get("PORT", config.port))
if port != config.port:
        config.debug_mode = False  # this is the flag actually passed to app.run() below
app.wsgi_app = ProxyFix(app.wsgi_app)
app.run(host='0.0.0.0', debug=config.debug_mode, port=port)
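# Example request (illustrative): with the service running, a URL such as
#     GET /generate/en/Hello+world
# returns an MP3 of the spoken text; repeat requests for the same language/text
# are served from the cached file in STORAGE_DIR.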
| [
"werkzeug.contrib.fixers.ProxyFix",
"config.Config",
"gtts.gTTS",
"flask.Flask",
"os.environ.get",
"os.path.isfile",
"pydub.AudioSegment.from_file",
"flask.send_from_directory",
"urllib.unquote_plus",
"logging.getLogger"
] | [((334, 342), 'config.Config', 'Config', ([], {}), '()\n', (340, 342), False, 'from config import Config\n'), ((350, 365), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (355, 365), False, 'from flask import Flask, send_from_directory\n'), ((563, 581), 'urllib.unquote_plus', 'unquote_plus', (['text'], {}), '(text)\n', (575, 581), False, 'from urllib import unquote_plus\n'), ((592, 618), 'gtts.gTTS', 'gTTS', ([], {'text': 'text', 'lang': 'lang'}), '(text=text, lang=lang)\n', (596, 618), False, 'from gtts import gTTS\n'), ((707, 745), 'os.path.isfile', 'os.path.isfile', (['(STORAGE_DIR + filename)'], {}), '(STORAGE_DIR + filename)\n', (721, 745), False, 'import os\n'), ((851, 911), 'pydub.AudioSegment.from_file', 'AudioSegment.from_file', (['(STORAGE_DIR + filename)'], {'format': '"""mp3"""'}), "(STORAGE_DIR + filename, format='mp3')\n", (873, 911), False, 'from pydub import AudioSegment\n'), ((1114, 1156), 'flask.send_from_directory', 'send_from_directory', (['STORAGE_DIR', 'filename'], {}), '(STORAGE_DIR, filename)\n', (1133, 1156), False, 'from flask import Flask, send_from_directory\n'), ((1376, 1398), 'werkzeug.contrib.fixers.ProxyFix', 'ProxyFix', (['app.wsgi_app'], {}), '(app.wsgi_app)\n', (1384, 1398), False, 'from werkzeug.contrib.fixers import ProxyFix\n'), ((366, 396), 'logging.getLogger', 'logging.getLogger', (['"""flask_tts"""'], {}), "('flask_tts')\n", (383, 396), False, 'import logging\n'), ((760, 802), 'flask.send_from_directory', 'send_from_directory', (['STORAGE_DIR', 'filename'], {}), '(STORAGE_DIR, filename)\n', (779, 802), False, 'from flask import Flask, send_from_directory\n'), ((1263, 1298), 'os.environ.get', 'os.environ.get', (['"""PORT"""', 'config.port'], {}), "('PORT', config.port)\n", (1277, 1298), False, 'import os\n')] |
import ctypes
import sys
from pathlib import Path
from .space import SpaceClient
_known_tokens = 'ap cons nil neg c b s isnil car eq mul add lt div i t f cdr SCAN number FUN DEF galaxy GG'
_Tokens = {s:i for i, s in enumerate(_known_tokens.split(), 1)}
class AlienProxy:
def __init__(self):
pass
class MachineImage:
TOKENS = dict(_Tokens)
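    # Encode a call fn(arg1, ..., argN) as a flat token stream of curried 'ap'
    # applications; each argument is serialized with encode_lists.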
def emit_call(self, *args):
ap, num, gg = map(self.TOKENS.__getitem__, 'ap number GG'.split())
def emit(fn, args):
fringe = [(fn, args)]
while fringe:
fn, args = fringe.pop()
if fn is None:
yield from self.encode_lists(args)
elif isinstance(args, (list, tuple)) and (len(args) == 0):
yield self.TOKENS[fn]
else:
yield ap
fringe.append((None, args[-1]))
fringe.append((fn, args[:-1]))
return list(emit(args[0], args[1:]))
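    # Serialize nested Python data: lists become nil-terminated cons chains,
    # tuples become bare cons pairs, and scalars become number literals.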
def encode_lists(self, data):
ap, cons, num, nil, gg = map(self.TOKENS.__getitem__, 'ap cons number nil GG'.split())
def encode(data):
fringe = [data]
while fringe:
item = fringe.pop()
if isinstance(item, tuple) and (len(item) == 1):
fringe.append(item[0])
elif isinstance(item, list) and (len(item) == 0):
yield nil
elif isinstance(item, (list, tuple)):
yield ap
yield ap
yield cons
fringe.append(item[1:])
fringe.append(item[0])
else:
yield num
yield int(item)
return list(encode(data))
class _partial:
def __init__(self, arg):
self.arg = arg
def __repr__(self):
return f'Partial({repr(self.arg)})'
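    # Inverse of encode_lists: fold the token stream back into ints, lists
    # (nil-terminated chains) and tuples (bare cons pairs).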
def decode_lists(self, data):
ap, cons, num, nil, gg = map(self.TOKENS.__getitem__, 'ap cons number nil GG'.split())
def reduce(stack):
while (stack[-3] == '$') and (stack[-2] != '$'):
head, tail = stack[-2], stack[-1]
if head == cons:
xs = self._partial(tail)
elif isinstance(head, self._partial):
if isinstance(tail, list):
xs = [head.arg, *tail]
elif isinstance(tail, tuple):
xs = (head.arg, *tail)
else:
xs = (head.arg, tail)
else:
raise Exception((head, tail))
stack[-3:] = [xs]
stack = ['$', '$']
i = 0
while True:
# print('** ', i, repr(stack), '--', repr(data[i]))
x = data[i]
i += 1
if x == gg: break
elif x == ap: stack.append('$')
elif x == nil: stack.append([]); reduce(stack)
elif x == num: stack.append(data[i]); i += 1; reduce(stack)
else: stack.append(x)
return stack[-1]
def run_tests(self):
gg, = map(self.TOKENS.__getitem__, 'GG'.split())
test_cases = [
[],
[42],
(2, 7),
[(3, 1)],
[[],[],[]],
[0, [42, 11, 12], 3, (8, 9)],
]
for data in test_cases:
image = MachineImage().encode_lists(data)
image += [gg]
rev = MachineImage().decode_lists(image)
assert rev == data, (rev, data)
class Galaxy:
def __init__(self, target='release', api_host=None, api_key=None):
self.state = []
fn = 'libgalaxy' + ('.dylib' if sys.platform == 'darwin' else '.so')
build_target = (target + '/') if target else ''
fn = next(Path(__file__).parent.resolve().parent.glob('**/' + build_target + fn))
print(repr(str(fn)))
self.galexy = ctypes.cdll.LoadLibrary(fn)
p64 = ctypes.POINTER(ctypes.c_int64)
u32 = ctypes.c_uint32
self.galexy.evaluate.argtypes = (u32, p64)
self.galexy.evaluate.restype = p64
self.galexy.load_machine.argtypes = (p64,)
self.galexy.load_machine.restype = None
self.space = SpaceClient(api_host=api_host, api_key=api_key)
def _interact(self, state, event):
flag, state, data = self._evaluate(state, event)
if (flag == 0):
return (state, data)
return self._interact(state, self._send_to_alien(data))
def _evaluate(self, state, event):
self.galexy.load_machine(None)
image = MachineImage().emit_call('galaxy', state, event)
data = (ctypes.c_int64 * len(image))(*image)
res = self.galexy.evaluate(len(image), data)
res = MachineImage().decode_lists(res)
# print('<', repr(res))
return res
def _send_to_alien(self, data):
print('<~', repr(data))
res = self.space.send(data)
print('~>', repr(res))
return res
def _render_frame(self, images):
self.frame = images
def eval_step(self, mouse):
print('>', (self.state))
print('>', (mouse or (0, 0)))
(new_state, images) = self._interact(self.state, mouse or (0, 0))
print('<', (new_state))
# print('<', (images))
self.state = new_state
self._render_frame(images)
if __name__ == '__main__':
g = Galaxy()
r = g.eval_step((0,0))
print(repr(r))
| [
"pathlib.Path",
"ctypes.cdll.LoadLibrary",
"ctypes.POINTER"
] | [((4013, 4040), 'ctypes.cdll.LoadLibrary', 'ctypes.cdll.LoadLibrary', (['fn'], {}), '(fn)\n', (4036, 4040), False, 'import ctypes\n'), ((4055, 4085), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_int64'], {}), '(ctypes.c_int64)\n', (4069, 4085), False, 'import ctypes\n'), ((3890, 3904), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (3894, 3904), False, 'from pathlib import Path\n')] |
# Augur: A Step Towards Realistic Drift Detection in Production MLSystems - Code
# Copyright 2022 Carnegie Mellon University.
#
# NO WARRANTY. THIS CARNEGIE MELLON UNIVERSITY AND SOFTWARE ENGINEERING INSTITUTE MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. CARNEGIE MELLON UNIVERSITY MAKES NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. CARNEGIE MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
#
# Released under a MIT (SEI)-style license, please see license.txt or contact <EMAIL> for full terms.
#
# [DISTRIBUTION STATEMENT A] This material has been approved for public release and unlimited distribution. Please see Copyright notice for non-US Government use and distribution.
#
# Carnegie Mellon® is registered in the U.S. Patent and Trademark Office by Carnegie Mellon University.
#
# This Software includes and/or makes use of the following Third-Party Software subject to its own license:
# 1. Tensorflow (https://github.com/tensorflow/tensorflow/blob/master/LICENSE) Copyright 2014 The Regents of the University of California.
# 2. Pandas (https://github.com/pandas-dev/pandas/blob/main/LICENSE) Copyright 2021 AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData Development Team, and open source contributors.
# 3. scikit-learn (https://github.com/scikit-learn/scikit-learn/blob/main/COPYING) Copyright 2021 The scikit-learn developers.
# 4. numpy (https://github.com/numpy/numpy/blob/main/LICENSE.txt) Copyright 2021 NumPy Developers.
# 5. scipy (https://github.com/scipy/scipy/blob/main/LICENSE.txt) Copyright 2021 SciPy Developers.
# 6. statsmodels (https://github.com/statsmodels/statsmodels/blob/main/LICENSE.txt) Copyright 2018 <NAME>, Scipy developers, statsmodels Developers.
# 7. matplotlib (https://github.com/matplotlib/matplotlib/blob/main/LICENSE/LICENSE) Copyright 2016 Matplotlib development team.
#
# DM22-0044
from sklearn.model_selection import KFold
import tensorflow.keras.callbacks as tfcb
from utils.logging import print_and_log
class ModelTrainer:
"""Has functionalities to train a model"""
def __init__(self, model_module, config_params):
self.model_module = model_module
self.config_params = config_params
self.evaluation_input = None
self.evaluation_output = None
@staticmethod
def get_callbacks(patience=2):
"""Gets helper callbacks to save checkpoints and allow early stopping when needed."""
file_path = ".model_weights.hdf5"
es = tfcb.EarlyStopping('val_loss', patience=patience, mode="min")
msave = tfcb.ModelCheckpoint(file_path, save_best_only=True)
return [es, msave]
def train(self, training_set):
"""Train."""
print_and_log("TRAINING")
model = self.model_module.create_model()
epochs = self.config_params.get("epochs")
batch_size = self.config_params.get("batch_size")
print_and_log(f'Starting training with hyper parameters: epochs: {epochs}, batch size: {batch_size}')
validation_data = None
callbacks = None
if training_set.has_validation():
print_and_log("Validation data found")
validation_data = (training_set.x_validation, training_set.y_validation)
callbacks = self.get_callbacks(patience=5)
history = model.fit(training_set.x_train, training_set.y_train,
epochs=epochs,
validation_data=validation_data,
batch_size=batch_size,
callbacks=callbacks)
print_and_log(f'Final training result ({len(history.history.get("loss"))} epochs): '
f'loss: {history.history.get("loss")[-1]}, '
f'accuracy: {history.history.get("accuracy")[-1]}')
if training_set.has_validation():
print_and_log(f'Validation: val_loss: {history.history.get("val_loss")[-1]}, '
f'val_accuracy: {history.history.get("val_accuracy")[-1]}')
print("Done training!", flush=True)
return model, history
def evaluate(self, trained_model, evaluation_input=None, evaluation_output=None):
"""Does an evaluation."""
print_and_log("EVALUATION")
print("Starting evaluation", flush=True)
if self.evaluation_input is not None:
evaluation_input = self.evaluation_input
if self.evaluation_output is not None:
evaluation_output = self.evaluation_output
if evaluation_input is None or evaluation_output is None:
raise Exception("Evaluation input or output not passed properly to evaluate.")
batch_size = self.config_params.get("batch_size")
scores = trained_model.evaluate(evaluation_input, evaluation_output, batch_size=batch_size)
print(f'Done! Evaluation loss and acc: {scores}')
return scores
def cross_validate(self, full_dataset, num_folds=5):
"""k-fold cross-validation to check how model is performing by selecting different sets to train/validate."""
# Define the K-fold Cross Validator
print_and_log("CROSS VALIDATION")
kfold = KFold(n_splits=num_folds, shuffle=True)
# K-fold Cross Validation model evaluation
acc_per_fold = []
loss_per_fold = []
fold_no = 1
for train_index, test_index in kfold.split(full_dataset.get_single_input(), full_dataset.get_output()):
# Generate a print
print('------------------------------------------------------------------------')
print_and_log(f'Training for fold {fold_no} ...')
training_set = self.model_module.get_fold_data(full_dataset, train_index, test_index)
# Fit data to model
print_and_log(f'Training fold samples: {training_set.num_train_samples}')
model, history = self.train(training_set)
# Generate generalization metrics
print_and_log(f'Evaluation fold samples: {training_set.num_validation_samples}')
scores = self.evaluate(model, training_set.x_validation, training_set.y_validation)
print_and_log(f'Score for fold {fold_no}: {model.metrics_names[0]} of {scores[0]}; '
f'{model.metrics_names[1]} of {scores[1] * 100}%')
acc_per_fold.append(scores[1] * 100)
loss_per_fold.append(scores[0])
# Increase fold number
fold_no = fold_no + 1
print_and_log("Done with cross-validation!")
def split_and_train(self, dataset_instance):
"""Splits a dataset and trains the configured model, returning it."""
training_set = self.model_module.split_data(dataset_instance, self.config_params.get("validation_size"))
print_and_log(f'Dataset samples {dataset_instance.get_number_of_samples()}, '
f'training samples: {len(training_set.x_train[0])}, '
f'validation samples: {len(training_set.x_validation[0])}')
trained_model, history = self.train(training_set)
# Store evaluation input/outputs as the validation split, in case evaluation is done later.
self.evaluation_input = training_set.x_validation
self.evaluation_output = training_set.y_validation
return trained_model
| [
"sklearn.model_selection.KFold",
"utils.logging.print_and_log",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.callbacks.ModelCheckpoint"
] | [((2726, 2787), 'tensorflow.keras.callbacks.EarlyStopping', 'tfcb.EarlyStopping', (['"""val_loss"""'], {'patience': 'patience', 'mode': '"""min"""'}), "('val_loss', patience=patience, mode='min')\n", (2744, 2787), True, 'import tensorflow.keras.callbacks as tfcb\n'), ((2804, 2856), 'tensorflow.keras.callbacks.ModelCheckpoint', 'tfcb.ModelCheckpoint', (['file_path'], {'save_best_only': '(True)'}), '(file_path, save_best_only=True)\n', (2824, 2856), True, 'import tensorflow.keras.callbacks as tfcb\n'), ((2949, 2974), 'utils.logging.print_and_log', 'print_and_log', (['"""TRAINING"""'], {}), "('TRAINING')\n", (2962, 2974), False, 'from utils.logging import print_and_log\n'), ((3142, 3253), 'utils.logging.print_and_log', 'print_and_log', (['f"""Starting training with hyper parameters: epochs: {epochs}, batch size: {batch_size}"""'], {}), "(\n f'Starting training with hyper parameters: epochs: {epochs}, batch size: {batch_size}'\n )\n", (3155, 3253), False, 'from utils.logging import print_and_log\n'), ((4469, 4496), 'utils.logging.print_and_log', 'print_and_log', (['"""EVALUATION"""'], {}), "('EVALUATION')\n", (4482, 4496), False, 'from utils.logging import print_and_log\n'), ((5372, 5405), 'utils.logging.print_and_log', 'print_and_log', (['"""CROSS VALIDATION"""'], {}), "('CROSS VALIDATION')\n", (5385, 5405), False, 'from utils.logging import print_and_log\n'), ((5422, 5461), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'num_folds', 'shuffle': '(True)'}), '(n_splits=num_folds, shuffle=True)\n', (5427, 5461), False, 'from sklearn.model_selection import KFold\n'), ((6740, 6784), 'utils.logging.print_and_log', 'print_and_log', (['"""Done with cross-validation!"""'], {}), "('Done with cross-validation!')\n", (6753, 6784), False, 'from utils.logging import print_and_log\n'), ((3355, 3393), 'utils.logging.print_and_log', 'print_and_log', (['"""Validation data found"""'], {}), "('Validation data found')\n", (3368, 3393), False, 'from utils.logging import print_and_log\n'), ((5836, 5885), 'utils.logging.print_and_log', 'print_and_log', (['f"""Training for fold {fold_no} ..."""'], {}), "(f'Training for fold {fold_no} ...')\n", (5849, 5885), False, 'from utils.logging import print_and_log\n'), ((6030, 6103), 'utils.logging.print_and_log', 'print_and_log', (['f"""Training fold samples: {training_set.num_train_samples}"""'], {}), "(f'Training fold samples: {training_set.num_train_samples}')\n", (6043, 6103), False, 'from utils.logging import print_and_log\n'), ((6217, 6302), 'utils.logging.print_and_log', 'print_and_log', (['f"""Evaluation fold samples: {training_set.num_validation_samples}"""'], {}), "(f'Evaluation fold samples: {training_set.num_validation_samples}'\n )\n", (6230, 6302), False, 'from utils.logging import print_and_log\n'), ((6406, 6547), 'utils.logging.print_and_log', 'print_and_log', (['f"""Score for fold {fold_no}: {model.metrics_names[0]} of {scores[0]}; {model.metrics_names[1]} of {scores[1] * 100}%"""'], {}), "(\n f'Score for fold {fold_no}: {model.metrics_names[0]} of {scores[0]}; {model.metrics_names[1]} of {scores[1] * 100}%'\n )\n", (6419, 6547), False, 'from utils.logging import print_and_log\n')] |
'''
utility functions
'''
__author__ = '<NAME>'
import os
from os.path import join
from os.path import abspath
import json
import pandas as pd
import numpy as np
from configs import config as cf
def is_available(filename):
'''
[filename] : str
'''
return os.path.isfile(filename)
def chunks(lst, n):
'''
Yield successive n-sized chunks from list
[lst] : python list
[n] : int
'''
for i in range(0, len(lst), n):
yield lst[i:i + n]
def read_intent_dataset(verbose=True):
'''
Load 'Intent' dataset
[verbose] : bool, verbosity level
'''
# read as a pandas dataframe
data = []
for lang in ['en', 'es', 'fr']:
for ds in ['train', 'test', 'eval']:
path = abspath(join(cf.INTENT_DIR, lang, '{}.tsv'.format(ds)))
df = pd.read_csv(path, header=None, sep='\t',
names=['text', 'class'])
data.append(df)
data = pd.concat(data)
# merge certain categories (see configs.py) and rename columns
data['class'] = data['class'].replace(cf.intent_label_map)
# remove trivial (too easy) categories
for cat in ['hi', 'okay_thanks']:
data = data[data['class'] != 'intent:{}'.format(cat)]
if verbose:
print('\t"Intent" data shape={}'.format(data.shape))
return data
def read_questions_dataset(verbose=True):
'''
Load 'Questions' dataset
[verbose] : bool, verbosity level
'''
# read as a pandas dataframe
data_path = abspath(join(cf.QUESTIONS_DIR, 'final_master_dataset.csv'))
data = pd.read_csv(data_path, delimiter=',',
usecols=['Question', 'Category'])
data.rename(columns={'Question': 'text', 'Category': 'class'},
inplace=True)
data = data[~data['class'].isna()] # remove unannotated rows
# split label into class and subclass, keep only class
data[['class', 'subclass']] = data['class'].str.split('-', 1, expand=True)
data['class'] = data['class'].str.strip()
data.drop(['subclass'], axis=1, inplace=True)
data = data[[i in cf.questions_relevant_categories for i in data['class']]]
if verbose:
print('\t"Questions" data shape={}'.format(data.shape))
return data
def merge_datasets(embeddings='labse', verbose=True):
'''
Merge 'Intent' and 'Questions' datasets
[embeddings] : str, type of embeddings to load ('bert' or 'labse')
[verbose] : bool, verbosity level
'''
# load datasets
intent = read_intent_dataset(verbose=False)
questions = read_questions_dataset(verbose=False)
merged = pd.concat([intent, questions])
# load corresponding embeddings
if embeddings == 'labse':
emb_to_load = (cf.intent_embeddings, cf.questions_embeddings)
elif embeddings == 'bert':
emb_to_load = (cf.intent_embeddings_bert, cf.questions_embeddings_bert)
else:
raise ValueError("embeddings argument can be 'bert' or 'labse'")
print(f'{embeddings} embeddings loaded.')
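    # The precomputed embedding files are assumed to be row-aligned with the
    # dataframes loaded above; the assert below checks the combined row counts match.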
intent_embeddings = np.load(abspath(join(cf.INTENT_DIR, emb_to_load[0])))
questions_embeddings = np.load(abspath(join(cf.QUESTIONS_DIR,
emb_to_load[1])))
merged_embeddings = np.vstack([intent_embeddings, questions_embeddings])
assert merged.shape[0] == merged_embeddings.shape[0]
if verbose:
print('Full data shape={}'.format(merged.shape))
return merged, merged_embeddings
# _____________ Logging related functions _____________
def convert(o):
if isinstance(o, np.int64):
return int(o)
raise TypeError
def save_logs(logs_dict, dict_name):
'''
Save best hyperparameters dictionary to "logs" directory
[logs_dict] : dict
[dict_name] : str
'''
    with open('{}/{}.json'.format(cf.LOGS_DIR, dict_name), 'w') as logs_json:
        json.dump(logs_dict, logs_json, default=convert)
print('Best hyper-parameters saved...')
return None
def load_logs(dict_name):
'''
Load best hyperparameters dictionary from "logs" directory
[dict_name] : str
'''
log_path = '{}/{}.json'.format(cf.LOGS_DIR, dict_name)
if not is_available(log_path):
raise ValueError('Hyperparameters are not available. '
'Please run train.py in "hyper_opt" mode before full '
'training.')
    with open(log_path) as logs_json:
logs = json.load(logs_json)
print('Best hyperparameters loaded...')
return logs
| [
"json.load",
"pandas.read_csv",
"os.path.isfile",
"os.path.join",
"pandas.concat",
"numpy.vstack"
] | [((278, 302), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (292, 302), False, 'import os\n'), ((965, 980), 'pandas.concat', 'pd.concat', (['data'], {}), '(data)\n', (974, 980), True, 'import pandas as pd\n'), ((1598, 1669), 'pandas.read_csv', 'pd.read_csv', (['data_path'], {'delimiter': '""","""', 'usecols': "['Question', 'Category']"}), "(data_path, delimiter=',', usecols=['Question', 'Category'])\n", (1609, 1669), True, 'import pandas as pd\n'), ((2630, 2660), 'pandas.concat', 'pd.concat', (['[intent, questions]'], {}), '([intent, questions])\n', (2639, 2660), True, 'import pandas as pd\n'), ((3273, 3325), 'numpy.vstack', 'np.vstack', (['[intent_embeddings, questions_embeddings]'], {}), '([intent_embeddings, questions_embeddings])\n', (3282, 3325), True, 'import numpy as np\n'), ((1535, 1585), 'os.path.join', 'join', (['cf.QUESTIONS_DIR', '"""final_master_dataset.csv"""'], {}), "(cf.QUESTIONS_DIR, 'final_master_dataset.csv')\n", (1539, 1585), False, 'from os.path import join\n'), ((4490, 4510), 'json.load', 'json.load', (['logs_json'], {}), '(logs_json)\n', (4499, 4510), False, 'import json\n'), ((831, 896), 'pandas.read_csv', 'pd.read_csv', (['path'], {'header': 'None', 'sep': '"""\t"""', 'names': "['text', 'class']"}), "(path, header=None, sep='\\t', names=['text', 'class'])\n", (842, 896), True, 'import pandas as pd\n'), ((3079, 3114), 'os.path.join', 'join', (['cf.INTENT_DIR', 'emb_to_load[0]'], {}), '(cf.INTENT_DIR, emb_to_load[0])\n', (3083, 3114), False, 'from os.path import join\n'), ((3160, 3198), 'os.path.join', 'join', (['cf.QUESTIONS_DIR', 'emb_to_load[1]'], {}), '(cf.QUESTIONS_DIR, emb_to_load[1])\n', (3164, 3198), False, 'from os.path import join\n')] |
from django.contrib import admin
from .models import Order
# Register your models here.
class OrderAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'phone_number', 'submitted')
list_filter = ('name', 'submitted')
readonly_fields = ('submitted',)
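    # Organise the change form into sections; the pick-up, recipient and admin groups start collapsed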
fieldsets = (
        (None, {
            'fields': ('name', 'phone_number', 'order_details')
        }),
('Pick-Up Details', {
'classes': ('collapse',),
'fields': ('pickup_name', 'pickup_address', 'pickup_phone')
}),
('Recipient Details', {
'classes': ('collapse',),
'fields': ('recipient_name', 'dropoff_address', 'recipient_phone', 'submitted')
}),
('Order Admin', {
'classes': ('collapse',),
'fields': ('username',)
})
)
admin.site.register(Order, OrderAdmin)
| [
"django.contrib.admin.site.register"
] | [((810, 848), 'django.contrib.admin.site.register', 'admin.site.register', (['Order', 'OrderAdmin'], {}), '(Order, OrderAdmin)\n', (829, 848), False, 'from django.contrib import admin\n')] |
import click
from rapt.connection import get_vr
from rapt.models import query, models
from rapt.util import edit_yaml, dump_yaml
from pprint import pformat
@click.command()
def buildpack():
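    # Skeleton config serialized to YAML and handed to edit_yaml for the user to
    # fill in; repo_type lists the accepted values.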
tmpl = {
'repo_url': '',
'repo_type': ['git', 'hg'],
'description': '',
'order': 0,
}
vr = get_vr()
info = {
'available buildpacks': [
bp.repo_url for bp in query('buildpack', vr)
]
}
config = edit_yaml(dump_yaml(tmpl),
dump_yaml(info))
click.echo('Creating buildpack with following config:\n')
click.echo(pformat(config))
click.echo()
if click.confirm('Create buildpack?'):
bp = models.Buildpack(vr, config)
bp.create()
click.echo('Create %s %s!' % (bp.repo_url, bp.resource_uri))
| [
"pprint.pformat",
"rapt.models.models.Buildpack",
"click.confirm",
"rapt.connection.get_vr",
"click.echo",
"click.command",
"rapt.util.dump_yaml",
"rapt.models.query"
] | [((161, 176), 'click.command', 'click.command', ([], {}), '()\n', (174, 176), False, 'import click\n'), ((330, 338), 'rapt.connection.get_vr', 'get_vr', ([], {}), '()\n', (336, 338), False, 'from rapt.connection import get_vr\n'), ((546, 603), 'click.echo', 'click.echo', (['"""Creating buildpack with following config:\n"""'], {}), "('Creating buildpack with following config:\\n')\n", (556, 603), False, 'import click\n'), ((640, 652), 'click.echo', 'click.echo', ([], {}), '()\n', (650, 652), False, 'import click\n'), ((661, 695), 'click.confirm', 'click.confirm', (['"""Create buildpack?"""'], {}), "('Create buildpack?')\n", (674, 695), False, 'import click\n'), ((484, 499), 'rapt.util.dump_yaml', 'dump_yaml', (['tmpl'], {}), '(tmpl)\n', (493, 499), False, 'from rapt.util import edit_yaml, dump_yaml\n'), ((524, 539), 'rapt.util.dump_yaml', 'dump_yaml', (['info'], {}), '(info)\n', (533, 539), False, 'from rapt.util import edit_yaml, dump_yaml\n'), ((619, 634), 'pprint.pformat', 'pformat', (['config'], {}), '(config)\n', (626, 634), False, 'from pprint import pformat\n'), ((710, 738), 'rapt.models.models.Buildpack', 'models.Buildpack', (['vr', 'config'], {}), '(vr, config)\n', (726, 738), False, 'from rapt.models import query, models\n'), ((767, 827), 'click.echo', 'click.echo', (["('Create %s %s!' % (bp.repo_url, bp.resource_uri))"], {}), "('Create %s %s!' % (bp.repo_url, bp.resource_uri))\n", (777, 827), False, 'import click\n'), ((421, 443), 'rapt.models.query', 'query', (['"""buildpack"""', 'vr'], {}), "('buildpack', vr)\n", (426, 443), False, 'from rapt.models import query, models\n')] |
# -*- coding: utf-8 -*-
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from geoprisma import views as geoprisma_views
@login_required
def maprender(request, *args, **kwargs):
wsName = kwargs.get('wsName')
viewId = kwargs.get('viewId')
if not viewId:
viewId = ""
renderContext = geoprisma_views.maprender(request, wsName, viewId)
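    # maprender returns either a template context dict (rendered below) or a
    # ready response that is passed through unchanged.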
if isinstance(renderContext, dict):
templateName = renderContext.get("templateName")
return render(request, "example_project/" + templateName , renderContext)
else:
return renderContext
| [
"django.shortcuts.render",
"geoprisma.views.maprender"
] | [((353, 403), 'geoprisma.views.maprender', 'geoprisma_views.maprender', (['request', 'wsName', 'viewId'], {}), '(request, wsName, viewId)\n', (378, 403), True, 'from geoprisma import views as geoprisma_views\n'), ((516, 581), 'django.shortcuts.render', 'render', (['request', "('example_project/' + templateName)", 'renderContext'], {}), "(request, 'example_project/' + templateName, renderContext)\n", (522, 581), False, 'from django.shortcuts import render\n')] |
"""
@Fire
https://github.com/fire717
"""
from lib import init, Data, MoveNet, Task
from config import cfg
from lib.utils.utils import arg_parser
# Script to create and save as images all the various outputs of the model
def main(cfg):
init(cfg)
model = MoveNet(num_classes=cfg["num_classes"],
width_mult=cfg["width_mult"],
mode='train')
data = Data(cfg)
test_loader = data.getTestDataloader()
# _,test_loader = data.getTrainValDataloader()
run_task = Task(cfg, model)
run_task.modelLoad("/home/ggoyal/data/mpii/output/e300_valacc0.86824.pth")
# run_task.modelLoad("/home/ggoyal/data/mpii/output/e1000_valacc0.66665.pth")
# run_task.modelLoad("output/mbv2_e105_valacc0.80255.pth") # for coco
# run_task.modelLoad(cfg["newest_ckpt"])
run_task.predict(test_loader, cfg["predict_output_path"])
# run_task.predict(test_loader, "output/predict")
if __name__ == '__main__':
cfg = arg_parser(cfg)
    main(cfg)
 | [
"lib.MoveNet",
"lib.utils.utils.arg_parser",
"lib.Task",
"lib.Data",
"lib.init"
] | [((246, 255), 'lib.init', 'init', (['cfg'], {}), '(cfg)\n', (250, 255), False, 'from lib import init, Data, MoveNet, Task\n'), ((270, 358), 'lib.MoveNet', 'MoveNet', ([], {'num_classes': "cfg['num_classes']", 'width_mult': "cfg['width_mult']", 'mode': '"""train"""'}), "(num_classes=cfg['num_classes'], width_mult=cfg['width_mult'], mode=\n 'train')\n", (277, 358), False, 'from lib import init, Data, MoveNet, Task\n'), ((415, 424), 'lib.Data', 'Data', (['cfg'], {}), '(cfg)\n', (419, 424), False, 'from lib import init, Data, MoveNet, Task\n'), ((536, 552), 'lib.Task', 'Task', (['cfg', 'model'], {}), '(cfg, model)\n', (540, 552), False, 'from lib import init, Data, MoveNet, Task\n'), ((991, 1006), 'lib.utils.utils.arg_parser', 'arg_parser', (['cfg'], {}), '(cfg)\n', (1001, 1006), False, 'from lib.utils.utils import arg_parser\n')] |
#!/usr/bin/env python3
# [rights] Copyright 2020 brianddk at github https://github.com/brianddk
# [license] Apache 2.0 License https://www.apache.org/licenses/LICENSE-2.0
# [repo] github.com/brianddk/reddit/blob/master/python/elec-p2sh-hodl.py
# [btc] BTC-b32: bc1qwc2203uym96u0nmq04pcgqfs9ldqz9l3mz8fpj
# [tipjar] github.com/brianddk/reddit/blob/master/tipjar/tipjar.txt
# [txid] a8110bbdd40d65351f615897d98c33cbe33e4ebedb4ba2fc9e8c644423dadc93
# [ref] https://live.blockcypher.com/btc-testnet/tx/{txid}/
# [req] python -m pip install electrum
# [note] with open(r"..\reddit\python\hodl.py", 'r') as s: exec(s.read())
from electrum.transaction import TxOutpoint, PartialTxInput, PartialTxOutput, PartialTransaction
from electrum.bitcoin import deserialize_privkey, opcodes, push_script
from electrum.crypto import hash_160, sha256d
from electrum.constants import set_testnet
from electrum.ecc import ECPrivkey
# The basic bitcoinlib utility scripts
x = lambda h: bytes.fromhex(h)
lx = lambda h: bytes.fromhex(h)[::-1]
b2x = lambda b: (b.hex() if 'hex' in dir(b) else hex(b)).replace('0x','')
b2lx = lambda b: b[::-1].hex().replace('0x','')
# Very simple bitcoin script compiler
compile = lambda s: "".join([
opcodes[i].hex() if i in dir(opcodes) else push_script(i) for i in s])
# Electrum assumes P2SH is multisig, this subclass corrects that
class P2SHPartialTransaction(PartialTransaction):
def __init__(self):
PartialTransaction.__init__(self)
@classmethod
def get_preimage_script(cls, txin: 'PartialTxInput') -> str:
return b2x(txin.redeem_script)
# Set testnet
set_testnet()
# I removed the R-value grinding to use "legacy" sig processing
# This is the original TXID we are trying to hit
otxid = 'a8110bbdd40d65351f615897d98c33cbe33e4ebedb4ba2fc9e8c644423dadc93'
# Basic constants to build the TXNIN
wif = 'cQNjiPwYKMBr2oB3bWzf3rgBsu198xb8Nxxe51k6D3zVTA98L25N'
txid = x('6d500966f9e494b38a04545f0cea35fc7b3944e341a64b804fed71cdee11d434')
vout = 1
sats = 9999
script_type = 'p2sh'
binzero = 2**32
sequence = binzero - 3
address = 'tb1qv9hg20f0g08d460l67ph6p4ukwt7m0ttqzj7mk'
sats_less_fees = sats - 200
locktime = 1602565200
# Build the Transaction Input
_, privkey, compressed = deserialize_privkey(wif)
pubkey = ECPrivkey(privkey).get_public_key_hex(compressed=compressed)
prevout = TxOutpoint(txid=txid, out_idx=vout)
txin = PartialTxInput(prevout=prevout)
txin.nsequence = sequence
txin.script_type = script_type
expiry = b2x(lx(b2x(locktime)))
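# Time-locked single-key redeem script: spendable with <pubkey>'s signature only
# after the locktime has passed.
#   <locktime> OP_CHECKLOCKTIMEVERIFY OP_DROP <pubkey> OP_CHECKSIG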
redeem_script = compile([
expiry, 'OP_CHECKLOCKTIMEVERIFY', 'OP_DROP', pubkey, 'OP_CHECKSIG'])
txin.redeem_script = x(redeem_script)
# Build the Transaction Output
txout = PartialTxOutput.from_address_and_value(address, sats_less_fees)
# Build and sign the transaction
tx = P2SHPartialTransaction.from_io([txin], [txout], locktime=locktime)
tx.version = 1
sig = tx.sign_txin(0, privkey)
txin.script_sig = x(compile([sig , redeem_script]))
# Get the serialized txn and compute txid
txn = tx.serialize()
txid = b2lx(sha256d(x(txn)))
# Ensure we arrived at where we intended
if txid != otxid:
print("Did not achive target TXID hash")
print("Perhaps R-value hashing needs to be reverted")
Print("See: https://redd.it/jf97pc")
# Display results
print("pubk:", pubkey)
print("priv:", b2x(privkey))
print("txid:", txid)
print("txn:", txn)
| [
"electrum.constants.set_testnet",
"electrum.bitcoin.deserialize_privkey",
"electrum.transaction.PartialTxInput",
"electrum.transaction.PartialTxOutput.from_address_and_value",
"electrum.ecc.ECPrivkey",
"electrum.bitcoin.push_script",
"electrum.transaction.TxOutpoint",
"electrum.transaction.PartialTransaction.__init__"
] | [((1640, 1653), 'electrum.constants.set_testnet', 'set_testnet', ([], {}), '()\n', (1651, 1653), False, 'from electrum.constants import set_testnet\n'), ((2262, 2286), 'electrum.bitcoin.deserialize_privkey', 'deserialize_privkey', (['wif'], {}), '(wif)\n', (2281, 2286), False, 'from electrum.bitcoin import deserialize_privkey, opcodes, push_script\n'), ((2367, 2402), 'electrum.transaction.TxOutpoint', 'TxOutpoint', ([], {'txid': 'txid', 'out_idx': 'vout'}), '(txid=txid, out_idx=vout)\n', (2377, 2402), False, 'from electrum.transaction import TxOutpoint, PartialTxInput, PartialTxOutput, PartialTransaction\n'), ((2410, 2441), 'electrum.transaction.PartialTxInput', 'PartialTxInput', ([], {'prevout': 'prevout'}), '(prevout=prevout)\n', (2424, 2441), False, 'from electrum.transaction import TxOutpoint, PartialTxInput, PartialTxOutput, PartialTransaction\n'), ((2708, 2771), 'electrum.transaction.PartialTxOutput.from_address_and_value', 'PartialTxOutput.from_address_and_value', (['address', 'sats_less_fees'], {}), '(address, sats_less_fees)\n', (2746, 2771), False, 'from electrum.transaction import TxOutpoint, PartialTxInput, PartialTxOutput, PartialTransaction\n'), ((1465, 1498), 'electrum.transaction.PartialTransaction.__init__', 'PartialTransaction.__init__', (['self'], {}), '(self)\n', (1492, 1498), False, 'from electrum.transaction import TxOutpoint, PartialTxInput, PartialTxOutput, PartialTransaction\n'), ((2296, 2314), 'electrum.ecc.ECPrivkey', 'ECPrivkey', (['privkey'], {}), '(privkey)\n', (2305, 2314), False, 'from electrum.ecc import ECPrivkey\n'), ((1288, 1302), 'electrum.bitcoin.push_script', 'push_script', (['i'], {}), '(i)\n', (1299, 1302), False, 'from electrum.bitcoin import deserialize_privkey, opcodes, push_script\n')] |
from django.contrib import admin
from app.models import *
class TrackAdmin(admin.ModelAdmin):
list_display=('title','description',)
class SessionAdmin(admin.ModelAdmin):
list_display = ('title','status',)
search_fields = ['title','abstract']
list_filter = ('track','speaker',)
actions = ['make_approved',]
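    # Bulk admin action: approve every selected session and report how many rows changed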
def make_approved(self,request,queryset):
row_updated = queryset.update(status = 'a')
if row_updated == 1:
message_bit = "1 session was "
else:
message_bit = "%s session were "%row_updated
self.message_user(request,"%s approved"%message_bit)
make_approved.short_description = "Mark session(s) as approved"
class SpeakerAdmin(admin.ModelAdmin):
list_display = ('name','bio',)
fieldsets = (
("General Information ",{"fields": ("name","bio",)}),
("Social Media",{
"classes":("collapse"),
"fields":("twitter","facebook"),
"description":"Add social media here"})
)
admin.site.register(Speaker,SpeakerAdmin)
admin.site.register(Track,TrackAdmin)
admin.site.register(Session,SessionAdmin)
| [
"django.contrib.admin.site.register"
] | [((1079, 1121), 'django.contrib.admin.site.register', 'admin.site.register', (['Speaker', 'SpeakerAdmin'], {}), '(Speaker, SpeakerAdmin)\n', (1098, 1121), False, 'from django.contrib import admin\n'), ((1122, 1160), 'django.contrib.admin.site.register', 'admin.site.register', (['Track', 'TrackAdmin'], {}), '(Track, TrackAdmin)\n', (1141, 1160), False, 'from django.contrib import admin\n'), ((1161, 1203), 'django.contrib.admin.site.register', 'admin.site.register', (['Session', 'SessionAdmin'], {}), '(Session, SessionAdmin)\n', (1180, 1203), False, 'from django.contrib import admin\n')] |
#!/usr/bin/env python
"""
Defines unit-tests for the module at hms_core.data_objects.
"""
#######################################
# Any needed from __future__ imports #
# Create an "__all__" list to support #
# "from module import member" use #
#######################################
__all__ = [
# Test-case classes
# Child test-modules
]
#######################################
# Module metadata/dunder-names #
#######################################
__author__ = '<NAME>'
__copyright__ = 'Copyright 2018, all rights reserved'
__status__ = 'Development'
#######################################
# Standard library imports needed #
#######################################
import os
import sys
import unittest
from datetime import datetime
from uuid import UUID, uuid4
#######################################
# Third-party imports needed #
#######################################
#######################################
# Local imports needed #
#######################################
from idic.unit_testing import *
#######################################
# Initialization needed before member #
# definition can take place #
#######################################
#######################################
# Module-level Constants #
#######################################
LocalSuite = unittest.TestSuite()
#######################################
# Import the module being tested #
#######################################
import hms_core.data_objects as data_objects
from hms_core.data_objects import *
#######################################
# Constants for test-methods #
#######################################
GoodBooleanOrIntEquivalents = [
True, False, 1, 0
]
BadBooleanOrIntEquivalents = [
'true', '', (1,2), tuple()
]
GoodDateTimes = [
# - actual datetime values
    datetime.now(), datetime.fromtimestamp(1234567890),
    # - timestamp numbers
    datetime.now().timestamp(),
    1234567890, 1234567890.123456,
# - strings
'2001-01-01 12:34:56', '3001-01-01 12:34:56',
'1911-01-01 12:34:56',
# - datetimes outside the UNIX epoch, just in case
datetime.strptime(
'2001-01-01 12:34:56', BaseDataObject._data_time_string
),
datetime.strptime(
'3001-01-01 12:34:56', BaseDataObject._data_time_string
),
datetime.strptime(
'1911-01-01 12:34:56', BaseDataObject._data_time_string
),
]
BadDateTimes = [
# - invalid types
(1,2), tuple(), True, False, object(),
# - invalid values
'true', '', '1911-01-01 12:34:56.123456'
]
GoodOIDs = [
# - actual UUID values
uuid4(), str(uuid4()),
UUID('dc3a7fdf-2183-49cc-aa00-af9239950254'),
UUID('ffffffff-ffff-ffff-ffff-ffffffffffff'),
UUID('00000000-0000-0000-0000-000000000000'),
# - strings
'dc3a7fdf-2183-49cc-aa00-af9239950254',
'ffffffff-ffff-ffff-ffff-ffffffffffff',
'00000000-0000-0000-0000-000000000000',
'dc3a7fdf218349ccaa00af9239950254',
'ffffffffffffffffffffffffffffffff',
'00000000000000000000000000000000',
]
BadOIDs = [
# - invalid types
(1,2), tuple(), True, False, object(),
# - invalid values
'true', '', '1911-01-01 12:34:56.123456'
]
#######################################
# Code-coverage test-case and #
# decorator-methods #
#######################################
class testdata_objectsCodeCoverage(ModuleCoverageTest):
_testNamespace = 'hms_core'
_testModule = data_objects
LocalSuite.addTests(
unittest.TestLoader().loadTestsFromTestCase(
testdata_objectsCodeCoverage
)
)
#######################################
# Test-cases in the module #
#######################################
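# Minimal concrete subclass used as a test double: it defers every abstract
# member back to BaseDataObject so the base class behaviour can be exercised.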
class BaseDataObjectDerived(BaseDataObject):
def __init__(self,
oid=None, created=None, modified=None, is_active=None,
is_deleted=None, is_dirty=None, is_new=None
):
BaseDataObject.__init__(
self, oid, created, modified, is_active, is_deleted,
is_dirty, is_new
)
def _create(self):
return BaseDataObject._create(self)
def _update(self):
return BaseDataObject._update(self)
def matches(self, **criteria):
return BaseDataObject.matches(self, **criteria)
def to_data_dict(self):
return BaseDataObject.to_data_dict(self)
@classmethod
def delete(cls, *oids):
pass
@classmethod
def from_data_dict(cls, data_dict):
pass
@classmethod
def get(cls, *oids, **criteria):
pass
@classmethod
def sort(cls, objects, sort_by):
pass
@testdata_objectsCodeCoverage.AddMethodTesting
@testdata_objectsCodeCoverage.AddPropertyTesting
class testBaseDataObject(unittest.TestCase):
###################################
# Tests of class methods #
###################################
def test__init__(self):
# Tests the __init__ method of the BaseDataObject class
# - All we need to do here is prove that the various
# setter- and deleter-method calls are operating as
# expected.
# - deleters first
test_object = BaseDataObjectDerived()
self.assertEquals(test_object._created, None)
self.assertEquals(test_object._is_active, True)
self.assertEquals(test_object._is_deleted, False)
self.assertEquals(test_object._is_dirty, False)
self.assertEquals(test_object._is_new, True)
self.assertEquals(test_object._modified, None)
self.assertEquals(test_object._oid, None)
# - setters
oid = uuid4()
created = GoodDateTimes[0]
modified = GoodDateTimes[1]
is_active = False
is_deleted = True
is_dirty = True
is_new = False
test_object = BaseDataObjectDerived(
oid, created, modified, is_active, is_deleted,
is_dirty, is_new
)
self.assertEquals(test_object.oid, oid)
self.assertEquals(test_object.created, created)
self.assertEquals(test_object.is_active, is_active)
self.assertEquals(test_object.is_deleted, is_deleted)
self.assertEquals(test_object.is_dirty, is_dirty)
self.assertEquals(test_object.is_new, is_new)
self.assertEquals(test_object.modified, modified)
def test_del_created(self):
# Tests the _del_created method of the BaseDataObject class
test_object = BaseDataObjectDerived()
test_object._created = 'unexpected value'
test_object._del_created()
self.assertEquals(
test_object._created, None,
'BaseDataObject._del_created should leave None in the '
'underlying storage attribute, but "%s" (%s) was '
'found instead' %
(
test_object._created,
type(test_object._created).__name__
)
)
def test_del_is_active(self):
# Tests the _del_is_active method of the BaseDataObject class
test_object = BaseDataObjectDerived()
test_object._is_active = 'unexpected value'
test_object._del_is_active()
self.assertEquals(
test_object._is_active, True,
            'BaseDataObject._del_is_active should leave True in the '
'underlying storage attribute, but "%s" (%s) was '
'found instead' %
(
test_object._is_active,
type(test_object._is_active).__name__
)
)
def test_del_is_deleted(self):
# Tests the _del_is_deleted method of the BaseDataObject class
test_object = BaseDataObjectDerived()
test_object._is_deleted = 'unexpected value'
test_object._del_is_deleted()
self.assertEquals(
test_object._is_deleted, False,
            'BaseDataObject._del_is_deleted should leave False in the '
'underlying storage attribute, but "%s" (%s) was '
'found instead' %
(
test_object._is_deleted,
type(test_object._is_deleted).__name__
)
)
def test_del_is_dirty(self):
# Tests the _del_is_dirty method of the BaseDataObject class
test_object = BaseDataObjectDerived()
test_object._is_dirty = 'unexpected value'
test_object._del_is_dirty()
self.assertEquals(
test_object._is_dirty, False,
            'BaseDataObject._del_is_dirty should leave False in the '
'underlying storage attribute, but "%s" (%s) was '
'found instead' %
(
test_object._is_dirty,
type(test_object._is_dirty).__name__
)
)
def test_del_is_new(self):
# Tests the _del_is_new method of the BaseDataObject class
test_object = BaseDataObjectDerived()
test_object._is_new = 'unexpected value'
test_object._del_is_new()
self.assertEquals(
test_object._is_new, True,
            'BaseDataObject._del_is_new should leave True in the '
'underlying storage attribute, but "%s" (%s) was '
'found instead' %
(test_object._is_new, type(test_object._is_new).__name__)
)
def test_del_modified(self):
# Tests the _del_modified method of the BaseDataObject class
test_object = BaseDataObjectDerived()
test_object._modified = 'unexpected value'
test_object._del_modified()
self.assertEquals(
test_object._modified, None,
'BaseDataObject._del_modified should leave None in the '
'underlying storage attribute, but "%s" (%s) was '
'found instead' %
(
test_object._modified,
type(test_object._modified).__name__
)
)
def test_del_oid(self):
# Tests the _del_oid method of the BaseDataObject class
test_object = BaseDataObjectDerived()
test_object._oid = 'unexpected value'
test_object._del_oid()
self.assertEquals(
test_object._oid, None,
'BaseDataObject._del_oid should leave None in the '
'underlying storage attribute, but "%s" (%s) was '
'found instead' %
(test_object._oid, type(test_object._oid).__name__)
)
def test_get_created(self):
# Tests the _get_created method of the BaseDataObject class
test_object = BaseDataObjectDerived()
expected = 'expected value'
test_object._created = expected
actual = test_object.created
self.assertEquals(actual, expected,
'_get_created was expected to return "%s" (%s), but '
'returned "%s" (%s) instead' %
(
expected, type(expected).__name__,
actual, type(actual).__name__
)
)
test_object._created = None
self.assertEqual(type(test_object._get_created()), datetime,
'BaseDataObject._get_created should return a '
'datetime value if it\'s retrieved from an instance '
'with an underlying None value'
)
def test_get_is_active(self):
# Tests the _get_is_active method of the BaseDataObject class
test_object = BaseDataObjectDerived()
expected = 'expected value'
test_object._is_active = expected
actual = test_object.is_active
self.assertEquals(actual, expected,
'_get_is_active was expected to return "%s" (%s), but '
'returned "%s" (%s) instead' %
(
expected, type(expected).__name__,
actual, type(actual).__name__
)
)
def test_get_is_deleted(self):
# Tests the _get_is_deleted method of the BaseDataObject class
test_object = BaseDataObjectDerived()
expected = 'expected value'
test_object._is_deleted = expected
actual = test_object.is_deleted
self.assertEquals(actual, expected,
'_get_is_deleted was expected to return "%s" (%s), but '
'returned "%s" (%s) instead' %
(
expected, type(expected).__name__,
actual, type(actual).__name__
)
)
def test_get_is_dirty(self):
# Tests the _get_is_dirty method of the BaseDataObject class
test_object = BaseDataObjectDerived()
expected = 'expected value'
test_object._is_dirty = expected
actual = test_object.is_dirty
self.assertEquals(actual, expected,
'_get_is_dirty was expected to return "%s" (%s), but '
'returned "%s" (%s) instead' %
(
expected, type(expected).__name__,
actual, type(actual).__name__
)
)
def test_get_is_new(self):
# Tests the _get_is_new method of the BaseDataObject class
test_object = BaseDataObjectDerived()
expected = 'expected value'
test_object._is_new = expected
actual = test_object.is_new
self.assertEquals(actual, expected,
'_get_is_new was expected to return "%s" (%s), but '
'returned "%s" (%s) instead' %
(
expected, type(expected).__name__,
actual, type(actual).__name__
)
)
def test_get_modified(self):
# Tests the _get_modified method of the BaseDataObject class
test_object = BaseDataObjectDerived()
expected = 'expected value'
test_object._modified = expected
actual = test_object.modified
self.assertEquals(actual, expected,
'_get_modified was expected to return "%s" (%s), but '
'returned "%s" (%s) instead' %
(
expected, type(expected).__name__,
actual, type(actual).__name__
)
)
test_object._modified = None
self.assertEqual(type(test_object._get_modified()), datetime,
'BaseDataObject._get_modified should return a '
'datetime value if it\'s retrieved from an instance '
'with an underlying None value'
)
def test_get_oid(self):
# Tests the _get_oid method of the BaseDataObject class
test_object = BaseDataObjectDerived()
expected = 'expected value'
test_object._oid = expected
actual = test_object.oid
self.assertEquals(actual, expected,
'_get_oid was expected to return "%s" (%s), but '
'returned "%s" (%s) instead' %
(
expected, type(expected).__name__,
actual, type(actual).__name__
)
)
test_object._oid = None
self.assertEqual(type(test_object._get_oid()), UUID,
'BaseDataObject._get_oid should return a UUID value '
'if it\'s retrieved from an instance with an '
'underlying None value'
)
def test_set_created(self):
# Tests the _set_created method of the BaseDataObject class
test_object = BaseDataObjectDerived()
# - Test all "good" values
for created in GoodDateTimes:
if type(created) == datetime:
expected = created
elif type(created) in (int, float):
expected = datetime.fromtimestamp(created)
elif type(created) == str:
expected = datetime.strptime(
created, BaseDataObject._data_time_string
)
test_object._set_created(created)
actual = test_object.created
self.assertEqual(
actual, expected,
'Setting created to "%s" (%s) should return '
'"%s" (%s) through the property, but "%s" (%s) '
'was returned instead' %
(
created, type(created).__name__,
expected, type(expected).__name__,
actual, type(actual).__name__,
)
)
# - Test all "bad" values
for created in BadDateTimes:
try:
test_object._set_created(created)
self.fail(
'BaseDataObject objects should not accept "%s" '
'(%s) as created values, but it was allowed to '
'be set' %
(created, type(created).__name__)
)
except (TypeError, ValueError):
pass
except Exception as error:
self.fail(
'BaseDataObject objects should raise TypeError '
'or ValueError if passed a created value of '
'"%s" (%s), but %s was raised instead:\n'
' %s' %
(
created, type(created).__name__,
error.__class__.__name__, error
)
)
def test_set_is_active(self):
# Tests the _set_is_active method of the BaseDataObject class
test_object = BaseDataObjectDerived()
# - Test all "good" values
for is_active in GoodBooleanOrIntEquivalents:
test_object._set_is_active(is_active)
expected = True if is_active else False
actual = test_object.is_active
self.assertEqual(
actual, expected,
'Setting is_active to "%s" (%s) should return '
'"%s" (%s) through the property, but "%s" (%s) '
'was returned instead' %
(
is_active, type(is_active).__name__,
expected, type(expected).__name__,
actual, type(actual).__name__,
)
)
# - Test all "bad" values
for is_active in BadBooleanOrIntEquivalents:
try:
test_object._set_is_active(is_active)
self.fail(
'BaseDataObject objects should not accept '
'"%s" (%s) as valid is_active values, but it '
'was allowed to be set' %
(is_active, type(is_active).__name__)
)
except (TypeError, ValueError):
pass
except Exception as error:
self.fail(
'BaseDataObject objects should raise TypeError '
'or ValueError if passed an is_active value '
'of "%s" (%s), but %s was raised instead:\n'
' %s' %
(
is_active, type(is_active).__name__,
error.__class__.__name__, error
)
)
def test_set_is_deleted(self):
# Tests the _set_is_deleted method of the BaseDataObject class
test_object = BaseDataObjectDerived()
# - Test all "good" values
for is_deleted in GoodBooleanOrIntEquivalents:
test_object._set_is_deleted(is_deleted)
expected = True if is_deleted else False
actual = test_object.is_deleted
self.assertEqual(
actual, expected,
'Setting is_deleted to "%s" (%s) should return '
'"%s" (%s) through the property, but "%s" (%s) '
'was returned instead' %
(
is_deleted, type(is_deleted).__name__,
expected, type(expected).__name__,
actual, type(actual).__name__,
)
)
# - Test all "bad" values
for is_deleted in BadBooleanOrIntEquivalents:
try:
test_object._set_is_deleted(is_deleted)
self.fail(
'BaseDataObject objects should not accept '
'"%s" (%s) as valid is_deleted values, but it '
'was allowed to be set' %
(is_deleted, type(is_deleted).__name__)
)
except (TypeError, ValueError):
pass
except Exception as error:
self.fail(
'BaseDataObject objects should raise TypeError '
'or ValueError if passed an is_deleted value '
'of "%s" (%s), but %s was raised instead:\n'
' %s' %
(
is_deleted, type(is_deleted).__name__,
error.__class__.__name__, error
)
)
def test_set_is_dirty(self):
# Tests the _set_is_dirty method of the BaseDataObject class
test_object = BaseDataObjectDerived()
# - Test all "good" values
for is_dirty in GoodBooleanOrIntEquivalents:
test_object._set_is_dirty(is_dirty)
expected = True if is_dirty else False
actual = test_object.is_dirty
self.assertEqual(
actual, expected,
'Setting is_dirty to "%s" (%s) should return '
'"%s" (%s) through the property, but "%s" (%s) '
'was returned instead' %
(
is_dirty, type(is_dirty).__name__,
expected, type(expected).__name__,
actual, type(actual).__name__,
)
)
# - Test all "bad" values
for is_dirty in BadBooleanOrIntEquivalents:
try:
test_object._set_is_dirty(is_dirty)
self.fail(
'BaseDataObject objects should not accept '
'"%s" (%s) as valid is_dirty values, but it '
'was allowed to be set' %
(is_dirty, type(is_dirty).__name__)
)
except (TypeError, ValueError):
pass
except Exception as error:
self.fail(
'BaseDataObject objects should raise TypeError '
'or ValueError if passed an is_dirty value '
'of "%s" (%s), but %s was raised instead:\n'
' %s' %
(
is_dirty, type(is_dirty).__name__,
error.__class__.__name__, error
)
)
def test_set_is_new(self):
# Tests the _set_is_new method of the BaseDataObject class
test_object = BaseDataObjectDerived()
# - Test all "good" values
for is_new in GoodBooleanOrIntEquivalents:
test_object._set_is_new(is_new)
expected = True if is_new else False
actual = test_object.is_new
self.assertEqual(
actual, expected,
'Setting is_new to "%s" (%s) should return '
'"%s" (%s) through the property, but "%s" (%s) '
'was returned instead' %
(
is_new, type(is_new).__name__,
expected, type(expected).__name__,
actual, type(actual).__name__,
)
)
# - Test all "bad" values
for is_new in BadBooleanOrIntEquivalents:
try:
test_object._set_is_new(is_new)
self.fail(
'BaseDataObject objects should not accept '
'"%s" (%s) as valid is_new values, but it '
'was allowed to be set' %
(is_new, type(is_new).__name__)
)
except (TypeError, ValueError):
pass
except Exception as error:
self.fail(
'BaseDataObject objects should raise TypeError '
'or ValueError if passed an is_new value '
'of "%s" (%s), but %s was raised instead:\n'
' %s' %
(
is_new, type(is_new).__name__,
error.__class__.__name__, error
)
)
def test_set_modified(self):
# Tests the _set_modified method of the BaseDataObject class
test_object = BaseDataObjectDerived()
# - Test all "good" values
for modified in GoodDateTimes:
if type(modified) == datetime:
expected = modified
elif type(modified) in (int, float):
expected = datetime.fromtimestamp(modified)
elif type(modified) == str:
expected = datetime.strptime(
modified, BaseDataObject._data_time_string
)
test_object._set_modified(modified)
actual = test_object.modified
self.assertEqual(
actual, expected,
'Setting modified to "%s" (%s) should return '
'"%s" (%s) through the property, but "%s" (%s) '
'was returned instead' %
(
modified, type(modified).__name__,
expected, type(expected).__name__,
actual, type(actual).__name__,
)
)
# - Test all "bad" values
for modified in BadDateTimes:
try:
test_object._set_modified(modified)
self.fail(
'BaseDataObject objects should not accept "%s" '
'(%s) as modified values, but it was allowed to '
'be set' %
(modified, type(modified).__name__)
)
except (TypeError, ValueError):
pass
except Exception as error:
self.fail(
'BaseDataObject objects should raise TypeError '
'or ValueError if passed a modified value of '
'"%s" (%s), but %s was raised instead:\n'
' %s' %
(
modified, type(modified).__name__,
error.__class__.__name__, error
)
)
def test_set_oid(self):
# Tests the _set_oid method of the BaseDataObject class
test_object = BaseDataObjectDerived()
# - Test all "good" values
for oid in GoodOIDs:
if type(oid) == UUID:
expected = oid
elif type(oid) == str:
expected = UUID(oid)
test_object._set_oid(oid)
actual = test_object.oid
self.assertEqual(
actual, expected,
'Setting oid to "%s" (%s) should return '
'"%s" (%s) through the property, but "%s" '
'(%s) was returned instead.' %
(
oid, type(oid).__name__,
expected, type(expected).__name__,
actual, type(actual).__name__,
)
)
# - Test all "bad" values
for oid in BadOIDs:
try:
test_object._set_oid(oid)
self.fail(
'BaseDatObject objects should not accept '
'"%s" (%s) as a valid oid, but it was '
'allowed to be set' %
(oid, type(oid).__name__)
)
except (TypeError, ValueError):
pass
except Exception as error:
self.fail(
'BaseDataObject objects should raise TypeError '
'or ValueError if passed a value of "%s" (%s) '
'as an oid, but %s was raised instead:\n'
' %s' %
(
oid, type(oid).__name__,
error.__class__.__name__, error
)
)
def testsave(self):
# Tests the save method of the BaseDataObject class
test_object = BaseDataObjectDerived()
# - Set things up to force a call to _create:
test_object._is_new = True
for dirty in (True, False, None):
test_object._is_dirty = dirty
try:
test_object.save()
except NotImplementedError as error:
if str(error) != (
'BaseDataObjectDerived has not implemented '
'_create, as required by BaseDataObject'
):
self.fail(
'Calling _create should return a known '
'error-message, but the message returned '
'was not what was expected'
)
except Exception as error:
self.fail(
'BaseDataObject.save did not raise the '
'expected error while being tested'
)
# - Set things up to force a call to _update:
test_object._is_new = False
for dirty in (True, False, None):
test_object._is_dirty = dirty
try:
test_object.save()
except NotImplementedError as error:
if str(error) != (
'BaseDataObjectDerived has not implemented '
'_update, as required by BaseDataObject'
):
self.fail(
'Calling _create should return a known '
'error-message, but the message returned '
'was not what was expected'
)
except Exception as error:
self.fail(
'BaseDataObject.save did not raise the '
'expected error while being tested'
)
###################################
# Tests of class properties #
###################################
def testcreated(self):
# Tests the created property of the BaseDataObject class
# - Assert that the getter is correct:
self.assertEqual(
BaseDataObject.created.fget,
BaseDataObject._get_created,
'BaseDataObject.created is expected to use the '
'_get_created method as its getter-method'
)
# - Assert that the setter is correct:
self.assertEqual(
BaseDataObject.created.fset,
BaseDataObject._set_created,
'BaseDataObject.created is expected to use the '
'_set_created method as its setter-method'
)
# - Assert that the deleter is correct:
self.assertEqual(
BaseDataObject.created.fdel,
BaseDataObject._del_created,
'BaseDataObject.created is expected to use the '
'_del_created method as its deleter-method'
)
def testis_active(self):
# Tests the is_active property of the BaseDataObject class
# - Assert that the getter is correct:
self.assertEqual(
BaseDataObject.is_active.fget,
BaseDataObject._get_is_active,
'BaseDataObject.is_active is expected to use the '
'_get_is_active method as its getter-method'
)
# - Assert that the setter is correct:
self.assertEqual(
BaseDataObject.is_active.fset,
BaseDataObject._set_is_active,
'BaseDataObject.is_active is expected to use the '
'_set_is_active method as its setter-method'
)
# - Assert that the deleter is correct:
self.assertEqual(
BaseDataObject.is_active.fdel,
BaseDataObject._del_is_active,
'BaseDataObject.is_active is expected to use the '
'_del_is_active method as its deleter-method'
)
def testis_deleted(self):
# Tests the is_deleted property of the BaseDataObject class
# - Assert that the getter is correct:
self.assertEqual(
BaseDataObject.is_deleted.fget,
BaseDataObject._get_is_deleted,
'BaseDataObject.is_deleted is expected to use the '
'_get_is_deleted method as its getter-method'
)
# - Assert that the setter is correct:
self.assertEqual(
BaseDataObject.is_deleted.fset,
BaseDataObject._set_is_deleted,
'BaseDataObject.is_deleted is expected to use the '
'_set_is_deleted method as its setter-method'
)
# - Assert that the deleter is correct:
self.assertEqual(
BaseDataObject.is_deleted.fdel,
BaseDataObject._del_is_deleted,
'BaseDataObject.is_deleted is expected to use the '
'_del_is_deleted method as its deleter-method'
)
def testis_dirty(self):
# Tests the is_dirty property of the BaseDataObject class
# - Assert that the getter is correct:
self.assertEqual(
BaseDataObject.is_dirty.fget,
BaseDataObject._get_is_dirty,
'BaseDataObject.is_dirty is expected to use the '
'_get_is_dirty method as its getter-method'
)
# - Assert that the setter is correct:
self.assertEqual(
BaseDataObject.is_dirty.fset,
BaseDataObject._set_is_dirty,
'BaseDataObject.is_dirty is expected to use the '
'_set_is_dirty method as its setter-method'
)
# - Assert that the deleter is correct:
self.assertEqual(
BaseDataObject.is_dirty.fdel,
BaseDataObject._del_is_dirty,
'BaseDataObject.is_dirty is expected to use the '
'_del_is_dirty method as its deleter-method'
)
def testis_new(self):
# Tests the is_new property of the BaseDataObject class
# - Assert that the getter is correct:
self.assertEqual(
BaseDataObject.is_new.fget,
BaseDataObject._get_is_new,
'BaseDataObject.is_new is expected to use the '
'_get_is_new method as its getter-method'
)
# - Assert that the setter is correct:
self.assertEqual(
BaseDataObject.is_new.fset,
BaseDataObject._set_is_new,
'BaseDataObject.is_new is expected to use the '
'_set_is_new method as its setter-method'
)
# - Assert that the deleter is correct:
self.assertEqual(
BaseDataObject.is_new.fdel,
BaseDataObject._del_is_new,
'BaseDataObject.is_new is expected to use the '
'_del_is_new method as its deleter-method'
)
def testmodified(self):
# Tests the modified property of the BaseDataObject class
# - Assert that the getter is correct:
self.assertEqual(
BaseDataObject.modified.fget,
BaseDataObject._get_modified,
'BaseDataObject.modified is expected to use the '
'_get_modified method as its getter-method'
)
# - Assert that the setter is correct:
self.assertEqual(
BaseDataObject.modified.fset,
BaseDataObject._set_modified,
'BaseDataObject.modified is expected to use the '
'_set_modified method as its setter-method'
)
# - Assert that the deleter is correct:
self.assertEqual(
BaseDataObject.modified.fdel,
BaseDataObject._del_modified,
'BaseDataObject.modified is expected to use the '
'_del_modified method as its deleter-method'
)
def testoid(self):
# Tests the oid property of the BaseDataObject class
# - Assert that the getter is correct:
self.assertEqual(
BaseDataObject.oid.fget,
BaseDataObject._get_oid,
'BaseDataObject.oid is expected to use the '
'_get_oid method as its getter-method'
)
# - Assert that the setter is correct:
self.assertEqual(
BaseDataObject.oid.fset,
BaseDataObject._set_oid,
'BaseDataObject.oid is expected to use the '
'_set_oid method as its setter-method'
)
# - Assert that the deleter is correct:
self.assertEqual(
BaseDataObject.oid.fdel,
BaseDataObject._del_oid,
'BaseDataObject.oid is expected to use the '
'_del_oid method as its deleter-method'
)
# def testproperty_name(self):
# # Tests the property_name property of the BaseDataObject class
# # - Assert that the getter is correct:
# self.assertEqual(
# BaseDataObject.property_name.fget,
# BaseDataObject._get_property_name,
# 'BaseDataObject.property_name is expected to use the '
# '_get_property_name method as its getter-method'
# )
# # - If property_name is not expected to be publicly settable,
# # the second item here (BaseDataObject._set_property_name) should
# # be changed to None, and the failure message adjusted
# # accordingly:
# # - Assert that the setter is correct:
# self.assertEqual(
# BaseDataObject.property_name.fset,
# BaseDataObject._set_property_name,
# 'BaseDataObject.property_name is expected to use the '
# '_set_property_name method as its setter-method'
# )
# # - If property_name is not expected to be publicly deletable,
# # the second item here (BaseDataObject._del_property_name) should
# # be changed to None, and the failure message adjusted
# # accordingly:
# # - Assert that the deleter is correct:
# self.assertEqual(
# BaseDataObject.property_name.fdel,
# BaseDataObject._del_property_name,
# 'BaseDataObject.property_name is expected to use the '
# '_del_property_name method as its deleter-method'
# )
LocalSuite.addTests(
unittest.TestLoader().loadTestsFromTestCase(
testBaseDataObject
)
)
#######################################
# Child-module test-cases to execute #
#######################################
# import child_module
# LocalSuite.addTests(child_module.LocalSuite._tests)
#######################################
# Imports to resolve circular #
# dependencies. Avoid if possible. #
#######################################
#######################################
# Initialization that needs to #
# happen after member definition. #
#######################################
#######################################
# Code to execute if file is called #
# or run directly. #
#######################################
if __name__ == '__main__':
import time
results = unittest.TestResult()
testStartTime = time.time()
LocalSuite.run(results)
results.runTime = time.time() - testStartTime
PrintTestResults(results)
if not results.errors and not results.failures:
SaveTestReport(results, 'hms_core.data_objects',
'hms_core.data_objects.test-results')
| [
"uuid.uuid4",
"unittest.TestSuite",
"time.time",
"datetime.datetime.strptime",
"uuid.UUID",
"unittest.TestLoader",
"datetime.datetime.fromtimestamp",
"datetime.datetime.now",
"unittest.TestResult"
] | [((1371, 1391), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (1389, 1391), False, 'import unittest\n'), ((1891, 1905), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1903, 1905), False, 'from datetime import datetime\n'), ((1907, 1941), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['(1234567890)'], {}), '(1234567890)\n', (1929, 1941), False, 'from datetime import datetime\n'), ((2191, 2265), 'datetime.datetime.strptime', 'datetime.strptime', (['"""2001-01-01 12:34:56"""', 'BaseDataObject._data_time_string'], {}), "('2001-01-01 12:34:56', BaseDataObject._data_time_string)\n", (2208, 2265), False, 'from datetime import datetime\n'), ((2285, 2359), 'datetime.datetime.strptime', 'datetime.strptime', (['"""3001-01-01 12:34:56"""', 'BaseDataObject._data_time_string'], {}), "('3001-01-01 12:34:56', BaseDataObject._data_time_string)\n", (2302, 2359), False, 'from datetime import datetime\n'), ((2379, 2453), 'datetime.datetime.strptime', 'datetime.strptime', (['"""1911-01-01 12:34:56"""', 'BaseDataObject._data_time_string'], {}), "('1911-01-01 12:34:56', BaseDataObject._data_time_string)\n", (2396, 2453), False, 'from datetime import datetime\n'), ((2669, 2676), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (2674, 2676), False, 'from uuid import UUID, uuid4\n'), ((2697, 2741), 'uuid.UUID', 'UUID', (['"""dc3a7fdf-2183-49cc-aa00-af9239950254"""'], {}), "('dc3a7fdf-2183-49cc-aa00-af9239950254')\n", (2701, 2741), False, 'from uuid import UUID, uuid4\n'), ((2747, 2791), 'uuid.UUID', 'UUID', (['"""ffffffff-ffff-ffff-ffff-ffffffffffff"""'], {}), "('ffffffff-ffff-ffff-ffff-ffffffffffff')\n", (2751, 2791), False, 'from uuid import UUID, uuid4\n'), ((2797, 2841), 'uuid.UUID', 'UUID', (['"""00000000-0000-0000-0000-000000000000"""'], {}), "('00000000-0000-0000-0000-000000000000')\n", (2801, 2841), False, 'from uuid import UUID, uuid4\n'), ((39262, 39283), 'unittest.TestResult', 'unittest.TestResult', ([], {}), '()\n', (39281, 39283), False, 'import unittest\n'), ((39304, 39315), 'time.time', 'time.time', ([], {}), '()\n', (39313, 39315), False, 'import time\n'), ((2682, 2689), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (2687, 2689), False, 'from uuid import UUID, uuid4\n'), ((5665, 5672), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (5670, 5672), False, 'from uuid import UUID, uuid4\n'), ((39366, 39377), 'time.time', 'time.time', ([], {}), '()\n', (39375, 39377), False, 'import time\n'), ((1947, 1961), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1959, 1961), False, 'from datetime import datetime\n'), ((3568, 3589), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (3587, 3589), False, 'import unittest\n'), ((38443, 38464), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (38462, 38464), False, 'import unittest\n'), ((15507, 15538), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['created'], {}), '(created)\n', (15529, 15538), False, 'from datetime import datetime\n'), ((24760, 24792), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['modified'], {}), '(modified)\n', (24782, 24792), False, 'from datetime import datetime\n'), ((26780, 26789), 'uuid.UUID', 'UUID', (['oid'], {}), '(oid)\n', (26784, 26789), False, 'from uuid import UUID, uuid4\n'), ((15605, 15665), 'datetime.datetime.strptime', 'datetime.strptime', (['created', 'BaseDataObject._data_time_string'], {}), '(created, BaseDataObject._data_time_string)\n', (15622, 15665), False, 'from datetime import datetime\n'), ((24860, 24921), 
'datetime.datetime.strptime', 'datetime.strptime', (['modified', 'BaseDataObject._data_time_string'], {}), '(modified, BaseDataObject._data_time_string)\n', (24877, 24921), False, 'from datetime import datetime\n')] |
from __future__ import annotations
import argparse
import asyncio
import getpass
import inspect
import json
import logging
import logging.config
import os
import threading
import time
import traceback
import warnings
import webbrowser
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from typing import IO, TYPE_CHECKING, Any, Callable, cast
import bakalariapi
import platformdirs
import requests
import rich
from bakalariapi.utils import cs_timedelta, parseHTML
from prompt_toolkit.input import create_input
from prompt_toolkit.key_binding import KeyPress
from prompt_toolkit.keys import Keys
from rich.console import Console
from rich.logging import RichHandler
from rich.progress import BarColumn, Progress, TaskID, TimeRemainingColumn
from rich.syntax import Syntax
from rich.traceback import install as tb_install
from urllib3.exceptions import InsecureRequestWarning
# Takový hack na to, aby `bakalarishell` šel spustit také přímo ze zdrojové složky
# Pokud se `bakalarishell` spustí jako modul (= přes `import`), tak vše proběhne v pořádku
# Pokud se ale spustí přes "python main.py" nebo "python bakalarishell" (kde "bakalarishell"
# je složka), tak relativní `import` selže ("ImportError: attempted relative import with no
# known parent package") a `shell` se naimportuje "přímo" (resp. ne relativně), což už je v pořádku.
# Pozn.: Pokud někdo dumá nad tím, proč zde tedy není jen druhá možnost, tak to je
# kvůli tomu, že ta zase pro změnu nefugnuje při importu jako modul, jelikož v tom případě
# hledá modul `shell` jako "globální" modul (ne jako "lokální" ve složce), tudíž selže.
if TYPE_CHECKING:
from . import shell
else:
try:
from . import shell
except ImportError:
import shell
tb_install(show_locals=True)
cls = shell.cls
api: bakalariapi.BakalariAPI
shell_instance: shell.Shell
dirs = platformdirs.PlatformDirs(
appauthor="BakalariAPI", appname="bakalarishell", roaming=True
)
CONFIG_FILE = "config.json"
TIME_FILE = "_lasttime"
@dataclass
class Args:
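    """Runtime options for bakalarishell, merged from the command line and the saved configuration file."""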
url: str | None = None
username: str | None = None
password: str | None = None
browser: str | None = None
executable_path: str | None = None
verbose: int = 0
test: int | None = None
auto_run: bool = False
no_init: bool = False
no_import: bool = False
disable_config: bool = False
commands: list[str] = field(default_factory=list)
args: Args
class RichTask:
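    """Small helper bundling a rich Progress instance with one of its task IDs so a single object can be passed around and updated."""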
def __init__(self, progress: Progress, task_id: TaskID) -> None:
self.progress = progress
self.task_id = task_id
def start(self):
self.progress.start_task(self.task_id)
def update(
self,
total: float | None = None,
completed: float | None = None,
advance: float | None = None,
description: str | None = None,
visible: bool | None = None,
refresh: bool = False,
**fields,
):
self.progress.update(
self.task_id,
total=total,
completed=completed,
advance=advance,
description=description,
visible=visible,
refresh=refresh,
**fields,
)
def finish(self):
task = self.progress.tasks[self.task_id]
task.finished_time = 0
##################################################
##### FUNKCE #####
##################################################
def rich_print(
*objects: Any,
sep: str = " ",
end: str = "\n",
file: IO[str] | None = None,
flush: bool = False,
color: str | None = None,
**kwargs,
):
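    """Wrapper around print() that renders through rich and can optionally wrap every printed object in the given color tag."""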
c = rich.get_console() if file is None else Console(file=file)
if color is not None:
# Pravděpodobně někdy bude problém, že se vše převádí na string, ale zatím to problém není, tak to neřeším eShrug
objects = tuple(map(lambda x: f"[{color}]{x}[/{color}]", objects))
return c.print(*objects, sep=sep, end=end, **kwargs)
def partial_init_notice():
rich_print(
'Tuto akci nelze vykonat, jelikož shell se nachází v omezeném módu. Pro přepnutí do online módu můžete zkusit příkaz "init".',
color="yellow",
)
def dialog_ano_ne(
text: str = "", default: bool | None = None, color: str | None = None
) -> bool:
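    """Repeatedly ask a yes/no question; accepts a/t/y/1 as yes and n/f/0 as no, falling back to the default on empty input."""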
message = f"{text} Ano/Ne{'' if default is None else (' (Ano)' if default else ' (Ne)')}: "
while True:
# ano/true/yes/1 / ne/false/no/0
if color is not None:
rich_print(message, end="", color=color)
inpt = input()
else:
inpt = input(message)
if len(inpt) == 0:
if default is None:
continue
return default
input_letter = inpt[0].lower()
if input_letter in "aty1":
return True
if input_letter in "nf0":
return False
def dialog_cislo(text: str = "", default: int | None = None):
print(text, "" if default is None else f"({default})")
while True:
inpt = input()
if not inpt:
if default is None:
continue
return default
if inpt.isdecimal():
return int(inpt)
print("Špatná hodnota")
def print_keys(keys: list[tuple[str, str] | str], enter_pokracovani=True):
output = ["Enter - Pokračování"] if enter_pokracovani else []
for key in keys:
if isinstance(key, tuple):
if key[1] == "":
output.append(key[0])
else:
output.append(f"[{key[1]}]{key[0]}[/{key[1]}]")
else:
output.append(key)
rich_print(", ".join(output))
def show(obj: bakalariapi.objects.BakalariObject, title: str | None = None):
if title is not None:
print(title)
if isinstance(obj, bakalariapi.Komens):
rich_print(obj.format(True))
print("\n\n")
print_keys([("P - Potrvrdí přečtení zprávy", "" if obj.confirmed else "green")])
def komens_key_handler(key_press: KeyPress, done: Callable):
if key_press.key == "p":
print("Potvrzuji zprávu...")
obj.confirm(api)
print("Zpráva potvrzena")
asyncio.run(keyhandler(komens_key_handler))
elif isinstance(obj, bakalariapi.Grade):
rich_print(obj.format(True))
print("\n\n")
asyncio.run(keyhandler(None))
elif isinstance(obj, bakalariapi.Meeting):
rich_print(obj.format(True))
print("\n\n")
is_before = obj.is_before_start
delta = obj.start_time_delta
color = ""
# Delta totiž může být očividně i negativní
if not is_before and delta >= timedelta(hours=-1):
color = "red"
elif is_before and delta <= timedelta(minutes=5):
color = "yellow"
elif is_before and delta <= timedelta(minutes=30):
color = "green"
print_keys(
[("O - Otevře schůzku v prohlížeči", color), "Z - Zobrazí HTML pozvánky"]
)
def meeting_key_handler(key_press: KeyPress, done: Callable):
key = key_press.key.lower()
if key == "o":
webbrowser.open(obj.join_url)
elif key == "z":
c = Console()
c.print(Syntax(str(parseHTML(obj.content).prettify()), "html"))
asyncio.run(keyhandler(meeting_key_handler))
# elif isinstance(obj, bakalariapi.Student):
# pass
elif isinstance(obj, bakalariapi.Homework):
rich_print(obj.format(True))
print("\n\n")
print_keys(
[
("H - Označí úkol jako hotový", "" if obj.done else "green"),
"N - Označí úkol jako nehotový",
"Z - Zobrazí HTML úkolu",
]
)
def homework_key_handler(key_press: KeyPress, done: Callable):
key = key_press.key.lower()
if key == "h":
obj.mark_as_done(api, True)
print("Úkol označen jako hotový")
elif key == "n":
obj.mark_as_done(api, False)
print("Úkol označen jako nehotový")
elif key == "z":
c = Console()
c.print(Syntax(str(parseHTML(obj.content).prettify()), "html"))
asyncio.run(keyhandler(homework_key_handler))
else:
raise Exception(f"Undefined type '{type(obj)}' to show")
async def keyhandler(
handler: Callable[[KeyPress, Callable[[], None]], None] | None,
*,
done_on_enter: bool = True,
mask_keyboard_interrupt: bool = False,
):
"""
Začne zaznamenávat zmáčklé klávesy, které následně passuje do dané funkce.
Args:
handler:
Funkce do které se passují zaznamenané klávesy.
Bere 2 argumenty:
key_press:
Zaznamenaný stisk klávesy.
done:
Funkce, která při zavolání ukončí záznam kláves.
Pokud je `None`, nic se nevolá.
Hodnota `None` má smysl pouze pokud parametr `done_on_enter` je `True`.
done_on_enter:
Pokud True, tak se při klávese Enter ukončí záznam kláves.
Pozn.: Pokud True, tak se funkce v parametru handler nevolá.
mask_keyboard_interrupt:
Pokud `True`, tak `KeyboardInterrupt` bude potlačen.
Pokud `False`, `KeyboardInterrupt` bude propagován.
Pozn.: Ve skutečnosti je `KeyboardInterrupt` simulován, jelikož z asyncio loopu `KeyboardInterrupt` nepřichází.
Příklad:
```
def handler(keys_press: KeyPress, done: Callable):
if key_press.key == "q":
done()
asyncio.run(keyhandler(handler))
```
Nebo, pokud máme asynchoní funkci, lepší řešení pro poslední řádku je:
```
await keyhandler(handler)
```
"""
evnt = asyncio.Event()
inpt = create_input()
done = lambda: evnt.set()
def key_handler_proc(keys: list[KeyPress]):
for key_press in keys:
if done_on_enter and key_press.key == Keys.Enter:
done()
# elif key_press.key == Keys.F4:
# for key_press in keys:
# if key_press.key == Keys.Escape:
# raise SystemExit
elif not mask_keyboard_interrupt and key_press.key == Keys.ControlC:
raise KeyboardInterrupt
elif handler is not None:
handler(key_press, done)
with inpt.raw_mode():
with inpt.attach(lambda: key_handler_proc(inpt.read_keys())):
await evnt.wait()
def get_io_filepath(file: str) -> str:
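    """Return the path of the given file name inside the user data directory."""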
return os.path.join(dirs.user_data_dir, file)
def get_io_file(file: str, create_file: bool, mode: str = "r+") -> IO:
"""Vrátí file handler na daný soubor `file` v uživatelské (data) složce."""
path = get_io_filepath(file)
if not os.path.exists(path):
if not create_file:
raise FileNotFoundError()
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, "x", encoding="utf-8"):
pass
return open(path, mode, encoding="utf-8")
def save_config():
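    """Persist the current arguments (the global Args instance) as JSON into the user config file."""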
with get_io_file(CONFIG_FILE, True) as f:
# Indent, protože chci, aby to šlo přehledně upravit i z editoru (i když k tomu nejspíše nikdy nedojde)
# (a navíc alespoň nemusí řešit formátování při "config show")
json.dump(args.__dict__, f, indent=4)
def disable_ssl():
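    """Monkey-patch RequestsSession.__init__ so that new sessions skip TLS certificate verification, and silence the resulting urllib3 warnings."""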
def patch(f: Callable):
def patched(*args, **kwargs):
# `cast()` protože jsem zatím nepřišel na způsob, jak dostat hint při patchování metod (pomocí `ParamSpec`u)
session = cast(bakalariapi.sessions.RequestsSession, args[0])
bound = inspect.signature(f).bind(*args, **kwargs)
bound.apply_defaults()
login = bound.arguments["login"]
bound.arguments["login"] = False
x = f(*bound.args, **bound.kwargs)
session.session.verify = False
if login:
session.login()
return x
return patched
bakalariapi.sessions.RequestsSession.__init__ = patch(
bakalariapi.sessions.RequestsSession.__init__
)
# Když nastavíme `verify` na `False` (v `requests` modulu), `urllib3` si začne stěžovat
warnings.filterwarnings("ignore", category=InsecureRequestWarning)
##################################################
##### PŘÍKAZO-FUNKCE #####
##################################################
def Init() -> bool:
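    """Gather missing connection details, verify the server and credentials and initialise the API; returns False when only the limited (partial) mode is available."""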
def partial_init_mode():
rich_print(
"\nInicilizace neproběhla úspěšně a shell poběží v omezeném módu.\nPro přepnutí do plného módu zkuste opětovat inicializaci pomocí příkazu 'init'.",
color="yellow",
)
def ask_import() -> bool:
try:
if args.no_import:
if dialog_ano_ne(
"Server není dostupný; Chce importovat uložená data?",
True,
"yellow",
):
Command_Import()
else:
partial_init_mode()
else:
rich_print(
"Server není dostupný; Uložená data byla již importována, je tedy možné pracovat se starými daty",
color="yellow",
)
partial_init_mode()
except KeyboardInterrupt:
partial_init_mode()
return False
if args.url is None:
try:
args.url = input("URL adresa serveru: ")
api.server_info.url = args.url
except KeyboardInterrupt:
rich_print("\nNebyla zadána adresa serveru", color="red")
partial_init_mode()
return False
if args.username is None:
try:
args.username = input("Přihlašovací jméno: ")
api.username = args.username
except KeyboardInterrupt:
rich_print("\nNebylo zadáno přihlašovací jméno", color="red")
partial_init_mode()
return False
if args.password is None:
try:
args.password = getpass.getpass("Heslo: ")
except KeyboardInterrupt:
rich_print(
"\nHeslo nebylo zadáno, předpokládá se prázdné heslo", color="yellow"
)
args.password = ""
api.password = args.password
try:
rich_print(
f"Kontrola stavu serveru a přihlašovacích údajů pro uživatele [cyan]{api.username}[/cyan]...",
highlight=False,
)
try:
if not api.is_login_valid():
rich_print("Přihlašovací údaje jsou neplatné", color="red")
partial_init_mode()
return False
except requests.exceptions.SSLError:
# rich.get_console().print_exception()
try:
if dialog_ano_ne(
"Nepodařilo se navázat zabezpečené připojení k serveru. Chcete pokračovat s nezabezpečeným připojením?",
False,
"yellow",
):
disable_ssl()
api.session_manager.kill_all(False)
print(
"Deaktivovalo se zabezpečené připojení, inicializace nyní proběhne znovu..."
)
return Init()
else:
return ask_import()
except KeyboardInterrupt:
partial_init_mode()
return False
except requests.exceptions.RequestException:
return ask_import()
except KeyboardInterrupt:
rich_print("Inicializace byla předčasně ukončena", color="yellow")
partial_init_mode()
return False
rich_print("Server běží a přihlašovací údaje jsou správné", color="green")
print("Nastavuji...")
try:
with warnings.catch_warnings():
# Nechceme dostat `VersionMismatchWarning`, protože v `SeverInfo()` kontrolujeme verzi manuálně
warnings.simplefilter("ignore")
api.init()
except KeyboardInterrupt:
rich_print(
"Nebyly získány informace o stavu serveru, ale žádné funkce by tímto neměli být ovlivněny",
color="yellow",
)
return True
print("Nastaveno:")
ServerInfo()
return True
def ServerInfo():
rich_print(
f"Typ uživatele: {'[bright_black]Není k dispozici[/bright_black]' if api.user_info.type == '' else f'[cyan]{api.user_info.type}[/cyan]'}\n"
f"Uživatelký hash: {'[bright_black]Není k dispozici[/bright_black]' if api.user_info.hash == '' else f'[cyan]{api.user_info.hash}[/cyan]'}\n"
f"Verze Bakalářů: {'[bright_black]Není k dispozici[/bright_black]' if api.server_info.version is None else f'[cyan]{api.server_info.version}[/cyan]'}\n"
f"Datum verze Bakalářů: {'[bright_black]Není k dispozici[/bright_black]' if api.server_info.version_date is None else '[cyan]'+api.server_info.version_date.strftime('%d. %m. %Y')+'[/cyan] [bright_black]('+cs_timedelta((datetime.now() - api.server_info.version_date), 'd')+' stará verze)[/bright_black]'}\n"
f"Evidenční číslo verze Bakalářů: {'[bright_black]Není k dispozici[/bright_black]' if api.server_info.evid_number is None else f'[cyan]{api.server_info.evid_number}[/cyan]'}\n",
highlight=False,
)
if not (api.server_info.version is None) and not api.is_version_supported():
rich_print(
"*** Jiná verze Bakalářů! Všechny funkce nemusejí fungovat správně! ***",
highlight=False,
color="yellow",
)
def Command_Komens(limit: int | None = None, force_fresh: bool = False):
def fresh() -> list[bakalariapi.Komens]:
if api.is_partial_init:
partial_init_notice()
return []
output: list[bakalariapi.Komens] = []
with Progress() as progress:
task = RichTask(
progress, progress.add_task("Získávání zpráv", start=False, total=0)
)
unresolved = api._parse(
bakalariapi.modules.komens.getter_komens_ids(api)
).get(bakalariapi.UnresolvedID)[:limit]
task.update(total=len(unresolved))
for unresolved_id in unresolved:
output.append(api._resolve(unresolved_id).get(bakalariapi.Komens)[0])
task.update(advance=1)
return output
if force_fresh:
zpravy = fresh()
else:
zpravy = api.get_komens(bakalariapi.GetMode.CACHED)
if len(zpravy) == 0:
print("Žádné zprávy v Lootingu, zkouším načíst ze serveru")
zpravy = fresh()
length = len(zpravy)
if length == 0:
print("Nebyly nalezeny žádné aktualní schůzky")
return
cls()
count = 1
for zprava in zpravy:
try:
show(zprava, f"*** Zpráva {count} z {length} ***")
count += 1
cls()
except KeyboardInterrupt:
print("\n")
break
def Command_Znamky(force_fresh: bool = False):
print("Získávám známky...")
try:
znamky = api.get_grades(
bakalariapi.GetMode.FRESH
if force_fresh
else bakalariapi.GetMode.CACHED_OR_FRESH
)
except bakalariapi.exceptions.PartialInitError:
partial_init_notice()
return
length = len(znamky)
print(f"Známky získány ({length}), zobrazuji...")
cls()
count = 1
for znamka in znamky:
try:
show(znamka, f"*** Známka {count} z {length} ***")
count += 1
cls()
except KeyboardInterrupt:
print("\n")
break
def Command_Schuzky(force_fresh: bool = False):
def fresh():
if api.is_partial_init:
partial_init_notice()
return []
output = []
with Progress() as progress:
task = RichTask(
progress, progress.add_task("Získávání schůzek", start=False, total=0)
)
unresolved = api._parse(
bakalariapi.modules.meetings.getter_future_meetings_ids(api)
).get(bakalariapi.UnresolvedID)
task.update(total=len(unresolved))
for unresolved_id in unresolved:
output.append(api._resolve(unresolved_id).get(bakalariapi.Meeting)[0])
task.update(advance=1)
return output
if force_fresh:
schuzky = fresh()
else:
schuzky = api.get_meetings(bakalariapi.GetMode.CACHED)
if len(schuzky) == 0:
print("Žádné schůzky v Lootingu, zkouším načíst ze serveru")
schuzky = fresh()
length = len(schuzky)
if length == 0:
print("Nebyly nalezeny žádné aktualní schůzky")
return
cls()
count = 1
for schuzka in schuzky:
try:
show(schuzka, f"*** Schůzka {count} z {length} ***")
count += 1
cls()
except KeyboardInterrupt:
print("\n")
break
def Command_Studenti(force_fresh: bool = False):
print("Získávám studenty...")
try:
studenti = api.get_students(
bakalariapi.GetMode.FRESH
if force_fresh
else bakalariapi.GetMode.CACHED_OR_FRESH
)
except bakalariapi.exceptions.PartialInitError:
partial_init_notice()
return
length = len(studenti)
print(f"Studenti získáni, počet studentů je {length}")
try:
count = dialog_cislo("Kolik zobrazit výsledků najednou?", 25)
except KeyboardInterrupt:
return
offset = 0
cls()
while offset < length:
try:
for _ in range(count):
if offset >= length:
break
print(studenti[offset].format())
offset += 1
input(
f"Pro pokračování stiskni klávasu... (Již zobrazeno {offset} výsledků z {length})"
)
cls()
except KeyboardInterrupt:
print("\n")
break
def Command_Ukoly(fast: bool = False, force_fresh: bool = False):
print("Načítání úkolů...")
try:
if fast:
ukoly = api.get_homeworks(
bakalariapi.GetMode.FRESH
if force_fresh
else bakalariapi.GetMode.CACHED_OR_FRESH,
fast_mode=True,
)
else:
ukoly = api.get_homeworks(
bakalariapi.GetMode.FRESH
if force_fresh
else bakalariapi.GetMode.CACHED_OR_FRESH,
fast_mode=False,
unfinished_only=False,
only_first_page=False,
)
except bakalariapi.exceptions.PartialInitError:
partial_init_notice()
return
hotove = 0
nehotove = 0
for ukol in ukoly:
if ukol.done:
hotove += 1
else:
nehotove += 1
if hotove + nehotove == 0:
print("Nebyly nalezeny žádné aktualní úkoly")
return
print(f"Úkoly načteny (hotové {hotove}, nehotové {nehotove})")
zobraz_hotove = fast or dialog_ano_ne("Chte zobrazit již hotové úkoly?")
count = 1
for ukol in ukoly:
try:
if not zobraz_hotove and ukol.done:
continue
cls()
show(
ukol,
f"*** Domácí úkol {count} z {hotove + nehotove if zobraz_hotove else nehotove} ***",
)
count += 1
except KeyboardInterrupt:
print("\n")
break
def Command_Konec(nice: bool = True):
shell_instance.stop_loop()
api.kill(nice)
def Command_Export(file_name: str = "main"):
print("Generace JSON dat...")
with get_io_file(file_name, True) as f:
json.dump(api.looting.export_data(), f, ensure_ascii=False)
# Odstraníme data, která jsou případně po JSONu, co jsme teď napsali (třeba pozůstatek po předchozím JSONu, pokud byl delší jak náš současný)
f.truncate()
print(f"JSON data vygenerována a zapsána do souboru '{file_name}'")
def Command_Import(file_name: str = "main"):
try:
with get_io_file(file_name, False) as f:
api.looting.import_data(json.loads(f.read()))
except FileNotFoundError:
rich_print(
f"Data nebyla načtena, jelikož soubor '{file_name}' neexistuje",
color="yellow",
)
else:
print(f"Data ze souboru '{file_name}' byla načtena")
def Command_Config(namespace: dict[str, Any]):
cmd = namespace["cmd"]
config_path = get_io_filepath(CONFIG_FILE)
if cmd == "show":
if os.path.exists(config_path):
with open(config_path, "r") as f:
rich_print(Syntax(f.read(), "json"))
else:
print("Žádná konfigurace není uložená")
elif cmd == "save":
save_config()
print("Konfigurace uložena")
elif cmd == "remove":
if os.path.exists(config_path):
os.remove(config_path)
print("Konfigurace byla vymazána")
else:
print("Nic se nevykonalo, jelikož konfigurace není uložená")
elif cmd == "check":
if os.path.exists(config_path):
s = os.stat(config_path)
rich_print(
f"Konfigurace je uložená z data {datetime.fromtimestamp(s.st_mtime).strftime('%d. %m. %Y, %H:%M:%S')}, velikost konfigurace je {s.st_size}B"
)
else:
print("Žádná konfigurace není uložená")
elif cmd == "open":
dirname = os.path.dirname(config_path) # = dirs.user_data_dir()
if os.path.exists(dirname):
webbrowser.open(os.path.realpath(dirname))
else:
print("Nelze otevřít konfigurační složku, jelikož neexistuje")
##################################################
##### TESTY #####
##################################################
def RunTest(ID: int):
m = __import__(__name__)
t = f"Test{ID}"
if hasattr(m, t):
rich_print(f"Zahajuji test {ID}")
try:
o = getattr(m, t)()
rich_print(
f"Test {ID} skončil" + ("" if o is None else "; Výsledek testu:")
)
if o is not None:
rich_print(o)
except:
rich_print("Test skončil neúspěchem:", color="red")
traceback.print_exc()
else:
rich_print(f"Test {ID} nenalezen", color="red")
def Test0():
print("Spouštím testování...")
with api.session_manager.get_session_or_create(
bakalariapi.sessions.RequestsSession
) as session:
try:
while True:
last = session.get(
api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_INFO)
).json()["data"]["remainingTime"]
print("\r", end="")
while True:
print(
"Současný zbývající čas: " + str(last) + " " * 20, end="\r"
) # Some spaces to rewrite previous text...
session.get(
api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_EXTEND)
)
current = float(
session.get(
api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_INFO)
).json()["data"]["remainingTime"]
)
if last < current:
print("\n")
break
last = current
time.sleep(1)
print(
"Sezení bylo prodlouženo, když zbývalo "
+ str(last)
+ " (+ max 1s) do konce a bylo prodlouženo na "
+ str(current)
)
except KeyboardInterrupt:
print("Testování ukončeno")
def Test1():
# "Kopírování"
print("Vytváření kopie dat skrze export/import...")
data = api.looting.export_data()
new = bakalariapi.looting.Looting()
new.import_data(data)
print("Kopie vytvořena")
# Porovnávání
typ_mismatch = 0
id_len_mismatch = 0
id_mismatch = 0
print("=" * 30)
print(f"Počet typů v datech (old): {len(api.looting.data)}")
print(f"Počet typů v datech (new): {len(api.looting.data)}")
print("Porovnávání zahájeno...")
for typ_old, typ_new in zip(api.looting.data.keys(), new.data.keys()):
if typ_old != typ_new:
print(f"Neshodující se typy! Old: '{typ_old}'; New: '{typ_new}'")
typ_mismatch += 1
continue
old_id_len = len(api.looting.data[typ_old])
new_id_len = len(new.data[typ_new])
if old_id_len != new_id_len:
print(
f"Neshodující se počet záznamů pro typ {typ_old}! Old: {old_id_len}; New: {new_id_len}"
)
id_len_mismatch += 1
for id_old, obj_old, id_new, obj_new in zip(
api.looting.data[typ_old].keys(),
api.looting.data[typ_old].values(),
new.data[typ_new].keys(),
new.data[typ_new].values(),
):
if id_old != id_new:
print(
f"Neshodující se ID! Old: '{id_old}'; New: '{id_new}' (typ: {typ_old}; ID type (old): {type(id_old)}; ID type (new): {type(id_new)})"
)
id_mismatch += 1
print(
f"Porovnávání dokončeno:\nChyb u typů:\t{typ_mismatch}\nChyb u ID:\t{id_mismatch}"
)
return (typ_mismatch, id_mismatch, id_len_mismatch)
def Test2():
print("Získávám IDčka online schůzek...")
IDs = api._parse(
bakalariapi.modules.meetings.getter_meetings_ids(
api, datetime(1, 1, 1), datetime(9999, 12, 31, 23, 59, 59)
)
).get(bakalariapi.UnresolvedID)
la = len(IDs)
print(f"IDčka online schůzek získany ({la})")
print()
error: list[bakalariapi.UnresolvedID[bakalariapi.Meeting]] = []
try:
with Progress() as progress:
task = RichTask(progress, progress.add_task("Získávání schůzek", total=la))
for ID in IDs:
task.update(description=f"Schůzka {ID.ID}")
try:
api._resolve(ID)
except bakalariapi.exceptions.BakalariQuerrySuccessError as e:
progress.log(f"Online schůzku {ID.ID} se nepodařilo načíst")
error.append(ID)
finally:
task.update(advance=1)
except KeyboardInterrupt:
pass
finally:
le = len(error)
print(
f"Úspěšné pokusy: {la - le}; Neúspěšné pokusy: {le}; Chybovost: {le/la*100:.2f}%"
)
def Test3():
print("Tento test již není podporován... Sadge")
return
# return API.GetHomeworksIDs()
def Test4():
print("Tento test již není podporován... Sadge")
return
# return API.MarkHomeworkAsDone(input("ID Úkolu: "), input("ID Studenta: "), True)
def Test5():
print("Tento test již není podporován... Sadge")
return
# homeworks = API.GetHomeworks()
# print("Úkoly načteny...")
# zobrazHotove = AnoNeDialog("Chte zobrazit již hotové úkoly?")
# cls()
# for homework in homeworks:
# if not zobrazHotove and homework.Done:
# continue
# print("*** Domácí úkol ***")
# print(homework.Format())
# print("\n\n")
# input("Pro pokračování stiskni klávasu...")
# cls()
def Test6():
count_total = 0
count_invalid = 0
try:
while True:
count_total += 1
output = api.get_homeworks(
bakalariapi.GetMode.FRESH,
fast_mode=False,
unfinished_only=False,
only_first_page=False,
)
if len(output) <= 20:
count_invalid += 1
print("==============================")
print(f"Nepodařil se se pokus číslo {count_total}")
print(f"Nepodařených pokusů je {count_invalid} z {count_total}")
probrallity = (count_total - count_invalid) / count_total * 100
print("Pravděpodobnost úspěšnosti je %.2f%%" % probrallity)
print("==============================")
time.sleep(5)
except KeyboardInterrupt:
print("==============================")
print(f"Nepodařených pokusů bylo {count_invalid} z celkových {count_total}")
probrallity = (count_total - count_invalid) / count_total * 100
print("Konečná ravděpodobnost úspěšnosti je %.2f%%" % probrallity)
##################################################
##### MAIN #####
##################################################
def main():
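    """Entry point: parse arguments and configuration, initialise BakalariAPI, optionally run the automatic tasks and start the interactive shell."""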
global api
global args
def load_args_from_config() -> dict | None:
global args
with get_io_file(CONFIG_FILE, True) as f:
parsed = json.load(f)
return parsed
parser = argparse.ArgumentParser(
description="Shell integrující funkcionalitu BakalářiAPI",
epilog="Ano, ano, ano... Actually je to web scraper, ale API zní líp :)",
)
if parser.prog == "":
parser.prog = "bakalarishell"
parser.add_argument(
"url",
help="URL na bakaláře (př. https://bakalari.skola.cz); Pokud není tento argument přítomen, program se zeptá za běhu",
nargs="?",
default=None,
)
parser.add_argument(
metavar="jmeno",
help="Přihlašovací jméno; Pokud není tento argument přítomen, program se zeptá za běhu",
dest="username",
nargs="?",
default=None,
)
parser.add_argument(
metavar="heslo",
nargs="?",
help="Přihlašovací heslo; Pokud není tento argument přítomen, program se zeptá za běhu",
dest="password",
default=None,
)
parser.add_argument(
"-b",
"--browser",
choices=[x.name.lower() for x in bakalariapi.Browser],
type=str.lower, # => case-insensitive
help="Specifikuje WebDriver prohlížeče, který použít",
default=None,
)
parser.add_argument(
"-e",
"--executablePath",
help="Cesta ke spustitelnému webdriveru pro prohlížeč, který je specifikovaný pomocí '-b'",
dest="executable_path",
default=None,
)
parser.add_argument(
"-t",
"--test",
type=int,
help="Test, který se má spustit",
# dest="test",
metavar="ID",
default=None,
)
parser.add_argument(
"-a",
"--auto-run",
help="Pokud je tato flaga přítomna, spustí se automatické úlohy",
action="store_true",
dest="auto_run",
default=None,
)
parser.add_argument(
"-n",
"--no-init",
help="Pokud je tato flaga přítomna, nebude BakalariAPI instance automaticky inicializována",
action="store_true",
dest="no_init",
default=None,
)
parser.add_argument(
"--no-import",
help="Pokud je tato flaga přítomna, nebude proveden import dat (z hlavního souboru)",
action="store_true",
dest="no_import",
default=None,
)
parser.add_argument(
"-v",
"--verbose",
help="Zapne shell v 'ukecaném módu'; Lze opakovat vícekrát pro větší 'ukecanost' (max 5)",
action="count",
default=None,
)
parser.add_argument(
"-d",
"--disable-config",
help="Soubor s konfigurací se bude ignorovat, tudíž se brát v potaz pouze argumenty z příkazové řádky",
action="store_true",
dest="disable_config",
default=None,
)
parser.add_argument(
"-c",
"--command",
help="Vykoná daný příkaz po zapnutí shellu (po autorunu); Lze opakovat vícekrát",
action="append",
dest="commands",
default=None,
)
# Všechny argumenty pro argparse MUSÍ mít "default=None", jinak se neprofiltrují
# a nelze pro daný argument načíst hodnotu z configu (protože hodnota z configu
# se přepíše hodnotou "None" z argparse)
parsed = {k: v for k, v in vars(parser.parse_args()).items() if v is not None}
# Jelikož hodnoty filtrujeme, tak pokud i po filtrování je "disable_config"
# v "parsed" tak má hodnotu `True`, tudíž se můžeme dotazovat (jen) přes `in`
if not ("disable_config" in parsed):
from_config = load_args_from_config()
if from_config is not None:
parsed = from_config | parsed
args = Args(**parsed)
# Verbose:
# 0 - Nic
# 1 - Warning; Pouze BakalářiAPI
# 2 - Info; Pouze BakalářiAPI
# 3 - Debug; Pouze BakalářiAPI
# 4 - Info
# 5 - NOSET
if args.verbose != 0:
logging.basicConfig(
level=[
None,
"WARNING",
"INFO",
"DEBUG",
"INFO",
"NOTSET",
][args.verbose],
datefmt="[%X]",
handlers=[RichHandler()],
)
logging.info(
"Logging zapnut na levelu %s (%s)",
args.verbose,
logging.getLevelName(logging.root.level),
)
if args.verbose < 4:
for logger in [
logging.getLogger(name) for name in logging.root.manager.loggerDict
]:
if logger.name.startswith("bakalariapi"):
continue
logger.propagate = False
# logging.getLogger("bakalariapi").propagate = True
selenium: bakalariapi.SeleniumHandler | None = None
if args.browser is not None:
selenium = bakalariapi.SeleniumHandler(
bakalariapi.Browser[args.browser.upper()],
args.executable_path,
)
api = bakalariapi.BakalariAPI(args.url, args.username, args.password, selenium)
successful_init = False
if not args.no_init:
successful_init = Init()
if not args.no_import:
try:
with get_io_file("main", False) as f:
api.looting.import_data(json.loads(f.read()))
except FileNotFoundError:
pass
if args.test is not None:
RunTest(args.test)
prepare_shell()
# Chceme `main()` locals, ne `prepare_shell()` locals
shell_instance.PYTHON_EXEC_LOCALS = locals()
print()
    rich_print(
        f"Bakalarishell připraven - verze BakalářiAPI je "
        + (
            f"[green_yellow]{bakalariapi.__version__}[/green_yellow]"
            if "dev" in bakalariapi.__version__
            else f"[bright_cyan]{bakalariapi.__version__}[/bright_cyan]"
        )
    )
lasttime: datetime = datetime.max
try:
with get_io_file(TIME_FILE, False) as f:
lasttime = datetime.fromisoformat(f.read())
except FileNotFoundError:
pass
if args.auto_run:
if successful_init:
def task_ukoly(api: bakalariapi.BakalariAPI, task: RichTask):
length = len(
api.get_homeworks(bakalariapi.GetMode.FRESH, fast_mode=True)
)
task.update(total=length, completed=length)
def task_komens(api: bakalariapi.BakalariAPI, task: RichTask):
unresolved = api._parse(
bakalariapi.modules.komens.getter_komens_ids(
api,
from_date=None if lasttime is None else lasttime - timedelta(5),
)
).get(bakalariapi.UnresolvedID)
task.update(total=len(unresolved))
task.start()
for unresolved_id in unresolved:
api._resolve(unresolved_id)
task.update(advance=1)
def task_znamky(api: bakalariapi.BakalariAPI, task: RichTask):
length = len(api.get_all_grades())
task.update(total=length, completed=length)
def task_schuzky(api: bakalariapi.BakalariAPI, task: RichTask):
unresolved = api._parse(
bakalariapi.modules.meetings.getter_future_meetings_ids(api)
).get(bakalariapi.UnresolvedID)
task.update(total=len(unresolved))
task.start()
for unresolved_id in unresolved:
api._resolve(unresolved_id)
task.update(advance=1)
@dataclass
class Task:
description: str
function: Callable[[bakalariapi.BakalariAPI, RichTask], None]
start: bool = True
tasks: list[Task] = [
Task("Získání Komens zpráv", task_komens, False),
Task("Získání schůzek", task_schuzky, False),
Task("Získání úkolů", task_ukoly),
Task("Získání známek", task_znamky),
]
def autorun():
with Progress(
"[progress.description]{task.description}",
BarColumn(),
"[progress.percentage]{task.percentage:>3.0f}%",
"{task.completed}/{task.total}",
TimeRemainingColumn(),
) as progress:
threads: list[threading.Thread] = []
for task in tasks:
thread = threading.Thread(
target=task.function,
args=(
api,
RichTask(
progress,
progress.add_task(
task.description, start=task.start, total=0
),
),
),
)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
print()
autorun()
else:
rich_print(
"Autorun nebyl spuštěn kvůli nepodařené/nekompletní inicializaci",
color="yellow",
)
if "exit" not in args.commands and (not args.no_import or args.auto_run):
print()
today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
today_aware = (
datetime.now()
.astimezone()
.replace(hour=0, minute=0, second=0, microsecond=0)
)
first = True
for znamka in filter(
lambda x: min(lasttime, today - timedelta(5)) < x.date1 and x.grade != "?",
api.looting.get(bakalariapi.Grade),
):
if first:
first = False
print("Poslední známky:")
note = znamka.note1.strip() or znamka.note2.strip()
rich_print(
f"Z předmětu [magenta]{znamka.subject}[/magenta] známka [bright_green]{znamka.grade}[/bright_green] ze dne {znamka.date1.strftime('%d. %m. %Y')}"
+ ("" if note == "" else f" - {note}")
)
first = True
for komens in filter(
lambda x: x.grade == "?", api.looting.get(bakalariapi.Grade)
):
if first:
first = False
print("Nadcházející klasifikace:")
rich_print(
f"Z předmětu [magenta]{komens.subject}[/magenta] na {komens.date1.strftime('%d. %m. %Y')}"
)
first = True
for schuzka in filter(
lambda x: today_aware < x.start_time
and x.start_time < today_aware + timedelta(2),
api.looting.get(bakalariapi.Meeting),
):
if first:
first = False
print("Dnešní a zítřejší schůzky:")
rich_print(
f"{schuzka.start_time.strftime('%H:%M %d. %m. %Y')} - {'[bright_black]Neznámý[/bright_black]' if schuzka.owner is None else f'[magenta]{schuzka.owner.name}[/magenta]'} \"{schuzka.name.strip()}\""
)
first = True
for ukol in filter(lambda x: not x.done, api.looting.get(bakalariapi.Homework)):
if first:
first = False
print("Úkoly:")
ukol._sort_by_date
rich_print(
f"Z předmětu [magenta]{ukol.subject}[/magenta] na {ukol.submission_date.strftime('%d. %m.')} - {ukol.content}"
)
first = True
for znamka in filter(
lambda x: (x.need_confirm and not x.confirmed)
or min(lasttime, today - timedelta(5)) < x.time,
api.looting.get(bakalariapi.Komens),
):
if first:
first = False
print("Komens zprávy:")
rich_print(
f"Komens zpráva od [magenta]{znamka.sender}[/magenta] z {znamka.time.strftime('%H:%M %d. %m. %Y')}"
+ (
" [yellow](nepotvrzená)[/yellow]"
if (znamka.need_confirm and not znamka.confirmed)
else ""
)
)
with get_io_file(TIME_FILE, True) as f:
f.write(datetime.now().isoformat())
if len(args.commands) != 0:
if successful_init:
print("Vykonávám zadané příkazy...")
for command in args.commands:
print(command)
shell_instance.proc_string(command)
else:
rich_print(
"Zadané příkazy nebyly spuštěny kvůli nepodařené/nekompletní inicializaci",
color="yellow",
)
try:
shell_instance.start_loop()
except (shell.DummyShellError, KeyboardInterrupt):
Command_Konec(False)
def prepare_shell():
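    """Create the global Shell instance and register all bakalarishell commands on it."""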
global shell_instance
predefined_commands = [x for x in shell.ShellPredefinedCommands]
predefined_commands.remove(shell.ShellPredefinedCommands.EXIT)
_globals = globals()
_globals["p"] = rich_print
_globals["i"] = rich.inspect
shell_instance = shell.Shell(
# prompt="[bright_green]BakalariAPI Shell[/bright_green][yellow]>[/yellow]",
prompt="BakalariAPI Shell>",
allow_python_exec=True,
python_exec_prefix=" ",
python_exec_globals=_globals,
python_exec_locals=locals(),
predefined_commands=predefined_commands,
command_exception_traceback=True,
command_exception_traceback_locals=True,
command_exception_reraise=False,
raise_on_ctrlc=True,
end_on_ctrlc=True,
dummy_shell="exit" in args.commands,
)
parser_fresh = shell.ShellArgumentParser(add_help=False)
parser_fresh.add_argument(
"-f",
"--fresh",
help="Pokud je tato flaga přítomna, vynutí se získání dat ze serveru",
default=False,
action="store_true",
dest="force_fresh",
)
parser = shell.ShellArgumentParser(parents=[parser_fresh])
parser.add_argument(
"limit",
type=int,
nargs="?",
default=None,
help="Limituje počet zpráv, které se načtou a tím i zrychlí proces",
)
shell_instance.add_command(
shell.Command(
"komens",
Command_Komens,
short_help="Zobrazí komens zprávy",
argparser=parser,
spread_arguments=True,
aliases=["zpravy"],
)
)
shell_instance.add_command(
shell.Command(
"znamky",
Command_Znamky,
short_help="Zobrazí známky",
argparser=shell.ShellArgumentParser(parents=[parser_fresh]),
)
)
shell_instance.add_command(
shell.Command(
"schuzky",
Command_Schuzky,
short_help="Zobrazí (nadcházející) schůzky",
argparser=shell.ShellArgumentParser(parents=[parser_fresh]),
)
)
shell_instance.add_command(
shell.Command(
"studenti",
Command_Studenti,
short_help="Zobrazí studenty",
argparser=shell.ShellArgumentParser(parents=[parser_fresh]),
)
)
parser = shell.ShellArgumentParser()
parser.add_argument("ID", help="ID testu, který se má spustit")
shell_instance.add_command(
shell.Command(
"test",
RunTest,
argparser=parser,
short_help="Spustí daný test",
spread_arguments=True,
)
)
parser = shell.ShellArgumentParser(parents=[parser_fresh])
parser.add_argument(
"-s",
"--slow",
help="Pokud je tato flaga přítomna, úkoly budou získány v 'pomalém módu'",
action="store_false",
dest="fast",
default=True,
)
shell_instance.add_command(
shell.Command(
"ukoly",
Command_Ukoly,
argparser=parser,
short_help="Zobrazí úkoly",
spread_arguments=True,
)
)
shell_instance.add_command(
shell.Command(
"server",
ServerInfo,
short_help="Zobrazí informace o serveru",
)
)
parser = shell.ShellArgumentParser()
parser.add_argument(
"-f",
"--force",
help="Pokud je tato flaga přítomna, neprovede se odlášení sessionů a aplikace se tedy rychleji ukončí",
action="store_false",
default=True,
dest="nice",
)
shell_instance.add_command(
shell.Command(
"exit",
Command_Konec,
argparser=parser,
short_help="Ukončí shell",
spread_arguments=True,
)
)
parser = shell.ShellArgumentParser()
parser.add_argument(
"file_name",
nargs="?",
help="ID/jméno exportu",
default="main",
metavar="ID",
)
shell_instance.add_command(
shell.Command(
"export",
Command_Export,
argparser=parser,
short_help="Exportuje data z daného souboru",
spread_arguments=True,
)
)
parser = shell.ShellArgumentParser()
parser.add_argument(
"file_name",
nargs="?",
help="ID/jméno importu",
default="main",
metavar="ID",
)
shell_instance.add_command(
shell.Command(
"import",
Command_Import,
argparser=parser,
short_help="Importuje data z daného souboru",
spread_arguments=True,
)
)
shell_instance.add_command(
shell.Command("init", Init, short_help="Provede (opětovnou) inicializaci")
)
parser = shell.ShellArgumentParser()
subparsers = parser.add_subparsers(
required=True,
metavar="příkaz",
dest="cmd",
parser_class=shell.ShellArgumentParser,
)
subparsers.add_parser(
"show",
help="Zobrazí uloženou konfiguraci",
)
subparsers.add_parser(
"save",
help="Uloží současnou konfiguraci",
)
subparsers.add_parser(
"remove",
help="Odstraní uloženou konfiguraci",
)
subparsers.add_parser(
"check",
help="Zobrazí údaje o uložené konfiguraci",
)
subparsers.add_parser(
"open",
help="Otevře konfigurační složku",
)
shell_instance.add_command(
shell.Command(
"config",
Command_Config,
argparser=parser,
short_help="Příkaz na práci s uloženou konfigurací",
spread_arguments=False,
)
)
if __name__ == "__main__":
main()
| [
"os.remove",
"argparse.ArgumentParser",
"typing.cast",
"rich.traceback.install",
"logging.getLevelName",
"os.path.join",
"rich.logging.RichHandler",
"traceback.print_exc",
"warnings.simplefilter",
"shell.Command",
"bakalariapi.BakalariAPI",
"os.path.dirname",
"prompt_toolkit.input.create_input",
"os.path.exists",
"warnings.catch_warnings",
"inspect.signature",
"datetime.timedelta",
"platformdirs.PlatformDirs",
"rich.progress.Progress",
"datetime.datetime.now",
"json.dump",
"bakalariapi.modules.meetings.getter_future_meetings_ids",
"os.stat",
"os.path.realpath",
"time.sleep",
"dataclasses.field",
"datetime.datetime",
"rich.progress.BarColumn",
"bakalariapi.modules.komens.getter_komens_ids",
"datetime.datetime.fromtimestamp",
"rich.console.Console",
"rich.get_console",
"webbrowser.open",
"json.load",
"shell.ShellArgumentParser",
"warnings.filterwarnings",
"getpass.getpass",
"asyncio.Event",
"bakalariapi.utils.parseHTML",
"bakalariapi.looting.Looting",
"rich.progress.TimeRemainingColumn",
"logging.getLogger"
] | [((1762, 1790), 'rich.traceback.install', 'tb_install', ([], {'show_locals': '(True)'}), '(show_locals=True)\n', (1772, 1790), True, 'from rich.traceback import install as tb_install\n'), ((1872, 1965), 'platformdirs.PlatformDirs', 'platformdirs.PlatformDirs', ([], {'appauthor': '"""BakalariAPI"""', 'appname': '"""bakalarishell"""', 'roaming': '(True)'}), "(appauthor='BakalariAPI', appname='bakalarishell',\n roaming=True)\n", (1897, 1965), False, 'import platformdirs\n'), ((2399, 2426), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (2404, 2426), False, 'from dataclasses import dataclass, field\n'), ((9897, 9912), 'asyncio.Event', 'asyncio.Event', ([], {}), '()\n', (9910, 9912), False, 'import asyncio\n'), ((9924, 9938), 'prompt_toolkit.input.create_input', 'create_input', ([], {}), '()\n', (9936, 9938), False, 'from prompt_toolkit.input import create_input\n'), ((10698, 10736), 'os.path.join', 'os.path.join', (['dirs.user_data_dir', 'file'], {}), '(dirs.user_data_dir, file)\n', (10710, 10736), False, 'import os\n'), ((12362, 12428), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'InsecureRequestWarning'}), "('ignore', category=InsecureRequestWarning)\n", (12385, 12428), False, 'import warnings\n'), ((28317, 28346), 'bakalariapi.looting.Looting', 'bakalariapi.looting.Looting', ([], {}), '()\n', (28344, 28346), False, 'import bakalariapi\n'), ((33335, 33501), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Shell integrující funkcionalitu BakalářiAPI"""', 'epilog': '"""Ano, ano, ano... Actually je to web scraper, ale API zní líp :)"""'}), "(description=\n 'Shell integrující funkcionalitu BakalářiAPI', epilog=\n 'Ano, ano, ano... Actually je to web scraper, ale API zní líp :)')\n", (33358, 33501), False, 'import argparse\n'), ((38211, 38284), 'bakalariapi.BakalariAPI', 'bakalariapi.BakalariAPI', (['args.url', 'args.username', 'args.password', 'selenium'], {}), '(args.url, args.username, args.password, selenium)\n', (38234, 38284), False, 'import bakalariapi\n'), ((47098, 47139), 'shell.ShellArgumentParser', 'shell.ShellArgumentParser', ([], {'add_help': '(False)'}), '(add_help=False)\n', (47123, 47139), False, 'import shell\n'), ((47383, 47432), 'shell.ShellArgumentParser', 'shell.ShellArgumentParser', ([], {'parents': '[parser_fresh]'}), '(parents=[parser_fresh])\n', (47408, 47432), False, 'import shell\n'), ((48625, 48652), 'shell.ShellArgumentParser', 'shell.ShellArgumentParser', ([], {}), '()\n', (48650, 48652), False, 'import shell\n'), ((48954, 49003), 'shell.ShellArgumentParser', 'shell.ShellArgumentParser', ([], {'parents': '[parser_fresh]'}), '(parents=[parser_fresh])\n', (48979, 49003), False, 'import shell\n'), ((49631, 49658), 'shell.ShellArgumentParser', 'shell.ShellArgumentParser', ([], {}), '()\n', (49656, 49658), False, 'import shell\n'), ((50143, 50170), 'shell.ShellArgumentParser', 'shell.ShellArgumentParser', ([], {}), '()\n', (50168, 50170), False, 'import shell\n'), ((50578, 50605), 'shell.ShellArgumentParser', 'shell.ShellArgumentParser', ([], {}), '()\n', (50603, 50605), False, 'import shell\n'), ((51135, 51162), 'shell.ShellArgumentParser', 'shell.ShellArgumentParser', ([], {}), '()\n', (51160, 51162), False, 'import shell\n'), ((3657, 3675), 'rich.get_console', 'rich.get_console', ([], {}), '()\n', (3673, 3675), False, 'import rich\n'), ((3697, 3715), 'rich.console.Console', 'Console', ([], {'file': 'file'}), '(file=file)\n', (3704, 3715), False, 
'from rich.console import Console\n'), ((10934, 10954), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (10948, 10954), False, 'import os\n'), ((11449, 11486), 'json.dump', 'json.dump', (['args.__dict__', 'f'], {'indent': '(4)'}), '(args.__dict__, f, indent=4)\n', (11458, 11486), False, 'import json\n'), ((24852, 24879), 'os.path.exists', 'os.path.exists', (['config_path'], {}), '(config_path)\n', (24866, 24879), False, 'import os\n'), ((47657, 47797), 'shell.Command', 'shell.Command', (['"""komens"""', 'Command_Komens'], {'short_help': '"""Zobrazí komens zprávy"""', 'argparser': 'parser', 'spread_arguments': '(True)', 'aliases': "['zpravy']"}), "('komens', Command_Komens, short_help='Zobrazí komens zprávy',\n argparser=parser, spread_arguments=True, aliases=['zpravy'])\n", (47670, 47797), False, 'import shell\n'), ((48761, 48868), 'shell.Command', 'shell.Command', (['"""test"""', 'RunTest'], {'argparser': 'parser', 'short_help': '"""Spustí daný test"""', 'spread_arguments': '(True)'}), "('test', RunTest, argparser=parser, short_help=\n 'Spustí daný test', spread_arguments=True)\n", (48774, 48868), False, 'import shell\n'), ((49263, 49374), 'shell.Command', 'shell.Command', (['"""ukoly"""', 'Command_Ukoly'], {'argparser': 'parser', 'short_help': '"""Zobrazí úkoly"""', 'spread_arguments': '(True)'}), "('ukoly', Command_Ukoly, argparser=parser, short_help=\n 'Zobrazí úkoly', spread_arguments=True)\n", (49276, 49374), False, 'import shell\n'), ((49487, 49564), 'shell.Command', 'shell.Command', (['"""server"""', 'ServerInfo'], {'short_help': '"""Zobrazí informace o serveru"""'}), "('server', ServerInfo, short_help='Zobrazí informace o serveru')\n", (49500, 49564), False, 'import shell\n'), ((49948, 50057), 'shell.Command', 'shell.Command', (['"""exit"""', 'Command_Konec'], {'argparser': 'parser', 'short_help': '"""Ukončí shell"""', 'spread_arguments': '(True)'}), "('exit', Command_Konec, argparser=parser, short_help=\n 'Ukončí shell', spread_arguments=True)\n", (49961, 50057), False, 'import shell\n'), ((50361, 50492), 'shell.Command', 'shell.Command', (['"""export"""', 'Command_Export'], {'argparser': 'parser', 'short_help': '"""Exportuje data z daného souboru"""', 'spread_arguments': '(True)'}), "('export', Command_Export, argparser=parser, short_help=\n 'Exportuje data z daného souboru', spread_arguments=True)\n", (50374, 50492), False, 'import shell\n'), ((50796, 50927), 'shell.Command', 'shell.Command', (['"""import"""', 'Command_Import'], {'argparser': 'parser', 'short_help': '"""Importuje data z daného souboru"""', 'spread_arguments': '(True)'}), "('import', Command_Import, argparser=parser, short_help=\n 'Importuje data z daného souboru', spread_arguments=True)\n", (50809, 50927), False, 'import shell\n'), ((51040, 51114), 'shell.Command', 'shell.Command', (['"""init"""', 'Init'], {'short_help': '"""Provede (opětovnou) inicializaci"""'}), "('init', Init, short_help='Provede (opětovnou) inicializaci')\n", (51053, 51114), False, 'import shell\n'), ((51844, 51983), 'shell.Command', 'shell.Command', (['"""config"""', 'Command_Config'], {'argparser': 'parser', 'short_help': '"""Příkaz na práci s uloženou konfigurací"""', 'spread_arguments': '(False)'}), "('config', Command_Config, argparser=parser, short_help=\n 'Příkaz na práci s uloženou konfigurací', spread_arguments=False)\n", (51857, 51983), False, 'import shell\n'), ((11042, 11063), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (11057, 11063), False, 'import os\n'), ((11717, 11768), 'typing.cast', 
'cast', (['bakalariapi.sessions.RequestsSession', 'args[0]'], {}), '(bakalariapi.sessions.RequestsSession, args[0])\n', (11721, 11768), False, 'from typing import IO, TYPE_CHECKING, Any, Callable, cast\n'), ((14228, 14254), 'getpass.getpass', 'getpass.getpass', (['"""Heslo: """'], {}), "('Heslo: ')\n", (14243, 14254), False, 'import getpass\n'), ((16005, 16030), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (16028, 16030), False, 'import warnings\n'), ((16152, 16183), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (16173, 16183), False, 'import warnings\n'), ((18024, 18034), 'rich.progress.Progress', 'Progress', ([], {}), '()\n', (18032, 18034), False, 'from rich.progress import BarColumn, Progress, TaskID, TimeRemainingColumn\n'), ((20037, 20047), 'rich.progress.Progress', 'Progress', ([], {}), '()\n', (20045, 20047), False, 'from rich.progress import BarColumn, Progress, TaskID, TimeRemainingColumn\n'), ((30301, 30311), 'rich.progress.Progress', 'Progress', ([], {}), '()\n', (30309, 30311), False, 'from rich.progress import BarColumn, Progress, TaskID, TimeRemainingColumn\n'), ((32624, 32637), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (32634, 32637), False, 'import time\n'), ((33286, 33298), 'json.load', 'json.load', (['f'], {}), '(f)\n', (33295, 33298), False, 'import json\n'), ((37563, 37603), 'logging.getLevelName', 'logging.getLevelName', (['logging.root.level'], {}), '(logging.root.level)\n', (37583, 37603), False, 'import logging\n'), ((25166, 25193), 'os.path.exists', 'os.path.exists', (['config_path'], {}), '(config_path)\n', (25180, 25193), False, 'import os\n'), ((26613, 26634), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (26632, 26634), False, 'import traceback\n'), ((37688, 37711), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (37705, 37711), False, 'import logging\n'), ((42736, 42750), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (42748, 42750), False, 'from datetime import datetime, timedelta\n'), ((48051, 48100), 'shell.ShellArgumentParser', 'shell.ShellArgumentParser', ([], {'parents': '[parser_fresh]'}), '(parents=[parser_fresh])\n', (48076, 48100), False, 'import shell\n'), ((48304, 48353), 'shell.ShellArgumentParser', 'shell.ShellArgumentParser', ([], {'parents': '[parser_fresh]'}), '(parents=[parser_fresh])\n', (48329, 48353), False, 'import shell\n'), ((48545, 48594), 'shell.ShellArgumentParser', 'shell.ShellArgumentParser', ([], {'parents': '[parser_fresh]'}), '(parents=[parser_fresh])\n', (48570, 48594), False, 'import shell\n'), ((11789, 11809), 'inspect.signature', 'inspect.signature', (['f'], {}), '(f)\n', (11806, 11809), False, 'import inspect\n'), ((25207, 25229), 'os.remove', 'os.remove', (['config_path'], {}), '(config_path)\n', (25216, 25229), False, 'import os\n'), ((25400, 25427), 'os.path.exists', 'os.path.exists', (['config_path'], {}), '(config_path)\n', (25414, 25427), False, 'import os\n'), ((27855, 27868), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (27865, 27868), False, 'import time\n'), ((30031, 30048), 'datetime.datetime', 'datetime', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (30039, 30048), False, 'from datetime import datetime, timedelta\n'), ((30050, 30084), 'datetime.datetime', 'datetime', (['(9999)', '(12)', '(31)', '(23)', '(59)', '(59)'], {}), '(9999, 12, 31, 23, 59, 59)\n', (30058, 30084), False, 'from datetime import datetime, timedelta\n'), ((37429, 37442), 'rich.logging.RichHandler', 
'RichHandler', ([], {}), '()\n', (37440, 37442), False, 'from rich.logging import RichHandler\n'), ((6712, 6731), 'datetime.timedelta', 'timedelta', ([], {'hours': '(-1)'}), '(hours=-1)\n', (6721, 6731), False, 'from datetime import datetime, timedelta\n'), ((7204, 7233), 'webbrowser.open', 'webbrowser.open', (['obj.join_url'], {}), '(obj.join_url)\n', (7219, 7233), False, 'import webbrowser\n'), ((20244, 20304), 'bakalariapi.modules.meetings.getter_future_meetings_ids', 'bakalariapi.modules.meetings.getter_future_meetings_ids', (['api'], {}), '(api)\n', (20299, 20304), False, 'import bakalariapi\n'), ((25445, 25465), 'os.stat', 'os.stat', (['config_path'], {}), '(config_path)\n', (25452, 25465), False, 'import os\n'), ((25769, 25797), 'os.path.dirname', 'os.path.dirname', (['config_path'], {}), '(config_path)\n', (25784, 25797), False, 'import os\n'), ((25835, 25858), 'os.path.exists', 'os.path.exists', (['dirname'], {}), '(dirname)\n', (25849, 25858), False, 'import os\n'), ((41412, 41423), 'rich.progress.BarColumn', 'BarColumn', ([], {}), '()\n', (41421, 41423), False, 'from rich.progress import BarColumn, Progress, TaskID, TimeRemainingColumn\n'), ((41567, 41588), 'rich.progress.TimeRemainingColumn', 'TimeRemainingColumn', ([], {}), '()\n', (41586, 41588), False, 'from rich.progress import BarColumn, Progress, TaskID, TimeRemainingColumn\n'), ((42838, 42852), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (42850, 42852), False, 'from datetime import datetime, timedelta\n'), ((45653, 45667), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (45665, 45667), False, 'from datetime import datetime, timedelta\n'), ((6795, 6815), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(5)'}), '(minutes=5)\n', (6804, 6815), False, 'from datetime import datetime, timedelta\n'), ((7283, 7292), 'rich.console.Console', 'Console', ([], {}), '()\n', (7290, 7292), False, 'from rich.console import Console\n'), ((18229, 18278), 'bakalariapi.modules.komens.getter_komens_ids', 'bakalariapi.modules.komens.getter_komens_ids', (['api'], {}), '(api)\n', (18273, 18278), False, 'import bakalariapi\n'), ((40465, 40525), 'bakalariapi.modules.meetings.getter_future_meetings_ids', 'bakalariapi.modules.meetings.getter_future_meetings_ids', (['api'], {}), '(api)\n', (40520, 40525), False, 'import bakalariapi\n'), ((44096, 44108), 'datetime.timedelta', 'timedelta', (['(2)'], {}), '(2)\n', (44105, 44108), False, 'from datetime import datetime, timedelta\n'), ((6882, 6903), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(30)'}), '(minutes=30)\n', (6891, 6903), False, 'from datetime import datetime, timedelta\n'), ((25888, 25913), 'os.path.realpath', 'os.path.realpath', (['dirname'], {}), '(dirname)\n', (25904, 25913), False, 'import os\n'), ((43049, 43061), 'datetime.timedelta', 'timedelta', (['(5)'], {}), '(5)\n', (43058, 43061), False, 'from datetime import datetime, timedelta\n'), ((45064, 45076), 'datetime.timedelta', 'timedelta', (['(5)'], {}), '(5)\n', (45073, 45076), False, 'from datetime import datetime, timedelta\n'), ((8235, 8244), 'rich.console.Console', 'Console', ([], {}), '()\n', (8242, 8244), False, 'from rich.console import Console\n'), ((17202, 17216), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (17214, 17216), False, 'from datetime import datetime, timedelta\n'), ((25541, 25575), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['s.st_mtime'], {}), '(s.st_mtime)\n', (25563, 25575), False, 'from datetime import datetime, timedelta\n'), ((39836, 
39848), 'datetime.timedelta', 'timedelta', (['(5)'], {}), '(5)\n', (39845, 39848), False, 'from datetime import datetime, timedelta\n'), ((7328, 7350), 'bakalariapi.utils.parseHTML', 'parseHTML', (['obj.content'], {}), '(obj.content)\n', (7337, 7350), False, 'from bakalariapi.utils import cs_timedelta, parseHTML\n'), ((8280, 8302), 'bakalariapi.utils.parseHTML', 'parseHTML', (['obj.content'], {}), '(obj.content)\n', (8289, 8302), False, 'from bakalariapi.utils import cs_timedelta, parseHTML\n')] |
import json
import codecs
import pandas as pd
import boto3
csv_path = "./fixed_tweets.csv"
save_path = "./fixed_tweets.json"
df = pd.read_csv(csv_path, header=None, encoding="utf-8")
df.columns = ["tweet"]
df_json = df.to_dict(orient='records')
resource = boto3.resource('dynamodb', region_name='ap-northeast-1')
# Connect to the DynamoDB table
table = resource.Table('konomania-tweet')
# Load the JSON object created in the step 3 using put_item method
for i, tweet in enumerate(df_json):
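    # NOTE: the guard below writes only the first two rows (presumably a test run); remove it to upload the full CSV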
if i > 1: break
tweet["id"] = i
table.put_item(Item=tweet)
# Test
# response = table.get_item(Key={'seat_no': 'A 314216'})
# response | [
"pandas.read_csv",
"boto3.resource"
] | [((133, 185), 'pandas.read_csv', 'pd.read_csv', (['csv_path'], {'header': 'None', 'encoding': '"""utf-8"""'}), "(csv_path, header=None, encoding='utf-8')\n", (144, 185), True, 'import pandas as pd\n'), ((261, 317), 'boto3.resource', 'boto3.resource', (['"""dynamodb"""'], {'region_name': '"""ap-northeast-1"""'}), "('dynamodb', region_name='ap-northeast-1')\n", (275, 317), False, 'import boto3\n')] |
"""Authentication routes."""
from __future__ import annotations
from flask import g
from flask_accept import accept_fallback
from keeper.api import api
from keeper.auth import password_auth
from keeper.logutils import log_route
from ._models import AuthTokenResponse
@api.route("/token")
@accept_fallback
@log_route()
@password_auth.login_required
def get_auth_token() -> str:
"""Obtain a token for API users.
**Example request**
.. code-block:: http
GET /token HTTP/1.1
Accept: */*
Accept-Encoding: gzip, deflate
Authorization: Basic dXNlcjpwYXNz
Connection: keep-alive
Host: localhost:5000
User-Agent: HTTPie/0.9.3
**Example response**
.. code-block:: http
HTTP/1.0 200 OK
Content-Length: 139
Content-Type: application/json
Date: Tue, 09 Feb 2016 20:23:11 GMT
Server: Werkzeug/0.11.3 Python/3.5.0
{
"token": "<KEY>..."
}
:reqheader Authorization: ``username:password``
:>json string token: Token string. Use this token in the basic auth
``username`` field.
:statuscode 200: No errors.
:statuscode 401: Not authenticated.
"""
return AuthTokenResponse(token=g.user.generate_auth_token()).json()
| [
"keeper.logutils.log_route",
"keeper.api.api.route",
"flask.g.user.generate_auth_token"
] | [((274, 293), 'keeper.api.api.route', 'api.route', (['"""/token"""'], {}), "('/token')\n", (283, 293), False, 'from keeper.api import api\n'), ((312, 323), 'keeper.logutils.log_route', 'log_route', ([], {}), '()\n', (321, 323), False, 'from keeper.logutils import log_route\n'), ((1250, 1278), 'flask.g.user.generate_auth_token', 'g.user.generate_auth_token', ([], {}), '()\n', (1276, 1278), False, 'from flask import g\n')] |
"""
Test princomp extraction from CLI
"""
import argparse
import os
import numpy as np
from demo_utils import get_random_data
from hebbnets.networks import MultilayerHahNetwork
np.set_printoptions(suppress=True)
def _argparse():
parser = argparse.ArgumentParser(
prog="Testing HebbNet principal components",
description="Testing HebbNet principal components by decomposing random data"
)
parser.add_argument(
"--num_samples",
help="Number of samples for synthetic data",
default=25,
type=int,
required=False
)
parser.add_argument(
"--data_dimension",
help="Dimension of synthetic data",
default=100,
type=int,
required=False
)
parser.add_argument(
"--data_latent_dimension",
help="Latent dimension of synthetic data",
default=3,
type=int,
required=False
)
parser.add_argument(
"--num_pc",
help="Number of principle components to extract",
default=2,
type=int,
required=False
)
return parser.parse_args()
def get_top_princomps(data_array, num_pcs):
U, S, V = np.linalg.svd(np.array(data_array))
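    # singular values in S come back sorted in descending order; argsort picks the indices of the num_pcs largest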
_idx = np.argsort(S)[-num_pcs:]
return V[_idx, :].T
def main(args):
# Make data
demo_data = get_random_data(
args.num_samples,
args.data_dimension,
latent_dim=args.data_latent_dimension
)
# Build/train network
hah_network = MultilayerHahNetwork(
args.data_dimension,
[args.num_pc],
has_bias=False,
act_type='linear',
)
hah_network.train(demo_data, num_epochs=1000)
    # Extract the true principal components and normalize the learned input weights
real_princomps = get_top_princomps(demo_data, args.num_pc)
hebb_princomps = np.squeeze(hah_network.layers[0].input_weights)
hebb_princomps /= np.linalg.norm(hebb_princomps, axis=0, keepdims=True)
    # Show the inner products of the true PCs with the learned input weights
    inner_prod_mat = real_princomps.T @ hebb_princomps
prod_as_string = np.array_str(
inner_prod_mat,
suppress_small=True,
precision=4
)
    print(prod_as_string)
if __name__ == "__main__":
args = _argparse()
main(args)
| [
"numpy.set_printoptions",
"demo_utils.get_random_data",
"argparse.ArgumentParser",
"hebbnets.networks.MultilayerHahNetwork",
"numpy.array_str",
"numpy.argsort",
"numpy.linalg.norm",
"numpy.array",
"numpy.squeeze"
] | [((186, 220), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (205, 220), True, 'import numpy as np\n'), ((254, 410), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""Testing HebbNet principal components"""', 'description': '"""Testing HebbNet principal components by decomposing random data"""'}), "(prog='Testing HebbNet principal components',\n description=\n 'Testing HebbNet principal components by decomposing random data')\n", (277, 410), False, 'import argparse\n'), ((1348, 1446), 'demo_utils.get_random_data', 'get_random_data', (['args.num_samples', 'args.data_dimension'], {'latent_dim': 'args.data_latent_dimension'}), '(args.num_samples, args.data_dimension, latent_dim=args.\n data_latent_dimension)\n', (1363, 1446), False, 'from demo_utils import get_random_data\n'), ((1517, 1612), 'hebbnets.networks.MultilayerHahNetwork', 'MultilayerHahNetwork', (['args.data_dimension', '[args.num_pc]'], {'has_bias': '(False)', 'act_type': '"""linear"""'}), "(args.data_dimension, [args.num_pc], has_bias=False,\n act_type='linear')\n", (1537, 1612), False, 'from hebbnets.networks import MultilayerHahNetwork\n'), ((1810, 1857), 'numpy.squeeze', 'np.squeeze', (['hah_network.layers[0].input_weights'], {}), '(hah_network.layers[0].input_weights)\n', (1820, 1857), True, 'import numpy as np\n'), ((1880, 1933), 'numpy.linalg.norm', 'np.linalg.norm', (['hebb_princomps'], {'axis': '(0)', 'keepdims': '(True)'}), '(hebb_princomps, axis=0, keepdims=True)\n', (1894, 1933), True, 'import numpy as np\n'), ((2089, 2151), 'numpy.array_str', 'np.array_str', (['inner_prod_mat'], {'suppress_small': '(True)', 'precision': '(4)'}), '(inner_prod_mat, suppress_small=True, precision=4)\n', (2101, 2151), True, 'import numpy as np\n'), ((1215, 1235), 'numpy.array', 'np.array', (['data_array'], {}), '(data_array)\n', (1223, 1235), True, 'import numpy as np\n'), ((1248, 1261), 'numpy.argsort', 'np.argsort', (['S'], {}), '(S)\n', (1258, 1261), True, 'import numpy as np\n'), ((2193, 2234), 'numpy.array_str', 'np.array_str', (['inner_prod_mat'], {'precision': '(4)'}), '(inner_prod_mat, precision=4)\n', (2205, 2234), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
## Author: <NAME>
## Copyright: Copyright 2018-2019, Packt Publishing Limited
## Version: 0.0.1
## Maintainer: <NAME>
## Email: <EMAIL>
## Linkedin: https://www.linkedin.com/in/linus1/
## Contributor : {if you debug, append your name here}
## Contributor Email : {if you debug, append your email here}
## Status: active
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
np.random.seed(0)
def true_fun(X):
"""
    Given X, return its mapping to Y using the function np.cos(1.5 * np.pi * X).
:param X:
:return:
"""
return np.cos(1.5 * np.pi * X)
if __name__ == '__main__':
n_samples = 30
degrees = [1, 3, 9, 15]
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
"""
Evaluating and plotting for each degree of freedom
"""
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using cross-validation
scores = cross_val_score(pipeline, X[:, np.newaxis], y,
scoring="neg_mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
# predicting on test data
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
# plotting the True and predicted function
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, edgecolor='b', s=20, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\n TEST MSE = {:.2e}".format(
degrees[i], -scores.mean()))
plt.show()
| [
"matplotlib.pyplot.xlim",
"sklearn.pipeline.Pipeline",
"numpy.random.seed",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"numpy.random.randn",
"sklearn.model_selection.cross_val_score",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.setp",
"sklearn.linear_model.LinearRegression",
"sklearn.preprocessing.PolynomialFeatures",
"matplotlib.pyplot.figure",
"numpy.cos",
"numpy.linspace",
"numpy.random.rand",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((618, 635), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (632, 635), True, 'import numpy as np\n'), ((804, 827), 'numpy.cos', 'np.cos', (['(1.5 * np.pi * X)'], {}), '(1.5 * np.pi * X)\n', (810, 827), True, 'import numpy as np\n'), ((1018, 1045), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 5)'}), '(figsize=(14, 5))\n', (1028, 1045), True, 'import matplotlib.pyplot as plt\n'), ((2428, 2438), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2436, 2438), True, 'import matplotlib.pyplot as plt\n'), ((928, 953), 'numpy.random.rand', 'np.random.rand', (['n_samples'], {}), '(n_samples)\n', (942, 953), True, 'import numpy as np\n'), ((1226, 1260), 'matplotlib.pyplot.setp', 'plt.setp', (['ax'], {'xticks': '()', 'yticks': '()'}), '(ax, xticks=(), yticks=())\n', (1234, 1260), True, 'import matplotlib.pyplot as plt\n'), ((1294, 1351), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', ([], {'degree': 'degrees[i]', 'include_bias': '(False)'}), '(degree=degrees[i], include_bias=False)\n', (1312, 1351), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((1431, 1449), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1447, 1449), False, 'from sklearn.linear_model import LinearRegression\n'), ((1470, 1573), 'sklearn.pipeline.Pipeline', 'Pipeline', (["[('polynomial_features', polynomial_features), ('linear_regression',\n linear_regression)]"], {}), "([('polynomial_features', polynomial_features), (\n 'linear_regression', linear_regression)])\n", (1478, 1573), False, 'from sklearn.pipeline import Pipeline\n'), ((1716, 1808), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['pipeline', 'X[:, np.newaxis]', 'y'], {'scoring': '"""neg_mean_squared_error"""', 'cv': '(10)'}), "(pipeline, X[:, np.newaxis], y, scoring=\n 'neg_mean_squared_error', cv=10)\n", (1731, 1808), False, 'from sklearn.model_selection import cross_val_score\n'), ((1858, 1880), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (1869, 1880), True, 'import numpy as np\n'), ((2131, 2186), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X', 'y'], {'edgecolor': '"""b"""', 's': '(20)', 'label': '"""Samples"""'}), "(X, y, edgecolor='b', s=20, label='Samples')\n", (2142, 2186), True, 'import matplotlib.pyplot as plt\n'), ((2196, 2211), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (2206, 2211), True, 'import matplotlib.pyplot as plt\n'), ((2221, 2236), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (2231, 2236), True, 'import matplotlib.pyplot as plt\n'), ((2246, 2262), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0, 1)'], {}), '((0, 1))\n', (2254, 2262), True, 'import matplotlib.pyplot as plt\n'), ((2272, 2289), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-2, 2)'], {}), '((-2, 2))\n', (2280, 2289), True, 'import matplotlib.pyplot as plt\n'), ((2299, 2321), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (2309, 2321), True, 'import matplotlib.pyplot as plt\n'), ((978, 1004), 'numpy.random.randn', 'np.random.randn', (['n_samples'], {}), '(n_samples)\n', (993, 1004), True, 'import numpy as np\n')] |
import numpy as np
import argparse
from utils import Audio
def sample_wav_audio(path):
audio = Audio()
mel = audio.audio_to_mel(path)
samples = audio.mel_sample(mel, width=128, k=5)
return samples
def save_embeddings(name, samples):
audio = Audio()
avg_embed = np.zeros(256, dtype=np.float32)
for mel in samples:
embed = audio.mel_to_embed(mel)
avg_embed += embed
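    # average over the 5 sampled mel windows (k=5 in sample_wav_audio)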
avg_embed = avg_embed / 5
np.save(f'./embeddings/{name}.npy', avg_embed)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--path', action='store', type=str, required=True)
parser.add_argument('--name', action='store', type=str, required=True)
args = parser.parse_args()
samples = sample_wav_audio(args.path)
save_embeddings(args.name, samples) | [
"utils.Audio",
"numpy.save",
"numpy.zeros",
"argparse.ArgumentParser"
] | [((100, 107), 'utils.Audio', 'Audio', ([], {}), '()\n', (105, 107), False, 'from utils import Audio\n'), ((263, 270), 'utils.Audio', 'Audio', ([], {}), '()\n', (268, 270), False, 'from utils import Audio\n'), ((287, 318), 'numpy.zeros', 'np.zeros', (['(256)'], {'dtype': 'np.float32'}), '(256, dtype=np.float32)\n', (295, 318), True, 'import numpy as np\n'), ((448, 494), 'numpy.save', 'np.save', (['f"""./embeddings/{name}.npy"""', 'avg_embed'], {}), "(f'./embeddings/{name}.npy', avg_embed)\n", (455, 494), True, 'import numpy as np\n'), ((536, 561), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (559, 561), False, 'import argparse\n')] |
import argparse
import logging
import os
import pickle
import random
import ujson
import sys
import math
from ctypes import c_ulong
from multiprocessing import Array, Queue
from multiprocessing.sharedctypes import RawArray
from queue import Empty
from time import time
import numpy as np
import resource
from scipy.sparse import csr_matrix
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from sklearn.metrics.pairwise import cosine_similarity
from data.bug_report_database import BugReportDatabase
from data.preprocessing import concatenateSummaryAndDescription
from experiments.sparse_vector import TokenizerStemmer
from nltk import TreebankWordTokenizer, SnowballStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
def loadData(filePath):
f = open(filePath, 'r')
bugIds = set()
duplicateByBugId = {}
pairs = []
for l in f:
l = l.strip()
if len(l) == 0:
break
bug1Id, bug2Id, label = l.split(',')
label = int(label)
pairs.append((bug1Id, bug2Id, label))
bugIds.add(bug1Id)
bugIds.add(bug2Id)
if label == 1:
duplicateBug1List = duplicateByBugId.get(bug1Id, set())
if len(duplicateBug1List) == 0:
duplicateByBugId[bug1Id] = duplicateBug1List
duplicateBug1List.add(bug2Id)
duplicateBug2List = duplicateByBugId.get(bug2Id, set())
if len(duplicateBug2List) == 0:
duplicateByBugId[bug2Id] = duplicateBug2List
duplicateBug2List.add(bug1Id)
return bugIds, duplicateByBugId, pairs, ujson.loads(f.readline())['validations']
class Obj(object):
def __init__(self, dict):
for k, v in dict.items():
setattr(self, k, v)
def predictDeepLearningModel(bugEmbeddingsById, validationPairs):
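    """Predict pair labels with the deep-learning model by running batches of precomputed bug embeddings through the prediction function."""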
batchSize = 1024
predictions = []
nBatches = math.ceil(float(len(validationPairs)) / batchSize)
firstBugPairs = []
secondBugPairs = []
for bug1, bug2 in validationPairs:
firstBugPairs.append(bugEmbeddingsById[bug1])
secondBugPairs.append(bugEmbeddingsById[bug2])
for batchIdx in range(nBatches):
batchStart = batchIdx * batchSize
bug1s = getVariable(torch.stack(firstBugPairs[batchStart: batchStart + batchSize]), args.cuda)
bug2s = getVariable(torch.stack(secondBugPairs[batchStart: batchStart + batchSize]), args.cuda)
if arguments.model == 'retrieval':
predictionInput = [bug1s, bug2s]
elif arguments.model == 'classification':
predictionInput = model[1](bug1s, bug2s)
output = predictionFunction(predictionInput).data.cpu().numpy()
for pr in output:
if isinstance(pr, (np.float32, np.uint8)):
predictions.append(pr)
else:
predictions.append(pr[-1])
return predictions
def parallel(start, duplicateBugs, q):
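    """Worker process: compute TF-IDF similarity lists for a chunk of duplicate bugs and push (index, scores) onto the queue; a final (-1, None) signals completion."""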
logger = logging.getLogger()
c = time()
logger.info(
"Process %s started to compute the similarity for %d duplicate bugs. Start idx: %d" % (os.getpid(), len(duplicateBugs), start))
for i, db in enumerate(duplicateBugs):
        q.put([start + i, calculateSimilarityScoreTFIDF(str(db), vectorByBug, bugIds)])
if i % 20 == 0 and i != 0:
logger.info("TF-IDF: Process %s processed %d Duplicate bug of %d in %f" % (
os.getpid(), i, len(duplicateBugs), time() - c))
c = time()
q.put([-1, None])
def calculateSimilarityScoreTFIDF(duplicateBug, vectorByBug, bugIds):
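    """Return the cosine similarity between one duplicate bug's TF-IDF vector and every bug in bugIds, computed in batches."""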
batchSize = 1024
nPairs = len(bugIds)
nBatches = math.ceil(float(nPairs) / batchSize)
bugEmbedding1 = vectorByBug[duplicateBug]
similarityScores = []
nbDim = bugEmbedding1.shape[1]
for batchIdx in range(nBatches):
batchStart = batchIdx * batchSize
data1 = []
indices1 = []
ptrs1 = [0]
data2 = []
indices2 = []
ptrs2 = [0]
for otherBug in bugIds[batchStart: batchStart + batchSize]:
data1.extend(bugEmbedding1.data)
indices1.extend(bugEmbedding1.indices)
ptrs1.append(len(indices1))
bugEmbedding2 = vectorByBug[otherBug]
data2.extend(bugEmbedding2.data)
indices2.extend(bugEmbedding2.indices)
ptrs2.append(len(indices2))
matrix1 = csr_matrix((data1, indices1, ptrs1), shape=(len(ptrs1) - 1, nbDim))
matrix2 = csr_matrix((data2, indices2, ptrs2), shape=(len(ptrs2) - 1, nbDim))
score = cosine_similarity(matrix1, matrix2)
for i in range(score.shape[0]):
similarityScores.append(score[i][i])
return similarityScores
def predictTFIDF(pairs):
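    """Predict duplicate labels (0/1) for bug pairs by thresholding the cosine similarity of their TF-IDF vectors."""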
batchSize = 8192
nPairs = len(pairs)
nBatches = math.ceil(float(nPairs) / batchSize)
similarityScores = []
for batchIdx in range(nBatches):
batchStart = batchIdx * batchSize
data1 = []
indices1 = []
ptrs1 = [0]
data2 = []
indices2 = []
ptrs2 = [0]
for bug1, bug2 in pairs[batchStart: batchStart + batchSize]:
bugEmbedding1 = vectorByBug[bug1]
data1.extend(bugEmbedding1.data)
indices1.extend(bugEmbedding1.indices)
ptrs1.append(len(indices1))
bugEmbedding2 = vectorByBug[bug2]
data2.extend(bugEmbedding2.data)
indices2.extend(bugEmbedding2.indices)
ptrs2.append(len(indices2))
nbDim = vectorByBug[bug1].shape[1]
pairBug1 = csr_matrix((data1, indices1, ptrs1), shape=(len(ptrs1) - 1, nbDim))
pairBug2 = csr_matrix((data2, indices2, ptrs2), shape=(len(ptrs2) - 1, nbDim))
score = cosine_similarity(pairBug1, pairBug2)
for i in range(score.shape[0]):
similarityScores.append(score[i][i])
return (np.asarray(similarityScores) > args.retrieval_threshold).astype(int)
def chunks(l, n):
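    """Split list l into n consecutive chunks of nearly equal size."""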
chunkSize = int(len(l) / n)
remaining = len(l) % n
chunks = []
begin = 0
for i in range(n):
if remaining != 0:
additional = 1
remaining -= 1
else:
additional = 0
end = begin + chunkSize + additional
chunks.append(l[begin:end])
begin = end
return chunks
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--recall_ratio_k', nargs='+', required=True,
help="list of the values of k to be used in the recall ratio. If k is empty list so recall rate "
"is not calculated")
parser.add_argument('--model', help="model")
parser.add_argument('--model_type', help="model type")
parser.add_argument('--bug_dataset', help="")
parser.add_argument('--input', required=True)
parser.add_argument('--retrieval_threshold', type=float, default=None, help="")
parser.add_argument('--nb_processes', type=int, default=8, help="")
parser.add_argument('--cuda', action="store_true", help="enable cuda.")
logging.basicConfig(format='%(asctime)s %(levelname)-4s %(message)s', level=logging.DEBUG,
datefmt='%Y-%m-%d %H:%M:%S', )
logger = logging.getLogger()
args = parser.parse_args()
print(args)
global bugIds
args.recall_ratio_k = [int(k) for k in args.recall_ratio_k]
bugIds, duplicateByBugId, pairs, validations = loadData(args.input)
biggestValidation = validations[-1]
bugReportDataset = BugReportDatabase.fromJson(args.bug_dataset)
bugIds = list(bugIds)
similarityListByDuplicate = []
if args.model_type == 'tfidf':
# Load Model
global vectorByBug
vectorByBug = {}
tfIdfVectorizer = pickle.load(open(args.model, 'rb'))
# Generate bag of words representation for each bug
texts = [concatenateSummaryAndDescription(bugReportDataset.getBug(bugId)) for bugId in bugIds]
vectors = tfIdfVectorizer.transform(texts)
for idx, bugId in enumerate(bugIds):
vectorByBug[bugId] = vectors[idx]
else:
# We can't import torch without allocating a GPU in Cedar cluster.
from experiments.duplicate_bug_detection_deep_learning import generateBugEmbeddings, \
calculateSimilarityScoresDL, \
CosinePrediction, getDataHandlerLexiconEmb, getModel
import torch
import torch.nn.functional as F
from util.torch_util import softmaxPrediction, getVariable
from data.dataset import BugDataExtractor
# Load Model and DataHandlers
arguments = Obj({
'load': args.model,
'cuda': args.cuda,
'summary_bidirectional': False,
'classifier_hidden_size': 300,
'classifier_mul_dif': True
})
dataHandlers, lexicons, embeddings, arguments = getDataHandlerLexiconEmb(arguments)
encoderContainer, model = getModel(dataHandlers, lexicons, embeddings, arguments)
encoderContainer.eval()
model.eval()
# Set the similarity and prediction functions
if arguments.model == 'classification':
similarityFunction = model[1]
if args.cuda:
similarityFunction.cuda()
predictionFunction = softmaxPrediction
elif arguments.model == 'retrieval':
similarityFunction = F.cosine_similarity
predictionFunction = CosinePrediction(args.retrieval_threshold, args.cuda)
if args.cuda:
model.cuda()
encoderContainer.cuda()
# Generate the embedding for each bug
logger.info("Generating Embeddings")
dataExtractor = BugDataExtractor(bugReportDataset, dataHandlers)
bugEmbeddingsById = generateBugEmbeddings(bugIds, dataExtractor, encoderContainer)
    # Start calculating the recommendation list for every duplicate bug
c = time()
logger.info("Calculating similarity scores")
dupDictItems = duplicateByBugId.items()
if args.model_type == 'tfidf':
        # Calculating the scores for tf-idf. We had to parallelize this step because the sequential version was too slow.
import multiprocessing
logger.info("Calculating cosine similarity of tf-idf model using %d processes" % (args.nb_processes))
funcArgs = []
duplicateBugs = [duplicateBug for duplicateBug, listOfDuplicates in dupDictItems]
q = Queue()
processes = []
similarityScoresList = [0] * len(duplicateBugs)
startToWrite = 0
for idx, chunk in enumerate(chunks(duplicateBugs, args.nb_processes)):
arr = RawArray(c_ulong, [int(bugId) for bugId in chunk])
processes.append(multiprocessing.Process(target=parallel, args=(startToWrite, arr, q)))
startToWrite += len(chunk)
for p in processes:
p.start()
count = 0
while True:
try:
id, scoreList = q.get()
if id == -1:
                    # Each process sends a tuple (-1, None) when it finishes its work.
count += 1
# Break the loop when all processes were terminated
if count == len(processes):
break
else:
similarityScoresList[id] = scoreList
except Empty as e:
pass
logger.info(
"Total time to calculate cosine similarity of %d duplicate bugs: %s " % (len(dupDictItems), time() - c))
c = time()
for i, (duplicateBug, listOfDuplicates) in enumerate(dupDictItems):
# Calculate the similarity score of duplicate bug with each bug
if args.model_type == 'tfidf':
similarityScores = similarityScoresList.pop(0)
else:
similarityScores = calculateSimilarityScoresDL(duplicateBug, similarityFunction, bugEmbeddingsById, bugIds,
args.cuda)
# Remove pair (duplicateBug, duplicateBug) and create tuples with bug id and its similarity score.
bugScores = [(bugId, score) for bugId, score in zip(bugIds, similarityScores) if bugId != duplicateBug]
# Sort in descending order the bugs by probability of being duplicate
similarityList = sorted(bugScores, key=lambda x: x[1], reverse=True)
similarityListByDuplicate.append((duplicateBug, [t[0] for t in similarityList]))
if i % 200 == 0 and i != 0:
logger.info("Processed %d Duplicate bug of %d in %f" % (i, len(duplicateByBugId), time() - c))
c = time()
# For each different proportion, we calculate the recall rate and the precision, recall, accuracy
recallKs = sorted([int(k) for k in args.recall_ratio_k])
biggestKValue = recallKs[-1]
total = len(duplicateByBugId)
for validation in validations:
logger.info("Calculating metrics to a validation with proportion: %d" % validation['k'])
valitionBugIds = {}
        # Prepare data for prediction
validationPairs = []
targets = []
bugIdsOfValidation = set()
for pairIndex in validation['indexes']:
bug1, bug2, label = pairs[pairIndex]
validationPairs.append((bug1, bug2))
valitionBugIds[bug1] = True
valitionBugIds[bug2] = True
bugIdsOfValidation.add(bug1)
bugIdsOfValidation.add(bug2)
targets.append(max(0, label))
logger.debug("Amount of duplicate pairs: %d\tAmount of pairs: %d" % (
np.count_nonzero(np.asarray(targets)), len(targets)))
logger.debug("Amount of bugs: %d" % (len(bugIdsOfValidation)))
logger.info("Predicting pair labels: %d" % validation['k'])
if args.model_type == 'tfidf':
predictions = predictTFIDF(validationPairs)
else:
predictions = predictDeepLearningModel(bugEmbeddingsById, validationPairs)
# Calculate Recall Rate
hitsPerRateK = [0] * len(recallKs)
logger.info("Calculating Recall Rate")
for duplicateBug, similarityList in similarityListByDuplicate:
pos = biggestKValue + 1
cur = 0
listOfDuplicates = duplicateByBugId[duplicateBug]
for bugId in similarityList:
if bugId not in bugIdsOfValidation:
continue
if bugId in listOfDuplicates:
pos = cur + 1
break
cur += 1
if cur >= biggestKValue:
break
for idx, k in enumerate(recallKs):
if k < pos:
continue
hitsPerRateK[idx] += 1
logger.info("Recall Rate Results:")
for k, hit in zip(recallKs, hitsPerRateK):
rate = float(hit) / total
logger.info("\t\t k=%d: %.3f (%d/%d) " % (k, rate, hit, total))
# Calculate Acc, precision, recall and f1
accum = accuracy_score(targets, predictions, normalize=False)
acc = accum / len(targets)
prec, recall, f1, _ = precision_recall_fscore_support(targets, predictions)
logger.info("Accuracy: %.3f (%d/%d)" % (acc * 100, accum, len(targets)))
logger.info("Precision: {}\tRecall: {}\tF1:{}".format(list(np.around(prec * 100, decimals=3)),
list(np.around(recall * 100, decimals=3)),
list(np.around(f1 * 100, decimals=3))))
logger.info("")
| [
"argparse.ArgumentParser",
"experiments.duplicate_bug_detection_deep_learning.getDataHandlerLexiconEmb",
"sklearn.metrics.accuracy_score",
"numpy.around",
"multiprocessing.Queue",
"data.bug_report_database.BugReportDatabase.fromJson",
"experiments.duplicate_bug_detection_deep_learning.getModel",
"sklearn.metrics.precision_recall_fscore_support",
"sklearn.metrics.pairwise.cosine_similarity",
"experiments.duplicate_bug_detection_deep_learning.calculateSimilarityScoresDL",
"data.dataset.BugDataExtractor",
"numpy.asarray",
"experiments.duplicate_bug_detection_deep_learning.generateBugEmbeddings",
"experiments.duplicate_bug_detection_deep_learning.CosinePrediction",
"os.getpid",
"torch.stack",
"logging.basicConfig",
"time.time",
"multiprocessing.Process",
"logging.getLogger"
] | [((2982, 3001), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (2999, 3001), False, 'import logging\n'), ((3010, 3016), 'time.time', 'time', ([], {}), '()\n', (3014, 3016), False, 'from time import time\n'), ((6412, 6437), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6435, 6437), False, 'import argparse\n'), ((7125, 7249), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(levelname)-4s %(message)s"""', 'level': 'logging.DEBUG', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "(format='%(asctime)s %(levelname)-4s %(message)s', level\n =logging.DEBUG, datefmt='%Y-%m-%d %H:%M:%S')\n", (7144, 7249), False, 'import logging\n'), ((7285, 7304), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (7302, 7304), False, 'import logging\n'), ((7572, 7616), 'data.bug_report_database.BugReportDatabase.fromJson', 'BugReportDatabase.fromJson', (['args.bug_dataset'], {}), '(args.bug_dataset)\n', (7598, 7616), False, 'from data.bug_report_database import BugReportDatabase\n'), ((9985, 9991), 'time.time', 'time', ([], {}), '()\n', (9989, 9991), False, 'from time import time\n'), ((11632, 11638), 'time.time', 'time', ([], {}), '()\n', (11636, 11638), False, 'from time import time\n'), ((4602, 4637), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['matrix1', 'matrix2'], {}), '(matrix1, matrix2)\n', (4619, 4637), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((5784, 5821), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['pairBug1', 'pairBug2'], {}), '(pairBug1, pairBug2)\n', (5801, 5821), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((8945, 8980), 'experiments.duplicate_bug_detection_deep_learning.getDataHandlerLexiconEmb', 'getDataHandlerLexiconEmb', (['arguments'], {}), '(arguments)\n', (8969, 8980), False, 'from experiments.duplicate_bug_detection_deep_learning import generateBugEmbeddings, calculateSimilarityScoresDL, CosinePrediction, getDataHandlerLexiconEmb, getModel\n'), ((9015, 9070), 'experiments.duplicate_bug_detection_deep_learning.getModel', 'getModel', (['dataHandlers', 'lexicons', 'embeddings', 'arguments'], {}), '(dataHandlers, lexicons, embeddings, arguments)\n', (9023, 9070), False, 'from experiments.duplicate_bug_detection_deep_learning import generateBugEmbeddings, calculateSimilarityScoresDL, CosinePrediction, getDataHandlerLexiconEmb, getModel\n'), ((9776, 9824), 'data.dataset.BugDataExtractor', 'BugDataExtractor', (['bugReportDataset', 'dataHandlers'], {}), '(bugReportDataset, dataHandlers)\n', (9792, 9824), False, 'from data.dataset import BugDataExtractor\n'), ((9853, 9915), 'experiments.duplicate_bug_detection_deep_learning.generateBugEmbeddings', 'generateBugEmbeddings', (['bugIds', 'dataExtractor', 'encoderContainer'], {}), '(bugIds, dataExtractor, encoderContainer)\n', (9874, 9915), False, 'from experiments.duplicate_bug_detection_deep_learning import generateBugEmbeddings, calculateSimilarityScoresDL, CosinePrediction, getDataHandlerLexiconEmb, getModel\n'), ((10507, 10514), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (10512, 10514), False, 'from multiprocessing import Array, Queue\n'), ((15127, 15180), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['targets', 'predictions'], {'normalize': '(False)'}), '(targets, predictions, normalize=False)\n', (15141, 15180), False, 'from sklearn.metrics import accuracy_score, precision_recall_fscore_support\n'), ((15246, 15299), 
'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['targets', 'predictions'], {}), '(targets, predictions)\n', (15277, 15299), False, 'from sklearn.metrics import accuracy_score, precision_recall_fscore_support\n'), ((2278, 2339), 'torch.stack', 'torch.stack', (['firstBugPairs[batchStart:batchStart + batchSize]'], {}), '(firstBugPairs[batchStart:batchStart + batchSize])\n', (2289, 2339), False, 'import torch\n'), ((2381, 2443), 'torch.stack', 'torch.stack', (['secondBugPairs[batchStart:batchStart + batchSize]'], {}), '(secondBugPairs[batchStart:batchStart + batchSize])\n', (2392, 2443), False, 'import torch\n'), ((3507, 3513), 'time.time', 'time', ([], {}), '()\n', (3511, 3513), False, 'from time import time\n'), ((11926, 12029), 'experiments.duplicate_bug_detection_deep_learning.calculateSimilarityScoresDL', 'calculateSimilarityScoresDL', (['duplicateBug', 'similarityFunction', 'bugEmbeddingsById', 'bugIds', 'args.cuda'], {}), '(duplicateBug, similarityFunction,\n bugEmbeddingsById, bugIds, args.cuda)\n', (11953, 12029), False, 'from experiments.duplicate_bug_detection_deep_learning import generateBugEmbeddings, calculateSimilarityScoresDL, CosinePrediction, getDataHandlerLexiconEmb, getModel\n'), ((12710, 12716), 'time.time', 'time', ([], {}), '()\n', (12714, 12716), False, 'from time import time\n'), ((3129, 3140), 'os.getpid', 'os.getpid', ([], {}), '()\n', (3138, 3140), False, 'import os\n'), ((5925, 5953), 'numpy.asarray', 'np.asarray', (['similarityScores'], {}), '(similarityScores)\n', (5935, 5953), True, 'import numpy as np\n'), ((9522, 9575), 'experiments.duplicate_bug_detection_deep_learning.CosinePrediction', 'CosinePrediction', (['args.retrieval_threshold', 'args.cuda'], {}), '(args.retrieval_threshold, args.cuda)\n', (9538, 9575), False, 'from experiments.duplicate_bug_detection_deep_learning import generateBugEmbeddings, calculateSimilarityScoresDL, CosinePrediction, getDataHandlerLexiconEmb, getModel\n'), ((10797, 10866), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'parallel', 'args': '(startToWrite, arr, q)'}), '(target=parallel, args=(startToWrite, arr, q))\n', (10820, 10866), False, 'import multiprocessing\n'), ((15449, 15482), 'numpy.around', 'np.around', (['(prec * 100)'], {'decimals': '(3)'}), '(prec * 100, decimals=3)\n', (15458, 15482), True, 'import numpy as np\n'), ((15552, 15587), 'numpy.around', 'np.around', (['(recall * 100)'], {'decimals': '(3)'}), '(recall * 100, decimals=3)\n', (15561, 15587), True, 'import numpy as np\n'), ((15657, 15688), 'numpy.around', 'np.around', (['(f1 * 100)'], {'decimals': '(3)'}), '(f1 * 100, decimals=3)\n', (15666, 15688), True, 'import numpy as np\n'), ((3442, 3453), 'os.getpid', 'os.getpid', ([], {}), '()\n', (3451, 3453), False, 'import os\n'), ((11610, 11616), 'time.time', 'time', ([], {}), '()\n', (11614, 11616), False, 'from time import time\n'), ((13695, 13714), 'numpy.asarray', 'np.asarray', (['targets'], {}), '(targets)\n', (13705, 13714), True, 'import numpy as np\n'), ((3478, 3484), 'time.time', 'time', ([], {}), '()\n', (3482, 3484), False, 'from time import time\n'), ((12681, 12687), 'time.time', 'time', ([], {}), '()\n', (12685, 12687), False, 'from time import time\n')] |
"""Tests for registering custom formatters."""
from pyramid.testing import DummyRequest
from pyramid.testing import testConfig
def test_add_formatter() -> None:
"""Test registration of a custom formatter."""
with testConfig() as config:
request = DummyRequest()
config.include("pyramid_openapi3")
config.pyramid_openapi3_add_formatter("foormatter", lambda x: x)
formatter = request.registry.settings["pyramid_openapi3_formatters"].get(
"foormatter", None
)
assert formatter("foo") == "foo"
| [
"pyramid.testing.testConfig",
"pyramid.testing.DummyRequest"
] | [((225, 237), 'pyramid.testing.testConfig', 'testConfig', ([], {}), '()\n', (235, 237), False, 'from pyramid.testing import testConfig\n'), ((267, 281), 'pyramid.testing.DummyRequest', 'DummyRequest', ([], {}), '()\n', (279, 281), False, 'from pyramid.testing import DummyRequest\n')] |
#!/usr/bin/env python
"""Test USCG specific 8:367:22 area notice message Version 23 samples."""
import datetime
import unittest
from ais_area_notice import m366_22
# from m366_22 import AreaNotice
# from m366_22 import AreaNoticeCircle
# from m366_22 import AreaNoticeRectangle
# from m366_22 import AreaNoticeSector
# from m366_22 import AreaNoticePoly
# from m366_22 import AreaNoticeText
# from m366_22 import SHAPES
class TestAreaNotice(unittest.TestCase):
def testEmptyInit(self):
self.assertRaises(m366_22.Error, m366_22.AreaNotice)
def testInitWithAreaType(self):
area_type = 1
now = datetime.datetime.utcnow()
an = m366_22.AreaNotice(area_type=area_type, when=now)
self.assertFalse(an.areas)
self.assertEqual(an.area_type, area_type)
self.assertEqual(an.when.year, now.year)
self.assertEqual(an.when.month, now.month)
self.assertEqual(an.when.day, now.day)
self.assertEqual(an.when.hour, now.hour)
self.assertEqual(an.when.minute, now.minute)
self.assertEqual(an.when.second, 0)
self.assertIsNone(an.duration_min)
self.assertIsNone(an.link_id)
self.assertIsNone(an.mmsi)
class TestVersion23Samples(unittest.TestCase):
@unittest.skip('TODO(schwehr): Fix this failure.')
def testCircle(self):
# TODO(grepjohnson): Why are there two messages?
aivdm = (
'!AIVDM,1,1,0,A,85M:Ih1KUQU6jAs85`0MK4lh<7=B42l0000,2*7F'
#'!AIVDM,1,1,0,A,85M:Ih1KUQU6jAs85`0MKFaH;k4>42l0000,2*0E'
)
an = m366_22.AreaNotice(nmea_strings=[aivdm])
# self.assertEqual(an., )
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"unittest.skip",
"ais_area_notice.m366_22.AreaNotice",
"datetime.datetime.utcnow"
] | [((1203, 1252), 'unittest.skip', 'unittest.skip', (['"""TODO(schwehr): Fix this failure."""'], {}), "('TODO(schwehr): Fix this failure.')\n", (1216, 1252), False, 'import unittest\n'), ((1593, 1608), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1606, 1608), False, 'import unittest\n'), ((614, 640), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (638, 640), False, 'import datetime\n'), ((650, 699), 'ais_area_notice.m366_22.AreaNotice', 'm366_22.AreaNotice', ([], {'area_type': 'area_type', 'when': 'now'}), '(area_type=area_type, when=now)\n', (668, 699), False, 'from ais_area_notice import m366_22\n'), ((1492, 1532), 'ais_area_notice.m366_22.AreaNotice', 'm366_22.AreaNotice', ([], {'nmea_strings': '[aivdm]'}), '(nmea_strings=[aivdm])\n', (1510, 1532), False, 'from ais_area_notice import m366_22\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
########################################################################
#
# Copyright 2015 Baidu, Inc.
#
########################################################################
"""
File: test_create_thumbnail.py
Date: 2015/06/10 15:15:40
"""
import os
import sys
import unittest
import json
import time
import media_config
import re
import mediaBase
_NOW_PATH = os.path.dirname(os.path.abspath(__file__)) + '/'
_COMMON_PATH = _NOW_PATH + '../../../'
sys.path.insert(0, _COMMON_PATH)
from baidubce.services.media import media_client
from baidubce.exception import BceHttpClientError
from baidubce.exception import BceServerError
from baidubce.exception import BceClientError
import nose
from nose import tools
from nose.tools import assert_raises
from nose.tools import assert_is_none
from nose.tools import raises
class TestCreateThumbnail(mediaBase.MediaBase):
"""test create thumbnail"""
def __init__(self):
"""construction """
mediaBase.MediaBase.__init__(self)
self.pre = self.prefix + 'createthumb'
self.pipeline_name = self.pre
self.container = 'mp4'
self.capacity = 1
self.key = '10s.mp4'
self.key_prefix = '/00mingxioutput'
self.target_format = 'jpg'
self.sizing_policy = 'keep'
self.width_in_pixel = 640
self.height_in_pixel = 400
self.mode = 'manual'
self.start_time_in_second = 0
self.end_time_in_second = 50
self.interval_in_second = 10
self.client = media_client.MediaClient(media_config.config)
def setUp(self):
"""create env"""
time.sleep(2)
succ = True
try:
resp = self.client.create_pipeline(self.pipeline_name, self.sourceBucket,
self.targetBucket)
except Exception as e:
print(e.message)
succ = False
finally:
nose.tools.assert_true(succ)
def tearDown(self):
"""clear env"""
time.sleep(2)
resp = self.client.list_pipelines()
for each_pipeline in resp.pipelines:
pipeline_name = each_pipeline.pipeline_name
if (pipeline_name.startswith(self.pre)):
resp = self.client.list_thumbnail_jobs_by_pipeline(pipeline_name)
if resp.thumbnails:
for each_job in resp.thumbnails:
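                        # wait until the job reaches a terminal state (SUCCESS/FAILED) before deleting its pipeline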
while(1):
resp = self.client.get_thumbnail_job(each_job.job_id)
if resp.job_status != 'SUCCESS' and resp.job_status != 'FAILED':
print('please wait ....\n')
time.sleep(5)
else:
break
resp = self.client.delete_pipeline(pipeline_name)
def test_create_thumbnail_normal(self):
"""create thumbnail normal"""
source = {'key': self.key}
target = {'keyPrefix': self.key_prefix,
'format': self.target_format,
'sizingPolicy': self.sizing_policy,
'widthInPixel': self.width_in_pixel,
'heightInPixel': self.height_in_pixel,
}
capture = {'mode': self.mode,
'startTimeInSecond': self.start_time_in_second,
'endTimeInSecond': self.end_time_in_second,
'intervalInSecond': self.interval_in_second
}
resp = self.client.create_thumbnail_job(self.pipeline_name, source, target, capture)
nose.tools.assert_is_not_none(resp.job_id)
def test_create_thumbnail_with_pipeline_deleted(self):
"""create thumbnail with delete pipeline"""
resp = self.client.delete_pipeline(self.pipeline_name)
nose.tools.assert_is_not_none(resp)
source = {'key': self.key}
try:
self.client.create_thumbnail_job(self.pipeline_name, source)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith('pipeline has been deleted')
else:
assert True == False, 'not throw BceServerError'
def test_create_thumbnail_with_pipeline_not_exist(self):
"""create thumbnail with pipeline not exist"""
source = {'key': self.key}
try:
self.client.create_thumbnail_job('not_exist_pipeline', source)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith('The requested pipeline does not exist')
else:
assert True == False, 'not throw BceServerError'
def test_create_thumbnail_with_pipeline_none(self):
"""create thumbnail with pipeline none"""
source = {'key': self.key}
try:
self.client.create_thumbnail_job(None, source)
except ValueError as e:
assert e.message.startswith('arg "pipeline_name" should not be None')
def test_create_thumbnail_with_pipeline_empty(self):
"""create thumbnail with pipeline empty"""
source = {'key': self.key}
with nose.tools.assert_raises_regexp(BceClientError,
'pipeline_name can\'t be empty string'):
self.client.create_thumbnail_job('', source)
def test_create_thumbnail_with_key_is_chiness(self):
"""create thumbnail job with key is chiness"""
self.key = 'test--*--中文.mp4'
source = {'key': self.key}
resp = self.client.create_thumbnail_job(self.pipeline_name, source)
nose.tools.assert_is_not_none(resp)
def test_create_thumbnail_with_key_is_multiple_chars(self):
"""create thumbnail job with key is multiple chars"""
self.key = 'job_测试_123.mp4'
source = {'key': self.key}
resp = self.client.create_thumbnail_job(self.pipeline_name, source)
nose.tools.assert_is_not_none(resp)
def test_create_thumbnail_with_key_not_exist(self):
"""create thumbnail with key not exist"""
source = {'key': 'not_exist.mp4'}
try:
self.client.create_thumbnail_job(self.pipeline_name, source)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith('bos object: not_exist.mp4 does not exist')
else:
assert True == False, 'not throw BceServerError'
def test_create_thumbnail_with_key_include_folder(self):
"""create thumbnail with key include folder"""
source = {'key': 'media/info/jobtest.mp4'}
resp = self.client.create_thumbnail_job(self.pipeline_name, source)
nose.tools.assert_is_not_none(resp)
def test_create_thumbnail_with_key_long_name(self):
"""create thumbnail with key long name"""
source = {'key': 'longname12longname12longname12longname12longname12longname12.mp4'}
resp = self.client.create_thumbnail_job(self.pipeline_name, source)
nose.tools.assert_is_not_none(resp)
def test_create_thumbnail_keyprefix_none(self):
"""create thumbnail with key prefix is none"""
source = {'key': self.key}
resp = self.client.create_thumbnail_job(self.pipeline_name, source)
nose.tools.assert_is_not_none(resp.job_id)
def test_create_thumbnail_keyprefix_keydot(self):
"""create thumbnail with key prefix key dot"""
source = {'key': 'test.thumbnail.csdn.mp4'}
resp = self.client.create_thumbnail_job(self.pipeline_name, source)
nose.tools.assert_is_not_none(resp.job_id)
def test_create_thumbnail_format_png(self):
"""create thumbnail with png pic"""
source = {'key': self.key}
target = {'keyPrefix': self.key_prefix,
'format': 'png',
'sizingPolicy': self.sizing_policy,
'widthInPixel': self.width_in_pixel,
'heightInPixel': self.height_in_pixel,
}
resp = self.client.create_thumbnail_job(self.pipeline_name, source, target)
nose.tools.assert_is_not_none(resp.job_id)
def test_create_thumbnail_format_not_in_enum(self):
"""create thumbnail format not in enum"""
source = {'key': self.key}
target = {'keyPrefix': self.key_prefix,
'format': 'txt',
'sizingPolicy': self.sizing_policy,
'widthInPixel': self.width_in_pixel,
'heightInPixel': self.height_in_pixel,
}
try:
resp = self.client.create_thumbnail_job(self.pipeline_name, source, target)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith('Could not read JSON: Can not construct')
else:
assert True == False
def test_create_thumbnail_sizingpolicy_in_enum(self):
"""create thumbnail with png pic"""
source = {'key': self.key}
target = {'keyPrefix': self.key_prefix,
'format': 'png',
'sizingPolicy': 'shrinkToFit',
'widthInPixel': self.width_in_pixel,
'heightInPixel': self.height_in_pixel,
}
resp = self.client.create_thumbnail_job(self.pipeline_name, source, target)
nose.tools.assert_is_not_none(resp.job_id)
def test_create_thumbnail_sizingpolicy_not_in_enum(self):
"""create thumbnail format not in enum"""
source = {'key': self.key}
target = {'keyPrefix': self.key_prefix,
'format': 'png',
'sizingPolicy': 'notsizing',
'widthInPixel': self.width_in_pixel,
'heightInPixel': self.height_in_pixel,
}
try:
resp = self.client.create_thumbnail_job(self.pipeline_name, source, target)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith('Could not read JSON: Can not construct')
else:
assert True == False
def test_create_thumbnail_widthinpixel_equal_2000(self):
"""create thumbnail with width pixel equal 2000"""
source = {'key': self.key}
target = {'keyPrefix': self.key_prefix,
'format': 'png',
'sizingPolicy': 'shrinkToFit',
'widthInPixel': 2000,
'heightInPixel': self.height_in_pixel,
}
resp = self.client.create_thumbnail_job(self.pipeline_name, source, target)
nose.tools.assert_is_not_none(resp.job_id)
def test_create_thumbnail_widthinpixel_lessthan_10(self):
"""create thumbnail with width pixel less than 10"""
source = {'key': self.key}
target = {'keyPrefix': self.key_prefix,
'format': 'png',
'sizingPolicy': 'shrinkToFit',
'widthInPixel': 5,
'heightInPixel': self.height_in_pixel,
}
try:
resp = self.client.create_thumbnail_job(self.pipeline_name, source, target)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith('target.widthInPixel:target.widthInPixel=')
else:
assert True == False
def test_create_thumbnail_widthinpixel_morethan_2000(self):
"""create thumbnail with width pixel more than 2000"""
source = {'key': self.key}
target = {'keyPrefix': self.key_prefix,
'format': 'png',
'sizingPolicy': 'shrinkToFit',
'widthInPixel': 2001,
'heightInPixel': self.height_in_pixel,
}
try:
resp = self.client.create_thumbnail_job(self.pipeline_name, source, target)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith('target.widthInPixel:target.widthInPixel=')
else:
assert True == False
def test_create_thumbnail_heightinpixel_equal_2000(self):
"""create thumbnail withheight pixel equal 2000"""
source = {'key': self.key}
target = {'keyPrefix': self.key_prefix,
'format': 'png',
'sizingPolicy': 'shrinkToFit',
'widthInPixel': self.width_in_pixel,
'heightInPixel': 2000,
}
resp = self.client.create_thumbnail_job(self.pipeline_name, source, target)
nose.tools.assert_is_not_none(resp.job_id)
def test_create_thumbnail_heightinpixel_lessthan_10(self):
"""create thumbnail with height pixel less than 10"""
source = {'key': self.key}
target = {'keyPrefix': self.key_prefix,
'format': 'png',
'sizingPolicy': 'shrinkToFit',
'widthInPixel': self.width_in_pixel,
'heightInPixel': 5,
}
try:
resp = self.client.create_thumbnail_job(self.pipeline_name, source, target)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith('target.heightInPixel:target.heightInPixel=')
else:
assert True == False
def test_create_thumbnail_heightinpixel_morethan_2000(self):
"""create thumbnail with height pixel more than 2000"""
source = {'key': self.key}
target = {'keyPrefix': self.key_prefix,
'format': 'png',
'sizingPolicy': 'shrinkToFit',
'widthInPixel': self.width_in_pixel,
'heightInPixel': 2001,
}
try:
resp = self.client.create_thumbnail_job(self.pipeline_name, source, target)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith('target.heightInPixel:target.heightInPixel=')
else:
assert True == False
def test_create_thumbnail_mode_is_auto(self):
"""create thumbnail with mode is auto"""
source = {'key': self.key}
capture = {'mode': 'auto'}
resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)
nose.tools.assert_is_not_none(resp.job_id)
def test_create_thumbnail_mode_not_in_enum(self):
"""create thumbnail with mode not in enum"""
source = {'key': self.key}
capture = {'mode': 'notmode'}
try:
resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith('Could not read JSON: Can not')
else:
assert True == False
def test_create_thumbnail_start_time_lessthan_0(self):
"""create thumbnail with start time less than 0"""
source = {'key': self.key}
capture = {'startTimeInSecond': -1}
try:
resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith(
'capture.startTimeInSecond:capture.startTimeInSecond')
else:
assert True == False
def test_create_thumbnail_start_time_float(self):
"""create thumbnail with start time float"""
source = {'key': self.key}
capture = {
'mode': 'manual',
'startTimeInSecond': 1.25,
'endTimeInSecond': 50,
'intervalInSecond': 10}
resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)
nose.tools.assert_is_not_none(resp.job_id)
def test_create_thumbnail_mode_manual_none_starttime(self):
"""create thumbnail mode is manual with start time is none"""
source = {'key': self.key}
capture = {
'mode': 'manual'
}
try:
resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith('start time is required in manual mode')
else:
assert True == False
def test_create_thumbnail_end_time_lessthan_0(self):
"""create thumbnail with end time less than 0"""
source = {'key': self.key}
capture = {
'mode': 'auto',
'startTimeInSecond': 0,
'endTimeInSecond': -1,
'intervalInSecond': 10
}
try:
resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith(
'capture.endTimeInSecond:capture.endTimeInSecond')
else:
assert True == False
def test_create_thumbnail_end_time_float(self):
"""create thumbnail with end time float"""
source = {'key': self.key}
capture = {
'mode': 'manual',
'startTimeInSecond': 1,
'endTimeInSecond': 48.34,
'intervalInSecond': 10}
resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)
nose.tools.assert_is_not_none(resp.job_id)
def test_create_thumbnail_mode_auto_with_starttime(self):
"""create thumbnail mode is auto with end time"""
source = {'key': self.key}
capture = {
'mode': 'auto',
'startTimeInSecond': 10
}
try:
resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith(
'cannot specify start time, end time, interval or frame number in auto mode')
else:
assert True == False
def test_create_thumbnail_mode_auto_with_endtime(self):
"""create thumbnail mode is auto with end time"""
source = {'key': self.key}
capture = {
'mode': 'auto',
'endTimeInSecond': 10
}
try:
resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith(
'cannot specify start time, end time, interval or frame number in auto mode')
else:
assert True == False
def test_create_thumbnail_mode_auto_with_interval(self):
"""create thumbnail mode is auto with interval time"""
source = {'key': self.key}
capture = {
'mode': 'auto',
'intervalInSecond': 10
}
try:
resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith(
'cannot specify start time, end time, interval or frame number in auto mode')
else:
assert True == False
def test_create_thumbnail_mode_manual_with_null_endtime(self):
"""create thumbnail mode is manual with end time none"""
source = {'key': self.key}
capture = {
'mode': 'manual',
'startTimeInSecond': 10
}
resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)
nose.tools.assert_is_not_none(resp.job_id)
def test_create_thumbnail_mode_manual_with_endtime_less_starttime(self):
"""create thumbnail mode is manual with endtime less than start time"""
source = {'key': self.key}
capture = {
'mode': 'manual',
'startTimeInSecond':20,
'endTimeInSecond':10,
'intervalInSecond': 5
}
try:
resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith('start time cannot larger than end time')
else:
assert True == False
def test_create_thumbnail_mode_manual_endtime_null(self):
"""create thumbnail mode is manual with endtime null"""
source = {'key': self.key}
capture = {
'mode': 'manual',
'startTimeInSecond':100,
'intervalInSecond': 5
}
resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)
nose.tools.assert_is_not_none(resp.job_id)
def test_create_thumbnail_mode_manual_interval_null(self):
"""create thumbnail mode is manual with interval null"""
source = {'key': '测试视频.mp4'}
capture = {
'mode': 'manual',
'startTimeInSecond':10,
'endTimeInSecond': 20
}
resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)
print(resp)
nose.tools.assert_is_not_none(resp.job_id)
def test_create_thumbnail_interval_less_0(self):
"""create thumbnail mode is manual with interver null"""
source = {'key': self.key}
capture = {
'mode': 'manual',
'startTimeInSecond':1,
'endTimeInSecond':50,
'intervalInSecond': -1
}
try:
resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith(
'capture.intervalInSecond:capture.intervalInSecond')
else:
assert True == False
def test_create_thumbnail_interval_float(self):
"""create thumbnail mode is manual with interver float"""
source = {'key': self.key}
capture = {
'mode': 'manual',
'startTimeInSecond':1,
'endTimeInSecond':50,
'intervalInSecond': 1.56
}
resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)
nose.tools.assert_is_not_none(resp.job_id)
def test_create_thumbnail_start_equal_end(self):
"""create thumbnail start time equal end time"""
source = {'key': self.key}
capture = {
'mode': 'manual',
'startTimeInSecond':10,
'endTimeInSecond':10,
'intervalInSecond': 1.56
}
resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)
nose.tools.assert_is_not_none(resp.job_id)
| [
"mediaBase.MediaBase.__init__",
"os.path.abspath",
"nose.tools.assert_true",
"nose.tools.assert_raises_regexp",
"sys.path.insert",
"baidubce.services.media.media_client.MediaClient",
"time.sleep",
"nose.tools.assert_is_not_none"
] | [((504, 536), 'sys.path.insert', 'sys.path.insert', (['(0)', '_COMMON_PATH'], {}), '(0, _COMMON_PATH)\n', (519, 536), False, 'import sys\n'), ((432, 457), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (447, 457), False, 'import os\n'), ((1010, 1044), 'mediaBase.MediaBase.__init__', 'mediaBase.MediaBase.__init__', (['self'], {}), '(self)\n', (1038, 1044), False, 'import mediaBase\n'), ((1563, 1608), 'baidubce.services.media.media_client.MediaClient', 'media_client.MediaClient', (['media_config.config'], {}), '(media_config.config)\n', (1587, 1608), False, 'from baidubce.services.media import media_client\n'), ((1664, 1677), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1674, 1677), False, 'import time\n'), ((2037, 2050), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2047, 2050), False, 'import time\n'), ((3631, 3673), 'nose.tools.assert_is_not_none', 'nose.tools.assert_is_not_none', (['resp.job_id'], {}), '(resp.job_id)\n', (3660, 3673), False, 'import nose\n'), ((3861, 3896), 'nose.tools.assert_is_not_none', 'nose.tools.assert_is_not_none', (['resp'], {}), '(resp)\n', (3890, 3896), False, 'import nose\n'), ((5726, 5761), 'nose.tools.assert_is_not_none', 'nose.tools.assert_is_not_none', (['resp'], {}), '(resp)\n', (5755, 5761), False, 'import nose\n'), ((6044, 6079), 'nose.tools.assert_is_not_none', 'nose.tools.assert_is_not_none', (['resp'], {}), '(resp)\n', (6073, 6079), False, 'import nose\n'), ((6848, 6883), 'nose.tools.assert_is_not_none', 'nose.tools.assert_is_not_none', (['resp'], {}), '(resp)\n', (6877, 6883), False, 'import nose\n'), ((7169, 7204), 'nose.tools.assert_is_not_none', 'nose.tools.assert_is_not_none', (['resp'], {}), '(resp)\n', (7198, 7204), False, 'import nose\n'), ((7432, 7474), 'nose.tools.assert_is_not_none', 'nose.tools.assert_is_not_none', (['resp.job_id'], {}), '(resp.job_id)\n', (7461, 7474), False, 'import nose\n'), ((7721, 7763), 'nose.tools.assert_is_not_none', 'nose.tools.assert_is_not_none', (['resp.job_id'], {}), '(resp.job_id)\n', (7750, 7763), False, 'import nose\n'), ((8252, 8294), 'nose.tools.assert_is_not_none', 'nose.tools.assert_is_not_none', (['resp.job_id'], {}), '(resp.job_id)\n', (8281, 8294), False, 'import nose\n'), ((9549, 9591), 'nose.tools.assert_is_not_none', 'nose.tools.assert_is_not_none', (['resp.job_id'], {}), '(resp.job_id)\n', (9578, 9591), False, 'import nose\n'), ((10848, 10890), 'nose.tools.assert_is_not_none', 'nose.tools.assert_is_not_none', (['resp.job_id'], {}), '(resp.job_id)\n', (10877, 10890), False, 'import nose\n'), ((12908, 12950), 'nose.tools.assert_is_not_none', 'nose.tools.assert_is_not_none', (['resp.job_id'], {}), '(resp.job_id)\n', (12937, 12950), False, 'import nose\n'), ((14749, 14791), 'nose.tools.assert_is_not_none', 'nose.tools.assert_is_not_none', (['resp.job_id'], {}), '(resp.job_id)\n', (14778, 14791), False, 'import nose\n'), ((16323, 16365), 'nose.tools.assert_is_not_none', 'nose.tools.assert_is_not_none', (['resp.job_id'], {}), '(resp.job_id)\n', (16352, 16365), False, 'import nose\n'), ((18078, 18120), 'nose.tools.assert_is_not_none', 'nose.tools.assert_is_not_none', (['resp.job_id'], {}), '(resp.job_id)\n', (18107, 18120), False, 'import nose\n'), ((20534, 20576), 'nose.tools.assert_is_not_none', 'nose.tools.assert_is_not_none', (['resp.job_id'], {}), '(resp.job_id)\n', (20563, 20576), False, 'import nose\n'), ((21712, 21754), 'nose.tools.assert_is_not_none', 'nose.tools.assert_is_not_none', (['resp.job_id'], {}), '(resp.job_id)\n', (21741, 21754), False, 
'import nose\n'), ((22184, 22226), 'nose.tools.assert_is_not_none', 'nose.tools.assert_is_not_none', (['resp.job_id'], {}), '(resp.job_id)\n', (22213, 22226), False, 'import nose\n'), ((23386, 23428), 'nose.tools.assert_is_not_none', 'nose.tools.assert_is_not_none', (['resp.job_id'], {}), '(resp.job_id)\n', (23415, 23428), False, 'import nose\n'), ((23855, 23897), 'nose.tools.assert_is_not_none', 'nose.tools.assert_is_not_none', (['resp.job_id'], {}), '(resp.job_id)\n', (23884, 23897), False, 'import nose\n'), ((1951, 1979), 'nose.tools.assert_true', 'nose.tools.assert_true', (['succ'], {}), '(succ)\n', (1973, 1979), False, 'import nose\n'), ((5290, 5380), 'nose.tools.assert_raises_regexp', 'nose.tools.assert_raises_regexp', (['BceClientError', '"""pipeline_name can\'t be empty string"""'], {}), '(BceClientError,\n "pipeline_name can\'t be empty string")\n', (5321, 5380), False, 'import nose\n'), ((2721, 2734), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (2731, 2734), False, 'import time\n')] |
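# Hedged summary sketch, not part of the test file above: the tests exercise the
# `capture` dict accepted by create_thumbnail_job. In 'manual' mode a start time is
# required and the other timing fields are optional; in 'auto' mode none of the timing
# fields may be set. The pipeline name and object key below are placeholders.
manual_capture = {
    'mode': 'manual',
    'startTimeInSecond': 1,
    'endTimeInSecond': 50,
    'intervalInSecond': 10,
}
auto_capture = {'mode': 'auto'}
# resp = client.create_thumbnail_job('example_pipeline', {'key': 'example.mp4'},
#                                    capture=manual_capture)
# nose.tools.assert_is_not_none(resp.job_id)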
#!/usr/bin/env python3
"""
This module specifies the Monitor class, designed for processing cryptocurrency blocks
"""
import threading
import logging
from timeit import default_timer as timer
from datetime import datetime, timedelta
from .coin import BTC, BCH, DASH, ZEC, LTC, ETH
from .database import Database
from .notifier import Notifier
logger = logging.getLogger(__name__)
class Monitor():
"""
    Monitor controls the processing of cryptocurrency blocks
"""
stop = threading.Event()
coins = []
threads = []
database = None
notifier = None
def __init__(self, config):
"""
Construct new Monitor object
:param config: configuration dict
"""
self.config = config
self.database = Database(config['db'], self.config)
self.notifier = Notifier(config, self.database)
for coin in config['coins']:
coin_inst = coin(config, self.stop)
coin_inst.db_id = self.database.get_coin(coin_inst.__class__.__name__)['id']
self.coins.append(coin_inst)
def shutdown(self, signum, frame):
"""
Terminate threads of each component
"""
        logger.info('Shutting down')
self.stop.set()
for thread in self.threads:
thread.join()
self.notifier.process_remaining()
def test_connection(self):
"""
Test connectivity of all components
"""
self.notifier.test_connection()
for coin in self.coins:
if not coin.test_connection():
raise ConnectionError('{}: node unreachable'.format(coin.__class__.__name__))
def start(self):
"""
Start thread for every coin and notifier
"""
for coin in self.coins:
logger.info('%s: monitoring started', coin)
thread = threading.Thread(target=self.worker, args=(coin,))
self.threads.append(thread)
thread.start()
thread = threading.Thread(target=self.notifier.worker, args=(self.stop,))
self.threads.append(thread)
thread.start()
def set_last_blocks(self):
"""
Set the current block of each coin as the last processed
"""
for coin in self.coins:
number, block_hash = coin.get_last_block_number()
self.database.insert_block(coin, number, block_hash)
logger.info('%s: setting %s as last processed block', coin, number)
def process_block(self, database, coin, number):
"""
        Process transactions of <coin> in the block with number <number>
:param database: Database object
:param coin: Coin object
:param number: block number
:return: number of the next block
"""
time_start = timer()
coin.get_block(number)
block_id = database.insert_block(coin, number, coin.get_block_hash())
logger.info('%s: processing block: %s', coin, number)
cnt = 0
for tx_hash in coin.get_block_transactions():
addresses = coin.get_transaction_io(tx_hash)
self.notifier.add_transaction(coin, number, block_id, tx_hash, addresses)
cnt += 1
time_total = timer() - time_start
logger.debug('%s: processed %d transactions in %.4fs', coin, cnt, time_total)
return number + 1
def last_processed_block(self, database, coin):
"""
        Get the last processed block of <coin>
:param database: Database object
:param coin: Coin object
:return: number of last processed block
"""
number = database.get_last_block_number(coin)
while True:
hash_saved = database.get_block_hash(coin, number)
hash_node = coin.get_block_hash(number)
if hash_saved == hash_node or hash_saved is None:
break
database.delete_block(coin, number)
number -= 1
#print("last_processed_block> ", number)
return number
def worker(self, coin):
"""
Process new blocks of cryptocurrency <coin> until stop event is set.
:param coin: a class inherited from Coin
"""
database = Database(self.config['db'], self.config)
while not self.stop.is_set():
current_number = self.last_processed_block(database, coin) + 1
last_number, _ = coin.get_last_block_number()
#print(current_number, last_number)
while current_number <= last_number:
if self.stop.is_set():
break
try:
current_number = self.process_block(database, coin, current_number)
except InterruptedError:
break
until_next_block = (coin.get_block_creation_time() + coin.get_block_time() - datetime.now()).total_seconds()
if until_next_block < 0: # should be already generated
until_next_block = (coin.get_block_time() * 0.05).total_seconds() # wait only brief time (5% of block time) before trying again
self.stop.wait(timeout=until_next_block)
logger.info('%s: terminating', coin)
| [
"threading.Thread",
"timeit.default_timer",
"threading.Event",
"datetime.datetime.now",
"logging.getLogger"
] | [((357, 384), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (374, 384), False, 'import logging\n'), ((492, 509), 'threading.Event', 'threading.Event', ([], {}), '()\n', (507, 509), False, 'import threading\n'), ((2015, 2079), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.notifier.worker', 'args': '(self.stop,)'}), '(target=self.notifier.worker, args=(self.stop,))\n', (2031, 2079), False, 'import threading\n'), ((2819, 2826), 'timeit.default_timer', 'timer', ([], {}), '()\n', (2824, 2826), True, 'from timeit import default_timer as timer\n'), ((1871, 1921), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.worker', 'args': '(coin,)'}), '(target=self.worker, args=(coin,))\n', (1887, 1921), False, 'import threading\n'), ((3255, 3262), 'timeit.default_timer', 'timer', ([], {}), '()\n', (3260, 3262), True, 'from timeit import default_timer as timer\n'), ((4909, 4923), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4921, 4923), False, 'from datetime import datetime, timedelta\n')] |
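# Self-contained sketch of the reorg handling in last_processed_block above, using
# hypothetical in-memory stand-ins instead of the real Database and Coin classes:
# walk back from the newest stored block until the stored hash matches the node's hash.
saved_hashes = {100: 'aaa', 101: 'bbb', 102: 'ccc'}   # what the database remembers
node_hashes = {100: 'aaa', 101: 'bbb', 102: 'xxx'}    # the chain after a reorg at block 102
number = max(saved_hashes)
while number in saved_hashes and saved_hashes[number] != node_hashes[number]:
    del saved_hashes[number]    # corresponds to database.delete_block(coin, number)
    number -= 1
print(number)                   # 101 -> the worker resumes processing at block 102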
import threading
class MultiThreading(object):
    def __init__(self, scrapers):
        # Objects exposing a start() method; each one gets its own thread
        self.scrapers = scrapers
    def run(self):
        # Launch one thread per scraper, then block until every thread finishes
        threads = []
        for i in range(len(self.scrapers)):
t = threading.Thread(target=self.scrapers[i].start)
t.start()
threads.append(t)
for thread in threads:
thread.join() | [
"threading.Thread"
] | [((218, 265), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.scrapers[i].start'}), '(target=self.scrapers[i].start)\n', (234, 265), False, 'import threading\n')] |
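# Hedged usage sketch: any object exposing a start() method can be passed in; the
# DummyScraper class below is a placeholder, not part of the original project.
class DummyScraper:
    def __init__(self, name):
        self.name = name
    def start(self):
        print(self.name, "running")
scrapers = [DummyScraper("a"), DummyScraper("b")]
MultiThreading(scrapers).run()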
import glob
import numpy as np
from matplotlib import pyplot as plt
for filename in glob.glob("*.dat"):
print(filename)
name = filename.split(".")[0]
data = np.loadtxt(filename, delimiter=",")
size = int(np.sqrt(len(data)))
data = data.reshape((size, size))
fig, ax = plt.subplots(figsize=(5.12, 5.12))
ax.imshow(data)
plt.tick_params(
bottom=False, left=False, right=False, top=False,
labelbottom=False, labelleft=False, labelright=False, labeltop=False
)
plt.tight_layout()
plt.savefig(name + ".png")
plt.close() | [
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots",
"numpy.loadtxt",
"glob.glob",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.savefig"
] | [((85, 103), 'glob.glob', 'glob.glob', (['"""*.dat"""'], {}), "('*.dat')\n", (94, 103), False, 'import glob\n'), ((170, 205), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'delimiter': '""","""'}), "(filename, delimiter=',')\n", (180, 205), True, 'import numpy as np\n'), ((293, 327), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(5.12, 5.12)'}), '(figsize=(5.12, 5.12))\n', (305, 327), True, 'from matplotlib import pyplot as plt\n'), ((352, 491), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'bottom': '(False)', 'left': '(False)', 'right': '(False)', 'top': '(False)', 'labelbottom': '(False)', 'labelleft': '(False)', 'labelright': '(False)', 'labeltop': '(False)'}), '(bottom=False, left=False, right=False, top=False,\n labelbottom=False, labelleft=False, labelright=False, labeltop=False)\n', (367, 491), True, 'from matplotlib import pyplot as plt\n'), ((514, 532), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (530, 532), True, 'from matplotlib import pyplot as plt\n'), ((537, 563), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(name + '.png')"], {}), "(name + '.png')\n", (548, 563), True, 'from matplotlib import pyplot as plt\n'), ((568, 579), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (577, 579), True, 'from matplotlib import pyplot as plt\n')] |
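# Hedged companion sketch, not part of the original script: write a square grid as a
# single line of comma-separated values, which is the .dat layout the loop above
# expects (np.loadtxt returns a flat array whose length is a perfect square).
import numpy as np
grid = np.random.rand(64, 64)
np.savetxt("example.dat", grid.reshape(1, -1), delimiter=",")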
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 6 00:25:27 2017
@author: Wayne
"""
import pandas as pd
import xgboost as xgb
import numpy as np
from sklearn.model_selection import train_test_split
import pickle
#%%
mydf1= mydf[outliers.outliers==False]
z = np.log(data.trip_duration+1)
X = mydf1
Xtest = testdf
data_test = xgb.DMatrix(Xtest)
#%%
rmse = lambda z,zp:np.sqrt(np.mean((z-zp)**2))
#%%
parms = {'max_depth':14, #maximum depth of a tree
'objective':'reg:linear',
'eta' :0.025,
'subsample':0.8,#SGD will use this percentage of data
         'lambda'   :4, #L2 regularization term,>1 more conservative
         'colsample_bytree':0.9,
'colsample_bylevel':1,
'min_child_weight': 10,
'nthread' :3} #number of cpu core to use
#%% split training set to validation set
Xtrain, Xval, Ztrain, Zval = train_test_split(X, z, test_size=0.2, random_state=1)
#Xcv,Xv,Zcv,Zv = train_test_split(Xval, Zval, test_size=0.5, random_state=1)
data_tr = xgb.DMatrix(Xtrain, label=Ztrain)
data_val = xgb.DMatrix(Xval , label=Zval)
evallist = [(data_tr, 'train'), (data_val, 'valid')]
model = xgb.train(parms, data_tr, num_boost_round=881, evals = evallist,
early_stopping_rounds=30, maximize=False,
verbose_eval=100)
print('score = %1.5f, n_boost_round =%d.'%(model.best_score,model.best_iteration))
#%% training all the data
data_train = xgb.DMatrix(X, label=z)
evallist = [(data_train, 'train')]
model = xgb.train(parms, data_train, num_boost_round=880, evals = evallist,
maximize=False,
verbose_eval=100)
#%%
#%%
ztest = model.predict(data_test)
#%%
ytest = np.exp(ztest)-1
submission = pd.DataFrame({'id': test.id, 'trip_duration': ytest})
submission.to_csv('submission_1.csv', index=False)
#%%
with open('filename.pickle', 'rb') as handle:
b = pickle.load(handle)
#%%
for d in (mydf,testdf):
print(d.Temp.mean())
#%%
print('Id is unique.') if train.id.nunique() == train.shape[0] else print('oops')
print('Train and test sets are distinct.') if len(np.intersect1d(train.id.values, test.id.values))== 0 else print('oops')
print('We do not need to worry about missing values.') if train.count().min() == train.shape[0] and test.count().min() == test.shape[0] else print('oops')
print('The store_and_fwd_flag has only two values {}.'.format(str(set(train.store_and_fwd_flag.unique()) | set(test.store_and_fwd_flag.unique()))))
#%% Kmeans
from sklearn.cluster import MiniBatchKMeans
coords = np.vstack((mydf[['pickup_latitude', 'pickup_longitude']].values,
mydf[['dropoff_latitude', 'dropoff_longitude']].values,
testdf[['pickup_latitude', 'pickup_longitude']].values,
testdf[['dropoff_latitude', 'dropoff_longitude']].values))
sample_ind = np.random.permutation(len(coords))[:500000]
kmeans = MiniBatchKMeans(n_clusters=20, batch_size=10000).fit(coords[sample_ind])
for df in (mydf,testdf):
df.loc[:, 'pickup_loc'] = kmeans.predict(df[['pickup_latitude', 'pickup_longitude']])
df.loc[:, 'dropoff_loc'] = kmeans.predict(df[['dropoff_latitude', 'dropoff_longitude']])
#%%
train_loc = [None]*2;test_loc=[None]*2
for i,loc in enumerate(['pickup_loc','dropoff_loc']):
train_loc[i]= pd.get_dummies(mydf[loc], prefix=loc, prefix_sep='_')
test_loc[i] = pd.get_dummies(testdf[loc], prefix=loc, prefix_sep='_')
train_loc = pd.concat(train_loc,axis=1)
test_loc = pd.concat(test_loc,axis=1)
#%%
mydf1 = pd.concat([mydf,train_loc],axis = 1)
testdf1 = pd.concat([testdf,test_loc],axis = 1)
#%%
mydf1 = mydf1[mydf1['outliers']==False]
mydf1 = mydf1.drop(['id','outliers'],axis=1)
z = mydf1.log_trip_duration
X = mydf1.drop(['log_trip_duration'],axis=1)
Xtest = testdf1.drop('id',axis=1)
#%%
X = X.drop(['pickup_loc','dropoff_loc'],axis=1)
#%%
Xtest=Xtest.drop(['pickup_loc','dropoff_loc'],axis=1) | [
"pandas.DataFrame",
"sklearn.cluster.MiniBatchKMeans",
"numpy.log",
"xgboost.train",
"sklearn.model_selection.train_test_split",
"pandas.get_dummies",
"pickle.load",
"numpy.mean",
"numpy.exp",
"numpy.intersect1d",
"pandas.concat",
"xgboost.DMatrix",
"numpy.vstack"
] | [((273, 303), 'numpy.log', 'np.log', (['(data.trip_duration + 1)'], {}), '(data.trip_duration + 1)\n', (279, 303), True, 'import numpy as np\n'), ((342, 360), 'xgboost.DMatrix', 'xgb.DMatrix', (['Xtest'], {}), '(Xtest)\n', (353, 360), True, 'import xgboost as xgb\n'), ((899, 952), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'z'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(X, z, test_size=0.2, random_state=1)\n', (915, 952), False, 'from sklearn.model_selection import train_test_split\n'), ((1043, 1076), 'xgboost.DMatrix', 'xgb.DMatrix', (['Xtrain'], {'label': 'Ztrain'}), '(Xtrain, label=Ztrain)\n', (1054, 1076), True, 'import xgboost as xgb\n'), ((1090, 1119), 'xgboost.DMatrix', 'xgb.DMatrix', (['Xval'], {'label': 'Zval'}), '(Xval, label=Zval)\n', (1101, 1119), True, 'import xgboost as xgb\n'), ((1188, 1314), 'xgboost.train', 'xgb.train', (['parms', 'data_tr'], {'num_boost_round': '(881)', 'evals': 'evallist', 'early_stopping_rounds': '(30)', 'maximize': '(False)', 'verbose_eval': '(100)'}), '(parms, data_tr, num_boost_round=881, evals=evallist,\n early_stopping_rounds=30, maximize=False, verbose_eval=100)\n', (1197, 1314), True, 'import xgboost as xgb\n'), ((1480, 1503), 'xgboost.DMatrix', 'xgb.DMatrix', (['X'], {'label': 'z'}), '(X, label=z)\n', (1491, 1503), True, 'import xgboost as xgb\n'), ((1551, 1655), 'xgboost.train', 'xgb.train', (['parms', 'data_train'], {'num_boost_round': '(880)', 'evals': 'evallist', 'maximize': '(False)', 'verbose_eval': '(100)'}), '(parms, data_train, num_boost_round=880, evals=evallist, maximize=\n False, verbose_eval=100)\n', (1560, 1655), True, 'import xgboost as xgb\n'), ((1784, 1837), 'pandas.DataFrame', 'pd.DataFrame', (["{'id': test.id, 'trip_duration': ytest}"], {}), "({'id': test.id, 'trip_duration': ytest})\n", (1796, 1837), True, 'import pandas as pd\n'), ((2610, 2860), 'numpy.vstack', 'np.vstack', (["(mydf[['pickup_latitude', 'pickup_longitude']].values, mydf[[\n 'dropoff_latitude', 'dropoff_longitude']].values, testdf[[\n 'pickup_latitude', 'pickup_longitude']].values, testdf[[\n 'dropoff_latitude', 'dropoff_longitude']].values)"], {}), "((mydf[['pickup_latitude', 'pickup_longitude']].values, mydf[[\n 'dropoff_latitude', 'dropoff_longitude']].values, testdf[[\n 'pickup_latitude', 'pickup_longitude']].values, testdf[[\n 'dropoff_latitude', 'dropoff_longitude']].values))\n", (2619, 2860), True, 'import numpy as np\n'), ((3526, 3554), 'pandas.concat', 'pd.concat', (['train_loc'], {'axis': '(1)'}), '(train_loc, axis=1)\n', (3535, 3554), True, 'import pandas as pd\n'), ((3567, 3594), 'pandas.concat', 'pd.concat', (['test_loc'], {'axis': '(1)'}), '(test_loc, axis=1)\n', (3576, 3594), True, 'import pandas as pd\n'), ((3610, 3646), 'pandas.concat', 'pd.concat', (['[mydf, train_loc]'], {'axis': '(1)'}), '([mydf, train_loc], axis=1)\n', (3619, 3646), True, 'import pandas as pd\n'), ((3658, 3695), 'pandas.concat', 'pd.concat', (['[testdf, test_loc]'], {'axis': '(1)'}), '([testdf, test_loc], axis=1)\n', (3667, 3695), True, 'import pandas as pd\n'), ((1754, 1767), 'numpy.exp', 'np.exp', (['ztest'], {}), '(ztest)\n', (1760, 1767), True, 'import numpy as np\n'), ((1951, 1970), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (1962, 1970), False, 'import pickle\n'), ((3380, 3433), 'pandas.get_dummies', 'pd.get_dummies', (['mydf[loc]'], {'prefix': 'loc', 'prefix_sep': '"""_"""'}), "(mydf[loc], prefix=loc, prefix_sep='_')\n", (3394, 3433), True, 'import pandas as pd\n'), ((3457, 3512), 'pandas.get_dummies', 
'pd.get_dummies', (['testdf[loc]'], {'prefix': 'loc', 'prefix_sep': '"""_"""'}), "(testdf[loc], prefix=loc, prefix_sep='_')\n", (3471, 3512), True, 'import pandas as pd\n'), ((394, 416), 'numpy.mean', 'np.mean', (['((z - zp) ** 2)'], {}), '((z - zp) ** 2)\n', (401, 416), True, 'import numpy as np\n'), ((2977, 3025), 'sklearn.cluster.MiniBatchKMeans', 'MiniBatchKMeans', ([], {'n_clusters': '(20)', 'batch_size': '(10000)'}), '(n_clusters=20, batch_size=10000)\n', (2992, 3025), False, 'from sklearn.cluster import MiniBatchKMeans\n'), ((2166, 2213), 'numpy.intersect1d', 'np.intersect1d', (['train.id.values', 'test.id.values'], {}), '(train.id.values, test.id.values)\n', (2180, 2213), True, 'import numpy as np\n')] |
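# Minimal numpy-only sketch, not part of the original kernel, of the target transform
# used above: np.log(y + 1) before training and np.exp(pred) - 1 after prediction are
# inverses of each other, matching np.log1p / np.expm1 (which behave better near zero).
import numpy as np
durations = np.array([1.0, 60.0, 3600.0])   # hypothetical trip durations in seconds
z_enc = np.log1p(durations)             # same as np.log(durations + 1)
recovered = np.expm1(z_enc)             # same as np.exp(z_enc) - 1
assert np.allclose(recovered, durations)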
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
import cPickle
import ipdb
class Detector():
def __init__(self,weight_file_path,n_labels):
self.image_mean=[103.939,116.779,123.68]
self.n_labels=n_labels
        with open(weight_file_path, 'rb') as f:
self.pretrained_weights=cPickle.load(f)
def get_weight(self,layer_name):
layer=self.pretrained_weights[layer_name]
return layer[0]
def get_bias(self,layer_name):
layer=self.pretrained_weights[layer_name]
return layer[1]
def get_conv_weight(self,name):
f=self.get_weight(name)
return f.transpose((2,3,1,0))
def conv_layer(self,bottom,name):
        with tf.variable_scope(name) as scope:
w=self.get_conv_weight(name)
b=self.get_bias(name)
conv_weights=tf.get_variable("W",shape=w.shape,initializer=tf.constant_initializer(w))
conv_biases=tf.get_variable("b",shape=b.shape,initializer=tf.constant_initializer(b))
conv=tf.nn.conv2d(bottom,conv_weights,[1,1,1,1],padding='SAME')
bias=tf.nn.bias_add(conv,conv_biases)
relu=tf.nn.relu(bias,name=name)
return relu
def new_conv_layer(self,bottom,filter_shape,name):
        with tf.variable_scope(name) as scope:
w=tf.get_variable("W",shape=filter_shape,initializer=tf.random_normal_initializer(0.,0.01))
b=tf.get_variable("b",shape=filter_shape[-1],initializer=tf.constant_initializer(0.))
conv=tf.nn.conv2d(bottom,w,[1,1,1,1],padding='SAME')
bias=tf.nn.bias_add(conv,b)
return bias
def fc_layer(self,bottom,name,create=False):
shape=bottom.get_shape().as_list()
dim=np.prod(shape[1:])
x=tf.reshape(bottom,[-1,dim])
cw=self.get_weight(name)
b=self.get_bias(name)
if name=="fc6":
cw=cw.reshape((4096,512,7,7))
cw=cw.transpose((2,3,1,0))
cw=cw.reshape((25088,4096))
else:
cw=cw.transpose((1,0))
        with tf.variable_scope(name) as scope:
cw=tf.get_variable("W",shape=cw.shape,initializer=tf.constant_initializer(cw))
b=tf.get_variable("b",shape=b.shape,initializer=tf.constant_initializer(b))
fc=tf.nn.bias_add(tf.matmul(x,cw),b,name=scope)
return fc
def new_fc_layer(self,bottom,input_size,output_size,name):
shape=bottom.get_shape().to_list()
dim=np.prod(shape[1:])
x=tf.reshape(bottom,[-1,dim])
        with tf.variable_scope(name) as scope:
w=tf.get_variable("W",shape=[input_size,output_size],initializer=tf.random_normal_initializer(0.,0.01))
b=tf.get_variable("b",shape=[output_size],initializer=tf.constant_initializer(0.))
fc=tf.nn.bias_add(tf.matmul(x,w),b,name=scope)
return fc
def inference(self,rgb,train=False):
rgb*=255.
r,g,b=tf.split(rgb,3,3)
bgr=tf.concat([b-self.image_mean[0],g-self.image_mean[1],r-self.image_mean[2]],3)
relu1_1=self.conv_layer(bgr,"conv1_1")
relu1_2=self.conv_layer(relu1_1,"conv1_2")
pool1=tf.nn.max_pool(relu1_2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool1')
relu2_1=self.conv_layer(pool1,"conv2_1")
relu2_2=self.conv_layer(relu2_1,"conv2_2")
pool2=tf.nn.max_pool(relu2_2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool2')
relu3_1=self.conv_layer(pool2,"conv3_1")
relu3_2=self.conv_layer(relu3_1,"conv3_2")
relu3_3=self.conv_layer(relu3_2,"conv3_3")
pool3=tf.nn.max_pool(relu3_3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool3')
relu4_1=self.conv_layer(pool3,"conv4_1")
relu4_2=self.conv_layer(relu4_1,"conv4_2")
relu4_3=self.conv_layer(relu4_2,"conv4_3")
pool4=tf.nn.max_pool(relu4_3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool4')
relu5_1=self.conv_layer(pool4,"conv5_1")
relu5_2=self.conv_layer(relu5_1,"conv5_2")
relu5_3=self.conv_layer(relu5_2,"conv5_3")
conv6=self.new_conv_layer(relu5_3,[3,3,512,1024],"conv6")
gap=tf.reduce_mean(conv6,[1,2])
with tf.variable_scope("GAP"):
gap_w=tf.get_variable("W",shape=[1024,self.n_labels],initializer=tf.random_normal_initializer(0.,0.01))
output=tf.matmul(gap,gap_w)
return pool1,pool2,pool3,pool4,relu5_3,conv6,gap,output
def get_classmap(self,label,conv6):
conv6_resized=tf.image.resize_bilinear(conv6,[224,224])
with tf.variable_scope("GAP",reuse=True):
label_w=tf.gather(tf.transpose(tf.get_variable("W")),label)
label_w=tf.reshape(label_w,[-1,1024,1])
conv6_resized=tf.reshape(conv6_resized,[-1,224*224,1024])
classmap=tf.matmul(conv6_resized,label_w)
classmap=tf.reshape(classmap,[-1,224,224])
return classmap
| [
"tensorflow.nn.relu",
"tensorflow.constant_initializer",
"tensorflow.reshape",
"tensorflow.reduce_mean",
"tensorflow.concat",
"cPickle.load",
"tensorflow.variable_scope",
"tensorflow.nn.bias_add",
"tensorflow.nn.max_pool",
"tensorflow.matmul",
"tensorflow.get_variable",
"tensorflow.random_normal_initializer",
"tensorflow.nn.conv2d",
"tensorflow.split",
"numpy.prod",
"tensorflow.image.resize_bilinear"
] | [((1521, 1539), 'numpy.prod', 'np.prod', (['shape[1:]'], {}), '(shape[1:])\n', (1528, 1539), True, 'import numpy as np\n'), ((1544, 1573), 'tensorflow.reshape', 'tf.reshape', (['bottom', '[-1, dim]'], {}), '(bottom, [-1, dim])\n', (1554, 1573), True, 'import tensorflow as tf\n'), ((2136, 2154), 'numpy.prod', 'np.prod', (['shape[1:]'], {}), '(shape[1:])\n', (2143, 2154), True, 'import numpy as np\n'), ((2159, 2188), 'tensorflow.reshape', 'tf.reshape', (['bottom', '[-1, dim]'], {}), '(bottom, [-1, dim])\n', (2169, 2188), True, 'import tensorflow as tf\n'), ((2540, 2559), 'tensorflow.split', 'tf.split', (['rgb', '(3)', '(3)'], {}), '(rgb, 3, 3)\n', (2548, 2559), True, 'import tensorflow as tf\n'), ((2564, 2655), 'tensorflow.concat', 'tf.concat', (['[b - self.image_mean[0], g - self.image_mean[1], r - self.image_mean[2]]', '(3)'], {}), '([b - self.image_mean[0], g - self.image_mean[1], r - self.\n image_mean[2]], 3)\n', (2573, 2655), True, 'import tensorflow as tf\n'), ((2736, 2836), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['relu1_2'], {'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""', 'name': '"""pool1"""'}), "(relu1_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\n 'SAME', name='pool1')\n", (2750, 2836), True, 'import tensorflow as tf\n'), ((2918, 3018), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['relu2_2'], {'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""', 'name': '"""pool2"""'}), "(relu2_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\n 'SAME', name='pool2')\n", (2932, 3018), True, 'import tensorflow as tf\n'), ((3145, 3245), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['relu3_3'], {'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""', 'name': '"""pool3"""'}), "(relu3_3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\n 'SAME', name='pool3')\n", (3159, 3245), True, 'import tensorflow as tf\n'), ((3372, 3472), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['relu4_3'], {'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""', 'name': '"""pool4"""'}), "(relu4_3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\n 'SAME', name='pool4')\n", (3386, 3472), True, 'import tensorflow as tf\n'), ((3657, 3686), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['conv6', '[1, 2]'], {}), '(conv6, [1, 2])\n', (3671, 3686), True, 'import tensorflow as tf\n'), ((3834, 3855), 'tensorflow.matmul', 'tf.matmul', (['gap', 'gap_w'], {}), '(gap, gap_w)\n', (3843, 3855), True, 'import tensorflow as tf\n'), ((3966, 4009), 'tensorflow.image.resize_bilinear', 'tf.image.resize_bilinear', (['conv6', '[224, 224]'], {}), '(conv6, [224, 224])\n', (3990, 4009), True, 'import tensorflow as tf\n'), ((4174, 4222), 'tensorflow.reshape', 'tf.reshape', (['conv6_resized', '[-1, 224 * 224, 1024]'], {}), '(conv6_resized, [-1, 224 * 224, 1024])\n', (4184, 4222), True, 'import tensorflow as tf\n'), ((4229, 4262), 'tensorflow.matmul', 'tf.matmul', (['conv6_resized', 'label_w'], {}), '(conv6_resized, label_w)\n', (4238, 4262), True, 'import tensorflow as tf\n'), ((4273, 4309), 'tensorflow.reshape', 'tf.reshape', (['classmap', '[-1, 224, 224]'], {}), '(classmap, [-1, 224, 224])\n', (4283, 4309), True, 'import tensorflow as tf\n'), ((289, 304), 'cPickle.load', 'cPickle.load', (['f'], {}), '(f)\n', (301, 304), False, 'import cPickle\n'), ((628, 651), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (645, 651), True, 'import tensorflow as tf\n'), ((905, 969), 'tensorflow.nn.conv2d', 
'tf.nn.conv2d', (['bottom', 'conv_weights', '[1, 1, 1, 1]'], {'padding': '"""SAME"""'}), "(bottom, conv_weights, [1, 1, 1, 1], padding='SAME')\n", (917, 969), True, 'import tensorflow as tf\n'), ((972, 1005), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['conv', 'conv_biases'], {}), '(conv, conv_biases)\n', (986, 1005), True, 'import tensorflow as tf\n'), ((1013, 1040), 'tensorflow.nn.relu', 'tf.nn.relu', (['bias'], {'name': 'name'}), '(bias, name=name)\n', (1023, 1040), True, 'import tensorflow as tf\n'), ((1113, 1136), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (1130, 1136), True, 'import tensorflow as tf\n'), ((1338, 1391), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['bottom', 'w', '[1, 1, 1, 1]'], {'padding': '"""SAME"""'}), "(bottom, w, [1, 1, 1, 1], padding='SAME')\n", (1350, 1391), True, 'import tensorflow as tf\n'), ((1394, 1417), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['conv', 'b'], {}), '(conv, b)\n', (1408, 1417), True, 'import tensorflow as tf\n'), ((1776, 1799), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (1793, 1799), True, 'import tensorflow as tf\n'), ((2194, 2217), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (2211, 2217), True, 'import tensorflow as tf\n'), ((3692, 3716), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""GAP"""'], {}), "('GAP')\n", (3709, 3716), True, 'import tensorflow as tf\n'), ((4015, 4051), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""GAP"""'], {'reuse': '(True)'}), "('GAP', reuse=True)\n", (4032, 4051), True, 'import tensorflow as tf\n'), ((4126, 4160), 'tensorflow.reshape', 'tf.reshape', (['label_w', '[-1, 1024, 1]'], {}), '(label_w, [-1, 1024, 1])\n', (4136, 4160), True, 'import tensorflow as tf\n'), ((1991, 2007), 'tensorflow.matmul', 'tf.matmul', (['x', 'cw'], {}), '(x, cw)\n', (2000, 2007), True, 'import tensorflow as tf\n'), ((2441, 2456), 'tensorflow.matmul', 'tf.matmul', (['x', 'w'], {}), '(x, w)\n', (2450, 2456), True, 'import tensorflow as tf\n'), ((780, 806), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['w'], {}), '(w)\n', (803, 806), True, 'import tensorflow as tf\n'), ((869, 895), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['b'], {}), '(b)\n', (892, 895), True, 'import tensorflow as tf\n'), ((1202, 1241), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', (['(0.0)', '(0.01)'], {}), '(0.0, 0.01)\n', (1230, 1241), True, 'import tensorflow as tf\n'), ((1301, 1329), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (1324, 1329), True, 'import tensorflow as tf\n'), ((1862, 1889), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['cw'], {}), '(cw)\n', (1885, 1889), True, 'import tensorflow as tf\n'), ((1942, 1968), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['b'], {}), '(b)\n', (1965, 1968), True, 'import tensorflow as tf\n'), ((2295, 2334), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', (['(0.0)', '(0.01)'], {}), '(0.0, 0.01)\n', (2323, 2334), True, 'import tensorflow as tf\n'), ((2391, 2419), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (2414, 2419), True, 'import tensorflow as tf\n'), ((3786, 3825), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', (['(0.0)', '(0.01)'], {}), '(0.0, 0.01)\n', (3814, 3825), True, 'import tensorflow as tf\n'), ((4086, 4106), 'tensorflow.get_variable', 
'tf.get_variable', (['"""W"""'], {}), "('W')\n", (4101, 4106), True, 'import tensorflow as tf\n')] |
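# Minimal numpy sketch of the class activation map computed by get_classmap above
# (shapes and the random arrays are assumptions, not values from the model): the CAM
# for a class is a weighted sum of the conv6 channels using that class's GAP weights.
import numpy as np
H, W, C, n_labels = 14, 14, 1024, 10
conv6_maps = np.random.rand(H, W, C)       # last conv feature maps for one image
gap_w = np.random.rand(C, n_labels)       # weights of the GAP -> output layer
label = 3                                   # class of interest
classmap = (conv6_maps.reshape(-1, C) @ gap_w[:, label]).reshape(H, W)
# the TensorFlow graph above additionally resizes the maps to 224x224 before the sum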
# -*- coding: utf-8 -*-
import os
from unittest import TestCase
import tempfile
from server import db
def use_temp_db(filename):
def _use_temp_db(fn):
def wrapper(obj):
with tempfile.TemporaryDirectory() as dirname:
fn(obj, os.path.join(dirname, filename))
return wrapper
return _use_temp_db
class TestMatchResultDBController(TestCase):
def test_get_from_blank(self):
with tempfile.NamedTemporaryFile() as f:
logger = db.MatchResultDBController(f.name)
results = logger.get()
self.assertEqual(results, [])
@use_temp_db('test.db')
def test_add_one(self, filename):
logger = db.MatchResultDBController(filename)
with logger as lg:
lg.add(0, 1, 2, 0)
lg.add(1, 2, 3, 0)
results = logger.get()
self.assertEqual(len(results), 2)
self.assertEqual(results[1], {
'id': 1,
'winner': 2,
'loser': 3,
'trigger_id': 0,
})
self.assertEqual(logger.current_id, 1)
@use_temp_db('test.db')
def test_add_list(self, filename):
logger = db.MatchResultDBController(filename)
with logger as lg:
lg.add((0, 1), (1, 2), (2, 3), (0, 0))
results = logger.get()
self.assertEqual(len(results), 2)
self.assertEqual(results[1], {
'id': 1,
'winner': 2,
'loser': 3,
'trigger_id': 0,
})
@use_temp_db('test.db')
def test_add_list2(self, filename):
logger = db.MatchResultDBController(filename)
with logger as lg:
lg.add((0, 1), (1, 2), (2, 3), 0)
results = logger.get(ordered=True)
self.assertEqual(len(results), 2)
self.assertEqual(results[1], {
'id': 1,
'winner': 2,
'loser': 3,
'trigger_id': 0,
})
@use_temp_db('test.db')
def test_delete(self, filename):
logger = db.MatchResultDBController(filename)
with logger as lg:
lg.add((0, 1), (1, 3), (2, 4), 0)
lg.add((2, 3), (5, 7), (6, 8), 2)
with logger as lg:
deleted = lg.delete(0)
results = logger.get()
self.assertEqual(len(results), 2)
self.assertEqual(results[1], {
'id': 3,
'winner': 7,
'loser': 8,
'trigger_id': 2,
})
self.assertEqual(len(deleted), 2)
self.assertEqual(deleted[1], {
'id': 1,
'winner': 3,
'loser': 4,
'trigger_id': 0,
})
class TestRatedMatchResultDBController(TestCase):
def test_get_from_blank(self):
with tempfile.NamedTemporaryFile() as f:
logger = db.RatedMatchResultDBController(f.name)
results = logger.get()
self.assertEqual(results, [])
@use_temp_db('test.db')
def test_add_one(self, filename):
logger = db.RatedMatchResultDBController(filename)
with logger as lg:
lg.add(0, 1, 2, 0, 1400.0, 1600.0)
lg.add(1, 2, 3, 0, 1550, 1450)
results = logger.get()
self.assertEqual(len(results), 2)
self.assertEqual(results[1], {
'id': 1,
'winner': 2,
'loser': 3,
'trigger_id': 0,
'winner_rate': 1550.0,
'loser_rate': 1450.0,
})
@use_temp_db('test.db')
def test_add_list(self, filename):
logger = db.RatedMatchResultDBController(filename)
with logger as lg:
lg.add((0, 1), (1, 2), (2, 3), 0, (1400, 1550), (1600, 1450))
results = logger.get(ordered=True)
self.assertEqual(len(results), 2)
self.assertEqual(results[1], {
'id': 1,
'winner': 2,
'loser': 3,
'trigger_id': 0,
'winner_rate': 1550.0,
'loser_rate': 1450.0,
})
@use_temp_db('test.db')
def test_add_delete(self, filename):
logger = db.RatedMatchResultDBController(filename)
with logger as lg:
lg.add((0, 1), (1, 2), (2, 3), 0, (1400, 1550), (1600, 1450))
lg.add((2, 3), (5, 6), (7, 8), 2, (1300, 1700), (1510, 1490))
with logger as lg:
deleted = lg.delete(0)
results = logger.get()
self.assertEqual(len(results), 2)
self.assertEqual(results[1], {
'id': 3,
'winner': 6,
'loser': 8,
'trigger_id': 2,
'winner_rate': 1700.0,
'loser_rate': 1490.0,
})
self.assertEqual(len(deleted), 2)
self.assertEqual(deleted[1], {
'id': 1,
'winner': 2,
'loser': 3,
'trigger_id': 0,
'winner_rate': 1550.0,
'loser_rate': 1450.0,
})
class TestItemLabelDBController(TestCase):
def test_get_from_blank(self):
with tempfile.NamedTemporaryFile() as f:
logger = db.ItemLabelDBController(f.name)
results = logger.get()
self.assertEqual(results, [])
@use_temp_db('test.db')
def test_add_one(self, filename):
logger = db.ItemLabelDBController(filename)
with logger as lg:
lg.add(0, 'foo')
lg.add(1, 'bar')
results = logger.get()
self.assertEqual(results, [
{'id': 0, 'label': 'foo'},
{'id': 1, 'label': 'bar'},
])
@use_temp_db('test.db')
def test_add_list(self, filename):
logger = db.ItemLabelDBController(filename)
with logger as lg:
lg.add((0, 1), ('foo', 'bar'))
results = logger.get(ordered=True)
self.assertEqual(results, [
{'id': 0, 'label': 'foo'},
{'id': 1, 'label': 'bar'},
])
@use_temp_db('test.db')
def test_delete(self, filename):
logger = db.ItemLabelDBController(filename)
with logger as lg:
lg.add((0, 1), ('foo', 'bar'))
with logger as lg:
deleted = lg.delete('foo')
results = logger.get()
self.assertEqual(results, [
{'id': 1, 'label': 'bar'},
])
self.assertEqual(deleted, [
{'id': 0, 'label': 'foo'}
])
| [
"tempfile.NamedTemporaryFile",
"tempfile.TemporaryDirectory",
"server.db.ItemLabelDBController",
"server.db.MatchResultDBController",
"os.path.join",
"server.db.RatedMatchResultDBController"
] | [((695, 731), 'server.db.MatchResultDBController', 'db.MatchResultDBController', (['filename'], {}), '(filename)\n', (721, 731), False, 'from server import db\n'), ((1176, 1212), 'server.db.MatchResultDBController', 'db.MatchResultDBController', (['filename'], {}), '(filename)\n', (1202, 1212), False, 'from server import db\n'), ((1600, 1636), 'server.db.MatchResultDBController', 'db.MatchResultDBController', (['filename'], {}), '(filename)\n', (1626, 1636), False, 'from server import db\n'), ((2028, 2064), 'server.db.MatchResultDBController', 'db.MatchResultDBController', (['filename'], {}), '(filename)\n', (2054, 2064), False, 'from server import db\n'), ((3019, 3060), 'server.db.RatedMatchResultDBController', 'db.RatedMatchResultDBController', (['filename'], {}), '(filename)\n', (3050, 3060), False, 'from server import db\n'), ((3555, 3596), 'server.db.RatedMatchResultDBController', 'db.RatedMatchResultDBController', (['filename'], {}), '(filename)\n', (3586, 3596), False, 'from server import db\n'), ((4089, 4130), 'server.db.RatedMatchResultDBController', 'db.RatedMatchResultDBController', (['filename'], {}), '(filename)\n', (4120, 4130), False, 'from server import db\n'), ((5265, 5299), 'server.db.ItemLabelDBController', 'db.ItemLabelDBController', (['filename'], {}), '(filename)\n', (5289, 5299), False, 'from server import db\n'), ((5627, 5661), 'server.db.ItemLabelDBController', 'db.ItemLabelDBController', (['filename'], {}), '(filename)\n', (5651, 5661), False, 'from server import db\n'), ((5984, 6018), 'server.db.ItemLabelDBController', 'db.ItemLabelDBController', (['filename'], {}), '(filename)\n', (6008, 6018), False, 'from server import db\n'), ((442, 471), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (469, 471), False, 'import tempfile\n'), ((499, 533), 'server.db.MatchResultDBController', 'db.MatchResultDBController', (['f.name'], {}), '(f.name)\n', (525, 533), False, 'from server import db\n'), ((2761, 2790), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (2788, 2790), False, 'import tempfile\n'), ((2818, 2857), 'server.db.RatedMatchResultDBController', 'db.RatedMatchResultDBController', (['f.name'], {}), '(f.name)\n', (2849, 2857), False, 'from server import db\n'), ((5014, 5043), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (5041, 5043), False, 'import tempfile\n'), ((5071, 5103), 'server.db.ItemLabelDBController', 'db.ItemLabelDBController', (['f.name'], {}), '(f.name)\n', (5095, 5103), False, 'from server import db\n'), ((201, 230), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (228, 230), False, 'import tempfile\n'), ((267, 298), 'os.path.join', 'os.path.join', (['dirname', 'filename'], {}), '(dirname, filename)\n', (279, 298), False, 'import os\n')] |
"""Quantitative Proteomic Service
Usage:
pyqp api
pyqp cli <proteomicTSV> <proteomeXML> [--field=<quantity_column>] [--adress=<apiAdress>] [--port=<apiPort>] [--verbose] [--topScore=<pathway_number>]
Options:
-h --help Show this screen.
--field=<quantity column> csv column header featuring signal
--purb=purb aa
--intg=intg bbb
--alpha=alpha ccc
--ncore=ncore ddd
--sizelim=sizelim eee
--prot=<proteomeXML> ggg
--adress=<apiAdress> aaa
--port=<apiPort> aaa
--verbose iiii
--topScore=<pathway_number> aaaa
"""
# TEST W/ mycoplasma proteome
# To test this:
#python -m pyqp cli previous/wt2_subset.tsv unigo/src/unigo/data/uniprot-proteome_UP000000625.xml.gz
from docopt import docopt
#from pyT2GA import analysis
from unigo import Unigo as createUniGOTree
from unigo import uloads as createGOTreeFromAPI
from .utils import proteomicWrapper
from pyproteinsExt.uniprot import EntrySet as createUniprotCollection
from requests import get
from .api import app
import time
arguments = docopt(__doc__)
#print(arguments)
abnd_field = arguments['--field'] if arguments['--field'] else "Corrected Abundance ratio (1,526968203)"
nTop = int(arguments['--topScore']) if arguments['--topScore'] else 20
if arguments['cli']:
quantProteomic = proteomicWrapper( csv_file = arguments['<proteomicTSV>'], abnd_label = abnd_field)
uColl = createUniprotCollection(collectionXML = arguments['<proteomeXML>'] )
missingProt = []
for x in quantProteomic.uniprot:
if not uColl.has(x):
print(f"{x} not found in proteome")
missingProt.append(x)
for x in missingProt:
quantProteomic.remove(x)
taxid = uColl.taxids[0]
apiAdress = arguments['--adress'] if arguments['--adress'] else "127.0.0.1"
apiPort = arguments['--port'] if arguments['--port'] else "5000"
url = f"http://{apiAdress}:{apiPort}/unigo/{taxid}"
print(f"Fetching universal annotation tree from {url}")
expUniprotID = [ _ for _ in quantProteomic.uniprot ]
resp = get(url)
if resp.status_code == 404:
print(f"{url} returned 404, provided proteome XML {taxid} may not be registred")
else:
unigoTree = createGOTreeFromAPI(resp.text, expUniprotID)
x,y = unigoTree.dimensions
print("Unigo Object successfully buildt w/ following dimensions:")
print(f"\txpTree => nodes:{x[0]} children_links:{x[1]}, total_protein_occurences:{x[2]}, protein_set:{x[3]}")
print(f"\t universeTree => nodes:{y[0]} children_links:{y[1]}, total_protein_occurences:{y[2]}, protein_set:{y[3]}")
nDelta=int(0.1 * len(quantProteomic))
print(f"{len(quantProteomic)} proteins available in quantitative records, taking first {nDelta} as of quantity modified")
print("Computing ORA")
deltaUniprotID = expUniprotID[:nDelta]
rankingsORA = unigoTree.computeORA(deltaUniprotID, verbose = arguments['--verbose'])
print(f"Test Top - {nTop}\n{rankingsORA[:nTop]}")
if arguments['api']:
app.run(port=1234)
"""
unigoTree = createUniGOTree( backgroundUniColl = uColl,
proteinList = [ x for x in quantProteomic.uniprot ],
fetchLatest = False)
start = time.perf_counter()
# Taking 10% w/ highest qtty value
rankingsORA = unigoTree.computeORA(
[ _ for _ in quantProteomic[nTop].uniprot ]
, verbose = False)
stop = time.perf_counter()
print(f"Test Top - {5}\n{rankingsORA[5]}")
print(f"Execution time is {stop-start} sc")
"""
# Unnecessary
def typeGuardTaxID(proteomicData, uColl):
taxids = {}
for uID in proteomicData.uniprot:
uObj = uColl.get(uID)
if not uObj.taxid in taxids:
taxids[uObj.taxid] = 0
taxids[uObj.taxid] += 1
return sorted( [ (k,v) for k,v in taxids.items() ], key=lambda x:x[1] )
#r = pyt2ga.analysis(proteoRes, GOpwRes, STRINGRes, mapperRes, intg=False,
# abnd_label = "Corrected Abundance ratio (1,526968203)", ncore=3) | [
"unigo.uloads",
"pyproteinsExt.uniprot.EntrySet",
"requests.get",
"docopt.docopt"
] | [((1037, 1052), 'docopt.docopt', 'docopt', (['__doc__'], {}), '(__doc__)\n', (1043, 1052), False, 'from docopt import docopt\n'), ((1406, 1471), 'pyproteinsExt.uniprot.EntrySet', 'createUniprotCollection', ([], {'collectionXML': "arguments['<proteomeXML>']"}), "(collectionXML=arguments['<proteomeXML>'])\n", (1429, 1471), True, 'from pyproteinsExt.uniprot import EntrySet as createUniprotCollection\n'), ((2091, 2099), 'requests.get', 'get', (['url'], {}), '(url)\n', (2094, 2099), False, 'from requests import get\n'), ((2251, 2295), 'unigo.uloads', 'createGOTreeFromAPI', (['resp.text', 'expUniprotID'], {}), '(resp.text, expUniprotID)\n', (2270, 2295), True, 'from unigo import uloads as createGOTreeFromAPI\n')] |
from kafka import KafkaConsumer, TopicPartition
from json import loads
from sqlalchemy import create_engine, Table, Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
import os
user = os.getenv('MYSQL_user')
pw = os.getenv('MYSQL')
str_sql = 'mysql+mysqlconnector://' + user + ':' + pw + '@localhost/ZipBank'
engine = create_engine(str_sql)
Base = declarative_base(bind=engine)
class XactionConsumer:
def __init__(self):
self.consumer = KafkaConsumer('bank-customer-new',
bootstrap_servers=['localhost:9092'],
# auto_offset_reset='earliest',
value_deserializer=lambda m: loads(m.decode('ascii')))
        # customer maps custid -> the latest message received for that customer;
        # customer_list holds the custids already present in the person table
        self.customer = {}
        self.customer_list = []
#Go back to the readme.
def handleMessages(self):
self.CustDb()
for message in self.consumer:
message = message.value
print('{} received'.format(message))
self.customer[message['custid']] = message
# add message to the transaction table in your SQL usinf SQLalchemy
if message['custid'] in self.customer_list:
print("Already a customer")
else:
with engine.connect() as connection:
connection.execute("insert into person (custid, createdate, fname, lname) values(%s, %s, %s, %s)", (message['custid'], message['createdate'], message['fname'], message['lname']))
print(self.customer)
def CustDb(self):
with engine.connect() as connection:
cust = connection.execute("select custid from person")
cust_list = cust.fetchall()
            # Store the custid values themselves, not their positions in the result list
            for row in cust_list:
                self.customer_list.append(row[0])
class Transaction(Base):
__tablename__ = 'person'
# Here we define columns for the table person
# Notice that each column is also a normal Python instance attribute.
custid = Column(Integer, primary_key=True)
createdate = Column(Integer)
fname = Column(String(50))
lname = Column(String(50))
if __name__ == "__main__":
Base.metadata.create_all(engine)
c = XactionConsumer()
c.handleMessages() | [
"sqlalchemy.String",
"sqlalchemy.ext.declarative.declarative_base",
"sqlalchemy.Column",
"sqlalchemy.create_engine",
"os.getenv"
] | [((214, 237), 'os.getenv', 'os.getenv', (['"""MYSQL_user"""'], {}), "('MYSQL_user')\n", (223, 237), False, 'import os\n'), ((243, 261), 'os.getenv', 'os.getenv', (['"""MYSQL"""'], {}), "('MYSQL')\n", (252, 261), False, 'import os\n'), ((348, 370), 'sqlalchemy.create_engine', 'create_engine', (['str_sql'], {}), '(str_sql)\n', (361, 370), False, 'from sqlalchemy import create_engine, Table, Column, Integer, String\n'), ((378, 407), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {'bind': 'engine'}), '(bind=engine)\n', (394, 407), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((2053, 2086), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (2059, 2086), False, 'from sqlalchemy import create_engine, Table, Column, Integer, String\n'), ((2104, 2119), 'sqlalchemy.Column', 'Column', (['Integer'], {}), '(Integer)\n', (2110, 2119), False, 'from sqlalchemy import create_engine, Table, Column, Integer, String\n'), ((2139, 2149), 'sqlalchemy.String', 'String', (['(50)'], {}), '(50)\n', (2145, 2149), False, 'from sqlalchemy import create_engine, Table, Column, Integer, String\n'), ((2170, 2180), 'sqlalchemy.String', 'String', (['(50)'], {}), '(50)\n', (2176, 2180), False, 'from sqlalchemy import create_engine, Table, Column, Integer, String\n')] |
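# Hedged companion sketch, not part of the consumer above: a kafka-python producer that
# publishes a matching customer message to the 'bank-customer-new' topic. The field
# values are made-up examples.
from json import dumps
from kafka import KafkaProducer
producer = KafkaProducer(
    bootstrap_servers=['localhost:9092'],
    value_serializer=lambda m: dumps(m).encode('ascii'))
producer.send('bank-customer-new', {
    'custid': 1,
    'createdate': 20210101,
    'fname': 'Ada',
    'lname': 'Lovelace'})
producer.flush()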
import os
from google.cloud import speech_v1p1beta1 as speech
import io
#Set env variable, because it resets every shell session
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "/home/robin_jf_andersson/mbox_speaker_diarization/mbox1-28508a73fde1.json"
def speaker_diarization(audio_file, channels, sample_rate, nbr_of_persons):
client = speech.SpeechClient()
speech_file = audio_file
with open(speech_file, "rb") as audio_file:
content = audio_file.read()
audio = speech.RecognitionAudio(content=content)
config = speech.RecognitionConfig(
encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
sample_rate_hertz=sample_rate,
language_code="en-US",
enable_speaker_diarization=True,
diarization_speaker_count=nbr_of_persons,
audio_channel_count=channels,
enable_separate_recognition_per_channel=True, #change this if respeaker is configured correctly
model="video",
)
print("Waiting for operation to complete...")
response = client.recognize(config=config, audio=audio)
# The transcript within each result is separate and sequential per result.
# However, the words list within an alternative includes all the words
# from all the results thus far. Thus, to get all the words with speaker
# tags, you only have to take the words list from the last result:
result = response.results[-1]
words_info = result.alternatives[0].words
output_result = {}
#saving each word with corresponding speaker tag into a dictionary of word lists
for i in range(nbr_of_persons):
word_counter = 0
speaker_data = {}
words = []
for word_info in words_info:
if(word_info.speaker_tag == (i+1)):
words.append(word_info.word)
word_counter += 1
speaker_data["number_of_words"] = word_counter
speaker_data["words"] = words
output_result[(i+1)] = speaker_data
#print(output_result)
return output_result
#test
#diarization_service("audiofiles/Test7.wav")
| [
"google.cloud.speech_v1p1beta1.RecognitionConfig",
"google.cloud.speech_v1p1beta1.RecognitionAudio",
"google.cloud.speech_v1p1beta1.SpeechClient"
] | [((343, 364), 'google.cloud.speech_v1p1beta1.SpeechClient', 'speech.SpeechClient', ([], {}), '()\n', (362, 364), True, 'from google.cloud import speech_v1p1beta1 as speech\n'), ((492, 532), 'google.cloud.speech_v1p1beta1.RecognitionAudio', 'speech.RecognitionAudio', ([], {'content': 'content'}), '(content=content)\n', (515, 532), True, 'from google.cloud import speech_v1p1beta1 as speech\n'), ((547, 867), 'google.cloud.speech_v1p1beta1.RecognitionConfig', 'speech.RecognitionConfig', ([], {'encoding': 'speech.RecognitionConfig.AudioEncoding.LINEAR16', 'sample_rate_hertz': 'sample_rate', 'language_code': '"""en-US"""', 'enable_speaker_diarization': '(True)', 'diarization_speaker_count': 'nbr_of_persons', 'audio_channel_count': 'channels', 'enable_separate_recognition_per_channel': '(True)', 'model': '"""video"""'}), "(encoding=speech.RecognitionConfig.AudioEncoding.\n LINEAR16, sample_rate_hertz=sample_rate, language_code='en-US',\n enable_speaker_diarization=True, diarization_speaker_count=\n nbr_of_persons, audio_channel_count=channels,\n enable_separate_recognition_per_channel=True, model='video')\n", (571, 867), True, 'from google.cloud import speech_v1p1beta1 as speech\n')] |
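# Hedged usage sketch: the argument values below are assumptions (a mono 16 kHz WAV
# with two speakers), not taken from the original file.
if __name__ == "__main__":
    result = speaker_diarization("audiofiles/Test7.wav", channels=1,
                                sample_rate=16000, nbr_of_persons=2)
    for speaker_tag, data in result.items():
        print(speaker_tag, data["number_of_words"])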
from manimlib.imports import *
from ManimProjects.utils.Parabola import Parabola
from ManimProjects.utils.geometry import CText
class Prob1(Parabola):
CONFIG = {
'x_min' : -5
}
def construct(self):
self.adjust_x_range()
graph = self.get_graph(color=LIGHT_BROWN)
directrix = self.get_directrix()
focus = Dot().move_to(self.get_focus())
focus.set_fill(DARK_BROWN)
focus.plot_depth = 1
focusLabel = TexMobject('F').scale(0.7)
focusLabel.next_to(focus, RIGHT)
self.play(*[ShowCreation(e) for\
e in [graph, directrix, focus, focusLabel]])
y_val = ValueTracker(8)
p1 = Dot()
p1.set_color(DARK_BLUE)
p1.add_updater(lambda m:\
m.move_to(self.coords_to_point(
self.func(y_val.get_value()),
y_val.get_value()
)))
p1.plot_depth = 1
p1Label = TexMobject('P_1').scale(0.7)
p1Label.add_updater(lambda m:\
m.next_to(p1, RIGHT, buff=SMALL_BUFF))
p2 = Dot()
p2.set_color(DARK_BLUE)
p2.add_updater(lambda m:\
m.move_to(self.get_opposite(p1)))
p2.plot_depth = 1
p2Label = TexMobject('P_2').scale(0.7)
p2Label.add_updater(lambda m:\
m.next_to(p2, RIGHT, buff=SMALL_BUFF))
focus_chord = Line()
focus_chord.add_updater(lambda m:\
m.put_start_and_end_on(
p1.get_center(),
self.get_opposite(p1)
))
self.play(ShowCreation(p1), ShowCreation(p1Label))
self.play(ShowCreation(focus_chord))
self.play(ShowCreation(p2), ShowCreation(p2Label))
fc_def = CText('焦点弦')
fc_def.move_to(focus_chord.get_center())
fc_def.shift(0.2 * RIGHT + 0.1 * DOWN)
self.play(Write(fc_def))
self.wait(2)
self.play(FadeOut(fc_def))
q_y = ValueTracker(2)
q = Dot()
q.set_fill(DARK_BLUE)
q.plot_depth = 1
q.add_updater(lambda m:\
m.move_to(self.coords_to_point(
self.func(q_y.get_value()),
q_y.get_value()
)))
qLabel = TexMobject('Q').scale(0.7)
qLabel.add_updater(lambda m:\
m.next_to(q, LEFT, buff=SMALL_BUFF))
k1 = Dot()
k1.set_fill(BLUE_E)
k1.plot_depth = 1
k1.add_updater(lambda m:\
m.move_to(self.chord_to_directrix(p1, q)))
k1Label = TexMobject('K_1').scale(0.7)
k1Label.add_updater(lambda m:\
m.next_to(k1, LEFT, buff=SMALL_BUFF))
k2 = Dot()
k2.set_fill(BLUE_E)
k2.plot_depth = 1
k2.add_updater(lambda m:\
m.move_to(self.chord_to_directrix(p2, q)))
k2Label = TexMobject('K_2').scale(0.7)
k2Label.add_updater(lambda m:\
m.next_to(k2, LEFT, buff=SMALL_BUFF))
l1 = Line()
l1.add_updater(lambda m:\
m.put_start_and_end_on(
self.right(p1, q),
self.chord_to_directrix(p1, q)
))
l2 = Line()
l2.add_updater(lambda m:\
m.put_start_and_end_on(
self.right(p2, q),
self.chord_to_directrix(p2, q)
))
self.play(ShowCreation(q), ShowCreation(qLabel))
self.play(ShowCreation(l1), ShowCreation(l2))
self.play(*[ShowCreation(e) for e in [k1, k2, k1Label, k2Label]])
k1f = Line()
k1f.add_updater(lambda m:\
m.put_start_and_end_on(
k1.get_center(), focus.get_center()
))
k2f = Line()
k2f.add_updater(lambda m:\
m.put_start_and_end_on(
k2.get_center(), focus.get_center()
))
self.play(ShowCreation(k1f), ShowCreation(k2f))
self.wait(1)
self.play(ApplyMethod(y_val.set_value,
5))
summary = TexMobject('K_1F \\perp K_2F').scale(2)
summary.to_edge(RIGHT)
self.wait(1)
self.play(Write(summary))
self.wait(5)
qf = Line()
qf.add_updater(lambda m:\
m.put_start_and_end_on(q.get_center(),
focus.get_center()))
self.play(ShowCreation(qf))
self.wait(1)
self.play(ApplyMethod(q_y.set_value,
-1))
self.wait(1)
self.play(ApplyMethod(y_val.set_value,
0.5))
self.wait(1)
self.play(ApplyMethod(y_val.set_value,
3),
ApplyMethod(q_y.set_value, 0.5))
self.wait(10)
class Prob2(Parabola):
CONFIG = {
'focus': 2,
'x_min': -4
}
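    # Two focal chords P1P2 and Q1Q2; chords P1Q1 & P2Q2 meet the directrix at K1, and P2Q1 & P1Q2 at K2 ("这些交点在准线上": the intersections lie on the directrix).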
def construct(self):
self.adjust_x_range()
graph = self.get_graph(color=LIGHT_BROWN)
directrix = self.get_directrix()
focus = Dot().move_to(self.get_focus())
focus.set_fill(DARK_BROWN)
focus.plot_depth = 1
focusLabel = TexMobject('F').scale(0.7)
focusLabel.next_to(focus, RIGHT)
self.play(*[ShowCreation(e) for\
e in [graph, directrix, focus, focusLabel]])
q1_y = ValueTracker(9)
q1 = Dot()
q1.set_fill(DARK_BLUE)
q1.plot_depth = 1
q1.add_updater(lambda m:\
m.move_to(self.coords_to_point(
self.func(q1_y.get_value()),
q1_y.get_value()
)))
q1_label = TexMobject('Q_1').scale(0.5)
q1_label.add_updater(lambda m:\
m.next_to(q1, RIGHT, buff=SMALL_BUFF))
self.play(ShowCreation(q1), ShowCreation(q1_label))
q2 = Dot()
q2.set_fill(DARK_BLUE)
q2.plot_depth = 1
q2.add_updater(lambda m:\
m.move_to(self.get_opposite(q1)))
q2_label = TexMobject('Q_2').scale(0.5)
q2_label.add_updater(lambda m:\
m.next_to(q2, RIGHT, buff=SMALL_BUFF))
q1q2 = Line()
q1q2.add_updater(lambda m:\
m.put_start_and_end_on(
q1.get_center(),
self.get_opposite(q1)
))
self.play(*[ShowCreation(e) for e in\
[q2, q2_label, q1q2]])
p1_y = ValueTracker(2)
p1 = Dot()
p1.set_fill(DARK_BLUE)
p1.plot_depth = 1
p1.add_updater(lambda m:\
m.move_to(self.coords_to_point(
self.func(p1_y.get_value()),
p1_y.get_value()
)))
p1_label = TexMobject('P_1').scale(0.5)
p1_label.add_updater(lambda m:\
m.next_to(p1, RIGHT, buff=SMALL_BUFF))
self.play(ShowCreation(p1), ShowCreation(p1_label))
p2 = Dot()
p2.set_fill(DARK_BLUE)
p2.plot_depth = 1
p2.add_updater(lambda m:\
m.move_to(self.get_opposite(p1)))
p2_label = TexMobject('P_2').scale(0.5)
p2_label.add_updater(lambda m:\
m.next_to(p2, RIGHT, buff=SMALL_BUFF))
p1p2 = Line()
p1p2.add_updater(lambda m:\
m.put_start_and_end_on(
p1.get_center(),
self.get_opposite(p1)
))
self.play(*[ShowCreation(e) for e in\
[p2, p2_label, p1p2]])
k1 = Dot()
k1.set_fill(DARK_BROWN)
k1.plot_depth = 1
k1.add_updater(lambda m:\
m.move_to(self.chord_to_directrix(p1, q1)))
k1_label = TexMobject('K_1').scale(0.5)
k1_label.add_updater(lambda m:\
m.next_to(k1, LEFT, buff=SMALL_BUFF))
p1q1 = Line()
p1q1.add_updater(lambda m:\
m.put_start_and_end_on(
self.right(p1, q1),
self.chord_to_directrix(p1, q1)
))
p2q2 = Line()
p2q2.add_updater(lambda m:\
m.put_start_and_end_on(
self.right(p2, q2),
self.chord_to_directrix(p2, q2)
))
self.play(*[ShowCreation(e) for e in \
[k1, k1_label, p1q1, p2q2]])
k2 = Dot()
k2.set_fill(DARK_BROWN)
k2.plot_depth = 1
k2.add_updater(lambda m:\
m.move_to(self.chord_to_directrix(p2, q1)))
k2_label = TexMobject('K_2').scale(0.5)
k2_label.add_updater(lambda m:\
m.next_to(k2, LEFT, buff=SMALL_BUFF))
p2q1 = Line()
p2q1.add_updater(lambda m:\
m.put_start_and_end_on(
self.right(p2, q1),
self.chord_to_directrix(p2, q1)
))
p1q2 = Line()
p1q2.add_updater(lambda m:\
m.put_start_and_end_on(
self.right(p1, q2),
self.chord_to_directrix(p1, q2)
))
self.play(*[ShowCreation(e) for e in \
[k2, k2_label, p2q1, p1q2]])
explain = CText('这些交点在准线上').scale(0.3)
explain.to_edge(RIGHT)
self.wait(2)
self.play(Write(explain))
self.wait(5)
self.play(ApplyMethod(q1_y.set_value, 0.5),
ApplyMethod(p1_y.set_value, -3))
self.wait(3)
self.play(ApplyMethod(q1_y.set_value, 3),
ApplyMethod(p1_y.set_value, -9))
self.wait(10)
class Prob3(Parabola):
CONFIG = {
'focus': 2,
'x_min': -4
}
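    # Same construction as Prob2; this scene additionally shows that K1F ⊥ K2F.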
def construct(self):
self.adjust_x_range()
graph = self.get_graph(color=LIGHT_BROWN)
directrix = self.get_directrix()
focus = Dot().move_to(self.get_focus())
focus.set_fill(DARK_BROWN)
focus.plot_depth = 1
focusLabel = TexMobject('F').scale(0.7)
focusLabel.next_to(focus, RIGHT)
self.play(*[ShowCreation(e) for\
e in [graph, directrix, focus, focusLabel]])
q1_y = ValueTracker(9)
q1 = Dot()
q1.set_fill(DARK_BLUE)
q1.plot_depth = 1
q1.add_updater(lambda m:\
m.move_to(self.coords_to_point(
self.func(q1_y.get_value()),
q1_y.get_value()
)))
q1_label = TexMobject('Q_1').scale(0.5)
q1_label.add_updater(lambda m:\
m.next_to(q1, RIGHT, buff=SMALL_BUFF))
self.play(ShowCreation(q1), ShowCreation(q1_label))
q2 = Dot()
q2.set_fill(DARK_BLUE)
q2.plot_depth = 1
q2.add_updater(lambda m:\
m.move_to(self.get_opposite(q1)))
q2_label = TexMobject('Q_2').scale(0.5)
q2_label.add_updater(lambda m:\
m.next_to(q2, RIGHT, buff=SMALL_BUFF))
q1q2 = Line()
q1q2.add_updater(lambda m:\
m.put_start_and_end_on(
q1.get_center(),
self.get_opposite(q1)
))
self.play(*[ShowCreation(e) for e in\
[q2, q2_label, q1q2]])
p1_y = ValueTracker(2)
p1 = Dot()
p1.set_fill(DARK_BLUE)
p1.plot_depth = 1
p1.add_updater(lambda m:\
m.move_to(self.coords_to_point(
self.func(p1_y.get_value()),
p1_y.get_value()
)))
p1_label = TexMobject('P_1').scale(0.5)
p1_label.add_updater(lambda m:\
m.next_to(p1, RIGHT, buff=SMALL_BUFF))
self.play(ShowCreation(p1), ShowCreation(p1_label))
p2 = Dot()
p2.set_fill(DARK_BLUE)
p2.plot_depth = 1
p2.add_updater(lambda m:\
m.move_to(self.get_opposite(p1)))
p2_label = TexMobject('P_2').scale(0.5)
p2_label.add_updater(lambda m:\
m.next_to(p2, RIGHT, buff=SMALL_BUFF))
p1p2 = Line()
p1p2.add_updater(lambda m:\
m.put_start_and_end_on(
p1.get_center(),
self.get_opposite(p1)
))
self.play(*[ShowCreation(e) for e in\
[p2, p2_label, p1p2]])
k1 = Dot()
k1.set_fill(DARK_BROWN)
k1.plot_depth = 1
k1.add_updater(lambda m:\
m.move_to(self.chord_to_directrix(p1, q1)))
k1_label = TexMobject('K_1').scale(0.5)
k1_label.add_updater(lambda m:\
m.next_to(k1, LEFT, buff=SMALL_BUFF))
p1q1 = Line()
p1q1.add_updater(lambda m:\
m.put_start_and_end_on(
self.right(p1, q1),
self.chord_to_directrix(p1, q1)
))
p2q2 = Line()
p2q2.add_updater(lambda m:\
m.put_start_and_end_on(
self.right(p2, q2),
self.chord_to_directrix(p2, q2)
))
self.play(*[ShowCreation(e) for e in \
[k1, k1_label, p1q1, p2q2]])
k2 = Dot()
k2.set_fill(DARK_BROWN)
k2.plot_depth = 1
k2.add_updater(lambda m:\
m.move_to(self.chord_to_directrix(p2, q1)))
k2_label = TexMobject('K_2').scale(0.5)
k2_label.add_updater(lambda m:\
m.next_to(k2, LEFT, buff=SMALL_BUFF))
p2q1 = Line()
p2q1.add_updater(lambda m:\
m.put_start_and_end_on(
self.right(p2, q1),
self.chord_to_directrix(p2, q1)
))
p1q2 = Line()
p1q2.add_updater(lambda m:\
m.put_start_and_end_on(
self.right(p1, q2),
self.chord_to_directrix(p1, q2)
))
self.play(*[ShowCreation(e) for e in \
[k2, k2_label, p2q1, p1q2]])
k1f = Line()
k1f.add_updater(lambda m:\
m.put_start_and_end_on(
k1.get_center(),
focus.get_center()
))
k2f = Line()
k2f.add_updater(lambda m:\
m.put_start_and_end_on(
k2.get_center(),
focus.get_center()
))
explain = TexMobject('K_1F \\perp K_2F')
explain.to_edge(RIGHT)
self.wait(2)
self.play(ShowCreation(k1f), ShowCreation(k2f))
self.wait(3)
self.play(Write(explain))
self.wait(5)
self.play(ApplyMethod(q1_y.set_value, 0.5),
ApplyMethod(p1_y.set_value, -3))
self.wait(3)
self.play(ApplyMethod(q1_y.set_value, 3),
ApplyMethod(p1_y.set_value, -9))
self.wait(10)
class Prob4(Parabola):
CONFIG = {
'focus': 3,
'x_min': -10
}
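    # A is the vertex, M the projection of P onto the directrix; chord PA extended meets the directrix at K, and the scene asserts KF ⊥ MF.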
def construct(self):
self.adjust_x_range()
graph = self.get_graph(color=LIGHT_BROWN)
directrix = self.get_directrix()
focus = Dot().move_to(self.get_focus())
focus.set_fill(DARK_BROWN)
focus.plot_depth = 1
focusLabel = TexMobject('F').scale(0.5)
focusLabel.next_to(focus, RIGHT)
self.play(*[ShowCreation(e) for\
e in [graph, directrix, focus, focusLabel]])
a = Dot()
a.set_fill(DARK_BROWN)
a.move_to(self.coords_to_point(0, 0))
a.plot_depth = 1
a_label = TexMobject('A').scale(0.5)
a_label.next_to(a, RIGHT)
self.play(*[ShowCreation(e) for e in [a, a_label]])
y_val = ValueTracker(8)
m = Dot()
m.set_fill(DARK_BLUE)
m.plot_depth = 1
m.add_updater(lambda m:\
m.move_to(self.coords_to_point(
-self.focus, y_val.get_value()
)))
m_label = TexMobject('M').scale(0.5)
m_label.add_updater(lambda l:\
l.next_to(m, LEFT))
p = Dot()
p.set_fill(DARK_BLUE)
p.plot_depth = 1
p.add_updater(lambda m:\
m.move_to(self.coords_to_point(
self.func(y_val.get_value()),
y_val.get_value()
)))
p_label = TexMobject('P').scale(0.5)
p_label.add_updater(lambda m:\
m.next_to(p, RIGHT))
self.play(*[ShowCreation(e) for e in\
[m, m_label, p, p_label]])
k = Dot()
k.set_fill(DARK_BLUE)
k.plot_depth = 1
k.add_updater(lambda m:\
m.move_to(self.chord_to_directrix(
p, a
)))
k_label = TexMobject('K').scale(0.5)
k_label.add_updater(lambda m:\
m.next_to(k, LEFT))
pk = Line()
pk.add_updater(lambda l:\
l.put_start_and_end_on(
p.get_center(),
self.chord_to_directrix(p, a)
))
mp = Line()
mp.add_updater(lambda l:\
l.put_start_and_end_on(
m.get_center(),
p.get_center()
))
self.play(*[ShowCreation(e) for e in\
[k, k_label, pk, mp]])
kf = Line()
kf.add_updater(lambda l:\
l.put_start_and_end_on(
k.get_center(),
focus.get_center()
))
mf = Line()
mf.add_updater(lambda l:\
l.put_start_and_end_on(
m.get_center(),
focus.get_center()
))
self.play(ShowCreation(kf), ShowCreation(mf))
form = TexMobject('KF \\perp MF')
form.scale(0.7)
form.to_edge(RIGHT)
self.play(Write(form))
af = DashedLine(a.get_center(), focus.get_center())
pf = DashedLine()
def get_pf_extent():
vec = focus.get_center() - p.get_center()
vec = normalize(vec)
return focus.get_center() + 2 * vec
pf.add_updater(lambda m:\
m.put_start_and_end_on(
p.get_center(),
get_pf_extent()
))
self.play(ShowCreation(af), ShowCreation(pf))
self.wait(3)
self.play(ApplyMethod(y_val.set_value, 2))
self.wait(3)
self.play(ApplyMethod(y_val.set_value, -2))
self.wait(3)
self.play(ApplyMethod(y_val.set_value, -8))
self.wait(10)
class Prob5(Parabola):
CONFIG = {
'focus': 3,
'x_min': -10
}
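    # X is where the directrix meets the axis; PT is the ordinate ("纵标线") and PQ the double ordinate ("双纵标线").
    # P' is the second intersection of line PX with the parabola, and P'Q passes through the focus F ("P'Q经过焦点").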
def construct(self):
self.adjust_x_range()
graph = self.get_graph(color=LIGHT_BROWN)
directrix = self.get_directrix()
focus = Dot().move_to(self.get_focus())
focus.set_fill(DARK_BROWN)
focus.plot_depth = 1
focusLabel = TexMobject('F').scale(0.5)
focusLabel.next_to(focus, RIGHT + UP)
self.play(*[ShowCreation(e) for\
e in [graph, directrix, focus, focusLabel]])
h_line = self.get_horizontal()
x = Dot()
x.set_fill(DARK_BROWN)
x.plot_depth = 1
x.move_to(self.coords_to_point(-self.focus, 0))
x_label = TexMobject('X').scale(0.5)
x_label.next_to(x, LEFT + UP)
self.play(ShowCreation(h_line))
self.play(ShowCreation(x), ShowCreation(x_label))
y_val = ValueTracker(8)
p = Dot()
p.set_fill(DARK_BLUE)
p.plot_depth = 1
p.add_updater(lambda m:\
m.move_to(self.coords_to_point(
self.func(y_val.get_value()),
y_val.get_value()
)))
q = Dot()
q.set_fill(DARK_BLUE)
q.plot_depth = 1
q.add_updater(lambda m:\
m.move_to(self.coords_to_point(
self.func(-y_val.get_value()),
-y_val.get_value()
)))
t = Dot()
t.set_fill(DARK_BLUE)
t.plot_depth = 1
t.add_updater(lambda m:\
m.move_to(self.coords_to_point(
self.func(y_val.get_value()), 0
)))
p_label = TexMobject('P').scale(0.5)
p_label.add_updater(lambda m:\
m.next_to(p, RIGHT))
q_label = TexMobject('Q').scale(0.5)
q_label.add_updater(lambda m:\
m.next_to(q, RIGHT))
t_label = TexMobject('T').scale(0.5)
t_label.add_updater(lambda m:\
m.next_to(t, RIGHT + UP))
pq = Line()
pq.add_updater(lambda m:\
m.put_start_and_end_on(
p.get_center(),
self.coords_to_point(
self.func(-y_val.get_value()),
-y_val.get_value()
)))
pt = Line()
pt.add_updater(lambda m:\
m.put_start_and_end_on(
p.get_center(),
self.coords_to_point(
self.func(y_val.get_value()), 0
)))
self.play(ShowCreation(p), ShowCreation(p_label))
self.play(ShowCreation(pt))
self.play(ShowCreation(t), ShowCreation(t_label))
label1 = CText('纵标线').scale(0.3)\
.next_to(pt, RIGHT)
self.play(ShowCreation(label1))
self.wait()
self.play(FadeOut(label1))
self.play(ShowCreation(pq))
self.remove(pt)
self.play(ShowCreation(q), ShowCreation(q_label))
label2 = CText('双纵标线').scale(0.3)\
.next_to(t, RIGHT+DOWN)
self.play(ShowCreation(label2))
self.wait()
self.play(FadeOut(label2))
self.wait()
inter = Dot()
inter.set_fill(DARK_BLUE)
inter.plot_depth = 1
inter.add_updater(lambda m:\
m.move_to(
self.coords_to_point(
4*(self.focus**3)/(y_val.get_value()**2),
4*self.focus**2/y_val.get_value()
) if y_val.get_value() != 0 else
self.coords_to_point(0, 0)
))
inter_label = TexMobject("P'").scale(0.5)
inter_label.add_updater(lambda m:\
m.next_to(inter, LEFT + UP, buff=SMALL_BUFF))
px = Line()
px.add_updater(lambda m:\
m.put_start_and_end_on(
self.right(p, inter),
x.get_center()
))
self.play(ShowCreation(px))
self.play(ShowCreation(inter),
ShowCreation(inter_label))
self.wait()
form = CText("P'Q经过焦点").shift(UP)
form.scale(0.5)
form.to_edge(RIGHT)
self.play(Write(form))
interq = Line()
interq.add_updater(lambda m:\
m.put_start_and_end_on(
inter.get_center(),
q.get_center()
))
self.play(ShowCreation(interq))
self.wait(2)
self.play(ApplyMethod(y_val.set_value, 4))
self.wait(2)
self.play(ApplyMethod(y_val.set_value, -4))
self.wait(2)
self.play(ApplyMethod(y_val.set_value, -9))
self.wait(2)
self.play(ApplyMethod(y_val.set_value, 9))
self.wait(10) | [
"ManimProjects.utils.geometry.CText"
] | [((1736, 1748), 'ManimProjects.utils.geometry.CText', 'CText', (['"""焦点弦"""'], {}), "('焦点弦')\n", (1741, 1748), False, 'from ManimProjects.utils.geometry import CText\n'), ((8883, 8900), 'ManimProjects.utils.geometry.CText', 'CText', (['"""这些交点在准线上"""'], {}), "('这些交点在准线上')\n", (8888, 8900), False, 'from ManimProjects.utils.geometry import CText\n'), ((21899, 21915), 'ManimProjects.utils.geometry.CText', 'CText', (['"""P\'Q经过焦点"""'], {}), '("P\'Q经过焦点")\n', (21904, 21915), False, 'from ManimProjects.utils.geometry import CText\n'), ((20522, 20534), 'ManimProjects.utils.geometry.CText', 'CText', (['"""纵标线"""'], {}), "('纵标线')\n", (20527, 20534), False, 'from ManimProjects.utils.geometry import CText\n'), ((20813, 20826), 'ManimProjects.utils.geometry.CText', 'CText', (['"""双纵标线"""'], {}), "('双纵标线')\n", (20818, 20826), False, 'from ManimProjects.utils.geometry import CText\n')] |
'''Write a program that shows the multiplication table for several numbers, one at a time, for each
value entered by the user. The program stops when the requested value is negative.'''
from time import sleep
n = 0
cont = 0
while n >= 0:
print('--' * 15)
print('\033[33mPara cancelar, digite um número negativo.\033[m')
n = int(input('Qual número deseja saber a tabuada ? '))
print('--' * 15)
if n < 0:
print('\033[31mFinalizando o programa...\033[m')
sleep(1)
break
else:
for c in range (0,11):
print(f'{n} x {c} = {n*c}')
| [
"time.sleep"
] | [((489, 497), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (494, 497), False, 'from time import sleep\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import os
from contextlib import contextmanager
import click
import crayons
def open_url(url):
click.echo("Opening {}.".format(crayons.white(url, bold=True)))
click.launch(url)
def get_config_file_path():
home = os.path.expanduser("~")
return os.path.realpath('{}/.commands.json'.format(home))
@contextmanager
def get_config_file(mode='r'):
""" Return the file storing the commands.
:param str mode: the mode the file with be opened with. Default: r
:return: the file object.
:rtype: file
"""
path = get_config_file_path()
if not os.path.exists(path):
generate_empty_config_file()
with open(path, mode) as datafile:
yield datafile
def generate_empty_config_file():
""" Reset the config file."""
with open(get_config_file_path(), 'w') as datafile:
json.dump({}, datafile)
def build_command(name, url):
""" Build a click command according the arguments.
:param str name: label that the user will use to trigger the command.
:param str url: the url that will be opened.
:rtype: click.Command
"""
return click.Command(
name,
callback=lambda: open_url(url),
help='Open {}'.format(url)
)
| [
"json.dump",
"crayons.white",
"os.path.exists",
"click.launch",
"os.path.expanduser"
] | [((228, 245), 'click.launch', 'click.launch', (['url'], {}), '(url)\n', (240, 245), False, 'import click\n'), ((287, 310), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (305, 310), False, 'import os\n'), ((640, 660), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (654, 660), False, 'import os\n'), ((896, 919), 'json.dump', 'json.dump', (['{}', 'datafile'], {}), '({}, datafile)\n', (905, 919), False, 'import json\n'), ((192, 221), 'crayons.white', 'crayons.white', (['url'], {'bold': '(True)'}), '(url, bold=True)\n', (205, 221), False, 'import crayons\n')] |
import json
import requests
import ccxt
import time
import os
import pandas as pd
from datetime import datetime, timedelta
import operator
import csv
import cfg
liquid = ccxt.liquid(cfg.liquid_misc_credential)
exchange = liquid
since = exchange.milliseconds() - 86400000 # -1 day from now
def save_and_get_str():
# SAVE
all_orders = []
    since = exchange.milliseconds() - 86400000 * 5  # -5 days from now
while since < exchange.milliseconds():
symbol = 'ETH/JPY' # change for your symbol
limit = 100 # change for your limit
orders = exchange.fetch_my_trades(symbol, since, limit)
if len(orders) > 1:
since = orders[len(orders) - 1]['timestamp']
all_orders += orders
else:
break
df = pd.DataFrame(
columns=['utc', 'time', 'type', 'amount', 'price', 'fee', 'takerOrMaker'])
for element in all_orders:
trade = element['info']
trade_utc = datetime.utcfromtimestamp(
float(trade['created_at'])).strftime('%Y-%m-%d %H:%M:%S.%f')
trades_to_append = str(int(float(trade['created_at']) * 1000)) + ',' + str(trade_utc) + ',' + str(trade['my_side']) + ',' + str(abs(
float(trade['quantity']))) + ',' + str(float(trade['price'])) + ',' + str(element['fee']) + ',' + str(element['takerOrMaker'])
df.loc[len(df.index)] = trades_to_append.split(",")
# df.to_csv('transaction_liquid.csv')
if not os.path.isfile("transaction_liquid.csv"):
csv_content = df.to_csv(index=False)
else:
csv_content = df.to_csv(
index=False, header=None)
with open('transaction_liquid.csv', 'a') as csvfile:
csvfile.write(csv_content)
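# Fetch ETH/JPY trades from the last 5 days in pages of 100 and append them to transaction_liquid.csv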
def sort_csv():
x = pd.read_csv("transaction_liquid.csv")
print(x.iloc[0])
x = x.drop_duplicates().sort_values('time', ascending=False)
x.to_csv('transaction_liquid.csv', index=False)
print('sorted')
while True:
save_and_get_str()
sort_csv()
time.sleep(23 * 60)
| [
"pandas.DataFrame",
"pandas.read_csv",
"time.sleep",
"ccxt.liquid",
"os.path.isfile"
] | [((172, 211), 'ccxt.liquid', 'ccxt.liquid', (['cfg.liquid_misc_credential'], {}), '(cfg.liquid_misc_credential)\n', (183, 211), False, 'import ccxt\n'), ((784, 875), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['utc', 'time', 'type', 'amount', 'price', 'fee', 'takerOrMaker']"}), "(columns=['utc', 'time', 'type', 'amount', 'price', 'fee',\n 'takerOrMaker'])\n", (796, 875), True, 'import pandas as pd\n'), ((1745, 1782), 'pandas.read_csv', 'pd.read_csv', (['"""transaction_liquid.csv"""'], {}), "('transaction_liquid.csv')\n", (1756, 1782), True, 'import pandas as pd\n'), ((1998, 2017), 'time.sleep', 'time.sleep', (['(23 * 60)'], {}), '(23 * 60)\n', (2008, 2017), False, 'import time\n'), ((1458, 1498), 'os.path.isfile', 'os.path.isfile', (['"""transaction_liquid.csv"""'], {}), "('transaction_liquid.csv')\n", (1472, 1498), False, 'import os\n')] |
import re
from datetime import datetime
from datetime import timedelta
import dateutil.parser
import pytz
import tzlocal
# datetime object for beginning of epoch
T_EPOCH = datetime(1970, 1, 1, tzinfo=pytz.utc)
DEFAULT = object() # singleton, for args with default values
class DateTimeError(Exception):
""" custom exception """
class DateTime(object):
single_delta = r'(?:\s*([+-]\d+(?:\.\d*)?)(?:\s*([shMdw])?)\s*)'
single_delta = r'(?:\s*([+-]\d+(?:\.\d*)?)\s*([shMdw]?)\s*)'
# attempt to handle comma separated list of deltas
# multi_delta = r'^%s(?:,%s)*$' % (single_delta, single_delta)
delta_rex = re.compile('^' + single_delta + '$')
delta_units = {
's': (0, 1),
'M': (0, 60),
'h': (0, 3600),
'd': (1, 0),
'w': (7, 0),
'': (1, 0), # default unit = days
}
@classmethod
def strptimedelta(cls, deltastr, info=None, raise_on_error=True):
""" parses a date time string and returns a datetime timedelta object
Supported Formats:
'+-<num><unit>'
where unit =
s for seconds
h for hours
M for minutes
d for days
w for weeks
default = days
"""
# not implemented so far
# and rounding (use by strptime) =
# d for days
# default no rounding
# """
# TODO: think about using dateutil.parser.relativedelta
rslt = datetime.now(pytz.utc)
fields = (val.strip() for val in deltastr.split(','))
delta_rex = cls.delta_rex
for field in fields:
match = delta_rex.match(field)
if not match:
raise DateTimeError("can't parse %r as delta" % field)
value, unit = match.groups()
value = float(value)
days, seconds = cls.delta_units[unit]
rslt += timedelta(days * value, seconds * value)
return rslt
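    # e.g. strptimedelta('+1d') -> now (UTC) + 1 day; strptimedelta('-2h,+30M') -> now - 2 hours + 30 minutes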
@classmethod
def strptime(cls, datestr=None, fmt=None, tzinfo=DEFAULT):
""" parses a date time string and returns a date time object
Supported Formats:
- formats as supported by dateutil.parser
- None, '', 0, '0' and 'now' -> datetime.now()
- if fmt is passed same as datetime.strptime
:param datestr: date string to be passed
:param fmt: if passedm then use datetime's normal strptime
BUT add a time zone info
:param tzinfo: if no tz info is specified in the string, then
this param decides which time zone shall be used.
DEFAULT: use local time zone
None: return naive time zone object
other: use other time zone
"""
# NOT IMPLEMENTED SO FAR
# - delta format with +-num units[rounding],
# where unit =
# s for seconds
# M for minutes
# h for hours
# d for days
# w for weeks
# and rounding =
# d for days
# default no rounding
tzinfo = tzinfo if tzinfo is not DEFAULT else tzlocal.get_localzone()
if fmt:
rslt = datetime.strptime(datestr, fmt)
else:
if isinstance(datestr, (int, float)):
datestr = str(datestr)
datestr = datestr.strip() if datestr else datestr
if datestr in (None, '', '0', 'now'):
return datetime.now(tzinfo)
if datestr[:1] in "+-" or ',' in datestr:
return cls.strptimedelta(datestr, tzinfo)
rslt = dateutil.parser.parse(datestr)
if rslt.tzinfo is None and tzinfo:
rslt = tzinfo.localize(rslt)
return rslt
@classmethod
def parse_range(cls, rangestr=None, default_from='-1d', default_to='now'):
""" parses a time range string
a time range string is a comma separated list of a start time
            and an end time
"""
if rangestr is None:
from_str = default_from
to_str = default_to
else:
from_str, to_str = [v.strip() for v in rangestr.split(',', 1)]
from_str = from_str if from_str else default_from
to_str = to_str if to_str else default_to
t_from = cls.strptime(from_str)
t_to = cls.strptime(to_str)
return t_from, t_to
class Time(DateTime):
@classmethod
def strptime(cls, datestr):
pass
class Date(DateTime):
@classmethod
def strptime(cls, datestr):
pass
def fname_to_time(fname, use_ctime=False, use_mtime=False, tz=None):
""" extracts date time from an fname
examples of supported formats:
"fnameYYYYMMDD" just a date
"fnameYYYY-MM-DD" date with separators
"fnameYYYYMMDD_HHmmss" date and time
"fnameYYYYMMDD-HHmmss" date and time
"fnameYYYYMMDD-HH-mm-ss" date and time
"fnameYYYYMMDD-ssssssssss" date and time(in seconds since epoche)
:param fname: file name to parse
:param use_ctime: if file name contains no string use file's ctime
:param use_mtime: if file name contains no string use file's mtime
"""
def to_timestamp(t):
""" convert a datetime object to seconds since epoch """
return (t - T_EPOCH).total_seconds()
| [
"tzlocal.get_localzone",
"datetime.datetime",
"datetime.datetime.strptime",
"datetime.timedelta",
"datetime.datetime.now",
"re.compile"
] | [((173, 210), 'datetime.datetime', 'datetime', (['(1970)', '(1)', '(1)'], {'tzinfo': 'pytz.utc'}), '(1970, 1, 1, tzinfo=pytz.utc)\n', (181, 210), False, 'from datetime import datetime\n'), ((634, 670), 're.compile', 're.compile', (["('^' + single_delta + '$')"], {}), "('^' + single_delta + '$')\n", (644, 670), False, 'import re\n'), ((1600, 1622), 'datetime.datetime.now', 'datetime.now', (['pytz.utc'], {}), '(pytz.utc)\n', (1612, 1622), False, 'from datetime import datetime\n'), ((2032, 2072), 'datetime.timedelta', 'timedelta', (['(days * value)', '(seconds * value)'], {}), '(days * value, seconds * value)\n', (2041, 2072), False, 'from datetime import timedelta\n'), ((3378, 3401), 'tzlocal.get_localzone', 'tzlocal.get_localzone', ([], {}), '()\n', (3399, 3401), False, 'import tzlocal\n'), ((3437, 3468), 'datetime.datetime.strptime', 'datetime.strptime', (['datestr', 'fmt'], {}), '(datestr, fmt)\n', (3454, 3468), False, 'from datetime import datetime\n'), ((3707, 3727), 'datetime.datetime.now', 'datetime.now', (['tzinfo'], {}), '(tzinfo)\n', (3719, 3727), False, 'from datetime import datetime\n')] |
"""
This module implements connections for CUBRIDdb. Presently there is
only one class: Connection. Others are unlikely. However, you might
want to make your own subclasses. In most cases, you will probably
override Connection.default_cursor with a non-standard Cursor class.
"""
from CUBRIDdb.cursors import *
import types, _cubrid
class Connection(object):
"""CUBRID Database Connection Object"""
def __init__(self, *args, **kwargs):
        'Create a connection to the database.'
self.charset = ''
kwargs2 = kwargs.copy()
self.charset = kwargs2.pop('charset', 'utf8')
self.connection = _cubrid.connect(*args, **kwargs2)
def __del__(self):
pass
def cursor(self, dictCursor = None):
if dictCursor:
cursorClass = DictCursor
else:
cursorClass = Cursor
return cursorClass(self)
def set_autocommit(self, value):
if not isinstance(value, bool):
raise ValueError("Parameter should be a boolean value")
if value:
switch = 'TRUE'
else:
switch = 'FALSE'
self.connection.set_autocommit(switch)
def get_autocommit(self):
if self.connection.autocommit == 'TRUE':
return True
else:
return False
autocommit = property(get_autocommit, set_autocommit, doc = "autocommit value for current Cubrid session")
def commit(self):
self.connection.commit()
def rollback(self):
self.connection.rollback()
def close(self):
self.connection.close()
def escape_string(self, buf):
return self.connection.escape_string(buf)
| [
"_cubrid.connect"
] | [((634, 667), '_cubrid.connect', '_cubrid.connect', (['*args'], {}), '(*args, **kwargs2)\n', (649, 667), False, 'import types, _cubrid\n')] |
from setuptools import setup
setup(name='tutorials',
version='POC',
url='https://github.com/pyomeca/tutorials.git',
author='pyomeca',
packages=['src'],
zip_safe=False)
| [
"setuptools.setup"
] | [((30, 178), 'setuptools.setup', 'setup', ([], {'name': '"""tutorials"""', 'version': '"""POC"""', 'url': '"""https://github.com/pyomeca/tutorials.git"""', 'author': '"""pyomeca"""', 'packages': "['src']", 'zip_safe': '(False)'}), "(name='tutorials', version='POC', url=\n 'https://github.com/pyomeca/tutorials.git', author='pyomeca', packages=\n ['src'], zip_safe=False)\n", (35, 178), False, 'from setuptools import setup\n')] |
import random
import json
import math
async def battle_attack(x, y, u, e, call):
if x == y:
await call.answer("❗ Противник увернулся от удара", show_alert=True)
return e.health, e.defence
else:
if e.defence <= 0:
e.health -= u.damage
return e.health, e.defence
else:
if u.damage > e.defence:
miss_dmg = u.damage - e.defence
e.health -= miss_dmg
e.defence = 0
return e.health, e.defence
else:
e.defence -= u.damage
return e.health, e.defence
async def battle_defence(x, y, u, e, call):
if x == y:
await call.answer("❗ Ты увернулся от удара", show_alert=True)
return u.health, u.defence
else:
if u.defence <= 0:
u.health -= e.damage
return u.health, u.defence
else:
if e.damage > u.defence:
miss_dmg = e.damage - u.defence
u.health -= miss_dmg
u.defence = 0
return u.health, u.defence
else:
u.defence -= e.damage
return u.health, u.defence
def power(obj, maximal=False):
if maximal is True:
hp = obj.max_health + obj.max_defence
else:
hp = obj.health + obj.defence
return hp * obj.damage
def exam_choose(user):
from app.models.examinators import exams
for i in range(len(exams)):
if user.rank == '-':
return exams[0]
elif exams[i].rank == user.rank:
try:
return exams[i + 1]
except IndexError:
return 'Максимальный ранг!'
def set_difficulty(m, u):
if m * 3 <= u:
difficulty = 'Оч. легко'
elif m * 2.5 <= u:
difficulty = 'Легко'
elif m * 2 < u:
difficulty = 'Нормально'
elif m * 1.5 < u:
difficulty = 'Сложно'
elif m < u:
difficulty = 'Очень сложно'
elif m > u * 3:
difficulty = 'Верная смерть'
elif m >= u:
difficulty = 'Невозможно'
else:
return
return difficulty
def get_xp(lvl):
"""
    Returns the total XP needed to gain the given level
"""
total_xp = int((lvl * 10) ** 1.1)
return total_xp * lvl
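# e.g. get_xp(1) == int((1 * 10) ** 1.1) * 1 == 12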
# def json_inv(u):
# """
# Converts string from database to list
# Example: '[3, 2]' => [3, 2]
# :param u: User
# :return: User's inventory as list
# """
# inventory = json.loads(u['inventory']) if u['inventory'] != '[]' else []
# return inventory
def item_drop(chance):
"""
:param chance: Mob's chance of drop
:return: True/False
"""
c = random.randint(1, 100)
if c <= chance:
return True
return False
def round_down(n, decimals=0):
"""
Rounds a number down to a specified number of digits.
:param decimals: Specified number of digits
:param n: Float
"""
multiplier = 10 ** decimals
return math.floor(n * multiplier) / multiplier
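# e.g. round_down(2.5) == 2.0 and round_down(1.99, 1) == 1.9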
def enemy_calc(u_attack, u_health, u_defence, lvl):
enemy, result = [], []
if lvl != 1:
multiplier = round_down(random.uniform(0.4, 1.1), 1)
else:
multiplier = 0.4
print(multiplier)
for stat in (u_attack, u_health, u_defence):
enemy.append(round(stat*multiplier) if stat != 0 else 0)
e_power = enemy[0]*(enemy[1]+enemy[2])
formulae = int((e_power/(lvl**1.45))*2)
result = [enemy, formulae if formulae > 1 else 2]
return result
| [
"math.floor",
"random.randint",
"random.uniform"
] | [((2701, 2723), 'random.randint', 'random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (2715, 2723), False, 'import random\n'), ((3000, 3026), 'math.floor', 'math.floor', (['(n * multiplier)'], {}), '(n * multiplier)\n', (3010, 3026), False, 'import math\n'), ((3170, 3194), 'random.uniform', 'random.uniform', (['(0.4)', '(1.1)'], {}), '(0.4, 1.1)\n', (3184, 3194), False, 'import random\n')] |
import pandas as pd
from news_classifier.database import db
def load_data(projection: dict) -> pd.DataFrame:
"""
    Load the data from the Mongo collection and transform
    it into a pandas dataframe
    :param projection: A dictionary with the fields to load from the database
:return: A pandas dataframe with the data
"""
articles = db.read_articles(
projection=projection
)
return pd.DataFrame(articles)
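# e.g. load_data({"title": 1, "text": 1}); the projection keys are illustrative and depend on the articles collection schema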
| [
"pandas.DataFrame",
"news_classifier.database.db.read_articles"
] | [((343, 382), 'news_classifier.database.db.read_articles', 'db.read_articles', ([], {'projection': 'projection'}), '(projection=projection)\n', (359, 382), False, 'from news_classifier.database import db\n'), ((408, 430), 'pandas.DataFrame', 'pd.DataFrame', (['articles'], {}), '(articles)\n', (420, 430), True, 'import pandas as pd\n')] |
#!/usr/bin/env python
import argparse
import sys
from collections import defaultdict
DEFAULT_OUT = "stackcollapse-merged.txt"
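# Each input line is a collapsed stack followed by a sample count, e.g. "main;foo;bar 42".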
def merge(files, dst):
data = defaultdict(lambda: 0)
for file in files:
with open(file, "r") as fp:
for line in fp.readlines():
stack, hits = line.rsplit(" ", 1)
hits = int(hits)
data[stack] += hits
with open(dst, "w") as fp:
for stack, hits in data.items():
print(stack, hits, file=fp)
def main():
parser = argparse.ArgumentParser(sys.argv[0])
parser = argparse.ArgumentParser(
description="merge multiple stackcollapes into a single one"
)
parser.add_argument(
"files", metavar="FILE", type=str, nargs="+", help="a stackcollapse file"
)
parser.add_argument(
"-o",
"--out",
default=DEFAULT_OUT,
help=f"write resulting stackcollapse to this file (default: {DEFAULT_OUT})",
)
opts = parser.parse_args(sys.argv[1:])
merge(opts.files, opts.out)
if __name__ == "__main__":
main()
| [
"collections.defaultdict",
"argparse.ArgumentParser"
] | [((164, 187), 'collections.defaultdict', 'defaultdict', (['(lambda : 0)'], {}), '(lambda : 0)\n', (175, 187), False, 'from collections import defaultdict\n'), ((546, 582), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (569, 582), False, 'import argparse\n'), ((596, 686), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""merge multiple stackcollapes into a single one"""'}), "(description=\n 'merge multiple stackcollapes into a single one')\n", (619, 686), False, 'import argparse\n')] |
import os
from fastapi import Depends, FastAPI, Response, status
from fastapi.middleware.cors import CORSMiddleware
from fastapi_users import FastAPIUsers
from fastapi_users.authentication import JWTAuthentication
from sqlalchemy.orm import Session
from .database.session import database, user_db
from .deps import db_session
from .models.disability import Disability as DisabilityModel
from .models.education import Education as EducationModel
from .models.experience import Experience as ExperienceModel
from .models.language import Language as LanguageModel
from .schemas.disability import Disability as DisabilitySchema
from .schemas.disability import DisabilityDB
from .schemas.education import Education as EducationSchema
from .schemas.education import EducationDB
from .schemas.experience import Experience as ExperienceSchema
from .schemas.experience import ExperienceDB
from .schemas.language import Language as LanguageSchema
from .schemas.language import LanguageDB
from .schemas.user import User, UserCreate, UserDB, UserUpdate
APP_SECRET = os.getenv("APP_SECRET")
jwt_authentication = JWTAuthentication(secret=APP_SECRET, lifetime_seconds=3600, tokenUrl="/auth/jwt/login")
app = FastAPI()
fastapi_users = FastAPIUsers(
user_db,
[jwt_authentication],
User,
UserCreate,
UserUpdate,
UserDB,
)
app.include_router(fastapi_users.get_auth_router(jwt_authentication), prefix="/auth/jwt", tags=["auth"])
app.include_router(fastapi_users.get_register_router(), prefix="/auth", tags=["auth"])
app.include_router(fastapi_users.get_reset_password_router(APP_SECRET), prefix="/auth", tags=["auth"])
app.include_router(fastapi_users.get_verify_router(APP_SECRET), prefix="/auth", tags=["auth"])
app.include_router(fastapi_users.get_users_router(), prefix="/users", tags=["users"])
app.add_middleware(
CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"]
)
@app.on_event("startup")
async def startup():
await database.connect()
@app.on_event("shutdown")
async def shutdown():
await database.disconnect()
@app.get("/user/experience", tags=["experience"], response_model=list[ExperienceDB])
def get_user_experience(user: User = Depends(fastapi_users.current_user()), session: Session = Depends(db_session)):
experiences = session.query(ExperienceModel).filter(ExperienceModel.user_id == user.id).all()
return [
ExperienceDB(
id=exp.id,
position=exp.position,
employer=exp.employer,
city=exp.city,
start_date=exp.start_date,
end_date=exp.end_date,
description=exp.description,
)
for exp in experiences
]
@app.post("/user/experience", tags=["experience"], status_code=status.HTTP_201_CREATED)
def add_user_experience(
request: ExperienceSchema,
user: User = Depends(fastapi_users.current_user()),
session: Session = Depends(db_session),
):
experience = ExperienceModel(**request.dict(), user_id=user.id)
session.add(experience)
session.commit()
session.refresh(experience)
@app.put("/user/experience", tags=["experience"])
def edit_user_experience(
id: int,
request: ExperienceSchema,
response: Response,
user: User = Depends(fastapi_users.current_user()),
session: Session = Depends(db_session),
):
experience = (
session.query(ExperienceModel)
.filter(ExperienceModel.user_id == user.id)
.filter(ExperienceModel.id == id)
.one_or_none()
)
if experience:
experience.position = request.position
experience.employer = request.employer
experience.city = request.city
experience.start_date = request.start_date
experience.end_date = request.end_date
experience.description = request.description
session.commit()
session.refresh(experience)
return
response.status_code = status.HTTP_404_NOT_FOUND
@app.delete("/user/experience", tags=["experience"])
def remove_user_experience(
id: int,
response: Response,
user: User = Depends(fastapi_users.current_user()),
session: Session = Depends(db_session),
):
deleted = (
session.query(ExperienceModel)
.filter(ExperienceModel.user_id == user.id)
.filter(ExperienceModel.id == id)
.delete()
)
if not deleted:
response.status_code = status.HTTP_404_NOT_FOUND
return
session.commit()
@app.get("/user/education", tags=["education"], response_model=list[EducationDB])
def get_user_education(user: User = Depends(fastapi_users.current_user()), session: Session = Depends(db_session)):
educations = session.query(EducationModel).filter(EducationModel.user_id == user.id).all()
return [
EducationDB(
id=edu.id,
edu_type=edu.edu_type.value,
name=edu.name,
city=edu.city,
start_date=edu.start_date,
end_date=edu.end_date,
)
for edu in educations
]
@app.post("/user/education", tags=["education"], status_code=status.HTTP_201_CREATED)
def add_user_education(
request: EducationSchema,
user: User = Depends(fastapi_users.current_user()),
session: Session = Depends(db_session),
):
edu = EducationModel(**request.dict(), user_id=user.id)
session.add(edu)
session.commit()
session.refresh(edu)
@app.put("/user/education", tags=["education"])
def edit_user_education(
id: int,
request: EducationSchema,
response: Response,
user: User = Depends(fastapi_users.current_user()),
session: Session = Depends(db_session),
):
education = (
session.query(EducationModel)
.filter(EducationModel.user_id == user.id)
.filter(EducationModel.id == id)
.one_or_none()
)
if education:
education.edu_type = request.edu_type
education.name = request.name
education.city = request.city
education.start_date = request.start_date
education.end_date = request.end_date
session.commit()
session.refresh(education)
return
response.status_code = status.HTTP_404_NOT_FOUND
@app.delete("/user/education", tags=["education"])
def remove_user_education(
id: int,
response: Response,
user: User = Depends(fastapi_users.current_user()),
session: Session = Depends(db_session),
):
deleted = (
session.query(EducationModel).filter(EducationModel.user_id == user.id).filter(EducationModel.id == id).delete()
)
if not deleted:
response.status_code = status.HTTP_404_NOT_FOUND
return
session.commit()
@app.get("/user/language", tags=["language"], response_model=list[LanguageDB])
def get_user_language(user: User = Depends(fastapi_users.current_user()), session: Session = Depends(db_session)):
languages = session.query(LanguageModel).filter(LanguageModel.user_id == user.id).all()
return [LanguageDB(id=lang.id, language=lang.language, level=lang.level.value) for lang in languages]
@app.post("/user/language", tags=["language"], status_code=status.HTTP_201_CREATED)
def add_user_language(
request: LanguageSchema,
user: User = Depends(fastapi_users.current_user()),
session: Session = Depends(db_session),
):
edu = LanguageModel(**request.dict(), user_id=user.id)
session.add(edu)
session.commit()
session.refresh(edu)
@app.put("/user/language", tags=["language"])
def edit_user_language(
id: int,
request: LanguageSchema,
response: Response,
user: User = Depends(fastapi_users.current_user()),
session: Session = Depends(db_session),
):
lang = (
session.query(LanguageModel)
.filter(LanguageModel.user_id == user.id)
.filter(LanguageModel.id == id)
.one_or_none()
)
if lang:
lang.level = request.level
lang.language = request.language
session.commit()
session.refresh(lang)
return
response.status_code = status.HTTP_404_NOT_FOUND
@app.delete("/user/language", tags=["language"])
def remove_user_language(
id: int,
response: Response,
user: User = Depends(fastapi_users.current_user()),
session: Session = Depends(db_session),
):
deleted = (
session.query(LanguageModel).filter(LanguageModel.user_id == user.id).filter(LanguageModel.id == id).delete()
)
if not deleted:
response.status_code = status.HTTP_404_NOT_FOUND
return
session.commit()
@app.get("/user/disability", tags=["disability"], response_model=list[DisabilityDB])
def get_user_language(user: User = Depends(fastapi_users.current_user()), session: Session = Depends(db_session)):
disabilities = session.query(DisabilityModel).filter(DisabilityModel.user_id == user.id).all()
return [DisabilityDB(id=dis.id, type=dis.type.value, level=dis.level.value) for dis in disabilities]
@app.post("/user/disability", tags=["disability"], status_code=status.HTTP_201_CREATED)
def add_user_language(
request: DisabilitySchema,
user: User = Depends(fastapi_users.current_user()),
session: Session = Depends(db_session),
):
edu = DisabilityModel(**request.dict(), user_id=user.id)
session.add(edu)
session.commit()
session.refresh(edu)
@app.put("/user/disability", tags=["disability"])
def edit_user_language(
id: int,
request: DisabilitySchema,
response: Response,
user: User = Depends(fastapi_users.current_user()),
session: Session = Depends(db_session),
):
dis = (
session.query(DisabilityModel)
.filter(DisabilityModel.user_id == user.id)
.filter(DisabilityModel.id == id)
.one_or_none()
)
if dis:
dis.level = request.level
dis.type = request.type
session.commit()
session.refresh(dis)
return
response.status_code = status.HTTP_404_NOT_FOUND
@app.delete("/user/disability", tags=["disability"])
def remove_user_language(
id: int,
response: Response,
user: User = Depends(fastapi_users.current_user()),
session: Session = Depends(db_session),
):
deleted = (
session.query(DisabilityModel)
.filter(DisabilityModel.user_id == user.id)
.filter(DisabilityModel.id == id)
.delete()
)
if not deleted:
response.status_code = status.HTTP_404_NOT_FOUND
return
session.commit()
| [
"fastapi_users.authentication.JWTAuthentication",
"fastapi_users.FastAPIUsers",
"fastapi.Depends",
"os.getenv",
"fastapi.FastAPI"
] | [((1057, 1080), 'os.getenv', 'os.getenv', (['"""APP_SECRET"""'], {}), "('APP_SECRET')\n", (1066, 1080), False, 'import os\n'), ((1103, 1195), 'fastapi_users.authentication.JWTAuthentication', 'JWTAuthentication', ([], {'secret': 'APP_SECRET', 'lifetime_seconds': '(3600)', 'tokenUrl': '"""/auth/jwt/login"""'}), "(secret=APP_SECRET, lifetime_seconds=3600, tokenUrl=\n '/auth/jwt/login')\n", (1120, 1195), False, 'from fastapi_users.authentication import JWTAuthentication\n'), ((1198, 1207), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (1205, 1207), False, 'from fastapi import Depends, FastAPI, Response, status\n'), ((1224, 1309), 'fastapi_users.FastAPIUsers', 'FastAPIUsers', (['user_db', '[jwt_authentication]', 'User', 'UserCreate', 'UserUpdate', 'UserDB'], {}), '(user_db, [jwt_authentication], User, UserCreate, UserUpdate,\n UserDB)\n', (1236, 1309), False, 'from fastapi_users import FastAPIUsers\n'), ((2278, 2297), 'fastapi.Depends', 'Depends', (['db_session'], {}), '(db_session)\n', (2285, 2297), False, 'from fastapi import Depends, FastAPI, Response, status\n'), ((2940, 2959), 'fastapi.Depends', 'Depends', (['db_session'], {}), '(db_session)\n', (2947, 2959), False, 'from fastapi import Depends, FastAPI, Response, status\n'), ((3338, 3357), 'fastapi.Depends', 'Depends', (['db_session'], {}), '(db_session)\n', (3345, 3357), False, 'from fastapi import Depends, FastAPI, Response, status\n'), ((4175, 4194), 'fastapi.Depends', 'Depends', (['db_session'], {}), '(db_session)\n', (4182, 4194), False, 'from fastapi import Depends, FastAPI, Response, status\n'), ((4665, 4684), 'fastapi.Depends', 'Depends', (['db_session'], {}), '(db_session)\n', (4672, 4684), False, 'from fastapi import Depends, FastAPI, Response, status\n'), ((5275, 5294), 'fastapi.Depends', 'Depends', (['db_session'], {}), '(db_session)\n', (5282, 5294), False, 'from fastapi import Depends, FastAPI, Response, status\n'), ((5647, 5666), 'fastapi.Depends', 'Depends', (['db_session'], {}), '(db_session)\n', (5654, 5666), False, 'from fastapi import Depends, FastAPI, Response, status\n'), ((6409, 6428), 'fastapi.Depends', 'Depends', (['db_session'], {}), '(db_session)\n', (6416, 6428), False, 'from fastapi import Depends, FastAPI, Response, status\n'), ((6865, 6884), 'fastapi.Depends', 'Depends', (['db_session'], {}), '(db_session)\n', (6872, 6884), False, 'from fastapi import Depends, FastAPI, Response, status\n'), ((7302, 7321), 'fastapi.Depends', 'Depends', (['db_session'], {}), '(db_session)\n', (7309, 7321), False, 'from fastapi import Depends, FastAPI, Response, status\n'), ((7669, 7688), 'fastapi.Depends', 'Depends', (['db_session'], {}), '(db_session)\n', (7676, 7688), False, 'from fastapi import Depends, FastAPI, Response, status\n'), ((8268, 8287), 'fastapi.Depends', 'Depends', (['db_session'], {}), '(db_session)\n', (8275, 8287), False, 'from fastapi import Depends, FastAPI, Response, status\n'), ((8727, 8746), 'fastapi.Depends', 'Depends', (['db_session'], {}), '(db_session)\n', (8734, 8746), False, 'from fastapi import Depends, FastAPI, Response, status\n'), ((9176, 9195), 'fastapi.Depends', 'Depends', (['db_session'], {}), '(db_session)\n', (9183, 9195), False, 'from fastapi import Depends, FastAPI, Response, status\n'), ((9551, 9570), 'fastapi.Depends', 'Depends', (['db_session'], {}), '(db_session)\n', (9558, 9570), False, 'from fastapi import Depends, FastAPI, Response, status\n'), ((10147, 10166), 'fastapi.Depends', 'Depends', (['db_session'], {}), '(db_session)\n', (10154, 10166), False, 'from fastapi import 
Depends, FastAPI, Response, status\n')] |
"""Utility functions."""
import subprocess
import logging
import os
import shutil
import stat
import itertools
from collections import OrderedDict
from pkg_resources import resource_string
import pandas as pd
from genometools.expression import ExpGeneTable
from genometools import gtf
import singlecell
_LOGGER = logging.getLogger(__name__)
def get_readable_gene_identifiers(gene_table: ExpGeneTable):
"""Return unique gene identifiers that primarily use the genes' names."""
# count occurrences for each of gene name
counts = gene_table['name'].value_counts()
gene_counts = counts.loc[gene_table['name']]
gene_ids = gene_table.index.tolist()
gene_ids = [name if c == 1 else '%s_%s' % (name, gene_ids[i])
for i, (name, c) in enumerate(gene_counts.items())]
return gene_ids
def get_edit_sequences(seq, num_edits, bases=None):
"""Return all nucleotide sequences with a given hamming distance."""
if num_edits > len(seq):
        raise ValueError('Asked to make more edits (%d) than the length '
'of the sequence (%d nt).' % (num_edits, len(seq)))
if bases is None:
bases = set('ACGT')
length = len(seq)
all_bases = [bases for i in range(num_edits)]
seq_list = [nt for nt in seq]
mismatch = []
for comb in itertools.combinations(range(length), num_edits):
for subs in itertools.product(*all_bases):
mut = seq_list[:]
valid = True
for pos, nt in zip(comb, subs):
if mut[pos] == nt:
valid = False
break
mut[pos] = nt
if valid:
mismatch.append(''.join(mut))
return sorted(mismatch)
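# e.g. get_edit_sequences('AA', 1) -> ['AC', 'AG', 'AT', 'CA', 'GA', 'TA']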
def concatenate_files(input_files, output_file, append=False):
write_mode = 'wb'
if append:
write_mode = 'ab'
with open(output_file, write_mode) as ofh:
for f in input_files:
with open(f, 'rb') as ifh:
shutil.copyfileobj(ifh, ofh, 16*1024*1024)
def make_file_executable(path):
"""Sets the user executable flag for a file."""
st = os.stat(path)
os.chmod(path, st.st_mode | stat.S_IEXEC)
def zcat_subproc(path):
"""Creates a subprocess for decompressing a gzip file.
TODO: docstring"""
subproc = subprocess.Popen('gunzip -c "%s"' % path, shell=True,
stdout=subprocess.PIPE)
return subproc
def get_all_kmers(k, kmer='', kmer_list=None):
"""Returns all possible k-mer sequences (for A/C/G/T alphabet).
TODO: docstring"""
if kmer_list is None:
kmer_list = []
if len(kmer) == k:
kmer_list.append(kmer)
else:
for nuc in ['A', 'C', 'G', 'T']:
var = kmer + nuc
get_all_kmers(k, var, kmer_list)
if not kmer:
return kmer_list
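# e.g. get_all_kmers(1) -> ['A', 'C', 'G', 'T']; get_all_kmers(2) gives the 16 dinucleotides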
def get_mismatch_sequences(seq):
"""Generates all nucleotide sequences with hamming distance 1 to `seq`.
TODO: docstring"""
for pos in range(len(seq)):
for nuc in ['A', 'C', 'G', 'T']:
if nuc != seq[pos]:
mm = seq[:pos] + nuc + seq[(pos+1):]
yield mm
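# e.g. list(get_mismatch_sequences('AC')) -> ['CC', 'GC', 'TC', 'AA', 'AG', 'AT']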
def get_reverse_complement(seq):
"""Returns the reverse complement of a nucleotide sequence.
TODO: docstring"""
rc = {
'A': 'T',
'T': 'A',
'G': 'C',
'C': 'G'
}
compseq = ''.join([rc[nuc] for nuc in seq[::-1]])
return compseq
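# e.g. get_reverse_complement('ATGC') -> 'GCAT'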
def get_gene_exons(gene_table, genome_annotation_file, chunksize=10000):
"""Parse GTF file and get a dictionary of gene=>list of exon intervals.
(Only for protein-coding genes.)
TODO: docstring"""
# get gene names that are guaranteed to be unique
#gene_names = get_readable_gene_identifiers(gene_table)
# series with index = Ensembl ID, value = unique gene name
#genes = pd.Series(index=gene_table.index, data=gene_names)
# sort genes by chromosome, strand, and then position
sorted_gene_ids = sorted(
[id_ for id_ in gene_table.index],
key=lambda id_: [gene_table.loc[id_, 'chromosome'],
gene_table.loc[id_, 'position'] < 0,
abs(gene_table.loc[id_, 'position'])])
#genes = genes.loc[sorted_gene_ids]
gene_table = gene_table.loc[sorted_gene_ids]
# dictionary for holding list of intervals for each gene
gene_exons = OrderedDict([id_, []] for id_ in gene_table.index)
valid = 0
total = 0
_LOGGER.info('Parsing GTF file "%s" in chunks...', genome_annotation_file)
for i, df in enumerate(pd.read_csv(
genome_annotation_file, dtype={0: str},
sep='\t', comment='#', header=None, chunksize=chunksize)):
# select only exon entries
df_sel = df.loc[df.iloc[:, 2] == 'exon']
# extract gene IDs
gene_ids = df_sel.iloc[:, 8].apply(
lambda x: gtf.parse_attributes(x)['gene_id'])
for id_, chrom, start, end in zip(
gene_ids,
df_sel.iloc[:, 0], df_sel.iloc[:, 3], df_sel.iloc[:, 4]):
total += 1
try:
gene = gene_table.loc[id_]
except KeyError:
# this gene is not contained in the gene table
continue
gene_chrom = gene_table.loc[id_, 'chromosome']
if chrom != gene_chrom:
_LOGGER.warning('%s exon ignored (wrong chromosome: '
'%s instead of %s).',
id_, chrom, gene_chrom)
else:
valid += 1
gene_exons[id_].append([start-1, end])
_LOGGER.info('%d / %d exons from valid genes (%.1f %%).',
valid, total, 100*(valid/float(total)))
return gene_exons
def merge_intervals(intervals):
"""Merge overlapping intervals.
TODO: docstring"""
if not intervals:
return []
# sort intervals by start position
intervals = sorted(intervals, key=lambda x:x[0])
merged = []
cur = list(intervals[0])
for iv in intervals[1:]:
# interval starts inside/right after current interval
if iv[0] <= cur[1]:
if iv[1] > cur[1]: # interval ends after current interval
cur[1] = iv[1]
else:
merged.append(cur)
cur = list(iv)
merged.append(cur)
return merged
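# e.g. merge_intervals([[0, 5], [3, 8], [10, 12]]) -> [[0, 8], [10, 12]]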
def get_mitochondrial_genes(species='human'):
"""Get a list of all mitochondrial genes for a given species.
"Mitochondrial genes" are defined here as all genes on the mitochondrial
chromosome.
TODO: docstring
"""
path = os.path.join(singlecell._root,
'data', 'gene_lists', 'mitochondrial_%s.tsv' % species)
with open(path) as fh:
return fh.read().split('\n')
def get_ribosomal_genes(species='human'):
"""Get a list of all ribosomal genes for a given species.
"Ribosomal genes" are defined here as all protein-coding genes whose
protein products are a structural component of the small or large ribosomal
subunit (including fusion genes).
TODO: docstring
"""
path = os.path.join(singlecell._root,
'data', 'gene_lists', 'ribosomal_%s.tsv' % species)
with open(path) as fh:
return fh.read().split('\n')
def get_plotly_js():
"""Return the plotly javascript code.
TODO: docstring
"""
# resource_string?
path = 'package_data/plotly.min.js'
return resource_string('plotly', path).decode('utf-8')
def is_empty_dir(dir_):
"""Tests whether a directory is empty.
Note: Also returns True if the directory doesn't exist.
TODO: docstring
"""
is_empty = True
try:
_, dirnames, filenames = next(os.walk(dir_))
if dirnames or filenames:
is_empty = False
except StopIteration:
pass
return is_empty
| [
"subprocess.Popen",
"os.chmod",
"os.stat",
"pandas.read_csv",
"os.walk",
"pkg_resources.resource_string",
"itertools.product",
"collections.OrderedDict",
"genometools.gtf.parse_attributes",
"shutil.copyfileobj",
"os.path.join",
"logging.getLogger"
] | [((318, 345), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (335, 345), False, 'import logging\n'), ((2162, 2175), 'os.stat', 'os.stat', (['path'], {}), '(path)\n', (2169, 2175), False, 'import os\n'), ((2180, 2221), 'os.chmod', 'os.chmod', (['path', '(st.st_mode | stat.S_IEXEC)'], {}), '(path, st.st_mode | stat.S_IEXEC)\n', (2188, 2221), False, 'import os\n'), ((2349, 2426), 'subprocess.Popen', 'subprocess.Popen', (['(\'gunzip -c "%s"\' % path)'], {'shell': '(True)', 'stdout': 'subprocess.PIPE'}), '(\'gunzip -c "%s"\' % path, shell=True, stdout=subprocess.PIPE)\n', (2365, 2426), False, 'import subprocess\n'), ((4457, 4507), 'collections.OrderedDict', 'OrderedDict', (['([id_, []] for id_ in gene_table.index)'], {}), '([id_, []] for id_ in gene_table.index)\n', (4468, 4507), False, 'from collections import OrderedDict\n'), ((6748, 6838), 'os.path.join', 'os.path.join', (['singlecell._root', '"""data"""', '"""gene_lists"""', "('mitochondrial_%s.tsv' % species)"], {}), "(singlecell._root, 'data', 'gene_lists', 'mitochondrial_%s.tsv' %\n species)\n", (6760, 6838), False, 'import os\n'), ((7266, 7352), 'os.path.join', 'os.path.join', (['singlecell._root', '"""data"""', '"""gene_lists"""', "('ribosomal_%s.tsv' % species)"], {}), "(singlecell._root, 'data', 'gene_lists', 'ribosomal_%s.tsv' %\n species)\n", (7278, 7352), False, 'import os\n'), ((1408, 1437), 'itertools.product', 'itertools.product', (['*all_bases'], {}), '(*all_bases)\n', (1425, 1437), False, 'import itertools\n'), ((4649, 4763), 'pandas.read_csv', 'pd.read_csv', (['genome_annotation_file'], {'dtype': '{(0): str}', 'sep': '"""\t"""', 'comment': '"""#"""', 'header': 'None', 'chunksize': 'chunksize'}), "(genome_annotation_file, dtype={(0): str}, sep='\\t', comment='#',\n header=None, chunksize=chunksize)\n", (4660, 4763), True, 'import pandas as pd\n'), ((7609, 7640), 'pkg_resources.resource_string', 'resource_string', (['"""plotly"""', 'path'], {}), "('plotly', path)\n", (7624, 7640), False, 'from pkg_resources import resource_string\n'), ((7883, 7896), 'os.walk', 'os.walk', (['dir_'], {}), '(dir_)\n', (7890, 7896), False, 'import os\n'), ((2024, 2070), 'shutil.copyfileobj', 'shutil.copyfileobj', (['ifh', 'ofh', '(16 * 1024 * 1024)'], {}), '(ifh, ofh, 16 * 1024 * 1024)\n', (2042, 2070), False, 'import shutil\n'), ((4964, 4987), 'genometools.gtf.parse_attributes', 'gtf.parse_attributes', (['x'], {}), '(x)\n', (4984, 4987), False, 'from genometools import gtf\n')] |
import os
import frida
from flask import Flask, jsonify, request
from hook import start_hook
REMOTE_DEVICE = os.getenv('REMOTE_DEVICE', '')
app = Flask(__name__)
api = start_hook(REMOTE_DEVICE)
@app.route('/sign')
def sign():
global api
url = request.args.get('url', '')
headers = dict(request.headers)
try:
data = api.exports.sign(url, headers)
except frida.InvalidOperationError as e:
print(f'app crash: {e}')
api = start_hook(REMOTE_DEVICE)
data = api.exports.sign(url, headers)
return jsonify({
'url': url,
'headers': headers,
'sign': data,
})
if __name__ == '__main__':
app.run()
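# Example request: GET /sign?url=https://example.com/path -> {"url": ..., "headers": ..., "sign": ...}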
| [
"flask.request.args.get",
"flask.Flask",
"flask.jsonify",
"os.getenv",
"hook.start_hook"
] | [((111, 141), 'os.getenv', 'os.getenv', (['"""REMOTE_DEVICE"""', '""""""'], {}), "('REMOTE_DEVICE', '')\n", (120, 141), False, 'import os\n'), ((150, 165), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (155, 165), False, 'from flask import Flask, jsonify, request\n'), ((172, 197), 'hook.start_hook', 'start_hook', (['REMOTE_DEVICE'], {}), '(REMOTE_DEVICE)\n', (182, 197), False, 'from hook import start_hook\n'), ((257, 284), 'flask.request.args.get', 'request.args.get', (['"""url"""', '""""""'], {}), "('url', '')\n", (273, 284), False, 'from flask import Flask, jsonify, request\n'), ((551, 606), 'flask.jsonify', 'jsonify', (["{'url': url, 'headers': headers, 'sign': data}"], {}), "({'url': url, 'headers': headers, 'sign': data})\n", (558, 606), False, 'from flask import Flask, jsonify, request\n'), ((468, 493), 'hook.start_hook', 'start_hook', (['REMOTE_DEVICE'], {}), '(REMOTE_DEVICE)\n', (478, 493), False, 'from hook import start_hook\n')] |
import PySimpleGUI as sg
from ..model.objectdata import ObjectData
from ..model.search import SearchableList
from ..view import newproduction
from . import get_string_unit, RACES, filter_listbox
from myconfigparser import Section
def open_window(data):
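    # Build a searchable list of worker ("peon") units, excluding a few special
    # unit codes, and open a window where the user picks one to create a production.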
options = SearchableList()
for u in data:
unit = Section(data[u])
if 'peon' in unit['type'].lower() and u != 'e000' and u != 'udr' and 'A00J' not in unit['abilList']:
options.append('{name} [{code}]'.format(code=u, name=unit['Name'][1:-1]))
window = sg.Window('New Production', newproduction.get_layout(), default_element_size=(40, 1), grab_anywhere=False).Finalize()
window.find_element('Options').Update(sorted(options))
while True:
event, values = window.read()
if event is None:
break
elif event == 'Submit':
try:
ObjectData(data).create_production(values['Name'], get_string_unit(values['Options'][0]), RACES[values['ProdRace']])
sg.popup('Success')
except Exception as e:
sg.popup(str(e),title='Error')
filter_listbox(data, window, values, '', options)
| [
"myconfigparser.Section",
"PySimpleGUI.popup"
] | [((320, 336), 'myconfigparser.Section', 'Section', (['data[u]'], {}), '(data[u])\n', (327, 336), False, 'from myconfigparser import Section\n'), ((1030, 1049), 'PySimpleGUI.popup', 'sg.popup', (['"""Success"""'], {}), "('Success')\n", (1038, 1049), True, 'import PySimpleGUI as sg\n')] |
# Generated by Django 3.2.3 on 2021-08-10 02:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('artists', '0002_artist_bio'),
('beats', '0002_instrumental_img_file'),
]
operations = [
migrations.AlterField(
model_name='instrumental',
name='producer',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='artists.artist'),
),
migrations.AlterField(
model_name='instrumentalcollection',
name='instrumentals',
field=models.ManyToManyField(blank=True, related_name='_beats_instrumentalcollection_instrumentals_+', to='beats.Instrumental'),
),
]
| [
"django.db.models.ForeignKey",
"django.db.models.ManyToManyField"
] | [((435, 567), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.DO_NOTHING', 'related_name': '"""+"""', 'to': '"""artists.artist"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.DO_NOTHING, related_name='+', to='artists.artist')\n", (452, 567), False, 'from django.db import migrations, models\n'), ((712, 838), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'related_name': '"""_beats_instrumentalcollection_instrumentals_+"""', 'to': '"""beats.Instrumental"""'}), "(blank=True, related_name=\n '_beats_instrumentalcollection_instrumentals_+', to='beats.Instrumental')\n", (734, 838), False, 'from django.db import migrations, models\n')] |
import os
import sys
sys.path.append(os.path.join(sys.path[0], '../'))
from smart_pipeline import Pipeline
data = [1,2,3,4,5]
# Define a data function
def onlyOdd(item):
return False if item%2==0 else True
pl = Pipeline()
# Adds function into pipeline
pl.addDataPipe(onlyOdd)
res = pl(data)
for item in res:
print(item) | [
"os.path.join",
"smart_pipeline.Pipeline"
] | [((220, 230), 'smart_pipeline.Pipeline', 'Pipeline', ([], {}), '()\n', (228, 230), False, 'from smart_pipeline import Pipeline\n'), ((38, 70), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""../"""'], {}), "(sys.path[0], '../')\n", (50, 70), False, 'import os\n')] |
import tensorflow as tf
batch_size = 4
feature_num = 3
csv1 = [
"harden|james|curry",
"wrestbrook|harden|durant",
"paul|towns",
]
csv2 = [
"curry",
"wrestbrook|harden|durant",
"paul|towns",
]
csv3 = [
"harden|james|curry",
"durant",
"paul|towns",
]
csv4 = [
"wrestbrook|harden|durant",
"wrestbrook|harden|durant",
"wrestbrook|harden|durant"
]
csv_s= [csv1,csv2,csv3,csv4]
X = tf.placeholder(shape=[None,feature_num],dtype=tf.string)
one_feature = tf.contrib.layers.sparse_column_with_hash_bucket(
column_name="zhengquan_test",
hash_bucket_size=10,
combiner="sum",
dtype=tf.string
# dtype=tf.dtypes.int32
)
res = tf.contrib.layers.embedding_column(one_feature,
# initializer=my_initializer,
combiner="mean",
dimension=3)
# Besides the per-element loop below, tf.unstack can also be used
# for i in range(batch_size):
# for j in range(feature_num):
# one_feature = X[i][j]
# one_feature = tf.reshape(one_feature,shape=[1])
# split_tag = tf.string_split(one_feature, "|")
# one_sparse = tf.SparseTensor(
# indices=split_tag.indices,
# values= split_tag.values,
# dense_shape=split_tag.dense_shape
# )
#
# current_mapping = {'zhengquan_test': one_sparse}
# one_feature_embedding_res = tf.feature_column.input_layer(current_mapping, res)
# #[[ 0.08187684, 0.22063671, -0.16549297]]
# Using unstack also works, but the first dimension of the placeholder cannot be None; it must be a concrete value, otherwise tf.unstack cannot unpack it
# exp_X = tf.expand_dims(X,axis=-1)
# example_list = tf.unstack(exp_X,axis = 0)
# for one_example in example_list:
# features = tf.unstack(one_example,axis = 0)
# feature = features[0]
# for one_feature in features:
# # one_feature = tf.reshape(one_feature,shape=[1])
# split_tag = tf.string_split(one_feature, "|")
# one_sparse = tf.SparseTensor(
# indices=split_tag.indices,
# values= split_tag.values,
# dense_shape=split_tag.dense_shape
# )
#
# current_mapping = {'zhengquan_test': one_sparse}
# one_feature_embedding_res = tf.feature_column.input_layer(current_mapping, res)
#[[-0.10367388, 0.25915673, -0.00741819]]
def my_function(one_example):
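    # Split each "|"-joined tag string into a SparseTensor and feed it through
    # the hashed embedding column defined above, one feature at a time.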
features = tf.unstack(one_example,axis = 0)
for one_feature in features:
split_tag = tf.string_split(one_feature, "|")
one_sparse = tf.SparseTensor(
indices=split_tag.indices,
values= split_tag.values,
dense_shape=split_tag.dense_shape
)
current_mapping = {'zhengquan_test': one_sparse}
one_feature_embedding_res = tf.feature_column.input_layer(current_mapping, res)
return one_feature_embedding_res
exp_X = tf.expand_dims(X,axis=-1)
res = tf.map_fn(fn=my_function,elems=exp_X,dtype=tf.float32)
print(tf.shape(res))
import pdb
pdb.set_trace()
# res_seq = tf.squeeze(res,squeeze_dims=[-1])
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess_res = sess.run([res],feed_dict={X:csv_s})
print(type(sess_res))
print(sess_res)
| [
"tensorflow.contrib.layers.sparse_column_with_hash_bucket",
"tensorflow.global_variables_initializer",
"tensorflow.contrib.layers.embedding_column",
"tensorflow.Session",
"tensorflow.string_split",
"tensorflow.feature_column.input_layer",
"tensorflow.placeholder",
"tensorflow.shape",
"pdb.set_trace",
"tensorflow.SparseTensor",
"tensorflow.map_fn",
"tensorflow.unstack",
"tensorflow.expand_dims"
] | [((392, 450), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[None, feature_num]', 'dtype': 'tf.string'}), '(shape=[None, feature_num], dtype=tf.string)\n', (406, 450), True, 'import tensorflow as tf\n'), ((464, 601), 'tensorflow.contrib.layers.sparse_column_with_hash_bucket', 'tf.contrib.layers.sparse_column_with_hash_bucket', ([], {'column_name': '"""zhengquan_test"""', 'hash_bucket_size': '(10)', 'combiner': '"""sum"""', 'dtype': 'tf.string'}), "(column_name=\n 'zhengquan_test', hash_bucket_size=10, combiner='sum', dtype=tf.string)\n", (512, 601), True, 'import tensorflow as tf\n'), ((698, 775), 'tensorflow.contrib.layers.embedding_column', 'tf.contrib.layers.embedding_column', (['one_feature'], {'combiner': '"""mean"""', 'dimension': '(3)'}), "(one_feature, combiner='mean', dimension=3)\n", (732, 775), True, 'import tensorflow as tf\n'), ((2888, 2914), 'tensorflow.expand_dims', 'tf.expand_dims', (['X'], {'axis': '(-1)'}), '(X, axis=-1)\n', (2902, 2914), True, 'import tensorflow as tf\n'), ((2920, 2976), 'tensorflow.map_fn', 'tf.map_fn', ([], {'fn': 'my_function', 'elems': 'exp_X', 'dtype': 'tf.float32'}), '(fn=my_function, elems=exp_X, dtype=tf.float32)\n', (2929, 2976), True, 'import tensorflow as tf\n'), ((3007, 3022), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (3020, 3022), False, 'import pdb\n'), ((2406, 2437), 'tensorflow.unstack', 'tf.unstack', (['one_example'], {'axis': '(0)'}), '(one_example, axis=0)\n', (2416, 2437), True, 'import tensorflow as tf\n'), ((2981, 2994), 'tensorflow.shape', 'tf.shape', (['res'], {}), '(res)\n', (2989, 2994), True, 'import tensorflow as tf\n'), ((3076, 3088), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3086, 3088), True, 'import tensorflow as tf\n'), ((2492, 2525), 'tensorflow.string_split', 'tf.string_split', (['one_feature', '"""|"""'], {}), "(one_feature, '|')\n", (2507, 2525), True, 'import tensorflow as tf\n'), ((2547, 2653), 'tensorflow.SparseTensor', 'tf.SparseTensor', ([], {'indices': 'split_tag.indices', 'values': 'split_tag.values', 'dense_shape': 'split_tag.dense_shape'}), '(indices=split_tag.indices, values=split_tag.values,\n dense_shape=split_tag.dense_shape)\n', (2562, 2653), True, 'import tensorflow as tf\n'), ((2790, 2841), 'tensorflow.feature_column.input_layer', 'tf.feature_column.input_layer', (['current_mapping', 'res'], {}), '(current_mapping, res)\n', (2819, 2841), True, 'import tensorflow as tf\n'), ((3111, 3144), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3142, 3144), True, 'import tensorflow as tf\n')] |
"""
Commands for index operations
"""
import os
import re
import sys
import time
from typing import List
from zipfile import ZipFile, ZIP_DEFLATED
from datetime import datetime as DateTime
import click
from mnotes.environment import MnoteEnvironment, pass_env, echo_line, save_global_index_data
from mnotes.notes.index import NoteIndex
from mnotes.notes.markdown_notes import NoteInfo
valid_chars_pattern = re.compile(r"[^a-z0-9\-]")
@click.group(name="index", invoke_without_command=True)
@click.pass_context
@pass_env
def main(env: MnoteEnvironment, ctx: click.core.Context):
""" Manage M-Notes' global directory of indices. Indices represent folders containing indexed notes."""
style = env.config.styles
env.global_index.load_all()
echo_line(" * index mode")
if len(env.global_index.indices) == 0 and ctx.invoked_subcommand != "create":
echo_line(" * there are ", style.warning("no indices"), " in the global directory")
echo_line(" -> to create an index navigate to the folder containing notes you want to add")
echo_line(" -> then use the 'mnote index create <name>' command")
sys.exit()
else:
echo_line(" * there are ", style.visible(f"{len(env.global_index.indices)}"),
" indices in the global directory")
if ctx.invoked_subcommand is None:
# Update the global index
start_time = time.time()
env.global_index.load_all()
end_time = time.time()
click.echo(style.success(f" * updated all indices, took {end_time - start_time:0.2f} seconds"))
click.echo()
echo_line(click.style("Current Indices in Global Directory:", bold=True))
for index in env.global_index.indices.values():
echo_line(" * ", style.visible(index.name), f" ({len(index.notes)} notes): {index.path}")
echo_line()
echo_line(style.visible(" (use 'mnote index reload' to rebuild with checksums)"))
@main.command(name="zip")
@click.argument("names", type=str, nargs=-1)
@pass_env
def zip_cmd(env: MnoteEnvironment, names: List[str]):
"""
Archive an index or multiple/all indices in zip files
Creates archives of the markdown notes (text files only, no resources) of the indices by compressing them into zip
files. The files will be named with the index name and the current date and time and saved in the current
directory. This command can be run from anywhere on the machine, it does not need to be run from inside any of the
index folders.
You can specify a single index by name, several indices, or leave the 'name' argument blank in order to back up
all of them at once.
"""
style = env.config.styles
click.echo()
failed = False
for index_name in names:
if index_name not in env.global_index.indices:
echo_line(style.fail(f"There is no index named '{index_name}' to archive!"))
failed = True
if failed:
return
if not names:
echo_line(style.visible("No index(s) specified, so zipping all of them..."))
names = [i.name for i in env.global_index.indices.values()]
start = time.time()
for name in names:
echo_line()
echo_line(click.style("Zipping index ", bold=True), style.visible(f"'{name}'", bold=True))
index: NoteIndex = env.global_index.indices[name]
now = DateTime.now().strftime("%Y-%m-%d-%H-%M-%S")
output_name = os.path.join(env.cwd, f"{name}-{now}.zip")
with ZipFile(output_name, "w") as zip_handle:
with click.progressbar(index.notes.values()) as notes:
for note in notes:
note: NoteInfo
zip_handle.write(note.file_path,
arcname=os.path.relpath(note.file_path, start=index.path),
compress_type=ZIP_DEFLATED)
end = time.time()
echo_line()
echo_line(style.success(f"Operation completed in {end - start:0.1f} seconds"))
@main.command(name="reload")
@pass_env
def reload(env: MnoteEnvironment):
"""
Rebuild all indices using checksums.
M-Notes by default will verify the integrity of its cached data by looking at the file size and last modified
timestamp to guess at whether the file has changed since it was last read (this is similar to the method which
    rsync uses). However, it's up to the file system to report these values accurately, so this option uses the SHA1
    checksum to rebuild the indices. It's faster than re-reading all of the files, but slower than simply looking at
the file size and timestamps.
"""
style = env.config.styles
start_time = time.time()
env.global_index.load_all(True)
end_time = time.time()
click.echo(style.success(f"Updated all indices with checksums, took {end_time - start_time:0.2f} seconds"))
@main.command(name="delete")
@click.argument("name", type=str)
@pass_env
def delete(env: MnoteEnvironment, name: str):
""" Delete an index from the global directory. """
style = env.config.styles
click.echo()
if name not in env.global_index.indices:
echo_line(style.fail(f"There is no index named '{name}' to remove!"))
return
# If we got to this point we can create the index!
click.echo()
echo_line(style.warning(f"You are about to remove the index named '{name}'", bold=True))
echo_line(style.warning(f"which maps to the folder '{env.cwd}'", bold=True))
click.echo()
if click.confirm(click.style(f"Apply this change?", bold=True)):
click.echo(style.success("User deleted index"))
del env.global_index.index_directory[name]
save_global_index_data(env.global_index)
else:
click.echo(style.fail("User rejected index creation"))
@main.command(name="create")
@click.argument("name", type=str)
@pass_env
def create(env: MnoteEnvironment, name: str):
""" Create a new index in the global directory with the specified name. """
style = env.config.styles
click.echo()
# Check if this folder is already part of another index
if env.index_of_cwd is not None:
echo_line(style.fail(f"The current working directory is already part of an index named "
f"'{env.index_of_cwd.name}'. Indexes cannot be contained by other indexes"))
return
# Check if this index would contain another index
contained = env.indices_in_cwd
if contained:
echo_line(style.fail("The following already-existing indices are subdirectories of the current working "
"directory. You can't create an index here because indexes cannot be contained by other "
"indexes."))
for index in contained:
echo_line(f" * {index.name}: {index.path}")
return
# Check if the name given is valid
if valid_chars_pattern.findall(name):
echo_line("The name ", style.fail(f"'{name}'"), " contains invalid characters for an index name")
click.echo()
echo_line("Index names may contain numbers, lowercase letters, and dashes only. Also consider that shorter "
"names are faster to type. Think of the index name as a nickname or an alias for the folder you"
"are adding to the global directory.")
return
if name in env.global_index.indices:
echo_line("The name ", style.fail(f"'{name}'"), " is already used by another index.")
click.echo()
echo_line("Index names may contain numbers, lowercase letters, and dashes only. Also consider that shorter "
"names are faster to type. Think of the index name as a nickname or an alias for the folder you"
"are adding to the global directory.")
# Check for conflicts before allowing M-Notes to add this as an index
conflicts = env.global_index.find_conflicts(env.cwd)
if conflicts:
echo_line(style.fail("There are ID conflicts which would be created if this folder is merged into the global"
"directory as it is."))
for id_, conflict in conflicts.items():
click.echo()
echo_line(style.warning(f"Conflict for ID {id_}:", bold=True))
for e in conflict.existing:
echo_line(style.visible(f" * Already in global: {e.file_path}"))
for c in conflict.conflicting:
echo_line(style.warning(f" * In this directory: {c.file_path}"))
return
# If we got to this point we can create the index!
click.echo()
echo_line(style.warning(f"You are about to create an index named '{name}'", bold=True))
echo_line(style.warning(f"which will be located in the folder '{env.cwd}'", bold=True))
click.echo()
if click.confirm(click.style(f"Apply this change?", bold=True)):
click.echo(style.success("User created index"))
env.global_index.index_directory[name] = {"path": env.cwd}
save_global_index_data(env.global_index)
else:
click.echo(style.fail("User rejected index creation"))
| [
"mnotes.environment.save_global_index_data",
"mnotes.environment.echo_line",
"zipfile.ZipFile",
"click.argument",
"click.echo",
"click.style",
"datetime.datetime.now",
"time.time",
"os.path.relpath",
"click.group",
"os.path.join",
"sys.exit",
"re.compile"
] | [((413, 439), 're.compile', 're.compile', (['"""[^a-z0-9\\\\-]"""'], {}), "('[^a-z0-9\\\\-]')\n", (423, 439), False, 'import re\n'), ((443, 497), 'click.group', 'click.group', ([], {'name': '"""index"""', 'invoke_without_command': '(True)'}), "(name='index', invoke_without_command=True)\n", (454, 497), False, 'import click\n'), ((1992, 2035), 'click.argument', 'click.argument', (['"""names"""'], {'type': 'str', 'nargs': '(-1)'}), "('names', type=str, nargs=-1)\n", (2006, 2035), False, 'import click\n'), ((4929, 4961), 'click.argument', 'click.argument', (['"""name"""'], {'type': 'str'}), "('name', type=str)\n", (4943, 4961), False, 'import click\n'), ((5853, 5885), 'click.argument', 'click.argument', (['"""name"""'], {'type': 'str'}), "('name', type=str)\n", (5867, 5885), False, 'import click\n'), ((761, 787), 'mnotes.environment.echo_line', 'echo_line', (['""" * index mode"""'], {}), "(' * index mode')\n", (770, 787), False, 'from mnotes.environment import MnoteEnvironment, pass_env, echo_line, save_global_index_data\n'), ((2719, 2731), 'click.echo', 'click.echo', ([], {}), '()\n', (2729, 2731), False, 'import click\n'), ((3166, 3177), 'time.time', 'time.time', ([], {}), '()\n', (3175, 3177), False, 'import time\n'), ((3920, 3931), 'time.time', 'time.time', ([], {}), '()\n', (3929, 3931), False, 'import time\n'), ((3936, 3947), 'mnotes.environment.echo_line', 'echo_line', ([], {}), '()\n', (3945, 3947), False, 'from mnotes.environment import MnoteEnvironment, pass_env, echo_line, save_global_index_data\n'), ((4710, 4721), 'time.time', 'time.time', ([], {}), '()\n', (4719, 4721), False, 'import time\n'), ((4773, 4784), 'time.time', 'time.time', ([], {}), '()\n', (4782, 4784), False, 'import time\n'), ((5107, 5119), 'click.echo', 'click.echo', ([], {}), '()\n', (5117, 5119), False, 'import click\n'), ((5319, 5331), 'click.echo', 'click.echo', ([], {}), '()\n', (5329, 5331), False, 'import click\n'), ((5510, 5522), 'click.echo', 'click.echo', ([], {}), '()\n', (5520, 5522), False, 'import click\n'), ((6056, 6068), 'click.echo', 'click.echo', ([], {}), '()\n', (6066, 6068), False, 'import click\n'), ((8622, 8634), 'click.echo', 'click.echo', ([], {}), '()\n', (8632, 8634), False, 'import click\n'), ((8823, 8835), 'click.echo', 'click.echo', ([], {}), '()\n', (8833, 8835), False, 'import click\n'), ((970, 1074), 'mnotes.environment.echo_line', 'echo_line', (['""" -> to create an index navigate to the folder containing notes you want to add"""'], {}), "(\n ' -> to create an index navigate to the folder containing notes you want to add'\n )\n", (979, 1074), False, 'from mnotes.environment import MnoteEnvironment, pass_env, echo_line, save_global_index_data\n'), ((1073, 1141), 'mnotes.environment.echo_line', 'echo_line', (['""" -> then use the \'mnote index create <name>\' command"""'], {}), '(" -> then use the \'mnote index create <name>\' command")\n', (1082, 1141), False, 'from mnotes.environment import MnoteEnvironment, pass_env, echo_line, save_global_index_data\n'), ((1150, 1160), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1158, 1160), False, 'import sys\n'), ((1407, 1418), 'time.time', 'time.time', ([], {}), '()\n', (1416, 1418), False, 'import time\n'), ((1474, 1485), 'time.time', 'time.time', ([], {}), '()\n', (1483, 1485), False, 'import time\n'), ((1599, 1611), 'click.echo', 'click.echo', ([], {}), '()\n', (1609, 1611), False, 'import click\n'), ((1861, 1872), 'mnotes.environment.echo_line', 'echo_line', ([], {}), '()\n', (1870, 1872), False, 'from mnotes.environment import 
MnoteEnvironment, pass_env, echo_line, save_global_index_data\n'), ((3209, 3220), 'mnotes.environment.echo_line', 'echo_line', ([], {}), '()\n', (3218, 3220), False, 'from mnotes.environment import MnoteEnvironment, pass_env, echo_line, save_global_index_data\n'), ((3461, 3503), 'os.path.join', 'os.path.join', (['env.cwd', 'f"""{name}-{now}.zip"""'], {}), "(env.cwd, f'{name}-{now}.zip')\n", (3473, 3503), False, 'import os\n'), ((5544, 5589), 'click.style', 'click.style', (['f"""Apply this change?"""'], {'bold': '(True)'}), "(f'Apply this change?', bold=True)\n", (5555, 5589), False, 'import click\n'), ((5707, 5747), 'mnotes.environment.save_global_index_data', 'save_global_index_data', (['env.global_index'], {}), '(env.global_index)\n', (5729, 5747), False, 'from mnotes.environment import MnoteEnvironment, pass_env, echo_line, save_global_index_data\n'), ((7066, 7078), 'click.echo', 'click.echo', ([], {}), '()\n', (7076, 7078), False, 'import click\n'), ((7087, 7336), 'mnotes.environment.echo_line', 'echo_line', (['"""Index names may contain numbers, lowercase letters, and dashes only. Also consider that shorter names are faster to type. Think of the index name as a nickname or an alias for the folder youare adding to the global directory."""'], {}), "(\n 'Index names may contain numbers, lowercase letters, and dashes only. Also consider that shorter names are faster to type. Think of the index name as a nickname or an alias for the folder youare adding to the global directory.'\n )\n", (7096, 7336), False, 'from mnotes.environment import MnoteEnvironment, pass_env, echo_line, save_global_index_data\n'), ((7528, 7540), 'click.echo', 'click.echo', ([], {}), '()\n', (7538, 7540), False, 'import click\n'), ((7549, 7798), 'mnotes.environment.echo_line', 'echo_line', (['"""Index names may contain numbers, lowercase letters, and dashes only. Also consider that shorter names are faster to type. Think of the index name as a nickname or an alias for the folder youare adding to the global directory."""'], {}), "(\n 'Index names may contain numbers, lowercase letters, and dashes only. Also consider that shorter names are faster to type. 
Think of the index name as a nickname or an alias for the folder youare adding to the global directory.'\n )\n", (7558, 7798), False, 'from mnotes.environment import MnoteEnvironment, pass_env, echo_line, save_global_index_data\n'), ((8857, 8902), 'click.style', 'click.style', (['f"""Apply this change?"""'], {'bold': '(True)'}), "(f'Apply this change?', bold=True)\n", (8868, 8902), False, 'import click\n'), ((9036, 9076), 'mnotes.environment.save_global_index_data', 'save_global_index_data', (['env.global_index'], {}), '(env.global_index)\n', (9058, 9076), False, 'from mnotes.environment import MnoteEnvironment, pass_env, echo_line, save_global_index_data\n'), ((1630, 1692), 'click.style', 'click.style', (['"""Current Indices in Global Directory:"""'], {'bold': '(True)'}), "('Current Indices in Global Directory:', bold=True)\n", (1641, 1692), False, 'import click\n'), ((3239, 3279), 'click.style', 'click.style', (['"""Zipping index """'], {'bold': '(True)'}), "('Zipping index ', bold=True)\n", (3250, 3279), False, 'import click\n'), ((3517, 3542), 'zipfile.ZipFile', 'ZipFile', (['output_name', '"""w"""'], {}), "(output_name, 'w')\n", (3524, 3542), False, 'from zipfile import ZipFile, ZIP_DEFLATED\n'), ((6811, 6854), 'mnotes.environment.echo_line', 'echo_line', (['f""" * {index.name}: {index.path}"""'], {}), "(f' * {index.name}: {index.path}')\n", (6820, 6854), False, 'from mnotes.environment import MnoteEnvironment, pass_env, echo_line, save_global_index_data\n'), ((8212, 8224), 'click.echo', 'click.echo', ([], {}), '()\n', (8222, 8224), False, 'import click\n'), ((3394, 3408), 'datetime.datetime.now', 'DateTime.now', ([], {}), '()\n', (3406, 3408), True, 'from datetime import datetime as DateTime\n'), ((3793, 3842), 'os.path.relpath', 'os.path.relpath', (['note.file_path'], {'start': 'index.path'}), '(note.file_path, start=index.path)\n', (3808, 3842), False, 'import os\n')] |
import numpy as np
import os
import torch
import torch.nn as nn
import pytorch_lightning as pl
from data.VOCdevkit.vocdata import VOCDataset
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, Resize, ToTensor, Normalize, GaussianBlur
from torchvision.transforms.functional import InterpolationMode
from skimage.measure import label
class EvaluateAttnMaps(pl.callbacks.Callback):
def __init__(self,
voc_root: str,
train_input_height: int,
attn_batch_size: int,
num_workers: int,
threshold: float = 0.6):
# Setup transforms and dataloader pvoc
image_transforms = Compose([Resize((train_input_height, train_input_height)),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
target_transforms = Compose([Resize((train_input_height, train_input_height),
interpolation=InterpolationMode.NEAREST),
ToTensor()])
self.dataset = VOCDataset(root=os.path.join(voc_root, "VOCSegmentation"), image_set="val",
transform=image_transforms, target_transform=target_transforms)
self.loader = DataLoader(self.dataset, batch_size=attn_batch_size, shuffle=False, num_workers=num_workers,
drop_last=True, pin_memory=True)
self.threshold = threshold
def on_validation_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
# Evaluate attention maps.
if pl_module.global_rank == 0 and pl_module.local_rank == 0:
print("\n" + "#" * 20 + "Evaluating attention maps on VOC2012 with threshold: " +
str(self.threshold) + "#" * 20)
jacs_merged_attn = 0
jacs_all_heads = 0
# If teacher is present use teacher attention as it is also used during training
if hasattr(pl_module, 'teacher'):
patch_size = pl_module.teacher.patch_size
model = pl_module.teacher
else:
patch_size = pl_module.model.patch_size
model = pl_module.model
model.eval()
for i, (imgs, maps) in enumerate(self.loader):
w_featmap = imgs.shape[-2] // patch_size
h_featmap = imgs.shape[-1] // patch_size
with torch.no_grad():
attentions = model.get_last_selfattention(imgs.to(pl_module.device))
bs = attentions.shape[0]
attentions = attentions[..., 0, 1:]
# Evaluate two different protocols: merged attention and best head
jacs_merged_attn += self.evaluate_merged_attentions(attentions, bs, w_featmap, h_featmap, patch_size,
maps)
jacs_all_heads += self.evaluate_best_head(attentions, bs, w_featmap, h_featmap, patch_size, maps)
jacs_merged_attn /= len(self.dataset)
jacs_all_heads /= len(self.dataset)
print(f"Merged Jaccard on VOC12: {jacs_merged_attn.item()}")
print(f"All heads Jaccard on VOC12: {jacs_all_heads.item()}")
pl_module.logger.experiment.log_metric('attn_jacs_voc', jacs_merged_attn.item())
pl_module.logger.experiment.log_metric('all_heads_jacs_voc', jacs_all_heads.item())
def evaluate_best_head(self, attentions: torch.Tensor, bs: int, w_featmap: int, h_featmap: int, patch_size: int,
maps: torch.Tensor) -> torch.Tensor:
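        # Keep only the top `threshold` fraction of attention mass per head, upsample
        # to image resolution, and score each ground-truth object with the best head's IoU.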
jacs = 0
nh = attentions.shape[1] # number of heads
# we keep only a certain percentage of the mass
val, idx = torch.sort(attentions)
val /= torch.sum(val, dim=-1, keepdim=True)
cumval = torch.cumsum(val, dim=-1)
th_attn = cumval > (1 - self.threshold)
idx2 = torch.argsort(idx)
for head in range(nh):
th_attn[:, head] = torch.gather(th_attn[:, head], dim=1, index=idx2[:, head])
th_attn = th_attn.reshape(bs, nh, w_featmap, h_featmap).float()
# interpolate
th_attn = nn.functional.interpolate(th_attn, scale_factor=patch_size, mode="nearest").cpu().numpy()
# Calculate IoU for each image
for k, map in enumerate(maps):
jac = 0
objects = np.unique(map)
objects = np.delete(objects, [0, -1])
for o in objects:
masko = map == o
intersection = masko * th_attn[k]
intersection = torch.sum(torch.sum(intersection, dim=-1), dim=-1)
union = (masko + th_attn[k]) > 0
union = torch.sum(torch.sum(union, dim=-1), dim=-1)
jaco = intersection / union
jac += max(jaco)
if len(objects) != 0:
jac /= len(objects)
jacs += jac
return jacs
def evaluate_merged_attentions(self, attentions: torch.Tensor, bs: int, w_featmap: int, h_featmap: int,
patch_size: int, maps: torch.Tensor) -> torch.Tensor:
jacs = 0
# Average attentions
attentions = sum(attentions[:, i] * 1 / attentions.size(1) for i in range(attentions.size(1)))
nh = 1 # number of heads is one as we merged all heads
# Gaussian blurring
attentions = GaussianBlur(7, sigma=(.6))(attentions.reshape(bs * nh, 1, w_featmap, h_featmap))\
.reshape(bs, nh, -1)
# we keep only a certain percentage of the mass
val, idx = torch.sort(attentions)
val /= torch.sum(val, dim=-1, keepdim=True)
cumval = torch.cumsum(val, dim=-1)
th_attn = cumval > (1 - self.threshold)
idx2 = torch.argsort(idx)
th_attn[:, 0] = torch.gather(th_attn[:, 0], dim=1, index=idx2[:, 0])
th_attn = th_attn.reshape(bs, nh, w_featmap, h_featmap).float()
        # remove components that are less than 3 pixels
for j, th_att in enumerate(th_attn):
labelled = label(th_att.cpu().numpy())
for k in range(1, np.max(labelled) + 1):
mask = labelled == k
if np.sum(mask) <= 2:
th_attn[j, 0][mask] = 0
# interpolate
th_attn = nn.functional.interpolate(th_attn, scale_factor=patch_size, mode="nearest").cpu().numpy()
# Calculate IoU for each image
for k, map in enumerate(maps):
gt_fg_mask = (map != 0.).float()
intersection = gt_fg_mask * th_attn[k]
intersection = torch.sum(torch.sum(intersection, dim=-1), dim=-1)
union = (gt_fg_mask + th_attn[k]) > 0
union = torch.sum(torch.sum(union, dim=-1), dim=-1)
jacs += intersection / union
return jacs
| [
"numpy.delete",
"torch.gather",
"torch.utils.data.DataLoader",
"os.path.join",
"numpy.sum",
"torch.argsort",
"torchvision.transforms.ToTensor",
"torch.cumsum",
"numpy.max",
"torch.nn.functional.interpolate",
"torchvision.transforms.GaussianBlur",
"torchvision.transforms.Normalize",
"torch.no_grad",
"torch.sum",
"torch.sort",
"numpy.unique",
"torchvision.transforms.Resize"
] | [((1354, 1483), 'torch.utils.data.DataLoader', 'DataLoader', (['self.dataset'], {'batch_size': 'attn_batch_size', 'shuffle': '(False)', 'num_workers': 'num_workers', 'drop_last': '(True)', 'pin_memory': '(True)'}), '(self.dataset, batch_size=attn_batch_size, shuffle=False,\n num_workers=num_workers, drop_last=True, pin_memory=True)\n', (1364, 1483), False, 'from torch.utils.data import DataLoader\n'), ((3871, 3893), 'torch.sort', 'torch.sort', (['attentions'], {}), '(attentions)\n', (3881, 3893), False, 'import torch\n'), ((3909, 3945), 'torch.sum', 'torch.sum', (['val'], {'dim': '(-1)', 'keepdim': '(True)'}), '(val, dim=-1, keepdim=True)\n', (3918, 3945), False, 'import torch\n'), ((3963, 3988), 'torch.cumsum', 'torch.cumsum', (['val'], {'dim': '(-1)'}), '(val, dim=-1)\n', (3975, 3988), False, 'import torch\n'), ((4052, 4070), 'torch.argsort', 'torch.argsort', (['idx'], {}), '(idx)\n', (4065, 4070), False, 'import torch\n'), ((5736, 5758), 'torch.sort', 'torch.sort', (['attentions'], {}), '(attentions)\n', (5746, 5758), False, 'import torch\n'), ((5774, 5810), 'torch.sum', 'torch.sum', (['val'], {'dim': '(-1)', 'keepdim': '(True)'}), '(val, dim=-1, keepdim=True)\n', (5783, 5810), False, 'import torch\n'), ((5828, 5853), 'torch.cumsum', 'torch.cumsum', (['val'], {'dim': '(-1)'}), '(val, dim=-1)\n', (5840, 5853), False, 'import torch\n'), ((5917, 5935), 'torch.argsort', 'torch.argsort', (['idx'], {}), '(idx)\n', (5930, 5935), False, 'import torch\n'), ((5960, 6012), 'torch.gather', 'torch.gather', (['th_attn[:, 0]'], {'dim': '(1)', 'index': 'idx2[:, 0]'}), '(th_attn[:, 0], dim=1, index=idx2[:, 0])\n', (5972, 6012), False, 'import torch\n'), ((4133, 4191), 'torch.gather', 'torch.gather', (['th_attn[:, head]'], {'dim': '(1)', 'index': 'idx2[:, head]'}), '(th_attn[:, head], dim=1, index=idx2[:, head])\n', (4145, 4191), False, 'import torch\n'), ((4515, 4529), 'numpy.unique', 'np.unique', (['map'], {}), '(map)\n', (4524, 4529), True, 'import numpy as np\n'), ((4552, 4579), 'numpy.delete', 'np.delete', (['objects', '[0, -1]'], {}), '(objects, [0, -1])\n', (4561, 4579), True, 'import numpy as np\n'), ((712, 760), 'torchvision.transforms.Resize', 'Resize', (['(train_input_height, train_input_height)'], {}), '((train_input_height, train_input_height))\n', (718, 760), False, 'from torchvision.transforms import Compose, Resize, ToTensor, Normalize, GaussianBlur\n'), ((798, 808), 'torchvision.transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (806, 808), False, 'from torchvision.transforms import Compose, Resize, ToTensor, Normalize, GaussianBlur\n'), ((846, 910), 'torchvision.transforms.Normalize', 'Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (855, 910), False, 'from torchvision.transforms import Compose, Resize, ToTensor, Normalize, GaussianBlur\n'), ((950, 1044), 'torchvision.transforms.Resize', 'Resize', (['(train_input_height, train_input_height)'], {'interpolation': 'InterpolationMode.NEAREST'}), '((train_input_height, train_input_height), interpolation=\n InterpolationMode.NEAREST)\n', (956, 1044), False, 'from torchvision.transforms import Compose, Resize, ToTensor, Normalize, GaussianBlur\n'), ((1122, 1132), 'torchvision.transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (1130, 1132), False, 'from torchvision.transforms import Compose, Resize, ToTensor, Normalize, GaussianBlur\n'), ((1174, 1215), 'os.path.join', 'os.path.join', (['voc_root', '"""VOCSegmentation"""'], {}), "(voc_root, 
'VOCSegmentation')\n", (1186, 1215), False, 'import os\n'), ((6753, 6784), 'torch.sum', 'torch.sum', (['intersection'], {'dim': '(-1)'}), '(intersection, dim=-1)\n', (6762, 6784), False, 'import torch\n'), ((6874, 6898), 'torch.sum', 'torch.sum', (['union'], {'dim': '(-1)'}), '(union, dim=-1)\n', (6883, 6898), False, 'import torch\n'), ((2522, 2537), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2535, 2537), False, 'import torch\n'), ((4734, 4765), 'torch.sum', 'torch.sum', (['intersection'], {'dim': '(-1)'}), '(intersection, dim=-1)\n', (4743, 4765), False, 'import torch\n'), ((4858, 4882), 'torch.sum', 'torch.sum', (['union'], {'dim': '(-1)'}), '(union, dim=-1)\n', (4867, 4882), False, 'import torch\n'), ((5544, 5570), 'torchvision.transforms.GaussianBlur', 'GaussianBlur', (['(7)'], {'sigma': '(0.6)'}), '(7, sigma=0.6)\n', (5556, 5570), False, 'from torchvision.transforms import Compose, Resize, ToTensor, Normalize, GaussianBlur\n'), ((6268, 6284), 'numpy.max', 'np.max', (['labelled'], {}), '(labelled)\n', (6274, 6284), True, 'import numpy as np\n'), ((6347, 6359), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (6353, 6359), True, 'import numpy as np\n'), ((4304, 4379), 'torch.nn.functional.interpolate', 'nn.functional.interpolate', (['th_attn'], {'scale_factor': 'patch_size', 'mode': '"""nearest"""'}), "(th_attn, scale_factor=patch_size, mode='nearest')\n", (4329, 4379), True, 'import torch.nn as nn\n'), ((6451, 6526), 'torch.nn.functional.interpolate', 'nn.functional.interpolate', (['th_attn'], {'scale_factor': 'patch_size', 'mode': '"""nearest"""'}), "(th_attn, scale_factor=patch_size, mode='nearest')\n", (6476, 6526), True, 'import torch.nn as nn\n')] |
import socket, threading
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = "127.0.0.1"
port = 9090
server.bind((host, port))
server.listen(5)
clients = list()
end = list()
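# "clients" holds every connected socket; "end" tracks clients that already
# have a receiver thread so get_mes() does not start duplicates.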
def get():
while True:
client, addr = server.accept()
clients.append(client)
        print(f'server connected from {addr}: number of clients: {len(clients)}', end = '\n')
def recv_data(client):
while True:
try:
indata = client.recv(1024)
except Exception:
clients.remove(client)
end.remove(client)
            print(f'Client disconnected: number of clients: {len(clients)}', end = '\n')
break
print(indata.decode('utf-8'))
for i in clients:
if i != client:
i.send(indata)
def send_mes():
while True:
print('')
outdata = input('')
print()
for client in clients:
client.send(f"Сервер: {outdata}".encode('utf-8)'))
def get_mes():
while True:
for i in clients:
if i in end:
continue
index = threading.Thread(target=recv_data, args=(i,))
index.start()
end.append(i)
t1 = threading.Thread(target=send_mes, name='input')
t1.start()
t2 = threading.Thread(target=get_mes, name='out')
t2.start()
t3 = threading.Thread(target=get, name='get')
t3.start()
t2.join()
for i in clients:
i.close() | [
"threading.Thread",
"socket.socket"
] | [((35, 84), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (48, 84), False, 'import socket, threading\n'), ((1215, 1262), 'threading.Thread', 'threading.Thread', ([], {'target': 'send_mes', 'name': '"""input"""'}), "(target=send_mes, name='input')\n", (1231, 1262), False, 'import socket, threading\n'), ((1279, 1323), 'threading.Thread', 'threading.Thread', ([], {'target': 'get_mes', 'name': '"""out"""'}), "(target=get_mes, name='out')\n", (1295, 1323), False, 'import socket, threading\n'), ((1111, 1156), 'threading.Thread', 'threading.Thread', ([], {'target': 'recv_data', 'args': '(i,)'}), '(target=recv_data, args=(i,))\n', (1127, 1156), False, 'import socket, threading\n')] |
# Autoencoder using convolutional layers
# Dataset : MNIST
# Requires : PIL, matplotlib
# Inspired by https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
# To compress data : net.encode(data)
# To decompress data : net.decode(data)
# To mutate data : net(data)
import os
import numpy as np
import matplotlib.pyplot as plt
import torch as T
from torch import nn
from torch import cuda
import torch.nn.functional as F
from torchvision import transforms
import torchvision
from torchvision.datasets import MNIST
from torch.nn import ReLU, Linear, Sigmoid, Conv2d, ConvTranspose2d, MaxPool2d
import PIL.Image as im
from utils import dataset_dir, models_dir
# Displays an image (1 dim tensor)
# t has values in [0, 1]
def imshow(t):
transforms.ToPILImage()(t).show()
# Show in matplotlib
def gridshow(img):
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.show()
class Net(nn.Module):
def __init__(self, hidden_size, latent_size):
super().__init__()
self.latent_size = latent_size
self.encodeConv1 = Conv2d(1, 16, 4)
self.encodeConv2 = Conv2d(16, 32, 2)
self.encodeFC1 = Linear(800, hidden_size)
self.encodeFC2 = Linear(hidden_size, self.latent_size)
self.decodeFC1 = Linear(self.latent_size, 13 * 13)
self.decodeConv1 = ConvTranspose2d(1, 1, 2)
self.decodeFC2 = Linear(14 * 14, 28 * 28)
def encode(self, x):
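        # 1x28x28 -> conv(4)+pool -> 16x12x12 -> conv(2)+pool -> 32x5x5 (=800) -> FC -> latent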
x = MaxPool2d(2)(F.relu(self.encodeConv1(x)))
x = MaxPool2d(2)(F.relu(self.encodeConv2(x)))
x = x.view(-1, 800)
x = F.relu(self.encodeFC1(x))
x = T.sigmoid(self.encodeFC2(x))
return x
def decode(self, x):
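        # latent -> FC -> 1x13x13 -> transposed conv(2) -> 1x14x14 -> FC -> 1x28x28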
x = F.relu(self.decodeFC1(x))
x = x.view(-1, 1, 13, 13)
x = F.relu(self.decodeConv1(x))
x = x.view(-1, 14 * 14)
x = T.sigmoid(self.decodeFC2(x))
x = x.view(-1, 1, 28, 28)
return x
def forward(self, x):
return self.decode(self.encode(x))
# Hyper params
latent_size = 10
hidden_size = 256
epochs = 3
batch_size = 10
learning_rate = .0002
train_or_test = 'test'
path = models_dir + '/deep_autoencoder'
# Training device
device = T.device('cuda:0' if cuda.is_available() else 'cpu')
# Dataset
trans = transforms.ToTensor()
dataset = MNIST(root=dataset_dir, train=True, download=True, transform=trans)
loader = T.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=0)
# Model
net = Net(hidden_size, latent_size)
net.to(device)
if train_or_test == 'train':
# Load
if os.path.exists(path):
net.load_state_dict(T.load(path))
print('Model loaded')
# Train
optim = T.optim.Adam(net.parameters(), lr=learning_rate, betas=(.9, .999))
criterion = nn.MSELoss()
for e in range(epochs):
avg_loss = 0
for i, data in enumerate(loader, 0):
# Only inputs (no labels)
inputs, _ = data
# Zero the parameter gradients
optim.zero_grad()
# Predictions
x = inputs.to(device)
y = net(x)
# Back prop
loss = criterion(y, x)
loss.backward()
optim.step()
avg_loss += loss.item()
# Stats
print_freq = 100
if i % print_freq == print_freq - 1:
print(f'Epoch {e + 1:2d}, Batch {i + 1:5d}, Loss {avg_loss / print_freq:.3f}')
avg_loss = 0.0
# Save
T.save(net.state_dict(), path)
print('Model trained and saved')
else:
# Load
net.load_state_dict(T.load(path))
# Test
dataiter = iter(loader)
images, _ = dataiter.next()
# Show ground truth
gridshow(torchvision.utils.make_grid(images))
# Show predictions
with T.no_grad():
preds = T.cat([net(images[i].view(1, 1, 28, 28).to(device)).view(1, 1, 28, 28).cpu() for i in range(batch_size)])
preds = T.tensor(preds)
gridshow(torchvision.utils.make_grid(preds))
| [
"torch.nn.MSELoss",
"matplotlib.pyplot.show",
"torch.nn.ConvTranspose2d",
"torch.utils.data.DataLoader",
"torch.load",
"torch.nn.Conv2d",
"os.path.exists",
"numpy.transpose",
"torchvision.transforms.ToPILImage",
"torchvision.utils.make_grid",
"torch.cuda.is_available",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.no_grad",
"torchvision.datasets.MNIST",
"torch.tensor",
"torchvision.transforms.ToTensor"
] | [((2274, 2295), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2293, 2295), False, 'from torchvision import transforms\n'), ((2306, 2373), 'torchvision.datasets.MNIST', 'MNIST', ([], {'root': 'dataset_dir', 'train': '(True)', 'download': '(True)', 'transform': 'trans'}), '(root=dataset_dir, train=True, download=True, transform=trans)\n', (2311, 2373), False, 'from torchvision.datasets import MNIST\n'), ((2383, 2471), 'torch.utils.data.DataLoader', 'T.utils.data.DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(0)'}), '(dataset, batch_size=batch_size, shuffle=True,\n num_workers=0)\n', (2406, 2471), True, 'import torch as T\n'), ((901, 911), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (909, 911), True, 'import matplotlib.pyplot as plt\n'), ((2576, 2596), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2590, 2596), False, 'import os\n'), ((2778, 2790), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (2788, 2790), False, 'from torch import nn\n'), ((3978, 3993), 'torch.tensor', 'T.tensor', (['preds'], {}), '(preds)\n', (3986, 3993), True, 'import torch as T\n'), ((865, 895), 'numpy.transpose', 'np.transpose', (['npimg', '(1, 2, 0)'], {}), '(npimg, (1, 2, 0))\n', (877, 895), True, 'import numpy as np\n'), ((1081, 1097), 'torch.nn.Conv2d', 'Conv2d', (['(1)', '(16)', '(4)'], {}), '(1, 16, 4)\n', (1087, 1097), False, 'from torch.nn import ReLU, Linear, Sigmoid, Conv2d, ConvTranspose2d, MaxPool2d\n'), ((1125, 1142), 'torch.nn.Conv2d', 'Conv2d', (['(16)', '(32)', '(2)'], {}), '(16, 32, 2)\n', (1131, 1142), False, 'from torch.nn import ReLU, Linear, Sigmoid, Conv2d, ConvTranspose2d, MaxPool2d\n'), ((1168, 1192), 'torch.nn.Linear', 'Linear', (['(800)', 'hidden_size'], {}), '(800, hidden_size)\n', (1174, 1192), False, 'from torch.nn import ReLU, Linear, Sigmoid, Conv2d, ConvTranspose2d, MaxPool2d\n'), ((1218, 1255), 'torch.nn.Linear', 'Linear', (['hidden_size', 'self.latent_size'], {}), '(hidden_size, self.latent_size)\n', (1224, 1255), False, 'from torch.nn import ReLU, Linear, Sigmoid, Conv2d, ConvTranspose2d, MaxPool2d\n'), ((1282, 1315), 'torch.nn.Linear', 'Linear', (['self.latent_size', '(13 * 13)'], {}), '(self.latent_size, 13 * 13)\n', (1288, 1315), False, 'from torch.nn import ReLU, Linear, Sigmoid, Conv2d, ConvTranspose2d, MaxPool2d\n'), ((1343, 1367), 'torch.nn.ConvTranspose2d', 'ConvTranspose2d', (['(1)', '(1)', '(2)'], {}), '(1, 1, 2)\n', (1358, 1367), False, 'from torch.nn import ReLU, Linear, Sigmoid, Conv2d, ConvTranspose2d, MaxPool2d\n'), ((1393, 1417), 'torch.nn.Linear', 'Linear', (['(14 * 14)', '(28 * 28)'], {}), '(14 * 14, 28 * 28)\n', (1399, 1417), False, 'from torch.nn import ReLU, Linear, Sigmoid, Conv2d, ConvTranspose2d, MaxPool2d\n'), ((2223, 2242), 'torch.cuda.is_available', 'cuda.is_available', ([], {}), '()\n', (2240, 2242), False, 'from torch import cuda\n'), ((3637, 3649), 'torch.load', 'T.load', (['path'], {}), '(path)\n', (3643, 3649), True, 'import torch as T\n'), ((3761, 3796), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['images'], {}), '(images)\n', (3788, 3796), False, 'import torchvision\n'), ((3831, 3842), 'torch.no_grad', 'T.no_grad', ([], {}), '()\n', (3840, 3842), True, 'import torch as T\n'), ((4007, 4041), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['preds'], {}), '(preds)\n', (4034, 4041), False, 'import torchvision\n'), ((1456, 1468), 'torch.nn.MaxPool2d', 'MaxPool2d', (['(2)'], {}), '(2)\n', (1465, 1468), 
False, 'from torch.nn import ReLU, Linear, Sigmoid, Conv2d, ConvTranspose2d, MaxPool2d\n'), ((1510, 1522), 'torch.nn.MaxPool2d', 'MaxPool2d', (['(2)'], {}), '(2)\n', (1519, 1522), False, 'from torch.nn import ReLU, Linear, Sigmoid, Conv2d, ConvTranspose2d, MaxPool2d\n'), ((2626, 2638), 'torch.load', 'T.load', (['path'], {}), '(path)\n', (2632, 2638), True, 'import torch as T\n'), ((750, 773), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (771, 773), False, 'from torchvision import transforms\n')] |
# Monocyte - Search and Destroy unwanted AWS Resources relentlessly.
# Copyright 2015 Immobilien Scout GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from boto import ec2
from boto.exception import EC2ResponseError
from monocyte.handler import Resource, Handler
class Instance(Handler):
VALID_TARGET_STATES = ["terminated", "shutting-down"]
def fetch_region_names(self):
return [region.name for region in ec2.regions()]
def fetch_unwanted_resources(self):
for region_name in self.region_names:
connection = ec2.connect_to_region(region_name)
resources = connection.get_only_instances() or []
for resource in resources:
resource_wrapper = Resource(resource=resource,
resource_type=self.resource_type,
resource_id=resource.id,
creation_date=resource.launch_time,
region=region_name)
if resource.id in self.ignored_resources:
self.logger.info('IGNORE ' + self.to_string(resource_wrapper))
continue
yield resource_wrapper
def to_string(self, resource):
return "ec2 instance found in {region.name}, " \
"with identifier {id}, instance type is {instance_type}, created {launch_time}, " \
"dnsname is {public_dns_name}, key {key_name}, with state {_state}".format(**vars(resource.wrapped))
def delete(self, resource):
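        # With dry_run=True, EC2 validates the request without terminating anything and
        # answers with HTTP 412 (Precondition Failed); surface that as a Warning instead.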
if resource.wrapped.state in Instance.VALID_TARGET_STATES:
raise Warning("state '{0}' is a valid target state, skipping".format(
resource.wrapped.state))
connection = ec2.connect_to_region(resource.region)
if self.dry_run:
try:
connection.terminate_instances([resource.wrapped.id], dry_run=True)
except EC2ResponseError as exc:
if exc.status == 412: # Precondition Failed
raise Warning("Termination {message}".format(**vars(exc)))
raise
else:
instances = connection.terminate_instances([resource.wrapped.id], dry_run=False)
self.logger.info("Initiating shutdown sequence for {0}".format(instances))
return instances
class Volume(Handler):
def fetch_region_names(self):
return [region.name for region in ec2.regions()]
def fetch_unwanted_resources(self):
for region_name in self.region_names:
connection = ec2.connect_to_region(region_name)
resources = connection.get_all_volumes() or []
for resource in resources:
resource_wrapper = Resource(resource=resource,
resource_type=self.resource_type,
resource_id=resource.id,
creation_date=resource.create_time,
region=region_name)
if resource.id in self.ignored_resources:
self.logger.info('IGNORE ' + self.to_string(resource_wrapper))
continue
yield resource_wrapper
def to_string(self, resource):
return "ebs volume found in {region.name}, " \
"with identifier {id}, created {create_time}, " \
"with state {status}".format(**vars(resource.wrapped))
def delete(self, resource):
connection = ec2.connect_to_region(resource.region)
if self.dry_run:
try:
connection.delete_volume(resource.wrapped.id, dry_run=True)
except EC2ResponseError as exc:
if exc.status == 412: # Precondition Failed
warnings.warn(Warning("Termination {message}".format(**vars(exc))))
raise
else:
self.logger.info("Initiating deletion of EBS volume {0}".format(resource.wrapped.id))
connection.delete_volume(resource.wrapped.id, dry_run=False)
| [
"monocyte.handler.Resource",
"boto.ec2.regions",
"boto.ec2.connect_to_region"
] | [((2322, 2360), 'boto.ec2.connect_to_region', 'ec2.connect_to_region', (['resource.region'], {}), '(resource.region)\n', (2343, 2360), False, 'from boto import ec2\n'), ((4121, 4159), 'boto.ec2.connect_to_region', 'ec2.connect_to_region', (['resource.region'], {}), '(resource.region)\n', (4142, 4159), False, 'from boto import ec2\n'), ((1071, 1105), 'boto.ec2.connect_to_region', 'ec2.connect_to_region', (['region_name'], {}), '(region_name)\n', (1092, 1105), False, 'from boto import ec2\n'), ((3145, 3179), 'boto.ec2.connect_to_region', 'ec2.connect_to_region', (['region_name'], {}), '(region_name)\n', (3166, 3179), False, 'from boto import ec2\n'), ((944, 957), 'boto.ec2.regions', 'ec2.regions', ([], {}), '()\n', (955, 957), False, 'from boto import ec2\n'), ((1242, 1389), 'monocyte.handler.Resource', 'Resource', ([], {'resource': 'resource', 'resource_type': 'self.resource_type', 'resource_id': 'resource.id', 'creation_date': 'resource.launch_time', 'region': 'region_name'}), '(resource=resource, resource_type=self.resource_type, resource_id=\n resource.id, creation_date=resource.launch_time, region=region_name)\n', (1250, 1389), False, 'from monocyte.handler import Resource, Handler\n'), ((3018, 3031), 'boto.ec2.regions', 'ec2.regions', ([], {}), '()\n', (3029, 3031), False, 'from boto import ec2\n'), ((3313, 3460), 'monocyte.handler.Resource', 'Resource', ([], {'resource': 'resource', 'resource_type': 'self.resource_type', 'resource_id': 'resource.id', 'creation_date': 'resource.create_time', 'region': 'region_name'}), '(resource=resource, resource_type=self.resource_type, resource_id=\n resource.id, creation_date=resource.create_time, region=region_name)\n', (3321, 3460), False, 'from monocyte.handler import Resource, Handler\n')] |
from pathlib import Path
import requests
import string
import urllib.request
import random
import json
import sys
def acapyla(quote, voicename="willfromafar"):
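    # Register a throwaway email, fetch a nonce from the demo site, call the
    # Acapela synthesizer endpoint, then download the returned MP3 into
    # ~/.dominae/out/tts/ and return the 8-character file stem.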
try:
voiceid = "enu_" + voicename + "_22k_ns.bvcu"
except IndexError:
voiceid = "enu_willfromafar_22k_ns.bvcu"
letters = string.ascii_lowercase
premail = ''.join(random.choice(letters) for i in range(64))
email = premail + "@gmail.com"
noncerl = "https://acapelavoices.acapela-group.com/index/getnonce/"
noncedata = {'googleid':email}
noncer = requests.post(url = noncerl, data = noncedata)
nonce = noncer.text[10:50]
synthrl = "http://www.acapela-group.com:8080/webservices/1-34-01-Mobility/Synthesizer"
synthdata = "req_voice=" + voiceid + "&cl_pwd=&cl_vers=1-30&req_echo=ON&cl_login=AcapelaGroup&req_comment=%7B%22nonce%22%3A%22" + nonce + "%22%2C%22user%22%3A%22" + email + "%22%7D&req_text=" + quote + "&cl_env=ACAPELA_VOICES&prot_vers=2&cl_app=AcapelaGroup_WebDemo_Android"
headers = {'content-type': 'application/x-www-form-urlencoded'}
synthr = requests.post(url = synthrl, data = synthdata, headers = headers)
minuspre = synthr.text[synthr.text.find('http://'):]
minussuf = minuspre.split(".mp3", 1)[0]
synthresult = minussuf + ".mp3"
urllib.request.urlretrieve(synthresult, str(Path.home()) + "/.dominae/out/tts/" + email[:8] + ".mp3")
return email[:8] | [
"requests.post",
"random.choice",
"pathlib.Path.home"
] | [((545, 587), 'requests.post', 'requests.post', ([], {'url': 'noncerl', 'data': 'noncedata'}), '(url=noncerl, data=noncedata)\n', (558, 587), False, 'import requests\n'), ((1074, 1133), 'requests.post', 'requests.post', ([], {'url': 'synthrl', 'data': 'synthdata', 'headers': 'headers'}), '(url=synthrl, data=synthdata, headers=headers)\n', (1087, 1133), False, 'import requests\n'), ((347, 369), 'random.choice', 'random.choice', (['letters'], {}), '(letters)\n', (360, 369), False, 'import random\n'), ((1325, 1336), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (1334, 1336), False, 'from pathlib import Path\n')] |
# -*- coding: utf-8 -*-
"""
This module
"""
import attr
import typing
from ..core.model import (
Property, Resource, Tag, GetAtt, TypeHint, TypeCheck,
)
from ..core.constant import AttrMeta
#--- Property declaration ---
@attr.s
class PropJobDefinitionAuthorizationConfig(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.AuthorizationConfig"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-authorizationconfig.html
Property Document:
- ``p_AccessPointId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-authorizationconfig.html#cfn-batch-jobdefinition-authorizationconfig-accesspointid
- ``p_Iam``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-authorizationconfig.html#cfn-batch-jobdefinition-authorizationconfig-iam
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.AuthorizationConfig"
p_AccessPointId: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "AccessPointId"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-authorizationconfig.html#cfn-batch-jobdefinition-authorizationconfig-accesspointid"""
p_Iam: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Iam"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-authorizationconfig.html#cfn-batch-jobdefinition-authorizationconfig-iam"""
@attr.s
class PropJobDefinitionResourceRequirement(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.ResourceRequirement"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-resourcerequirement.html
Property Document:
- ``p_Type``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-resourcerequirement.html#cfn-batch-jobdefinition-resourcerequirement-type
- ``p_Value``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-resourcerequirement.html#cfn-batch-jobdefinition-resourcerequirement-value
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.ResourceRequirement"
p_Type: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Type"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-resourcerequirement.html#cfn-batch-jobdefinition-resourcerequirement-type"""
p_Value: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Value"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-resourcerequirement.html#cfn-batch-jobdefinition-resourcerequirement-value"""
@attr.s
class PropJobDefinitionEnvironment(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.Environment"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-environment.html
Property Document:
- ``p_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-environment.html#cfn-batch-jobdefinition-environment-name
- ``p_Value``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-environment.html#cfn-batch-jobdefinition-environment-value
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.Environment"
p_Name: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Name"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-environment.html#cfn-batch-jobdefinition-environment-name"""
p_Value: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Value"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-environment.html#cfn-batch-jobdefinition-environment-value"""
@attr.s
class PropJobDefinitionVolumesHost(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.VolumesHost"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-volumeshost.html
Property Document:
- ``p_SourcePath``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-volumeshost.html#cfn-batch-jobdefinition-volumeshost-sourcepath
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.VolumesHost"
p_SourcePath: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "SourcePath"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-volumeshost.html#cfn-batch-jobdefinition-volumeshost-sourcepath"""
@attr.s
class PropJobQueueComputeEnvironmentOrder(Property):
"""
AWS Object Type = "AWS::Batch::JobQueue.ComputeEnvironmentOrder"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobqueue-computeenvironmentorder.html
Property Document:
- ``rp_ComputeEnvironment``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobqueue-computeenvironmentorder.html#cfn-batch-jobqueue-computeenvironmentorder-computeenvironment
- ``rp_Order``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobqueue-computeenvironmentorder.html#cfn-batch-jobqueue-computeenvironmentorder-order
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobQueue.ComputeEnvironmentOrder"
rp_ComputeEnvironment: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "ComputeEnvironment"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobqueue-computeenvironmentorder.html#cfn-batch-jobqueue-computeenvironmentorder-computeenvironment"""
rp_Order: int = attr.ib(
default=None,
validator=attr.validators.instance_of(int),
metadata={AttrMeta.PROPERTY_NAME: "Order"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobqueue-computeenvironmentorder.html#cfn-batch-jobqueue-computeenvironmentorder-order"""
@attr.s
class PropJobDefinitionSecret(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.Secret"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-secret.html
Property Document:
- ``rp_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-secret.html#cfn-batch-jobdefinition-secret-name
- ``rp_ValueFrom``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-secret.html#cfn-batch-jobdefinition-secret-valuefrom
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.Secret"
rp_Name: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Name"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-secret.html#cfn-batch-jobdefinition-secret-name"""
rp_ValueFrom: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "ValueFrom"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-secret.html#cfn-batch-jobdefinition-secret-valuefrom"""
@attr.s
class PropJobDefinitionNetworkConfiguration(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.NetworkConfiguration"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-networkconfiguration.html
Property Document:
- ``p_AssignPublicIp``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-networkconfiguration.html#cfn-batch-jobdefinition-containerproperties-networkconfiguration-assignpublicip
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.NetworkConfiguration"
p_AssignPublicIp: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "AssignPublicIp"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-networkconfiguration.html#cfn-batch-jobdefinition-containerproperties-networkconfiguration-assignpublicip"""
@attr.s
class PropJobDefinitionLogConfiguration(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.LogConfiguration"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-logconfiguration.html
Property Document:
- ``rp_LogDriver``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-logconfiguration.html#cfn-batch-jobdefinition-containerproperties-logconfiguration-logdriver
- ``p_Options``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-logconfiguration.html#cfn-batch-jobdefinition-containerproperties-logconfiguration-options
- ``p_SecretOptions``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-logconfiguration.html#cfn-batch-jobdefinition-containerproperties-logconfiguration-secretoptions
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.LogConfiguration"
rp_LogDriver: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "LogDriver"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-logconfiguration.html#cfn-batch-jobdefinition-containerproperties-logconfiguration-logdriver"""
p_Options: dict = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(dict)),
metadata={AttrMeta.PROPERTY_NAME: "Options"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-logconfiguration.html#cfn-batch-jobdefinition-containerproperties-logconfiguration-options"""
p_SecretOptions: typing.List[typing.Union['PropJobDefinitionSecret', dict]] = attr.ib(
default=None,
converter=PropJobDefinitionSecret.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobDefinitionSecret), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "SecretOptions"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-logconfiguration.html#cfn-batch-jobdefinition-containerproperties-logconfiguration-secretoptions"""
@attr.s
class PropComputeEnvironmentLaunchTemplateSpecification(Property):
"""
AWS Object Type = "AWS::Batch::ComputeEnvironment.LaunchTemplateSpecification"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-launchtemplatespecification.html
Property Document:
- ``p_LaunchTemplateId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-launchtemplatespecification.html#cfn-batch-computeenvironment-launchtemplatespecification-launchtemplateid
- ``p_LaunchTemplateName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-launchtemplatespecification.html#cfn-batch-computeenvironment-launchtemplatespecification-launchtemplatename
- ``p_Version``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-launchtemplatespecification.html#cfn-batch-computeenvironment-launchtemplatespecification-version
"""
AWS_OBJECT_TYPE = "AWS::Batch::ComputeEnvironment.LaunchTemplateSpecification"
p_LaunchTemplateId: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "LaunchTemplateId"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-launchtemplatespecification.html#cfn-batch-computeenvironment-launchtemplatespecification-launchtemplateid"""
p_LaunchTemplateName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "LaunchTemplateName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-launchtemplatespecification.html#cfn-batch-computeenvironment-launchtemplatespecification-launchtemplatename"""
p_Version: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Version"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-launchtemplatespecification.html#cfn-batch-computeenvironment-launchtemplatespecification-version"""
@attr.s
class PropJobDefinitionMountPoints(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.MountPoints"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-mountpoints.html
Property Document:
- ``p_ContainerPath``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-mountpoints.html#cfn-batch-jobdefinition-mountpoints-containerpath
- ``p_ReadOnly``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-mountpoints.html#cfn-batch-jobdefinition-mountpoints-readonly
- ``p_SourceVolume``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-mountpoints.html#cfn-batch-jobdefinition-mountpoints-sourcevolume
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.MountPoints"
p_ContainerPath: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ContainerPath"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-mountpoints.html#cfn-batch-jobdefinition-mountpoints-containerpath"""
p_ReadOnly: bool = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
metadata={AttrMeta.PROPERTY_NAME: "ReadOnly"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-mountpoints.html#cfn-batch-jobdefinition-mountpoints-readonly"""
p_SourceVolume: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "SourceVolume"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-mountpoints.html#cfn-batch-jobdefinition-mountpoints-sourcevolume"""
@attr.s
class PropSchedulingPolicyShareAttributes(Property):
"""
AWS Object Type = "AWS::Batch::SchedulingPolicy.ShareAttributes"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-shareattributes.html
Property Document:
- ``p_ShareIdentifier``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-shareattributes.html#cfn-batch-schedulingpolicy-shareattributes-shareidentifier
- ``p_WeightFactor``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-shareattributes.html#cfn-batch-schedulingpolicy-shareattributes-weightfactor
"""
AWS_OBJECT_TYPE = "AWS::Batch::SchedulingPolicy.ShareAttributes"
p_ShareIdentifier: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ShareIdentifier"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-shareattributes.html#cfn-batch-schedulingpolicy-shareattributes-shareidentifier"""
p_WeightFactor: float = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(float)),
metadata={AttrMeta.PROPERTY_NAME: "WeightFactor"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-shareattributes.html#cfn-batch-schedulingpolicy-shareattributes-weightfactor"""
@attr.s
class PropJobDefinitionEvaluateOnExit(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.EvaluateOnExit"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-evaluateonexit.html
Property Document:
- ``rp_Action``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-evaluateonexit.html#cfn-batch-jobdefinition-evaluateonexit-action
- ``p_OnExitCode``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-evaluateonexit.html#cfn-batch-jobdefinition-evaluateonexit-onexitcode
- ``p_OnReason``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-evaluateonexit.html#cfn-batch-jobdefinition-evaluateonexit-onreason
- ``p_OnStatusReason``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-evaluateonexit.html#cfn-batch-jobdefinition-evaluateonexit-onstatusreason
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.EvaluateOnExit"
rp_Action: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Action"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-evaluateonexit.html#cfn-batch-jobdefinition-evaluateonexit-action"""
p_OnExitCode: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "OnExitCode"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-evaluateonexit.html#cfn-batch-jobdefinition-evaluateonexit-onexitcode"""
p_OnReason: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "OnReason"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-evaluateonexit.html#cfn-batch-jobdefinition-evaluateonexit-onreason"""
p_OnStatusReason: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "OnStatusReason"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-evaluateonexit.html#cfn-batch-jobdefinition-evaluateonexit-onstatusreason"""
@attr.s
class PropJobDefinitionUlimit(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.Ulimit"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-ulimit.html
Property Document:
- ``rp_HardLimit``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-ulimit.html#cfn-batch-jobdefinition-ulimit-hardlimit
- ``rp_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-ulimit.html#cfn-batch-jobdefinition-ulimit-name
- ``rp_SoftLimit``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-ulimit.html#cfn-batch-jobdefinition-ulimit-softlimit
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.Ulimit"
rp_HardLimit: int = attr.ib(
default=None,
validator=attr.validators.instance_of(int),
metadata={AttrMeta.PROPERTY_NAME: "HardLimit"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-ulimit.html#cfn-batch-jobdefinition-ulimit-hardlimit"""
rp_Name: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Name"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-ulimit.html#cfn-batch-jobdefinition-ulimit-name"""
rp_SoftLimit: int = attr.ib(
default=None,
validator=attr.validators.instance_of(int),
metadata={AttrMeta.PROPERTY_NAME: "SoftLimit"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-ulimit.html#cfn-batch-jobdefinition-ulimit-softlimit"""
@attr.s
class PropJobDefinitionFargatePlatformConfiguration(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.FargatePlatformConfiguration"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-fargateplatformconfiguration.html
Property Document:
- ``p_PlatformVersion``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-fargateplatformconfiguration.html#cfn-batch-jobdefinition-containerproperties-fargateplatformconfiguration-platformversion
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.FargatePlatformConfiguration"
p_PlatformVersion: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "PlatformVersion"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-fargateplatformconfiguration.html#cfn-batch-jobdefinition-containerproperties-fargateplatformconfiguration-platformversion"""
@attr.s
class PropJobDefinitionTimeout(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.Timeout"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-timeout.html
Property Document:
- ``p_AttemptDurationSeconds``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-timeout.html#cfn-batch-jobdefinition-timeout-attemptdurationseconds
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.Timeout"
p_AttemptDurationSeconds: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "AttemptDurationSeconds"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-timeout.html#cfn-batch-jobdefinition-timeout-attemptdurationseconds"""
@attr.s
class PropJobDefinitionTmpfs(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.Tmpfs"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-tmpfs.html
Property Document:
- ``rp_ContainerPath``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-tmpfs.html#cfn-batch-jobdefinition-tmpfs-containerpath
- ``rp_Size``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-tmpfs.html#cfn-batch-jobdefinition-tmpfs-size
- ``p_MountOptions``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-tmpfs.html#cfn-batch-jobdefinition-tmpfs-mountoptions
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.Tmpfs"
rp_ContainerPath: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "ContainerPath"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-tmpfs.html#cfn-batch-jobdefinition-tmpfs-containerpath"""
rp_Size: int = attr.ib(
default=None,
validator=attr.validators.instance_of(int),
metadata={AttrMeta.PROPERTY_NAME: "Size"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-tmpfs.html#cfn-batch-jobdefinition-tmpfs-size"""
p_MountOptions: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "MountOptions"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-tmpfs.html#cfn-batch-jobdefinition-tmpfs-mountoptions"""
@attr.s
class PropJobDefinitionEfsVolumeConfiguration(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.EfsVolumeConfiguration"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-efsvolumeconfiguration.html
Property Document:
- ``rp_FileSystemId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-efsvolumeconfiguration.html#cfn-batch-jobdefinition-efsvolumeconfiguration-filesystemid
- ``p_AuthorizationConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-efsvolumeconfiguration.html#cfn-batch-jobdefinition-efsvolumeconfiguration-authorizationconfig
- ``p_RootDirectory``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-efsvolumeconfiguration.html#cfn-batch-jobdefinition-efsvolumeconfiguration-rootdirectory
- ``p_TransitEncryption``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-efsvolumeconfiguration.html#cfn-batch-jobdefinition-efsvolumeconfiguration-transitencryption
- ``p_TransitEncryptionPort``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-efsvolumeconfiguration.html#cfn-batch-jobdefinition-efsvolumeconfiguration-transitencryptionport
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.EfsVolumeConfiguration"
rp_FileSystemId: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "FileSystemId"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-efsvolumeconfiguration.html#cfn-batch-jobdefinition-efsvolumeconfiguration-filesystemid"""
p_AuthorizationConfig: typing.Union['PropJobDefinitionAuthorizationConfig', dict] = attr.ib(
default=None,
converter=PropJobDefinitionAuthorizationConfig.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionAuthorizationConfig)),
metadata={AttrMeta.PROPERTY_NAME: "AuthorizationConfig"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-efsvolumeconfiguration.html#cfn-batch-jobdefinition-efsvolumeconfiguration-authorizationconfig"""
p_RootDirectory: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "RootDirectory"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-efsvolumeconfiguration.html#cfn-batch-jobdefinition-efsvolumeconfiguration-rootdirectory"""
p_TransitEncryption: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "TransitEncryption"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-efsvolumeconfiguration.html#cfn-batch-jobdefinition-efsvolumeconfiguration-transitencryption"""
p_TransitEncryptionPort: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "TransitEncryptionPort"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-efsvolumeconfiguration.html#cfn-batch-jobdefinition-efsvolumeconfiguration-transitencryptionport"""
@attr.s
class PropJobDefinitionDevice(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.Device"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-device.html
Property Document:
- ``p_ContainerPath``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-device.html#cfn-batch-jobdefinition-device-containerpath
- ``p_HostPath``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-device.html#cfn-batch-jobdefinition-device-hostpath
- ``p_Permissions``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-device.html#cfn-batch-jobdefinition-device-permissions
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.Device"
p_ContainerPath: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ContainerPath"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-device.html#cfn-batch-jobdefinition-device-containerpath"""
p_HostPath: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "HostPath"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-device.html#cfn-batch-jobdefinition-device-hostpath"""
p_Permissions: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Permissions"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-device.html#cfn-batch-jobdefinition-device-permissions"""
@attr.s
class PropComputeEnvironmentEc2ConfigurationObject(Property):
"""
AWS Object Type = "AWS::Batch::ComputeEnvironment.Ec2ConfigurationObject"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-ec2configurationobject.html
Property Document:
- ``rp_ImageType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-ec2configurationobject.html#cfn-batch-computeenvironment-ec2configurationobject-imagetype
- ``p_ImageIdOverride``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-ec2configurationobject.html#cfn-batch-computeenvironment-ec2configurationobject-imageidoverride
"""
AWS_OBJECT_TYPE = "AWS::Batch::ComputeEnvironment.Ec2ConfigurationObject"
rp_ImageType: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "ImageType"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-ec2configurationobject.html#cfn-batch-computeenvironment-ec2configurationobject-imagetype"""
p_ImageIdOverride: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ImageIdOverride"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-ec2configurationobject.html#cfn-batch-computeenvironment-ec2configurationobject-imageidoverride"""
@attr.s
class PropJobDefinitionVolumes(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.Volumes"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-volumes.html
Property Document:
- ``p_EfsVolumeConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-volumes.html#cfn-batch-jobdefinition-volumes-efsvolumeconfiguration
- ``p_Host``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-volumes.html#cfn-batch-jobdefinition-volumes-host
- ``p_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-volumes.html#cfn-batch-jobdefinition-volumes-name
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.Volumes"
p_EfsVolumeConfiguration: typing.Union['PropJobDefinitionEfsVolumeConfiguration', dict] = attr.ib(
default=None,
converter=PropJobDefinitionEfsVolumeConfiguration.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionEfsVolumeConfiguration)),
metadata={AttrMeta.PROPERTY_NAME: "EfsVolumeConfiguration"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-volumes.html#cfn-batch-jobdefinition-volumes-efsvolumeconfiguration"""
p_Host: typing.Union['PropJobDefinitionVolumesHost', dict] = attr.ib(
default=None,
converter=PropJobDefinitionVolumesHost.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionVolumesHost)),
metadata={AttrMeta.PROPERTY_NAME: "Host"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-volumes.html#cfn-batch-jobdefinition-volumes-host"""
p_Name: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Name"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-volumes.html#cfn-batch-jobdefinition-volumes-name"""
@attr.s
class PropSchedulingPolicyFairsharePolicy(Property):
"""
AWS Object Type = "AWS::Batch::SchedulingPolicy.FairsharePolicy"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-fairsharepolicy.html
Property Document:
- ``p_ComputeReservation``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-fairsharepolicy.html#cfn-batch-schedulingpolicy-fairsharepolicy-computereservation
- ``p_ShareDecaySeconds``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-fairsharepolicy.html#cfn-batch-schedulingpolicy-fairsharepolicy-sharedecayseconds
- ``p_ShareDistribution``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-fairsharepolicy.html#cfn-batch-schedulingpolicy-fairsharepolicy-sharedistribution
"""
AWS_OBJECT_TYPE = "AWS::Batch::SchedulingPolicy.FairsharePolicy"
p_ComputeReservation: float = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(float)),
metadata={AttrMeta.PROPERTY_NAME: "ComputeReservation"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-fairsharepolicy.html#cfn-batch-schedulingpolicy-fairsharepolicy-computereservation"""
p_ShareDecaySeconds: float = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(float)),
metadata={AttrMeta.PROPERTY_NAME: "ShareDecaySeconds"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-fairsharepolicy.html#cfn-batch-schedulingpolicy-fairsharepolicy-sharedecayseconds"""
p_ShareDistribution: typing.List[typing.Union['PropSchedulingPolicyShareAttributes', dict]] = attr.ib(
default=None,
converter=PropSchedulingPolicyShareAttributes.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropSchedulingPolicyShareAttributes), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "ShareDistribution"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-schedulingpolicy-fairsharepolicy.html#cfn-batch-schedulingpolicy-fairsharepolicy-sharedistribution"""
@attr.s
class PropComputeEnvironmentComputeResources(Property):
"""
AWS Object Type = "AWS::Batch::ComputeEnvironment.ComputeResources"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html
Property Document:
- ``rp_MaxvCpus``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-maxvcpus
- ``rp_Subnets``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-subnets
- ``rp_Type``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-type
- ``p_AllocationStrategy``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-allocationstrategy
- ``p_BidPercentage``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-bidpercentage
- ``p_DesiredvCpus``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-desiredvcpus
- ``p_Ec2Configuration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-ec2configuration
- ``p_Ec2KeyPair``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-ec2keypair
- ``p_ImageId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-imageid
- ``p_InstanceRole``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-instancerole
- ``p_InstanceTypes``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-instancetypes
- ``p_LaunchTemplate``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-launchtemplate
- ``p_MinvCpus``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-minvcpus
- ``p_PlacementGroup``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-placementgroup
- ``p_SecurityGroupIds``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-securitygroupids
- ``p_SpotIamFleetRole``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-spotiamfleetrole
- ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-tags
"""
AWS_OBJECT_TYPE = "AWS::Batch::ComputeEnvironment.ComputeResources"
rp_MaxvCpus: int = attr.ib(
default=None,
validator=attr.validators.instance_of(int),
metadata={AttrMeta.PROPERTY_NAME: "MaxvCpus"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-maxvcpus"""
rp_Subnets: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list)),
metadata={AttrMeta.PROPERTY_NAME: "Subnets"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-subnets"""
rp_Type: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Type"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-type"""
p_AllocationStrategy: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "AllocationStrategy"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-allocationstrategy"""
p_BidPercentage: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "BidPercentage"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-bidpercentage"""
p_DesiredvCpus: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "DesiredvCpus"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-desiredvcpus"""
p_Ec2Configuration: typing.List[typing.Union['PropComputeEnvironmentEc2ConfigurationObject', dict]] = attr.ib(
default=None,
converter=PropComputeEnvironmentEc2ConfigurationObject.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropComputeEnvironmentEc2ConfigurationObject), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Ec2Configuration"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-ec2configuration"""
p_Ec2KeyPair: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Ec2KeyPair"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-ec2keypair"""
p_ImageId: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ImageId"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-imageid"""
p_InstanceRole: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "InstanceRole"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-instancerole"""
p_InstanceTypes: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "InstanceTypes"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-instancetypes"""
p_LaunchTemplate: typing.Union['PropComputeEnvironmentLaunchTemplateSpecification', dict] = attr.ib(
default=None,
converter=PropComputeEnvironmentLaunchTemplateSpecification.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropComputeEnvironmentLaunchTemplateSpecification)),
metadata={AttrMeta.PROPERTY_NAME: "LaunchTemplate"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-launchtemplate"""
p_MinvCpus: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "MinvCpus"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-minvcpus"""
p_PlacementGroup: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "PlacementGroup"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-placementgroup"""
p_SecurityGroupIds: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "SecurityGroupIds"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-securitygroupids"""
p_SpotIamFleetRole: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "SpotIamFleetRole"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-spotiamfleetrole"""
p_Tags: dict = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(dict)),
metadata={AttrMeta.PROPERTY_NAME: "Tags"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-tags"""
@attr.s
class PropJobDefinitionRetryStrategy(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.RetryStrategy"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-retrystrategy.html
Property Document:
- ``p_Attempts``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-retrystrategy.html#cfn-batch-jobdefinition-retrystrategy-attempts
- ``p_EvaluateOnExit``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-retrystrategy.html#cfn-batch-jobdefinition-retrystrategy-evaluateonexit
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.RetryStrategy"
p_Attempts: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "Attempts"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-retrystrategy.html#cfn-batch-jobdefinition-retrystrategy-attempts"""
p_EvaluateOnExit: typing.List[typing.Union['PropJobDefinitionEvaluateOnExit', dict]] = attr.ib(
default=None,
converter=PropJobDefinitionEvaluateOnExit.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobDefinitionEvaluateOnExit), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "EvaluateOnExit"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-retrystrategy.html#cfn-batch-jobdefinition-retrystrategy-evaluateonexit"""
@attr.s
class PropJobDefinitionLinuxParameters(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.LinuxParameters"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html
Property Document:
- ``p_Devices``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-devices
- ``p_InitProcessEnabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-initprocessenabled
- ``p_MaxSwap``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-maxswap
- ``p_SharedMemorySize``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-sharedmemorysize
- ``p_Swappiness``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-swappiness
- ``p_Tmpfs``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-tmpfs
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.LinuxParameters"
p_Devices: typing.List[typing.Union['PropJobDefinitionDevice', dict]] = attr.ib(
default=None,
converter=PropJobDefinitionDevice.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobDefinitionDevice), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Devices"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-devices"""
p_InitProcessEnabled: bool = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
metadata={AttrMeta.PROPERTY_NAME: "InitProcessEnabled"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-initprocessenabled"""
p_MaxSwap: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "MaxSwap"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-maxswap"""
p_SharedMemorySize: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "SharedMemorySize"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-sharedmemorysize"""
p_Swappiness: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "Swappiness"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-swappiness"""
p_Tmpfs: typing.List[typing.Union['PropJobDefinitionTmpfs', dict]] = attr.ib(
default=None,
converter=PropJobDefinitionTmpfs.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobDefinitionTmpfs), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Tmpfs"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-linuxparameters.html#cfn-batch-jobdefinition-containerproperties-linuxparameters-tmpfs"""
@attr.s
class PropJobDefinitionContainerProperties(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.ContainerProperties"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html
Property Document:
- ``rp_Image``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-image
- ``p_Command``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-command
- ``p_Environment``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-environment
- ``p_ExecutionRoleArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-executionrolearn
- ``p_FargatePlatformConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-fargateplatformconfiguration
- ``p_InstanceType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-instancetype
- ``p_JobRoleArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-jobrolearn
- ``p_LinuxParameters``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-linuxparameters
- ``p_LogConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-logconfiguration
- ``p_Memory``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-memory
- ``p_MountPoints``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-mountpoints
- ``p_NetworkConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-networkconfiguration
- ``p_Privileged``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-privileged
- ``p_ReadonlyRootFilesystem``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-readonlyrootfilesystem
- ``p_ResourceRequirements``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-resourcerequirements
- ``p_Secrets``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-secrets
- ``p_Ulimits``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-ulimits
- ``p_User``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-user
- ``p_Vcpus``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-vcpus
- ``p_Volumes``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-volumes
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.ContainerProperties"
rp_Image: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Image"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-image"""
p_Command: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Command"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-command"""
p_Environment: typing.List[typing.Union['PropJobDefinitionEnvironment', dict]] = attr.ib(
default=None,
converter=PropJobDefinitionEnvironment.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobDefinitionEnvironment), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Environment"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-environment"""
p_ExecutionRoleArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ExecutionRoleArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-executionrolearn"""
p_FargatePlatformConfiguration: typing.Union['PropJobDefinitionFargatePlatformConfiguration', dict] = attr.ib(
default=None,
converter=PropJobDefinitionFargatePlatformConfiguration.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionFargatePlatformConfiguration)),
metadata={AttrMeta.PROPERTY_NAME: "FargatePlatformConfiguration"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-fargateplatformconfiguration"""
p_InstanceType: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "InstanceType"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-instancetype"""
p_JobRoleArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "JobRoleArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-jobrolearn"""
p_LinuxParameters: typing.Union['PropJobDefinitionLinuxParameters', dict] = attr.ib(
default=None,
converter=PropJobDefinitionLinuxParameters.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionLinuxParameters)),
metadata={AttrMeta.PROPERTY_NAME: "LinuxParameters"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-linuxparameters"""
p_LogConfiguration: typing.Union['PropJobDefinitionLogConfiguration', dict] = attr.ib(
default=None,
converter=PropJobDefinitionLogConfiguration.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionLogConfiguration)),
metadata={AttrMeta.PROPERTY_NAME: "LogConfiguration"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-logconfiguration"""
p_Memory: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "Memory"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-memory"""
p_MountPoints: typing.List[typing.Union['PropJobDefinitionMountPoints', dict]] = attr.ib(
default=None,
converter=PropJobDefinitionMountPoints.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobDefinitionMountPoints), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "MountPoints"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-mountpoints"""
p_NetworkConfiguration: typing.Union['PropJobDefinitionNetworkConfiguration', dict] = attr.ib(
default=None,
converter=PropJobDefinitionNetworkConfiguration.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionNetworkConfiguration)),
metadata={AttrMeta.PROPERTY_NAME: "NetworkConfiguration"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-networkconfiguration"""
p_Privileged: bool = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
metadata={AttrMeta.PROPERTY_NAME: "Privileged"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-privileged"""
p_ReadonlyRootFilesystem: bool = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
metadata={AttrMeta.PROPERTY_NAME: "ReadonlyRootFilesystem"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-readonlyrootfilesystem"""
p_ResourceRequirements: typing.List[typing.Union['PropJobDefinitionResourceRequirement', dict]] = attr.ib(
default=None,
converter=PropJobDefinitionResourceRequirement.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobDefinitionResourceRequirement), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "ResourceRequirements"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-resourcerequirements"""
p_Secrets: typing.List[typing.Union['PropJobDefinitionSecret', dict]] = attr.ib(
default=None,
converter=PropJobDefinitionSecret.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobDefinitionSecret), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Secrets"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-secrets"""
p_Ulimits: typing.List[typing.Union['PropJobDefinitionUlimit', dict]] = attr.ib(
default=None,
converter=PropJobDefinitionUlimit.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobDefinitionUlimit), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Ulimits"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-ulimits"""
p_User: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "User"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-user"""
p_Vcpus: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "Vcpus"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-vcpus"""
p_Volumes: typing.List[typing.Union['PropJobDefinitionVolumes', dict]] = attr.ib(
default=None,
converter=PropJobDefinitionVolumes.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobDefinitionVolumes), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Volumes"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-volumes"""
@attr.s
class PropJobDefinitionNodeRangeProperty(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.NodeRangeProperty"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-noderangeproperty.html
Property Document:
- ``rp_TargetNodes``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-noderangeproperty.html#cfn-batch-jobdefinition-noderangeproperty-targetnodes
- ``p_Container``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-noderangeproperty.html#cfn-batch-jobdefinition-noderangeproperty-container
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.NodeRangeProperty"
rp_TargetNodes: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "TargetNodes"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-noderangeproperty.html#cfn-batch-jobdefinition-noderangeproperty-targetnodes"""
p_Container: typing.Union['PropJobDefinitionContainerProperties', dict] = attr.ib(
default=None,
converter=PropJobDefinitionContainerProperties.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionContainerProperties)),
metadata={AttrMeta.PROPERTY_NAME: "Container"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-noderangeproperty.html#cfn-batch-jobdefinition-noderangeproperty-container"""
@attr.s
class PropJobDefinitionNodeProperties(Property):
"""
AWS Object Type = "AWS::Batch::JobDefinition.NodeProperties"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-nodeproperties.html
Property Document:
- ``rp_MainNode``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-nodeproperties.html#cfn-batch-jobdefinition-nodeproperties-mainnode
- ``rp_NodeRangeProperties``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-nodeproperties.html#cfn-batch-jobdefinition-nodeproperties-noderangeproperties
- ``rp_NumNodes``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-nodeproperties.html#cfn-batch-jobdefinition-nodeproperties-numnodes
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition.NodeProperties"
rp_MainNode: int = attr.ib(
default=None,
validator=attr.validators.instance_of(int),
metadata={AttrMeta.PROPERTY_NAME: "MainNode"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-nodeproperties.html#cfn-batch-jobdefinition-nodeproperties-mainnode"""
rp_NodeRangeProperties: typing.List[typing.Union['PropJobDefinitionNodeRangeProperty', dict]] = attr.ib(
default=None,
converter=PropJobDefinitionNodeRangeProperty.from_list,
validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobDefinitionNodeRangeProperty), iterable_validator=attr.validators.instance_of(list)),
metadata={AttrMeta.PROPERTY_NAME: "NodeRangeProperties"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-nodeproperties.html#cfn-batch-jobdefinition-nodeproperties-noderangeproperties"""
rp_NumNodes: int = attr.ib(
default=None,
validator=attr.validators.instance_of(int),
metadata={AttrMeta.PROPERTY_NAME: "NumNodes"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-nodeproperties.html#cfn-batch-jobdefinition-nodeproperties-numnodes"""
#--- Resource declaration ---
@attr.s
class JobQueue(Resource):
"""
AWS Object Type = "AWS::Batch::JobQueue"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html
Property Document:
- ``rp_ComputeEnvironmentOrder``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-computeenvironmentorder
- ``rp_Priority``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-priority
- ``p_JobQueueName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-jobqueuename
- ``p_SchedulingPolicyArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-schedulingpolicyarn
- ``p_State``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-state
- ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-tags
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobQueue"
rp_ComputeEnvironmentOrder: typing.List[typing.Union['PropJobQueueComputeEnvironmentOrder', dict]] = attr.ib(
default=None,
converter=PropJobQueueComputeEnvironmentOrder.from_list,
validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropJobQueueComputeEnvironmentOrder), iterable_validator=attr.validators.instance_of(list)),
metadata={AttrMeta.PROPERTY_NAME: "ComputeEnvironmentOrder"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-computeenvironmentorder"""
rp_Priority: int = attr.ib(
default=None,
validator=attr.validators.instance_of(int),
metadata={AttrMeta.PROPERTY_NAME: "Priority"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-priority"""
p_JobQueueName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "JobQueueName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-jobqueuename"""
p_SchedulingPolicyArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "SchedulingPolicyArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-schedulingpolicyarn"""
p_State: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "State"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-state"""
p_Tags: dict = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(dict)),
metadata={AttrMeta.PROPERTY_NAME: "Tags"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html#cfn-batch-jobqueue-tags"""
@attr.s
class JobDefinition(Resource):
"""
AWS Object Type = "AWS::Batch::JobDefinition"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html
Property Document:
- ``rp_Type``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-type
- ``p_ContainerProperties``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-containerproperties
- ``p_JobDefinitionName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-jobdefinitionname
- ``p_NodeProperties``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-nodeproperties
- ``p_Parameters``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-parameters
- ``p_PlatformCapabilities``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-platformcapabilities
- ``p_PropagateTags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-propagatetags
- ``p_RetryStrategy``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-retrystrategy
- ``p_SchedulingPriority``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-schedulingpriority
- ``p_Timeout``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-timeout
- ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-tags
"""
AWS_OBJECT_TYPE = "AWS::Batch::JobDefinition"
rp_Type: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Type"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-type"""
p_ContainerProperties: typing.Union['PropJobDefinitionContainerProperties', dict] = attr.ib(
default=None,
converter=PropJobDefinitionContainerProperties.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionContainerProperties)),
metadata={AttrMeta.PROPERTY_NAME: "ContainerProperties"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-containerproperties"""
p_JobDefinitionName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "JobDefinitionName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-jobdefinitionname"""
p_NodeProperties: typing.Union['PropJobDefinitionNodeProperties', dict] = attr.ib(
default=None,
converter=PropJobDefinitionNodeProperties.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionNodeProperties)),
metadata={AttrMeta.PROPERTY_NAME: "NodeProperties"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-nodeproperties"""
p_Parameters: dict = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(dict)),
metadata={AttrMeta.PROPERTY_NAME: "Parameters"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-parameters"""
p_PlatformCapabilities: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "PlatformCapabilities"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-platformcapabilities"""
p_PropagateTags: bool = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
metadata={AttrMeta.PROPERTY_NAME: "PropagateTags"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-propagatetags"""
p_RetryStrategy: typing.Union['PropJobDefinitionRetryStrategy', dict] = attr.ib(
default=None,
converter=PropJobDefinitionRetryStrategy.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionRetryStrategy)),
metadata={AttrMeta.PROPERTY_NAME: "RetryStrategy"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-retrystrategy"""
p_SchedulingPriority: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "SchedulingPriority"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-schedulingpriority"""
p_Timeout: typing.Union['PropJobDefinitionTimeout', dict] = attr.ib(
default=None,
converter=PropJobDefinitionTimeout.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropJobDefinitionTimeout)),
metadata={AttrMeta.PROPERTY_NAME: "Timeout"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-timeout"""
p_Tags: dict = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(dict)),
metadata={AttrMeta.PROPERTY_NAME: "Tags"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html#cfn-batch-jobdefinition-tags"""
@attr.s
class SchedulingPolicy(Resource):
"""
AWS Object Type = "AWS::Batch::SchedulingPolicy"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-schedulingpolicy.html
Property Document:
- ``p_FairsharePolicy``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-schedulingpolicy.html#cfn-batch-schedulingpolicy-fairsharepolicy
- ``p_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-schedulingpolicy.html#cfn-batch-schedulingpolicy-name
- ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-schedulingpolicy.html#cfn-batch-schedulingpolicy-tags
"""
AWS_OBJECT_TYPE = "AWS::Batch::SchedulingPolicy"
p_FairsharePolicy: typing.Union['PropSchedulingPolicyFairsharePolicy', dict] = attr.ib(
default=None,
converter=PropSchedulingPolicyFairsharePolicy.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropSchedulingPolicyFairsharePolicy)),
metadata={AttrMeta.PROPERTY_NAME: "FairsharePolicy"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-schedulingpolicy.html#cfn-batch-schedulingpolicy-fairsharepolicy"""
p_Name: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Name"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-schedulingpolicy.html#cfn-batch-schedulingpolicy-name"""
p_Tags: typing.Dict[str, TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_mapping(key_validator=attr.validators.instance_of(str), value_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type))),
metadata={AttrMeta.PROPERTY_NAME: "Tags"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-schedulingpolicy.html#cfn-batch-schedulingpolicy-tags"""
@property
def rv_Arn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-schedulingpolicy.html#aws-resource-batch-schedulingpolicy-return-values"""
return GetAtt(resource=self, attr_name="Arn")
@attr.s
class ComputeEnvironment(Resource):
"""
AWS Object Type = "AWS::Batch::ComputeEnvironment"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html
Property Document:
- ``rp_Type``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-type
- ``p_ComputeEnvironmentName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-computeenvironmentname
- ``p_ComputeResources``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-computeresources
- ``p_ServiceRole``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-servicerole
- ``p_State``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-state
- ``p_UnmanagedvCpus``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-unmanagedvcpus
- ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-tags
"""
AWS_OBJECT_TYPE = "AWS::Batch::ComputeEnvironment"
rp_Type: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Type"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-type"""
p_ComputeEnvironmentName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ComputeEnvironmentName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-computeenvironmentname"""
p_ComputeResources: typing.Union['PropComputeEnvironmentComputeResources', dict] = attr.ib(
default=None,
converter=PropComputeEnvironmentComputeResources.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropComputeEnvironmentComputeResources)),
metadata={AttrMeta.PROPERTY_NAME: "ComputeResources"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-computeresources"""
p_ServiceRole: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ServiceRole"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-servicerole"""
p_State: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "State"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-state"""
p_UnmanagedvCpus: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "UnmanagedvCpus"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-unmanagedvcpus"""
p_Tags: dict = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(dict)),
metadata={AttrMeta.PROPERTY_NAME: "Tags"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html#cfn-batch-computeenvironment-tags"""
| [
"attr.validators.instance_of"
] | [((6867, 6924), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (6894, 6924), False, 'import attr\n'), ((7264, 7296), 'attr.validators.instance_of', 'attr.validators.instance_of', (['int'], {}), '(int)\n', (7291, 7296), False, 'import attr\n'), ((8313, 8370), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (8340, 8370), False, 'import attr\n'), ((8681, 8738), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (8708, 8738), False, 'import attr\n'), ((11330, 11387), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (11357, 11387), False, 'import attr\n'), ((20333, 20390), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (20360, 20390), False, 'import attr\n'), ((22833, 22865), 'attr.validators.instance_of', 'attr.validators.instance_of', (['int'], {}), '(int)\n', (22860, 22865), False, 'import attr\n'), ((23181, 23238), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (23208, 23238), False, 'import attr\n'), ((23530, 23562), 'attr.validators.instance_of', 'attr.validators.instance_of', (['int'], {}), '(int)\n', (23557, 23562), False, 'import attr\n'), ((26930, 26987), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (26957, 26987), False, 'import attr\n'), ((27290, 27322), 'attr.validators.instance_of', 'attr.validators.instance_of', (['int'], {}), '(int)\n', (27317, 27322), False, 'import attr\n'), ((29670, 29727), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (29697, 29727), False, 'import attr\n'), ((35083, 35140), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (35110, 35140), False, 'import attr\n'), ((44730, 44762), 'attr.validators.instance_of', 'attr.validators.instance_of', (['int'], {}), '(int)\n', (44757, 44762), False, 'import attr\n'), ((45623, 45680), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (45650, 45680), False, 'import attr\n'), ((63736, 63793), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (63763, 63793), False, 'import attr\n'), ((74840, 74897), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (74867, 74897), False, 'import attr\n'), ((76724, 76756), 'attr.validators.instance_of', 'attr.validators.instance_of', (['int'], {}), '(int)\n', (76751, 76756), False, 'import attr\n'), ((77714, 77746), 'attr.validators.instance_of', 'attr.validators.instance_of', (['int'], {}), '(int)\n', (77741, 77746), False, 'import attr\n'), ((79922, 79954), 'attr.validators.instance_of', 'attr.validators.instance_of', (['int'], {}), '(int)\n', (79949, 79954), False, 'import attr\n'), ((83849, 83906), 
'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (83876, 83906), False, 'import attr\n'), ((92531, 92588), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (92558, 92588), False, 'import attr\n'), ((1141, 1198), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (1168, 1198), False, 'import attr\n'), ((1572, 1629), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (1599, 1629), False, 'import attr\n'), ((2763, 2820), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (2790, 2820), False, 'import attr\n'), ((3178, 3235), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (3205, 3235), False, 'import attr\n'), ((4309, 4366), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (4336, 4366), False, 'import attr\n'), ((4708, 4765), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (4735, 4765), False, 'import attr\n'), ((5666, 5723), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (5693, 5723), False, 'import attr\n'), ((9762, 9819), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (9789, 9819), False, 'import attr\n'), ((11772, 11805), 'attr.validators.instance_of', 'attr.validators.instance_of', (['dict'], {}), '(dict)\n', (11799, 11805), False, 'import attr\n'), ((14029, 14086), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (14056, 14086), False, 'import attr\n'), ((14507, 14564), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (14534, 14564), False, 'import attr\n'), ((14978, 15035), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (15005, 15035), False, 'import attr\n'), ((16361, 16418), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (16388, 16418), False, 'import attr\n'), ((16763, 16796), 'attr.validators.instance_of', 'attr.validators.instance_of', (['bool'], {}), '(bool)\n', (16790, 16796), False, 'import attr\n'), ((17153, 17210), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (17180, 17210), False, 'import attr\n'), ((18385, 18442), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (18412, 18442), False, 'import attr\n'), ((18810, 18844), 'attr.validators.instance_of', 'attr.validators.instance_of', (['float'], {}), '(float)\n', (18837, 18844), False, 'import attr\n'), ((20746, 20803), 
'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (20773, 20803), False, 'import attr\n'), ((21166, 21223), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (21193, 21223), False, 'import attr\n'), ((21588, 21645), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (21615, 21645), False, 'import attr\n'), ((24637, 24694), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (24664, 24694), False, 'import attr\n'), ((25682, 25714), 'attr.validators.instance_of', 'attr.validators.instance_of', (['int'], {}), '(int)\n', (25709, 25714), False, 'import attr\n'), ((30222, 30287), 'attr.validators.instance_of', 'attr.validators.instance_of', (['PropJobDefinitionAuthorizationConfig'], {}), '(PropJobDefinitionAuthorizationConfig)\n', (30249, 30287), False, 'import attr\n'), ((30689, 30746), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (30716, 30746), False, 'import attr\n'), ((31140, 31197), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (31167, 31197), False, 'import attr\n'), ((31584, 31616), 'attr.validators.instance_of', 'attr.validators.instance_of', (['int'], {}), '(int)\n', (31611, 31616), False, 'import attr\n'), ((32898, 32955), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (32925, 32955), False, 'import attr\n'), ((33308, 33365), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (33335, 33365), False, 'import attr\n'), ((35533, 35590), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (35560, 35590), False, 'import attr\n'), ((36993, 37061), 'attr.validators.instance_of', 'attr.validators.instance_of', (['PropJobDefinitionEfsVolumeConfiguration'], {}), '(PropJobDefinitionEfsVolumeConfiguration)\n', (37020, 37061), False, 'import attr\n'), ((37516, 37573), 'attr.validators.instance_of', 'attr.validators.instance_of', (['PropJobDefinitionVolumesHost'], {}), '(PropJobDefinitionVolumesHost)\n', (37543, 37573), False, 'import attr\n'), ((37906, 37963), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (37933, 37963), False, 'import attr\n'), ((39329, 39363), 'attr.validators.instance_of', 'attr.validators.instance_of', (['float'], {}), '(float)\n', (39356, 39363), False, 'import attr\n'), ((39742, 39776), 'attr.validators.instance_of', 'attr.validators.instance_of', (['float'], {}), '(float)\n', (39769, 39776), False, 'import attr\n'), ((46054, 46111), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (46081, 46111), False, 'import attr\n'), ((46490, 46522), 'attr.validators.instance_of', 'attr.validators.instance_of', (['int'], {}), '(int)\n', (46517, 46522), False, 'import attr\n'), ((46890, 46922), 'attr.validators.instance_of', 
'attr.validators.instance_of', (['int'], {}), '(int)\n', (46917, 46922), False, 'import attr\n'), ((48008, 48065), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (48035, 48065), False, 'import attr\n'), ((48441, 48498), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (48468, 48498), False, 'import attr\n'), ((48873, 48930), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (48900, 48930), False, 'import attr\n'), ((50005, 50083), 'attr.validators.instance_of', 'attr.validators.instance_of', (['PropComputeEnvironmentLaunchTemplateSpecification'], {}), '(PropComputeEnvironmentLaunchTemplateSpecification)\n', (50032, 50083), False, 'import attr\n'), ((50449, 50481), 'attr.validators.instance_of', 'attr.validators.instance_of', (['int'], {}), '(int)\n', (50476, 50481), False, 'import attr\n'), ((50860, 50917), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (50887, 50917), False, 'import attr\n'), ((51879, 51936), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (51906, 51936), False, 'import attr\n'), ((52303, 52336), 'attr.validators.instance_of', 'attr.validators.instance_of', (['dict'], {}), '(dict)\n', (52330, 52336), False, 'import attr\n'), ((53439, 53471), 'attr.validators.instance_of', 'attr.validators.instance_of', (['int'], {}), '(int)\n', (53466, 53471), False, 'import attr\n'), ((56910, 56943), 'attr.validators.instance_of', 'attr.validators.instance_of', (['bool'], {}), '(bool)\n', (56937, 56943), False, 'import attr\n'), ((57344, 57376), 'attr.validators.instance_of', 'attr.validators.instance_of', (['int'], {}), '(int)\n', (57371, 57376), False, 'import attr\n'), ((57764, 57796), 'attr.validators.instance_of', 'attr.validators.instance_of', (['int'], {}), '(int)\n', (57791, 57796), False, 'import attr\n'), ((58196, 58228), 'attr.validators.instance_of', 'attr.validators.instance_of', (['int'], {}), '(int)\n', (58223, 58228), False, 'import attr\n'), ((65337, 65394), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (65364, 65394), False, 'import attr\n'), ((65919, 65993), 'attr.validators.instance_of', 'attr.validators.instance_of', (['PropJobDefinitionFargatePlatformConfiguration'], {}), '(PropJobDefinitionFargatePlatformConfiguration)\n', (65946, 65993), False, 'import attr\n'), ((66406, 66463), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (66433, 66463), False, 'import attr\n'), ((66842, 66899), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (66869, 66899), False, 'import attr\n'), ((67373, 67434), 'attr.validators.instance_of', 'attr.validators.instance_of', (['PropJobDefinitionLinuxParameters'], {}), '(PropJobDefinitionLinuxParameters)\n', (67400, 67434), False, 'import attr\n'), ((67921, 67983), 'attr.validators.instance_of', 'attr.validators.instance_of', (['PropJobDefinitionLogConfiguration'], {}), '(PropJobDefinitionLogConfiguration)\n', (67948, 67983), False, 
'import attr\n'), ((68347, 68379), 'attr.validators.instance_of', 'attr.validators.instance_of', (['int'], {}), '(int)\n', (68374, 68379), False, 'import attr\n'), ((69496, 69562), 'attr.validators.instance_of', 'attr.validators.instance_of', (['PropJobDefinitionNetworkConfiguration'], {}), '(PropJobDefinitionNetworkConfiguration)\n', (69523, 69562), False, 'import attr\n'), ((69939, 69972), 'attr.validators.instance_of', 'attr.validators.instance_of', (['bool'], {}), '(bool)\n', (69966, 69972), False, 'import attr\n'), ((70341, 70374), 'attr.validators.instance_of', 'attr.validators.instance_of', (['bool'], {}), '(bool)\n', (70368, 70374), False, 'import attr\n'), ((72672, 72729), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (72699, 72729), False, 'import attr\n'), ((73068, 73100), 'attr.validators.instance_of', 'attr.validators.instance_of', (['int'], {}), '(int)\n', (73095, 73100), False, 'import attr\n'), ((75370, 75435), 'attr.validators.instance_of', 'attr.validators.instance_of', (['PropJobDefinitionContainerProperties'], {}), '(PropJobDefinitionContainerProperties)\n', (75397, 75435), False, 'import attr\n'), ((80274, 80331), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (80301, 80331), False, 'import attr\n'), ((80667, 80724), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (80694, 80724), False, 'import attr\n'), ((81060, 81117), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (81087, 81117), False, 'import attr\n'), ((81406, 81439), 'attr.validators.instance_of', 'attr.validators.instance_of', (['dict'], {}), '(dict)\n', (81433, 81439), False, 'import attr\n'), ((84337, 84402), 'attr.validators.instance_of', 'attr.validators.instance_of', (['PropJobDefinitionContainerProperties'], {}), '(PropJobDefinitionContainerProperties)\n', (84364, 84402), False, 'import attr\n'), ((84760, 84817), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (84787, 84817), False, 'import attr\n'), ((85260, 85320), 'attr.validators.instance_of', 'attr.validators.instance_of', (['PropJobDefinitionNodeProperties'], {}), '(PropJobDefinitionNodeProperties)\n', (85287, 85320), False, 'import attr\n'), ((85643, 85676), 'attr.validators.instance_of', 'attr.validators.instance_of', (['dict'], {}), '(dict)\n', (85670, 85676), False, 'import attr\n'), ((86529, 86562), 'attr.validators.instance_of', 'attr.validators.instance_of', (['bool'], {}), '(bool)\n', (86556, 86562), False, 'import attr\n'), ((86994, 87053), 'attr.validators.instance_of', 'attr.validators.instance_of', (['PropJobDefinitionRetryStrategy'], {}), '(PropJobDefinitionRetryStrategy)\n', (87021, 87053), False, 'import attr\n'), ((87381, 87413), 'attr.validators.instance_of', 'attr.validators.instance_of', (['int'], {}), '(int)\n', (87408, 87413), False, 'import attr\n'), ((87837, 87890), 'attr.validators.instance_of', 'attr.validators.instance_of', (['PropJobDefinitionTimeout'], {}), '(PropJobDefinitionTimeout)\n', (87864, 87890), False, 'import attr\n'), ((88193, 88226), 'attr.validators.instance_of', 'attr.validators.instance_of', (['dict'], {}), '(dict)\n', (88220, 88226), False, 'import attr\n'), 
((89466, 89530), 'attr.validators.instance_of', 'attr.validators.instance_of', (['PropSchedulingPolicyFairsharePolicy'], {}), '(PropSchedulingPolicyFairsharePolicy)\n', (89493, 89530), False, 'import attr\n'), ((89873, 89930), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (89900, 89930), False, 'import attr\n'), ((92930, 92987), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (92957, 92987), False, 'import attr\n'), ((93466, 93533), 'attr.validators.instance_of', 'attr.validators.instance_of', (['PropComputeEnvironmentComputeResources'], {}), '(PropComputeEnvironmentComputeResources)\n', (93493, 93533), False, 'import attr\n'), ((93889, 93946), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (93916, 93946), False, 'import attr\n'), ((94286, 94343), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (94313, 94343), False, 'import attr\n'), ((94661, 94693), 'attr.validators.instance_of', 'attr.validators.instance_of', (['int'], {}), '(int)\n', (94688, 94693), False, 'import attr\n'), ((95020, 95053), 'attr.validators.instance_of', 'attr.validators.instance_of', (['dict'], {}), '(dict)\n', (95047, 95053), False, 'import attr\n'), ((45169, 45226), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (45196, 45226), False, 'import attr\n'), ((45247, 45280), 'attr.validators.instance_of', 'attr.validators.instance_of', (['list'], {}), '(list)\n', (45274, 45280), False, 'import attr\n'), ((77259, 77322), 'attr.validators.instance_of', 'attr.validators.instance_of', (['PropJobDefinitionNodeRangeProperty'], {}), '(PropJobDefinitionNodeRangeProperty)\n', (77286, 77322), False, 'import attr\n'), ((77343, 77376), 'attr.validators.instance_of', 'attr.validators.instance_of', (['list'], {}), '(list)\n', (77370, 77376), False, 'import attr\n'), ((79500, 79564), 'attr.validators.instance_of', 'attr.validators.instance_of', (['PropJobQueueComputeEnvironmentOrder'], {}), '(PropJobQueueComputeEnvironmentOrder)\n', (79527, 79564), False, 'import attr\n'), ((79585, 79618), 'attr.validators.instance_of', 'attr.validators.instance_of', (['list'], {}), '(list)\n', (79612, 79618), False, 'import attr\n'), ((12347, 12399), 'attr.validators.instance_of', 'attr.validators.instance_of', (['PropJobDefinitionSecret'], {}), '(PropJobDefinitionSecret)\n', (12374, 12399), False, 'import attr\n'), ((12420, 12453), 'attr.validators.instance_of', 'attr.validators.instance_of', (['list'], {}), '(list)\n', (12447, 12453), False, 'import attr\n'), ((27718, 27775), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (27745, 27775), False, 'import attr\n'), ((27796, 27829), 'attr.validators.instance_of', 'attr.validators.instance_of', (['list'], {}), '(list)\n', (27823, 27829), False, 'import attr\n'), ((33771, 33828), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (33798, 33828), False, 'import attr\n'), ((33849, 33882), 'attr.validators.instance_of', 'attr.validators.instance_of', (['list'], {}), '(list)\n', (33876, 
33882), False, 'import attr\n'), ((40330, 40394), 'attr.validators.instance_of', 'attr.validators.instance_of', (['PropSchedulingPolicyShareAttributes'], {}), '(PropSchedulingPolicyShareAttributes)\n', (40357, 40394), False, 'import attr\n'), ((40415, 40448), 'attr.validators.instance_of', 'attr.validators.instance_of', (['list'], {}), '(list)\n', (40442, 40448), False, 'import attr\n'), ((47489, 47562), 'attr.validators.instance_of', 'attr.validators.instance_of', (['PropComputeEnvironmentEc2ConfigurationObject'], {}), '(PropComputeEnvironmentEc2ConfigurationObject)\n', (47516, 47562), False, 'import attr\n'), ((47583, 47616), 'attr.validators.instance_of', 'attr.validators.instance_of', (['list'], {}), '(list)\n', (47610, 47616), False, 'import attr\n'), ((49376, 49433), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (49403, 49433), False, 'import attr\n'), ((49454, 49487), 'attr.validators.instance_of', 'attr.validators.instance_of', (['list'], {}), '(list)\n', (49481, 49487), False, 'import attr\n'), ((51370, 51427), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (51397, 51427), False, 'import attr\n'), ((51448, 51481), 'attr.validators.instance_of', 'attr.validators.instance_of', (['list'], {}), '(list)\n', (51475, 51481), False, 'import attr\n'), ((53986, 54046), 'attr.validators.instance_of', 'attr.validators.instance_of', (['PropJobDefinitionEvaluateOnExit'], {}), '(PropJobDefinitionEvaluateOnExit)\n', (54013, 54046), False, 'import attr\n'), ((54067, 54100), 'attr.validators.instance_of', 'attr.validators.instance_of', (['list'], {}), '(list)\n', (54094, 54100), False, 'import attr\n'), ((56412, 56464), 'attr.validators.instance_of', 'attr.validators.instance_of', (['PropJobDefinitionDevice'], {}), '(PropJobDefinitionDevice)\n', (56439, 56464), False, 'import attr\n'), ((56485, 56518), 'attr.validators.instance_of', 'attr.validators.instance_of', (['list'], {}), '(list)\n', (56512, 56518), False, 'import attr\n'), ((58764, 58815), 'attr.validators.instance_of', 'attr.validators.instance_of', (['PropJobDefinitionTmpfs'], {}), '(PropJobDefinitionTmpfs)\n', (58791, 58815), False, 'import attr\n'), ((58836, 58869), 'attr.validators.instance_of', 'attr.validators.instance_of', (['list'], {}), '(list)\n', (58863, 58869), False, 'import attr\n'), ((64214, 64271), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (64241, 64271), False, 'import attr\n'), ((64292, 64325), 'attr.validators.instance_of', 'attr.validators.instance_of', (['list'], {}), '(list)\n', (64319, 64325), False, 'import attr\n'), ((64842, 64899), 'attr.validators.instance_of', 'attr.validators.instance_of', (['PropJobDefinitionEnvironment'], {}), '(PropJobDefinitionEnvironment)\n', (64869, 64899), False, 'import attr\n'), ((64920, 64953), 'attr.validators.instance_of', 'attr.validators.instance_of', (['list'], {}), '(list)\n', (64947, 64953), False, 'import attr\n'), ((68893, 68950), 'attr.validators.instance_of', 'attr.validators.instance_of', (['PropJobDefinitionMountPoints'], {}), '(PropJobDefinitionMountPoints)\n', (68920, 68950), False, 'import attr\n'), ((68971, 69004), 'attr.validators.instance_of', 'attr.validators.instance_of', (['list'], {}), '(list)\n', (68998, 69004), False, 'import attr\n'), ((70945, 71010), 'attr.validators.instance_of', 
'attr.validators.instance_of', (['PropJobDefinitionResourceRequirement'], {}), '(PropJobDefinitionResourceRequirement)\n', (70972, 71010), False, 'import attr\n'), ((71031, 71064), 'attr.validators.instance_of', 'attr.validators.instance_of', (['list'], {}), '(list)\n', (71058, 71064), False, 'import attr\n'), ((71593, 71645), 'attr.validators.instance_of', 'attr.validators.instance_of', (['PropJobDefinitionSecret'], {}), '(PropJobDefinitionSecret)\n', (71620, 71645), False, 'import attr\n'), ((71666, 71699), 'attr.validators.instance_of', 'attr.validators.instance_of', (['list'], {}), '(list)\n', (71693, 71699), False, 'import attr\n'), ((72202, 72254), 'attr.validators.instance_of', 'attr.validators.instance_of', (['PropJobDefinitionUlimit'], {}), '(PropJobDefinitionUlimit)\n', (72229, 72254), False, 'import attr\n'), ((72275, 72308), 'attr.validators.instance_of', 'attr.validators.instance_of', (['list'], {}), '(list)\n', (72302, 72308), False, 'import attr\n'), ((73600, 73653), 'attr.validators.instance_of', 'attr.validators.instance_of', (['PropJobDefinitionVolumes'], {}), '(PropJobDefinitionVolumes)\n', (73627, 73653), False, 'import attr\n'), ((73674, 73707), 'attr.validators.instance_of', 'attr.validators.instance_of', (['list'], {}), '(list)\n', (73701, 73707), False, 'import attr\n'), ((86079, 86136), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (86106, 86136), False, 'import attr\n'), ((86157, 86190), 'attr.validators.instance_of', 'attr.validators.instance_of', (['list'], {}), '(list)\n', (86184, 86190), False, 'import attr\n'), ((90312, 90344), 'attr.validators.instance_of', 'attr.validators.instance_of', (['str'], {}), '(str)\n', (90339, 90344), False, 'import attr\n'), ((90362, 90419), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (90389, 90419), False, 'import attr\n')] |
#!/usr/bin/env python
# coding: utf-8
# built-in modules
import smtplib
from email.mime.text import MIMEText
from monitor.utils.settings import EMAIL_SERVER
from monitor.utils.settings import EMAIL_PORT
from monitor.utils.settings import EMAIL_USER
from monitor.utils.settings import EMAIL_PASS
from monitor.utils.settings import EMAIL_FROM_ADDR
from monitor.utils.email_list import EMAIL_LIST
def _sendmail(to_list, subject, content):
"""
params:
to_addr[list]:
subject[str]:
content[str]: plain content
"""
msg = MIMEText(content, 'plain', 'utf-8')
msg['Subject'] = subject
msg['From'] = EMAIL_FROM_ADDR
msg['To'] = ', '.join(to_list)
smtp = smtplib.SMTP_SSL()
smtp.set_debuglevel(0)
smtp.connect(EMAIL_SERVER, EMAIL_PORT)
smtp.login(EMAIL_USER, EMAIL_PASS)
smtp.sendmail(EMAIL_FROM_ADDR, to_list, msg.as_string())
smtp.quit()
def sendmail(subject, content):
"""
params:
subject[str]:
content[str]: plain content
"""
if EMAIL_LIST:
_sendmail(EMAIL_LIST, subject, content)
else:
raise ValueError('email list is empty')
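# Illustrative usage (a sketch; the subject and body strings below are made up):
# once EMAIL_LIST and the SMTP settings under monitor.utils are populated, a
# caller only supplies a subject and a plain-text body, e.g.
#
#   sendmail('disk usage alert', 'Disk usage on host-01 exceeded 90%.')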
| [
"email.mime.text.MIMEText",
"smtplib.SMTP_SSL"
] | [((562, 597), 'email.mime.text.MIMEText', 'MIMEText', (['content', '"""plain"""', '"""utf-8"""'], {}), "(content, 'plain', 'utf-8')\n", (570, 597), False, 'from email.mime.text import MIMEText\n'), ((708, 726), 'smtplib.SMTP_SSL', 'smtplib.SMTP_SSL', ([], {}), '()\n', (724, 726), False, 'import smtplib\n')] |
# Copyright (c) 2018 <NAME>
# Copyright (c) 2018 <NAME>
#
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
from phylanx import Phylanx
import numpy as np
@Phylanx
def foo():
local_a = np.array((2, 1))
local_a[0] += 55
return local_a
assert (np.array((57, 1)) == foo()).any()
| [
"numpy.array"
] | [((297, 313), 'numpy.array', 'np.array', (['(2, 1)'], {}), '((2, 1))\n', (305, 313), True, 'import numpy as np\n'), ((364, 381), 'numpy.array', 'np.array', (['(57, 1)'], {}), '((57, 1))\n', (372, 381), True, 'import numpy as np\n')] |
import importlib
import json
import os
import sys
from pprint import pprint as pp
import pytest
import utils as util
from ixload import IxLoadTestSettings as TestSettings
from ixload import IxLoadUtils as IxLoadUtils
from ixload import IxRestUtils as IxRestUtils
from ixnetwork_restpy import SessionAssistant
from ixnetwork_restpy.testplatform.testplatform import TestPlatform
targets_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "targets"))
sys.path.insert(0, targets_dir)
@pytest.fixture(scope="session")
def tbinfo(request):
"""Create and return testbed information"""
from credentials import CREDENTIALS as CR
from testbed import TESTBED as TB
TB["CR"] = CR
return TB
@pytest.fixture(name="smartnics", scope="session")
def fixture_smartnics(tbinfo):
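    # Resolve the DPU type from the testbed info, import the matching '<type>.<type>' module,
    # and instantiate its '<Type>Test' class with the testbed parameters.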
test_type = tbinfo['stateless'][0]['dpu'][0]['type']
if test_type:
modname = test_type.lower() + "." + test_type.lower()
else:
raise Exception('Fail to load module %s' % modname)
try:
imod = importlib.import_module(modname)
cls = getattr(imod, test_type.title() + "Test")
return cls(**tbinfo)
except:
raise Exception('Fail to load module %s' % modname)
@pytest.fixture(scope="session")
def utils():
return util
@pytest.fixture
def create_ixload_session_url(tbinfo):
ixload_settings = {}
tb = tbinfo['stateful'][0]
tg = {
'chassis_list': tb['server'],
'tgen': tb['tgen'],
'vxlan': tb['vxlan'],
'dpu': tb
}
# Helper Functions
def create_test_settings():
# TEST CONFIG
test_settings = TestSettings.IxLoadTestSettings()
test_settings.gatewayServer = tbinfo['stateful'][0]['server'][0]['addr']
test_settings.apiVersion = "v0"
test_settings.ixLoadVersion = "9.20.0.279"
slot1 = tg['tgen'][0][1]
port1 = tg['tgen'][0][2]
slot2 = tg['tgen'][1][1]
port2 = tg['tgen'][1][2]
test_settings.portListPerCommunity = {
# format: { community name : [ port list ] }
"Traffic1@Network1": [(1, slot1, port1)],
"Traffic2@Network2": [(1, slot2, port2)]
}
chassisList = tg['tgen'][0][0]
test_settings.chassisList = [chassisList]
#test_settings.chassisList = ["10.36.79.165"]
return test_settings
def create_session(test_settings):
connection = IxRestUtils.getConnection(
test_settings.gatewayServer,
test_settings.gatewayPort,
httpRedirect=test_settings.httpRedirect,
version=test_settings.apiVersion
)
return connection
test_settings = create_test_settings()
connection = create_session(test_settings)
connection.setApiKey(test_settings.apiKey)
ixload_settings['connection'] = connection
#ixload_settings['session_url'] = session_url
ixload_settings['test_settings'] = test_settings
yield ixload_settings
def getTestClass(*args, **kwargs):
if test_type:
modname = test_type.lower() + "." + test_type.lower()
else:
raise Exception('Fail to load module %s' % modname)
try:
imod = importlib.import_module(modname)
cls = getattr(imod, test_type.title() + "Test")
return cls(*args, **kwargs)
except:
raise Exception('Fail to load module %s' % modname)
| [
"ixload.IxRestUtils.getConnection",
"ixload.IxLoadTestSettings.IxLoadTestSettings",
"importlib.import_module",
"os.path.dirname",
"pytest.fixture",
"sys.path.insert"
] | [((467, 498), 'sys.path.insert', 'sys.path.insert', (['(0)', 'targets_dir'], {}), '(0, targets_dir)\n', (482, 498), False, 'import sys\n'), ((502, 533), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (516, 533), False, 'import pytest\n'), ((722, 771), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""smartnics"""', 'scope': '"""session"""'}), "(name='smartnics', scope='session')\n", (736, 771), False, 'import pytest\n'), ((1227, 1258), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1241, 1258), False, 'import pytest\n'), ((422, 447), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (437, 447), False, 'import os\n'), ((1034, 1066), 'importlib.import_module', 'importlib.import_module', (['modname'], {}), '(modname)\n', (1057, 1066), False, 'import importlib\n'), ((1635, 1668), 'ixload.IxLoadTestSettings.IxLoadTestSettings', 'TestSettings.IxLoadTestSettings', ([], {}), '()\n', (1666, 1668), True, 'from ixload import IxLoadTestSettings as TestSettings\n'), ((2432, 2598), 'ixload.IxRestUtils.getConnection', 'IxRestUtils.getConnection', (['test_settings.gatewayServer', 'test_settings.gatewayPort'], {'httpRedirect': 'test_settings.httpRedirect', 'version': 'test_settings.apiVersion'}), '(test_settings.gatewayServer, test_settings.\n gatewayPort, httpRedirect=test_settings.httpRedirect, version=\n test_settings.apiVersion)\n', (2457, 2598), True, 'from ixload import IxRestUtils as IxRestUtils\n'), ((3201, 3233), 'importlib.import_module', 'importlib.import_module', (['modname'], {}), '(modname)\n', (3224, 3233), False, 'import importlib\n')] |
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import datetime
from dateutil.tz import tzutc
def plot_water_levels(station, dates, levels):
"""Task 2E: Plots water level against time"""
#Assign variables
range_high = [station.typical_range[1]]*len(dates)
range_low = [station.typical_range[0]]*len(dates)
# Plot
plt.plot(dates, levels, label="Water Level")
plt.plot(dates, range_high, label="Typical High")
plt.plot(dates, range_low, label="Typical Low")
# Add axis labels, add legend, rotate date labels and add plot title
plt.xlabel('Date')
plt.ylabel('Water Level (m)')
plt.legend()
plt.xticks(rotation=45)
plt.title(station.name)
# Display plot
plt.tight_layout() # This makes sure plot does not cut off date labels
return plt.show()
def plot_water_level_with_fit(station, dates, levels, p):
"""Task 2F: Plots the water level data and the best-fit polynomial"""
# Convert dates to floats
dates_float = matplotlib.dates.date2num(dates)
# Create a shifted time list
dates_shifted = []
for i in range(len(dates_float)):
dates_shifted.append(dates_float[i] - dates_float[0])
# Find coefficients of best-fit polynomial f(x) of degree p
p_coeff = np.polyfit(dates_shifted, levels, p)
# Convert coefficient into a polynomial that can be evaluated,
# e.g. poly(0.3)
poly = np.poly1d(p_coeff)
# Plot original data points
plt.plot(dates_shifted, levels, '.', label='Data Points')
# Plot polynomial fit and typical range low/high at 30 points along interval
# (note that polynomial is evaluated using the date shift)
x = np.linspace(dates_shifted[0], dates_shifted[-1], 30)
range_high = [station.typical_range[1]]*len(x)
range_low = [station.typical_range[0]]*len(x)
plt.plot(x, poly(x - x[0]), label="Polynomial Fit")
plt.plot(x, range_high, label="Typical High")
plt.plot(x, range_low, label="Typical Low")
# Add axis labels, add legend, rotate date labels and add plot title
plt.xlabel('Dates from {}'.format(dates[-1]))
plt.ylabel('Water Level (m)')
plt.legend()
plt.xticks(rotation=45)
plt.title(station.name)
# Display plot
plt.tight_layout() # This makes sure plot does not cut off date labels
return plt.show() | [
"matplotlib.pyplot.title",
"numpy.poly1d",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.polyfit",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xticks",
"numpy.linspace",
"matplotlib.dates.date2num",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tight_layout"
] | [((363, 407), 'matplotlib.pyplot.plot', 'plt.plot', (['dates', 'levels'], {'label': '"""Water Level"""'}), "(dates, levels, label='Water Level')\n", (371, 407), True, 'import matplotlib.pyplot as plt\n'), ((412, 461), 'matplotlib.pyplot.plot', 'plt.plot', (['dates', 'range_high'], {'label': '"""Typical High"""'}), "(dates, range_high, label='Typical High')\n", (420, 461), True, 'import matplotlib.pyplot as plt\n'), ((466, 513), 'matplotlib.pyplot.plot', 'plt.plot', (['dates', 'range_low'], {'label': '"""Typical Low"""'}), "(dates, range_low, label='Typical Low')\n", (474, 513), True, 'import matplotlib.pyplot as plt\n'), ((592, 610), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Date"""'], {}), "('Date')\n", (602, 610), True, 'import matplotlib.pyplot as plt\n'), ((615, 644), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Water Level (m)"""'], {}), "('Water Level (m)')\n", (625, 644), True, 'import matplotlib.pyplot as plt\n'), ((649, 661), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (659, 661), True, 'import matplotlib.pyplot as plt\n'), ((666, 689), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (676, 689), True, 'import matplotlib.pyplot as plt\n'), ((694, 717), 'matplotlib.pyplot.title', 'plt.title', (['station.name'], {}), '(station.name)\n', (703, 717), True, 'import matplotlib.pyplot as plt\n'), ((742, 760), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (758, 760), True, 'import matplotlib.pyplot as plt\n'), ((826, 836), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (834, 836), True, 'import matplotlib.pyplot as plt\n'), ((1019, 1051), 'matplotlib.dates.date2num', 'matplotlib.dates.date2num', (['dates'], {}), '(dates)\n', (1044, 1051), False, 'import matplotlib\n'), ((1288, 1324), 'numpy.polyfit', 'np.polyfit', (['dates_shifted', 'levels', 'p'], {}), '(dates_shifted, levels, p)\n', (1298, 1324), True, 'import numpy as np\n'), ((1425, 1443), 'numpy.poly1d', 'np.poly1d', (['p_coeff'], {}), '(p_coeff)\n', (1434, 1443), True, 'import numpy as np\n'), ((1481, 1538), 'matplotlib.pyplot.plot', 'plt.plot', (['dates_shifted', 'levels', '"""."""'], {'label': '"""Data Points"""'}), "(dates_shifted, levels, '.', label='Data Points')\n", (1489, 1538), True, 'import matplotlib.pyplot as plt\n'), ((1692, 1744), 'numpy.linspace', 'np.linspace', (['dates_shifted[0]', 'dates_shifted[-1]', '(30)'], {}), '(dates_shifted[0], dates_shifted[-1], 30)\n', (1703, 1744), True, 'import numpy as np\n'), ((1911, 1956), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'range_high'], {'label': '"""Typical High"""'}), "(x, range_high, label='Typical High')\n", (1919, 1956), True, 'import matplotlib.pyplot as plt\n'), ((1961, 2004), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'range_low'], {'label': '"""Typical Low"""'}), "(x, range_low, label='Typical Low')\n", (1969, 2004), True, 'import matplotlib.pyplot as plt\n'), ((2133, 2162), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Water Level (m)"""'], {}), "('Water Level (m)')\n", (2143, 2162), True, 'import matplotlib.pyplot as plt\n'), ((2167, 2179), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2177, 2179), True, 'import matplotlib.pyplot as plt\n'), ((2184, 2207), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (2194, 2207), True, 'import matplotlib.pyplot as plt\n'), ((2212, 2235), 'matplotlib.pyplot.title', 'plt.title', (['station.name'], {}), '(station.name)\n', (2221, 2235), True, 'import matplotlib.pyplot 
as plt\n'), ((2260, 2278), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2276, 2278), True, 'import matplotlib.pyplot as plt\n'), ((2344, 2354), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2352, 2354), True, 'import matplotlib.pyplot as plt\n')] |
from app import database
def get_single_json_entity(entity_query):
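    # Execute the query, commit, and return the single matching row as a dict ({} unless exactly one row matched).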
query_result_proxy = database.session.execute(entity_query)
database.session.commit()
row_proxies = [r for r in query_result_proxy]
if len(row_proxies) == 1:
json_entity = {k: v for k, v in row_proxies[0].items()}
else:
json_entity = {}
return json_entity
| [
"app.database.session.commit",
"app.database.session.execute"
] | [((94, 132), 'app.database.session.execute', 'database.session.execute', (['entity_query'], {}), '(entity_query)\n', (118, 132), False, 'from app import database\n'), ((137, 162), 'app.database.session.commit', 'database.session.commit', ([], {}), '()\n', (160, 162), False, 'from app import database\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
def resource_file_to_bytecode(input_dir, input_file, output_path):
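    # Read the resource file and write a C/C++ source file embedding its bytes as a
    # 'const uint8_t _binary_<name>_start[]' array plus a matching '_binary_<name>_end' pointer.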
with open(os.path.join(input_dir, input_file), 'rb')\
as resource_file_object:
with open(output_path, 'a') as cpp_file_object:
            length = 0
            all_the_content = resource_file_object.read()
            template0 = "#include <stdint.h>\n"
            template1 = "const uint8_t _binary_$1_start[$2] = {$3};\n"
            template2 = \
                "const uint8_t* _binary_$1_end = _binary_$1_start + $2;"
formats = ","
seq = []
for content in all_the_content:
seq.append(str(hex(content)))
length = length + 1
            byte_code = formats.join(seq)
input_file = input_file.replace(".", "_")
template1 = template1.replace("$1", str(input_file)) \
.replace("$2", str(length)) \
.replace("$3", str(byte_code))
template2 = template2.replace("$1", str(input_file)) \
.replace("$2", str(length))
cpp_file_object.seek(0)
            cpp_file_object.truncate()
            cpp_file_object.write(template0 + template1 + template2)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--objcopy', type=str, required=False)
parser.add_argument('--input', type=str, required=True)
parser.add_argument('--output', type=str, required=True)
parser.add_argument('--arch', type=str, required=False)
args = parser.parse_args()
input_dir, input_file = os.path.split(args.input)
output_path = os.path.abspath(args.output)
resource_file_to_bytecode(input_dir, input_file, output_path)
if __name__ == '__main__':
sys.exit(main())
| [
"os.path.abspath",
"os.path.split",
"os.path.join",
"argparse.ArgumentParser"
] | [((1913, 1938), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1936, 1938), False, 'import argparse\n'), ((2244, 2269), 'os.path.split', 'os.path.split', (['args.input'], {}), '(args.input)\n', (2257, 2269), False, 'import os\n'), ((2288, 2316), 'os.path.abspath', 'os.path.abspath', (['args.output'], {}), '(args.output)\n', (2303, 2316), False, 'import os\n'), ((756, 791), 'os.path.join', 'os.path.join', (['input_dir', 'input_file'], {}), '(input_dir, input_file)\n', (768, 791), False, 'import os\n')] |
from basicts.utils.registry import SCALER_REGISTRY
"""
data normalization and re-normalization
"""
# ====================================== re-normalizations ====================================== #
@SCALER_REGISTRY.register()
def re_max_min_normalization(x, **kwargs):
_min, _max = kwargs['min'], kwargs['max']
x = (x + 1.) / 2.
x = 1. * x * (_max - _min) + _min
return x
@SCALER_REGISTRY.register()
def standard_re_transform(x, **kwargs):
mean, std = kwargs['mean'], kwargs['std']
x = x * std
x = x + mean
return x
# ====================================== normalizations ====================================== #
# omitted to avoid redundancy, as they should only be used in data preprocessing in `scripts/data_preparation`
| [
"basicts.utils.registry.SCALER_REGISTRY.register"
] | [((202, 228), 'basicts.utils.registry.SCALER_REGISTRY.register', 'SCALER_REGISTRY.register', ([], {}), '()\n', (226, 228), False, 'from basicts.utils.registry import SCALER_REGISTRY\n'), ((393, 419), 'basicts.utils.registry.SCALER_REGISTRY.register', 'SCALER_REGISTRY.register', ([], {}), '()\n', (417, 419), False, 'from basicts.utils.registry import SCALER_REGISTRY\n')] |
from jinja2 import Environment, PackageLoader, select_autoescape
import yaml
import json
import pkg_resources
import os
env = Environment(
loader=PackageLoader('kite_metrics', 'schemas'),
)
cache = {}
def _schema_exists(filename):
return pkg_resources.resource_exists('kite_metrics', 'schemas/{}'.format(filename))
def _schema_open(filename):
return pkg_resources.resource_stream('kite_metrics', 'schemas/{}'.format(filename))
def load_context(key):
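    # Load the optional '<key>.ctx.yaml' render context for the schema template, caching the parsed YAML.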
filename = '{}.ctx.yaml'.format(key)
if filename not in cache:
ctx = {}
if _schema_exists(filename):
ctx = yaml.load(_schema_open(filename), yaml.FullLoader)
cache[filename] = ctx
return cache[filename]
def load_schema(key):
filename = '{}.yaml.tmpl'.format(key)
if filename not in cache:
ctx = load_context(key)
cache[filename] = yaml.load(env.get_template(filename).render(ctx), Loader=yaml.FullLoader)
return cache[filename]
def load_json_schema(key, extra_ctx=None):
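    # Prefer a pre-built '<key>.schema.json'; otherwise render '<key>.schema.json.tmpl' with the
    # loaded schema (plus any extra_ctx) and cache the parsed result.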
filename = '{}.schema.json'.format(key)
if filename not in cache:
if _schema_exists(filename):
cache[filename] = json.load(_schema_open(filename))
else:
tmpl_filename = '{}.schema.json.tmpl'.format(key)
ctx = {'schema': load_schema(key)}
if extra_ctx:
ctx.update(extra_ctx)
rendered = env.get_template(tmpl_filename).render(ctx)
try:
cache[filename] = json.loads(rendered)
except json.decoder.JSONDecodeError:
print("Error decoding schema JSON:\n{}".format(rendered))
return cache[filename]
| [
"jinja2.PackageLoader",
"json.loads"
] | [((152, 192), 'jinja2.PackageLoader', 'PackageLoader', (['"""kite_metrics"""', '"""schemas"""'], {}), "('kite_metrics', 'schemas')\n", (165, 192), False, 'from jinja2 import Environment, PackageLoader, select_autoescape\n'), ((1501, 1521), 'json.loads', 'json.loads', (['rendered'], {}), '(rendered)\n', (1511, 1521), False, 'import json\n')] |
###############################################################################
# Copyright (c) 2017-2020 <NAME> (Cisco Systems), #
# <NAME> (Cisco Systems), <NAME> (Cisco Systems) and others #
# #
# All rights reserved. This program and the accompanying materials #
# are made available under the terms of the Apache License, Version 2.0 #
# which accompanies this distribution, and is available at #
# http://www.apache.org/licenses/LICENSE-2.0 #
###############################################################################
from base.utils.configuration import Configuration
from base.utils.origins import Origin
from scan.link_finders.find_links import FindLinks
class FindLinksForDisks(FindLinks):
# per future ceph releases this might need revisions
DB_PARTITION_PATH_ATT = 'bluefs_db_partition_path'
BLK_PARTITION_PATH_ATT = 'bluestore_bdev_partition_path'
def __init__(self):
super().__init__()
self.environment_type = None
self.hosts = []
self.osds = []
self.disks = []
self.partitions = []
def setup(self, env, origin: Origin = None):
super().setup(env, origin)
self.configuration = Configuration()
self.environment_type = self.configuration.get_env_type()
def add_links(self):
self.log.info("adding links of types: host-osd, osd-partition, partition-disk")
self.hosts = self.inv.find_items({
"environment": self.configuration.env_name,
"type": "host"
})
self.osds = self.inv.find_items({
"environment": self.get_env(),
"type": "osd"
})
self.partitions = self.inv.find_items({
"environment": self.get_env(),
"type": "partition"
})
self.disks = self.inv.find_items({
"environment": self.get_env(),
"type": "disk"
})
for osd in self.osds:
self.add_link_for_hosts(osd)
for partition in self.partitions:
self.add_link_for_osds(partition)
for disk in self.disks:
self.add_link_for_partitions(disk)
def add_link_for_hosts(self, osd):
# link_type: "host-osd"
metadata = osd.get('metadata', '')
for host in self.hosts:
if host.get('id', 'None') == osd.get('host', ''):
self.add_links_with_specifics(host, osd,
extra_att={"osd_data": metadata.get('osd_data', '')})
def add_link_for_osds(self, partition):
# link_type: "osd-partition"
for osd in self.osds:
metadata = osd.get('metadata', '')
if ((metadata.get(self.DB_PARTITION_PATH_ATT, 'None') == partition.get('device', '')) and (
osd.get('host', 'None') == partition.get('host', ''))) or ((
metadata.get(self.BLK_PARTITION_PATH_ATT, 'None') == partition.get('device', '')) and (
osd.get('host', 'None') == partition.get('host', ''))) or (
metadata.get('osd_data', 'None') == partition.get('mount_point', '')):
self.add_links_with_specifics(osd, partition,
extra_att={"osd_objectstore": metadata.get('osd_objectstore', '')})
def add_link_for_partitions(self, disk):
# link_type: "partition-disk"
for partition in self.partitions:
if (partition.get('master_disk', 'None') == disk.get('name', '')) and (
partition.get('host', 'None') == disk.get('host', 'None')):
self.add_links_with_specifics(partition, disk,
extra_att={"partition_type": partition.get('label', '')})
def add_links_with_specifics(self, source, target, extra_att=None):
link_name = '{}-{}'.format(source.get('name', 'None'), target.get('name', ''))
source_label = '{}-{}-{}'.format(source.get('cvim_region', ''), source.get('cvim_metro', ''),
source.get('id', ''))
target_label = target.get('id', '')
extra = {"source_label": source_label, "target_label": target_label}
if extra_att:
extra.update(extra_att)
self.link_items(source, target, link_name=link_name, extra_attributes=extra)
| [
"base.utils.configuration.Configuration"
] | [((1360, 1375), 'base.utils.configuration.Configuration', 'Configuration', ([], {}), '()\n', (1373, 1375), False, 'from base.utils.configuration import Configuration\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2020-11-02 11:30
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('activities', '0027_contributionvalue'),
('time_based', '0019_auto_20201030_1317'),
]
operations = [
migrations.CreateModel(
name='OnADateApplication',
fields=[
('contribution_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='activities.Contribution')),
],
options={
'verbose_name': 'On a date application',
'verbose_name_plural': 'On a date application',
'permissions': (('api_read_onadateapplication', 'Can view application through the API'), ('api_add_onadateapplication', 'Can add application through the API'), ('api_change_onadateapplication', 'Can change application through the API'), ('api_delete_onadateapplication', 'Can delete application through the API'), ('api_read_own_onadateapplication', 'Can view own application through the API'), ('api_add_own_onadateapplication', 'Can add own application through the API'), ('api_change_own_onadateapplication', 'Can change own application through the API'), ('api_delete_own_onadateapplication', 'Can delete own application through the API')),
},
bases=('activities.contribution',),
),
migrations.CreateModel(
name='PeriodApplication',
fields=[
('contribution_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='activities.Contribution')),
('current_period', models.DateField(blank=True, null=True)),
],
options={
'verbose_name': 'Period application',
'verbose_name_plural': 'Period application',
'permissions': (('api_read_periodapplication', 'Can view application through the API'), ('api_add_periodapplication', 'Can add application through the API'), ('api_change_periodapplication', 'Can change application through the API'), ('api_delete_periodapplication', 'Can delete application through the API'), ('api_read_own_periodapplication', 'Can view own application through the API'), ('api_add_own_periodapplication', 'Can add own application through the API'), ('api_change_own_periodapplication', 'Can change own application through the API'), ('api_delete_own_periodapplication', 'Can delete own application through the API')),
},
bases=('activities.contribution',),
),
migrations.RemoveField(
model_name='application',
name='current_period',
),
]
| [
"django.db.migrations.RemoveField",
"django.db.models.OneToOneField",
"django.db.models.DateField"
] | [((2833, 2904), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""application"""', 'name': '"""current_period"""'}), "(model_name='application', name='current_period')\n", (2855, 2904), False, 'from django.db import migrations, models\n'), ((508, 685), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'auto_created': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'parent_link': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'to': '"""activities.Contribution"""'}), "(auto_created=True, on_delete=django.db.models.deletion\n .CASCADE, parent_link=True, primary_key=True, serialize=False, to=\n 'activities.Contribution')\n", (528, 685), False, 'from django.db import migrations, models\n'), ((1699, 1876), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'auto_created': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'parent_link': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'to': '"""activities.Contribution"""'}), "(auto_created=True, on_delete=django.db.models.deletion\n .CASCADE, parent_link=True, primary_key=True, serialize=False, to=\n 'activities.Contribution')\n", (1719, 1876), False, 'from django.db import migrations, models\n'), ((1904, 1943), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1920, 1943), False, 'from django.db import migrations, models\n')] |
from __future__ import annotations
from amulet.world_interface.chunk.interfaces.leveldb.leveldb_14.interface import (
LevelDB14Interface,
)
class LevelDB15Interface(LevelDB14Interface):
def __init__(self):
LevelDB14Interface.__init__(self)
self.features["chunk_version"] = 15
INTERFACE_CLASS = LevelDB15Interface
| [
"amulet.world_interface.chunk.interfaces.leveldb.leveldb_14.interface.LevelDB14Interface.__init__"
] | [((225, 258), 'amulet.world_interface.chunk.interfaces.leveldb.leveldb_14.interface.LevelDB14Interface.__init__', 'LevelDB14Interface.__init__', (['self'], {}), '(self)\n', (252, 258), False, 'from amulet.world_interface.chunk.interfaces.leveldb.leveldb_14.interface import LevelDB14Interface\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import sys
import jobson_systemtests
def main(argv=None):
if argv is None:
argv = sys.argv
parser = argparse.ArgumentParser(description='Run jobson system tests')
parser.add_argument(
'specs_dir',
type=str,
help='Path to directory containing jobson specs (and tests)')
parser.add_argument(
'host',
type=str,
help='The host running the server (e.g. localhost)')
parser.add_argument(
'port',
type=int,
help='The port the Jobson API is listening on (e.g. 8080)')
parser.add_argument(
'login',
type=str,
help='The login to use to access the API')
parser.add_argument(
'password',
type=str,
help='The password to use the access the API')
args = parser.parse_args(argv[1:])
jobson_systemtests.run(
specs_dir=args.specs_dir,
host=args.host,
port=args.port,
login=args.login,
password=args.password)
return 0
if __name__ == "__main__":
sys.exit(main())
| [
"jobson_systemtests.run",
"argparse.ArgumentParser"
] | [((727, 789), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run jobson system tests"""'}), "(description='Run jobson system tests')\n", (750, 789), False, 'import argparse\n'), ((1446, 1573), 'jobson_systemtests.run', 'jobson_systemtests.run', ([], {'specs_dir': 'args.specs_dir', 'host': 'args.host', 'port': 'args.port', 'login': 'args.login', 'password': 'args.password'}), '(specs_dir=args.specs_dir, host=args.host, port=args.\n port, login=args.login, password=args.password)\n', (1468, 1573), False, 'import jobson_systemtests\n')] |
from discord.ext import commands
import discord
import yaml
import os
class Bot(commands.Bot):
    async def get_prefix(self, message):
        """Fetches the current prefix in the guild from the database."""
        return commands.when_mentioned_or("onyx ")(self, message)
def __init__(self):
"""Initialize the bot and load all extensions."""
with open("C:/onyx/config.yml", encoding = "UTF-8") as f:
self.config = yaml.safe_load(f)
super().__init__(
command_prefix = self.get_prefix,
intents = discord.Intents.default()
)
client = Bot()
client.load_extension("jishaku") # Load the debugging cog.
@client.check
async def check(ctx):
return True
token = os.getenv("TOKEN")
client.run(token)
| [
"discord.ext.commands.when_mentioned_or",
"discord.Intents.default",
"yaml.safe_load",
"os.getenv"
] | [((728, 746), 'os.getenv', 'os.getenv', (['"""TOKEN"""'], {}), "('TOKEN')\n", (737, 746), False, 'import os\n'), ((230, 265), 'discord.ext.commands.when_mentioned_or', 'commands.when_mentioned_or', (['"""onyx """'], {}), "('onyx ')\n", (256, 265), False, 'from discord.ext import commands\n'), ((442, 459), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (456, 459), False, 'import yaml\n'), ((555, 580), 'discord.Intents.default', 'discord.Intents.default', ([], {}), '()\n', (578, 580), False, 'import discord\n')] |
import urllib.parse
from django.shortcuts import render
#from ckan_model import stub as ckan
from ckan_model import production as ckan
#from ..ckan_model import production as ckan
import django.http
def index(request: django.http.HttpRequest):
payload = {}
google = ckan.Search()
payload['top_tags'] = google.top_tags()
return render(request, 'index.html', context=payload)
def search(request: django.http.HttpRequest):
# it's a search engine!
google = ckan.Search()
payload = {}
payload['tags'] = google.tags_list()
payload['unis'] = google.university_list()
page = int(request.GET.get('page', 1))
page_size = 10
start_pos = (page - 1) * page_size
if request.GET:
response = google.students(request.GET.getlist('selected_tags'),
request.GET.getlist('selected_unis'),
start=start_pos,
rows=page_size
)
else:
response = google.students()
total = response['total']
pages_count = total//10+bool(total%10)
actual_page = start_pos//page_size + 1
parsed_url = list(urllib.parse.urlparse(request.get_full_path()))
options = dict(urllib.parse.parse_qsl(parsed_url[4]))
def change_url(n):
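        # Rebuild the current URL with only the 'page' query parameter changed.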
options['page'] = n
parsed_url[4] = urllib.parse.urlencode(options)
return urllib.parse.urlunparse(parsed_url)
pages = [{'number': n,
'url': change_url(n),
'active': n == actual_page} for n in range(1, pages_count)]
payload["pagination"] = { "pages": pages,
"prev": actual_page > 1,
"next": actual_page < pages_count,
}
payload['results'] = response['results']
return render(request, 'search.html', payload) | [
"django.shortcuts.render",
"ckan_model.production.Search"
] | [((280, 293), 'ckan_model.production.Search', 'ckan.Search', ([], {}), '()\n', (291, 293), True, 'from ckan_model import production as ckan\n'), ((349, 395), 'django.shortcuts.render', 'render', (['request', '"""index.html"""'], {'context': 'payload'}), "(request, 'index.html', context=payload)\n", (355, 395), False, 'from django.shortcuts import render\n'), ((485, 498), 'ckan_model.production.Search', 'ckan.Search', ([], {}), '()\n', (496, 498), True, 'from ckan_model import production as ckan\n'), ((1857, 1896), 'django.shortcuts.render', 'render', (['request', '"""search.html"""', 'payload'], {}), "(request, 'search.html', payload)\n", (1863, 1896), False, 'from django.shortcuts import render\n')] |
import numpy as np
from scipy.integrate import quad
import matplotlib.pyplot as plt
def Redshift(n0, n1, n2, z1=3.6, z=np.linspace(0,10,num=1001)):
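    # Broken power-law rate: (1+z)^n1 below the break z1, (1+z)^n2 above it,
    # matched at z1 and normalized so that R(0) = n0.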
Rlow = np.power((1.0 + z), n1)
Rhigh = np.power((1.0 + z), n2)
rbrk = np.power((1.0 + z1), n1 - n2)
R = Rlow * (z <= z1) + rbrk * Rhigh * (z > z1)
R *= n0 / R[0]
return z, R
z, R = Redshift(0.84, 2.07, -0.7)
plt.plot(z,R,'-k')
plt.xlabel(r'$z$')
plt.ylabel(r'$\mathcal{R}(z)$')
plt.grid()
#plt.gca().set_yscale('log')
plt.show()
#### This computes E(z) and int_0^z dz'/E(z') and saves to file
def Efunc(z):
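    # Dimensionless Hubble parameter E(z) for a flat LambdaCDM cosmology (Omega_m = 0.274).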
Omega_m = 0.274
Omega_lambda = 0.726
E = np.sqrt(Omega_m * np.power((1 + z), 3) + Omega_lambda)
return E
def Efuncinv(z):
return 1.0 / Efunc(z)
z = np.linspace(0,10,num=1001)
dz = z[1] - z[0]
E = Efunc(z)
Eics = np.zeros(E.shape)
for i in range(len(Eics)):
Eics[i] = (quad(Efuncinv, 0, z[i])[0])**2.0
#Eics = np.square(np.cumsum(1.0 / E) * dz)
#Eics[1:] = Eics[:-1]
#Eics[0] = 0
Eall = Eics / E;
z = z.reshape(z.shape[0],1)
E = E.reshape(E.shape[0],1)
Eics = Eics.reshape(Eics.shape[0],1)
Eall = Eall.reshape(Eall.shape[0],1)
d = np.concatenate((z,E,Eics,Eall),axis=1)
np.savetxt('support_data/splines_Ez.txt',d,fmt='%0.9lf')
z2, R = Redshift(0.84, 2.07, -0.7, z=z)
Rp = R / (1+z) * Eall
plt.plot(z,Rp,'-k')
plt.plot(z,R/(1+z),'--b')
plt.plot(z,Eall,'-.r')
#plt.plot(z,np.cumsum(Eall),'-g')
plt.xlabel(r'$z$')
plt.grid()
plt.show()
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"scipy.integrate.quad",
"numpy.power",
"numpy.savetxt",
"numpy.zeros",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"numpy.concatenate"
] | [((365, 385), 'matplotlib.pyplot.plot', 'plt.plot', (['z', 'R', '"""-k"""'], {}), "(z, R, '-k')\n", (373, 385), True, 'import matplotlib.pyplot as plt\n'), ((384, 401), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$z$"""'], {}), "('$z$')\n", (394, 401), True, 'import matplotlib.pyplot as plt\n'), ((403, 434), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\mathcal{R}(z)$"""'], {}), "('$\\\\mathcal{R}(z)$')\n", (413, 434), True, 'import matplotlib.pyplot as plt\n'), ((435, 445), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (443, 445), True, 'import matplotlib.pyplot as plt\n'), ((475, 485), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (483, 485), True, 'import matplotlib.pyplot as plt\n'), ((723, 751), 'numpy.linspace', 'np.linspace', (['(0)', '(10)'], {'num': '(1001)'}), '(0, 10, num=1001)\n', (734, 751), True, 'import numpy as np\n'), ((789, 806), 'numpy.zeros', 'np.zeros', (['E.shape'], {}), '(E.shape)\n', (797, 806), True, 'import numpy as np\n'), ((1112, 1154), 'numpy.concatenate', 'np.concatenate', (['(z, E, Eics, Eall)'], {'axis': '(1)'}), '((z, E, Eics, Eall), axis=1)\n', (1126, 1154), True, 'import numpy as np\n'), ((1152, 1210), 'numpy.savetxt', 'np.savetxt', (['"""support_data/splines_Ez.txt"""', 'd'], {'fmt': '"""%0.9lf"""'}), "('support_data/splines_Ez.txt', d, fmt='%0.9lf')\n", (1162, 1210), True, 'import numpy as np\n'), ((1274, 1295), 'matplotlib.pyplot.plot', 'plt.plot', (['z', 'Rp', '"""-k"""'], {}), "(z, Rp, '-k')\n", (1282, 1295), True, 'import matplotlib.pyplot as plt\n'), ((1294, 1325), 'matplotlib.pyplot.plot', 'plt.plot', (['z', '(R / (1 + z))', '"""--b"""'], {}), "(z, R / (1 + z), '--b')\n", (1302, 1325), True, 'import matplotlib.pyplot as plt\n'), ((1320, 1344), 'matplotlib.pyplot.plot', 'plt.plot', (['z', 'Eall', '"""-.r"""'], {}), "(z, Eall, '-.r')\n", (1328, 1344), True, 'import matplotlib.pyplot as plt\n'), ((1377, 1394), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$z$"""'], {}), "('$z$')\n", (1387, 1394), True, 'import matplotlib.pyplot as plt\n'), ((1396, 1406), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1404, 1406), True, 'import matplotlib.pyplot as plt\n'), ((1407, 1417), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1415, 1417), True, 'import matplotlib.pyplot as plt\n'), ((120, 148), 'numpy.linspace', 'np.linspace', (['(0)', '(10)'], {'num': '(1001)'}), '(0, 10, num=1001)\n', (131, 148), True, 'import numpy as np\n'), ((157, 178), 'numpy.power', 'np.power', (['(1.0 + z)', 'n1'], {}), '(1.0 + z, n1)\n', (165, 178), True, 'import numpy as np\n'), ((190, 211), 'numpy.power', 'np.power', (['(1.0 + z)', 'n2'], {}), '(1.0 + z, n2)\n', (198, 211), True, 'import numpy as np\n'), ((222, 249), 'numpy.power', 'np.power', (['(1.0 + z1)', '(n1 - n2)'], {}), '(1.0 + z1, n1 - n2)\n', (230, 249), True, 'import numpy as np\n'), ((846, 869), 'scipy.integrate.quad', 'quad', (['Efuncinv', '(0)', 'z[i]'], {}), '(Efuncinv, 0, z[i])\n', (850, 869), False, 'from scipy.integrate import quad\n'), ((630, 648), 'numpy.power', 'np.power', (['(1 + z)', '(3)'], {}), '(1 + z, 3)\n', (638, 648), True, 'import numpy as np\n')] |
from waio.keyboard.reply import QuickReplyContentText, QuickReply, KeyboardButton, QuickReplyContentImage
from callback import callback_reply_keyboard
def generate_keyboard_place():
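    # Quick-reply keyboard asking where the user wants to go today, with "Кинотеатр" (cinema)
    # and "Ресторан" (restaurant) buttons.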
kb_content = QuickReplyContentText(
header="Куда вы сегодня хотите сходить?",
text="Выберите из предложенного списка",
caption=""
)
kb = QuickReply(callback_data=callback_reply_keyboard.new(name="place", id="1"), content=kb_content)
kb.add(KeyboardButton(title='Кинотеатр')).add(KeyboardButton(title='Ресторан'))
return kb
def generate_keyboard_cinema_time():
kb_content = QuickReplyContentText(
header="Кинотеатр",
text="Выберите удобное для Вас время",
caption=""
)
kb = QuickReply(callback_data=callback_reply_keyboard.new(name="cinema_time", id="2"), content=kb_content)
kb.add(KeyboardButton(title='18:00')).add(KeyboardButton(title='20:00'))
return kb
def generate_keyboard_restaurant_time():
kb_content = QuickReplyContentText(
header="Ресторан",
text="Выберите удобное для Вас время",
caption="",
)
kb = QuickReply(callback_data=callback_reply_keyboard.new(name="restaurant_time", id="2"), content=kb_content)
kb.add(KeyboardButton(title='18:30')).add(KeyboardButton(title='21:00'))
return kb
def generate_keyboard_image():  # You can send a keyboard with an image instead of a header; not used in this example
kb_content = QuickReplyContentImage(
url="https://www.buildquickbots.com/whatsapp/media/sample/jpg/sample01.jpg",
text="this is the body",
caption="this is the footer"
)
kb = QuickReply(callback_data=callback_reply_keyboard.new(type="start", id="1"), content=kb_content)
kb.add(KeyboardButton(title='Сменить ресторан')).add(KeyboardButton(title='Новый ресторан'))
return kb
| [
"waio.keyboard.reply.QuickReplyContentText",
"waio.keyboard.reply.KeyboardButton",
"callback.callback_reply_keyboard.new",
"waio.keyboard.reply.QuickReplyContentImage"
] | [((201, 322), 'waio.keyboard.reply.QuickReplyContentText', 'QuickReplyContentText', ([], {'header': '"""Куда вы сегодня хотите сходить?"""', 'text': '"""Выберите из предложенного списка"""', 'caption': '""""""'}), "(header='Куда вы сегодня хотите сходить?', text=\n 'Выберите из предложенного списка', caption='')\n", (222, 322), False, 'from waio.keyboard.reply import QuickReplyContentText, QuickReply, KeyboardButton, QuickReplyContentImage\n'), ((608, 705), 'waio.keyboard.reply.QuickReplyContentText', 'QuickReplyContentText', ([], {'header': '"""Кинотеатр"""', 'text': '"""Выберите удобное для Вас время"""', 'caption': '""""""'}), "(header='Кинотеатр', text=\n 'Выберите удобное для Вас время', caption='')\n", (629, 705), False, 'from waio.keyboard.reply import QuickReplyContentText, QuickReply, KeyboardButton, QuickReplyContentImage\n'), ((994, 1090), 'waio.keyboard.reply.QuickReplyContentText', 'QuickReplyContentText', ([], {'header': '"""Ресторан"""', 'text': '"""Выберите удобное для Вас время"""', 'caption': '""""""'}), "(header='Ресторан', text=\n 'Выберите удобное для Вас время', caption='')\n", (1015, 1090), False, 'from waio.keyboard.reply import QuickReplyContentText, QuickReply, KeyboardButton, QuickReplyContentImage\n'), ((1466, 1629), 'waio.keyboard.reply.QuickReplyContentImage', 'QuickReplyContentImage', ([], {'url': '"""https://www.buildquickbots.com/whatsapp/media/sample/jpg/sample01.jpg"""', 'text': '"""this is the body"""', 'caption': '"""this is the footer"""'}), "(url=\n 'https://www.buildquickbots.com/whatsapp/media/sample/jpg/sample01.jpg',\n text='this is the body', caption='this is the footer')\n", (1488, 1629), False, 'from waio.keyboard.reply import QuickReplyContentText, QuickReply, KeyboardButton, QuickReplyContentImage\n'), ((512, 544), 'waio.keyboard.reply.KeyboardButton', 'KeyboardButton', ([], {'title': '"""Ресторан"""'}), "(title='Ресторан')\n", (526, 544), False, 'from waio.keyboard.reply import QuickReplyContentText, QuickReply, KeyboardButton, QuickReplyContentImage\n'), ((888, 917), 'waio.keyboard.reply.KeyboardButton', 'KeyboardButton', ([], {'title': '"""20:00"""'}), "(title='20:00')\n", (902, 917), False, 'from waio.keyboard.reply import QuickReplyContentText, QuickReply, KeyboardButton, QuickReplyContentImage\n'), ((1278, 1307), 'waio.keyboard.reply.KeyboardButton', 'KeyboardButton', ([], {'title': '"""21:00"""'}), "(title='21:00')\n", (1292, 1307), False, 'from waio.keyboard.reply import QuickReplyContentText, QuickReply, KeyboardButton, QuickReplyContentImage\n'), ((1828, 1866), 'waio.keyboard.reply.KeyboardButton', 'KeyboardButton', ([], {'title': '"""Новый ресторан"""'}), "(title='Новый ресторан')\n", (1842, 1866), False, 'from waio.keyboard.reply import QuickReplyContentText, QuickReply, KeyboardButton, QuickReplyContentImage\n'), ((382, 431), 'callback.callback_reply_keyboard.new', 'callback_reply_keyboard.new', ([], {'name': '"""place"""', 'id': '"""1"""'}), "(name='place', id='1')\n", (409, 431), False, 'from callback import callback_reply_keyboard\n'), ((765, 820), 'callback.callback_reply_keyboard.new', 'callback_reply_keyboard.new', ([], {'name': '"""cinema_time"""', 'id': '"""2"""'}), "(name='cinema_time', id='2')\n", (792, 820), False, 'from callback import callback_reply_keyboard\n'), ((1151, 1210), 'callback.callback_reply_keyboard.new', 'callback_reply_keyboard.new', ([], {'name': '"""restaurant_time"""', 'id': '"""2"""'}), "(name='restaurant_time', id='2')\n", (1178, 1210), False, 'from callback import callback_reply_keyboard\n'), 
((1685, 1734), 'callback.callback_reply_keyboard.new', 'callback_reply_keyboard.new', ([], {'type': '"""start"""', 'id': '"""1"""'}), "(type='start', id='1')\n", (1712, 1734), False, 'from callback import callback_reply_keyboard\n'), ((464, 497), 'waio.keyboard.reply.KeyboardButton', 'KeyboardButton', ([], {'title': '"""Кинотеатр"""'}), "(title='Кинотеатр')\n", (478, 497), False, 'from waio.keyboard.reply import QuickReplyContentText, QuickReply, KeyboardButton, QuickReplyContentImage\n'), ((853, 882), 'waio.keyboard.reply.KeyboardButton', 'KeyboardButton', ([], {'title': '"""18:00"""'}), "(title='18:00')\n", (867, 882), False, 'from waio.keyboard.reply import QuickReplyContentText, QuickReply, KeyboardButton, QuickReplyContentImage\n'), ((1243, 1272), 'waio.keyboard.reply.KeyboardButton', 'KeyboardButton', ([], {'title': '"""18:30"""'}), "(title='18:30')\n", (1257, 1272), False, 'from waio.keyboard.reply import QuickReplyContentText, QuickReply, KeyboardButton, QuickReplyContentImage\n'), ((1767, 1807), 'waio.keyboard.reply.KeyboardButton', 'KeyboardButton', ([], {'title': '"""Сменить ресторан"""'}), "(title='Сменить ресторан')\n", (1781, 1807), False, 'from waio.keyboard.reply import QuickReplyContentText, QuickReply, KeyboardButton, QuickReplyContentImage\n')] |
#!/usr/bin/env python
# Python script for fast text file searching using keyword index on disk.
#
# Author: <NAME> <<EMAIL>>
# Last Change: November 1, 2015
# URL: http://peterodding.com/code/vim/notes/
# License: MIT
#
# This Python script can be used by the notes.vim plug-in to perform fast
# keyword searches in the user's notes. It has two advantages over just
# using Vim's internal :vimgrep command to search all of the user's notes:
#
# - Very large notes don't slow searching down so much;
# - Hundreds of notes can be searched in less than a second.
#
# The keyword index is a Python dictionary that's persisted using the pickle
# module. The structure of the dictionary may seem very naive but it's quite
# fast. Also the pickle protocol makes sure repeating strings are stored only
# once, so it's not as bad as it may appear at first sight :-).
#
# For more information about the Vim plug-in see http://peterodding.com/code/vim/notes/.
"""
Usage: search_notes.py [OPTIONS] KEYWORD...
Search one or more directories of plain text files using a full text index,
updated automatically during each invocation of the program.
Valid options include:
-i, --ignore-case ignore case of keyword(s)
-l, --list=SUBSTR list keywords matching substring
-d, --database=FILE set path to keywords index file
-n, --notes=DIR set directory with user notes (can be repeated)
-e, --encoding=NAME set character encoding of notes
-v, --verbose make more noise
-h, --help show this message and exit
For more information see http://peterodding.com/code/vim/notes/
"""
# Standard library modules.
import codecs
import fnmatch
import getopt
import logging
import os
import re
import sys
import time
import pickle
from typing import List, Set
try:
import Levenshtein
except ImportError:
Levenshtein = None
# The version of the index format that's supported by this revision of the
# `search_notes.py' script; if an existing index file is found with an
# unsupported version, the script knows that it should rebuild the index.
INDEX_VERSION = 3
# Filename matching patterns of files to ignore during scans.
INCLUDE_PATTERNS = {'*.md', '*.txt'}
NOTES_DIRECTORIES = [os.path.expanduser('~/Dropbox/notes')]
INDEX_FILE_PATH = os.path.expanduser('~/notes-index.pickle')
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
def load_index(index_location):
try:
load_timer = Timer()
logger.debug("Loading index from %s ..", index_location)
with open(index_location, 'rb') as handle:
index = pickle.load(handle)
logger.debug("Format version of index loaded from disk: %i", index['version'])
assert index['version'] == INDEX_VERSION, "Incompatible index format detected!"
logger.debug("Loaded %i notes from index in %s", len(index['files']), load_timer)
except Exception:
logger.warning("Failed to load index from file!", exc_info=True)
return {'keywords': {}, 'files': {}, 'version': INDEX_VERSION}
else:
return index
class TextIndex:
def __init__(self, index_location: str, notes_directories: List[str]):
self.index_location = index_location
self.notes_directories = notes_directories
self.index = load_index(self.index_location)
def search(self, query: str) -> List[str]:
"""Return names of files containing all of the given keywords."""
print('Searching index')
index = load_index(INDEX_FILE_PATH)
needles = query.split()
matches = None
normalized_db_keywords = [(k, k.lower()) for k in index['keywords']]
for word in needles:
submatches = set()
for original_db_kw, normalized_db_kw in normalized_db_keywords:
if word in normalized_db_kw:
submatches.update(index['keywords'][original_db_kw])
if matches is None:
matches = submatches
else:
matches &= submatches
return sorted(matches) if matches else []
def update_index(self):
"""Update the keyword index by scanning the notes directory."""
user_directories = self.notes_directories
index = self.index
# First we find the filenames and last modified times of the notes on disk.
notes_on_disk = {}
last_count = 0
for directory in user_directories:
for root, dirs, files in os.walk(directory):
for filename in files:
if any(fnmatch.fnmatch(filename, pattern) for pattern in INCLUDE_PATTERNS):
abspath = os.path.join(root, filename)
notes_on_disk[abspath] = os.path.getmtime(abspath)
logger.info("Found %i notes in %s ..", len(notes_on_disk) - last_count, directory)
last_count = len(notes_on_disk)
logger.info("Found a total of %i notes ..", len(notes_on_disk))
# Check for updated and/or deleted notes since the last run?
if index:
for filename in set(index['files'].keys()):
if filename not in notes_on_disk:
# Forget a deleted note.
self.delete_note_from_index(index, filename)
else:
# Check whether previously seen note has changed?
last_modified_on_disk = notes_on_disk[filename]
last_modified_in_db = index['files'][filename]
if last_modified_on_disk > last_modified_in_db:
self.delete_note_from_index(index, filename)
self.add_note_to_index(index, filename, last_modified_on_disk)
# Already checked this note, we can forget about it.
del notes_on_disk[filename]
# Add new notes to index.
for filename, last_modified in notes_on_disk.items():
self.add_note_to_index(index, filename, last_modified)
# TODO: Only save if necessary.
self.save_index(INDEX_FILE_PATH, index)
def add_note_to_index(self, index, filename, last_modified):
"""Add a note to the index (assumes the note is not already indexed)."""
logger.info("Adding file to index: %s", filename)
index['files'][filename] = last_modified
with open(filename, encoding='utf-8') as handle:
raw = handle.read()
for kw in tokenize(raw):
if kw not in index['keywords']:
index['keywords'][kw] = [filename]
else:
index['keywords'][kw].append(filename)
def delete_note_from_index(self, index, filename):
"""Delete a note from given index."""
logger.info("Deleting file from index: %s", filename)
del index['files'][filename]
for kw in index['keywords']:
index['keywords'][kw] = [x for x in index['keywords'][kw] if x != filename]
def tokenize(self, text: str) -> Set[str]:
"""Tokenize a string into a list of normalized, unique keywords."""
return {w.strip() for w in re.findall(r'\w{3,}', text, re.UNICODE) if not w.isspace()}
def save_index(self, database_file: str, index):
"""Save the keyword index to disk."""
with open(database_file, 'wb') as handle:
pickle.dump(index, handle)
class NotesIndex:
def __init__(self, argv=None):
"""Entry point to the notes search."""
global_timer = Timer()
keywords = self.parse_args(argv or sys.argv[1:])
self.load_index()
self.update_index()
if self.dirty:
self.save_index()
if self.keyword_filter is not None:
self.list_keywords(self.keyword_filter)
logger.debug("Finished listing keywords in %s", global_timer)
else:
matches = self.search_index(keywords)
if matches:
print('\n'.join(sorted(matches)))
logger.debug("Finished searching index in %s", global_timer)
def parse_args(self, argv):
"""Parse the command line arguments."""
try:
opts, keywords = getopt.getopt(argv, 'il:d:n:e:vh', [
'ignore-case', 'list=', 'database=', 'notes=', 'encoding=',
'verbose', 'help',
])
except getopt.GetoptError as error:
print(str(error))
self.usage()
sys.exit(2)
# Define the command line option defaults.
self.database_file = '~/.vim/misc/notes/index.pickle'
self.user_directories = ['~/.vim/misc/notes/user/']
self.character_encoding = 'UTF-8'
self.case_sensitive = True
self.keyword_filter = None
# Map command line options to variables.
for opt, arg in opts:
if opt in ('-i', '--ignore-case'):
self.case_sensitive = False
logger.debug("Disabling case sensitivity")
elif opt in ('-l', '--list'):
self.keyword_filter = arg.strip().lower()
elif opt in ('-d', '--database'):
self.database_file = arg
elif opt in ('-n', '--notes'):
self.user_directories.append(arg)
elif opt in ('-e', '--encoding'):
self.character_encoding = arg
elif opt in ('-v', '--verbose'):
logger.setLevel(logging.DEBUG)
elif opt in ('-h', '--help'):
self.usage()
sys.exit(0)
else:
assert False, "Unhandled option"
logger.debug("Index file: %s", self.database_file)
logger.debug("Notes directories: %r", self.user_directories)
logger.debug("Character encoding: %s", self.character_encoding)
if self.keyword_filter is not None:
self.keyword_filter = self.decode(self.keyword_filter)
# Canonicalize pathnames, check validity.
self.database_file = self.munge_path(self.database_file)
self.user_directories = [self.munge_path(d) for d in self.user_directories if os.path.isdir(d)]
# Return tokenized keyword arguments.
return [self.normalize(k) for k in self.tokenize(' '.join(keywords))]
def load_index(self):
"""Load the keyword index or start with an empty one."""
try:
load_timer = Timer()
logger.debug("Loading index from %s ..", self.database_file)
with open(self.database_file, 'rb') as handle:
self.index = pickle.load(handle)
logger.debug("Format version of index loaded from disk: %i", self.index['version'])
assert self.index['version'] == INDEX_VERSION, "Incompatible index format detected!"
self.first_use = False
self.dirty = False
logger.debug("Loaded %i notes from index in %s", len(self.index['files']), load_timer)
except Exception:
logger.warn("Failed to load index from file!", exc_info=True)
self.first_use = True
self.dirty = True
self.index = {'keywords': {}, 'files': {}, 'version': INDEX_VERSION}
def save_index(self):
"""Save the keyword index to disk."""
save_timer = Timer()
with open(self.database_file, 'wb') as handle:
pickle.dump(self.index, handle)
logger.debug("Saved index to disk in %s", save_timer)
def update_index(self):
"""Update the keyword index by scanning the notes directory."""
update_timer = Timer()
# First we find the filenames and last modified times of the notes on disk.
notes_on_disk = {}
last_count = 0
for directory in self.user_directories:
print('Scanning', directory)
for root, dirs, files in os.walk(directory):
for filename in files:
if any(fnmatch.fnmatch(filename, pattern) for pattern in INCLUDE_PATTERNS):
abspath = os.path.join(root, filename)
notes_on_disk[abspath] = os.path.getmtime(abspath)
logger.info("Found %i notes in %s ..", len(notes_on_disk) - last_count, directory)
last_count = len(notes_on_disk)
logger.info("Found a total of %i notes ..", len(notes_on_disk))
# Check for updated and/or deleted notes since the last run?
if not self.first_use:
            for filename in list(self.index['files'].keys()):  # copy the keys: delete_note() mutates the dict
if filename not in notes_on_disk:
# Forget a deleted note.
self.delete_note(filename)
else:
# Check whether previously seen note has changed?
last_modified_on_disk = notes_on_disk[filename]
last_modified_in_db = self.index['files'][filename]
if last_modified_on_disk > last_modified_in_db:
self.delete_note(filename)
self.add_note(filename, last_modified_on_disk)
# Already checked this note, we can forget about it.
del notes_on_disk[filename]
# Add new notes to index.
for filename, last_modified in notes_on_disk.items():
self.add_note(filename, last_modified)
logger.info("Updated index in %s", update_timer)
def add_note(self, filename, last_modified):
"""Add a note to the index (assumes the note is not already indexed)."""
logger.info("Adding file to index: %s", filename)
self.index['files'][filename] = last_modified
with open(filename, encoding='utf-8') as handle:
for kw in self.tokenize(handle.read()):
if kw not in self.index['keywords']:
self.index['keywords'][kw] = [filename]
else:
self.index['keywords'][kw].append(filename)
self.dirty = True
def delete_note(self, filename):
"""Remove a note from the index."""
logger.info("Removing file from index: %s", filename)
del self.index['files'][filename]
for kw in self.index['keywords']:
self.index['keywords'][kw] = [x for x in self.index['keywords'][kw] if x != filename]
self.dirty = True
def search_index(self, keywords):
"""Return names of files containing all of the given keywords."""
matches = None
normalized_db_keywords = [(k, self.normalize(k)) for k in self.index['keywords']]
for usr_kw in keywords:
submatches = set()
for original_db_kw, normalized_db_kw in normalized_db_keywords:
# Yes I'm using a nested for loop over all keywords in the index. If
# I really have to I'll probably come up with something more
# efficient, but really it doesn't seem to be needed -- I have over
# 850 notes (about 8 MB) and 25000 keywords and it's plenty fast.
if usr_kw in normalized_db_kw:
submatches.update(self.index['keywords'][original_db_kw])
if matches is None:
matches = submatches
else:
matches &= submatches
return list(matches) if matches else []
def list_keywords(self, substring, limit=25):
"""Print all (matching) keywords to standard output."""
print('listing keywords')
decorated = []
substring = self.normalize(substring)
for kw, filenames in self.index['keywords'].items():
normalized_kw = self.normalize(kw)
if substring in normalized_kw:
if Levenshtein is not None:
decorated.append((Levenshtein.distance(normalized_kw, substring), -len(filenames), kw))
else:
decorated.append((-len(filenames), kw))
decorated.sort()
selection = [d[-1] for d in decorated[:limit]]
print(selection)
print(self.encode(u'\n'.join(selection)))
def tokenize(self, text):
"""Tokenize a string into a list of normalized, unique keywords."""
words = set()
text = self.decode(text)
for word in re.findall(r'\w+', text, re.UNICODE):
word = word.strip()
if word != '' and not word.isspace() and len(word) >= 2:
words.add(word)
return words
def normalize(self, keyword):
"""Normalize the case of a keyword if configured to do so."""
return keyword if self.case_sensitive else keyword.lower()
def encode(self, text):
"""Encode a string in the user's preferred character encoding."""
if isinstance(text, str):
text = codecs.encode(text, self.character_encoding, 'ignore')
return text
def decode(self, text):
"""Decode a string in the user's preferred character encoding."""
if isinstance(text, bytes):
text = codecs.decode(text, self.character_encoding, 'ignore')
return text
def munge_path(self, path):
"""Canonicalize user-defined path, making it absolute."""
return os.path.abspath(os.path.expanduser(path))
def usage(self):
print(__doc__.strip())
class Timer:
"""Easy to use timer to keep track of long during operations."""
def __init__(self):
self.start_time = time.time()
def __str__(self):
return "%.2f seconds" % self.elapsed_time
@property
def elapsed_time(self):
return time.time() - self.start_time
if __name__ == '__main__':
NotesIndex()
| [
"codecs.encode",
"pickle.dump",
"getopt.getopt",
"logging.basicConfig",
"os.path.join",
"codecs.decode",
"os.path.isdir",
"Levenshtein.distance",
"os.walk",
"time.time",
"re.findall",
"pickle.load",
"os.path.getmtime",
"sys.exit",
"os.path.expanduser",
"fnmatch.fnmatch",
"logging.getLogger"
] | [((2271, 2313), 'os.path.expanduser', 'os.path.expanduser', (['"""~/notes-index.pickle"""'], {}), "('~/notes-index.pickle')\n", (2289, 2313), False, 'import os\n'), ((2315, 2355), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (2334, 2355), False, 'import logging\n'), ((2365, 2392), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2382, 2392), False, 'import logging\n'), ((2214, 2251), 'os.path.expanduser', 'os.path.expanduser', (['"""~/Dropbox/notes"""'], {}), "('~/Dropbox/notes')\n", (2232, 2251), False, 'import os\n'), ((16299, 16335), 're.findall', 're.findall', (['"""\\\\w+"""', 'text', 're.UNICODE'], {}), "('\\\\w+', text, re.UNICODE)\n", (16309, 16335), False, 'import re\n'), ((17472, 17483), 'time.time', 'time.time', ([], {}), '()\n', (17481, 17483), False, 'import time\n'), ((2601, 2620), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (2612, 2620), False, 'import pickle\n'), ((4496, 4514), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (4503, 4514), False, 'import os\n'), ((7381, 7407), 'pickle.dump', 'pickle.dump', (['index', 'handle'], {}), '(index, handle)\n', (7392, 7407), False, 'import pickle\n'), ((8209, 8328), 'getopt.getopt', 'getopt.getopt', (['argv', '"""il:d:n:e:vh"""', "['ignore-case', 'list=', 'database=', 'notes=', 'encoding=', 'verbose', 'help']"], {}), "(argv, 'il:d:n:e:vh', ['ignore-case', 'list=', 'database=',\n 'notes=', 'encoding=', 'verbose', 'help'])\n", (8222, 8328), False, 'import getopt\n'), ((11404, 11435), 'pickle.dump', 'pickle.dump', (['self.index', 'handle'], {}), '(self.index, handle)\n', (11415, 11435), False, 'import pickle\n'), ((11890, 11908), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (11897, 11908), False, 'import os\n'), ((16819, 16873), 'codecs.encode', 'codecs.encode', (['text', 'self.character_encoding', '"""ignore"""'], {}), "(text, self.character_encoding, 'ignore')\n", (16832, 16873), False, 'import codecs\n'), ((17052, 17106), 'codecs.decode', 'codecs.decode', (['text', 'self.character_encoding', '"""ignore"""'], {}), "(text, self.character_encoding, 'ignore')\n", (17065, 17106), False, 'import codecs\n'), ((17257, 17281), 'os.path.expanduser', 'os.path.expanduser', (['path'], {}), '(path)\n', (17275, 17281), False, 'import os\n'), ((17616, 17627), 'time.time', 'time.time', ([], {}), '()\n', (17625, 17627), False, 'import time\n'), ((7159, 7198), 're.findall', 're.findall', (['"""\\\\w{3,}"""', 'text', 're.UNICODE'], {}), "('\\\\w{3,}', text, re.UNICODE)\n", (7169, 7198), False, 'import re\n'), ((8483, 8494), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (8491, 8494), False, 'import sys\n'), ((10151, 10167), 'os.path.isdir', 'os.path.isdir', (['d'], {}), '(d)\n', (10164, 10167), False, 'import os\n'), ((10592, 10611), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (10603, 10611), False, 'import pickle\n'), ((4685, 4713), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (4697, 4713), False, 'import os\n'), ((4763, 4788), 'os.path.getmtime', 'os.path.getmtime', (['abspath'], {}), '(abspath)\n', (4779, 4788), False, 'import os\n'), ((12079, 12107), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (12091, 12107), False, 'import os\n'), ((12157, 12182), 'os.path.getmtime', 'os.path.getmtime', (['abspath'], {}), '(abspath)\n', (12173, 12182), False, 'import os\n'), ((4582, 4616), 'fnmatch.fnmatch', 'fnmatch.fnmatch', (['filename', 'pattern'], {}), '(filename, pattern)\n', (4597, 4616), False, 'import fnmatch\n'), ((11976, 12010), 'fnmatch.fnmatch', 'fnmatch.fnmatch', (['filename', 'pattern'], {}), '(filename, pattern)\n', (11991, 12010), False, 'import fnmatch\n'), ((15810, 15856), 'Levenshtein.distance', 'Levenshtein.distance', (['normalized_kw', 'substring'], {}), '(normalized_kw, substring)\n', (15830, 15856), False, 'import Levenshtein\n'), ((9560, 9571), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (9568, 9571), False, 'import sys\n')]
"""Implementation of the FunctionInjectionWalker class."""
from inspect import (
signature as inspect_signature)
from types import (
CodeType,
FunctionType)
from typing import (
Iterator,
Optional)
from .abstract_injection_walker import (
AbstractInjectionWalker)
from .code_object_injection_walker import (
CodeObjectInjectionWalker)
from .doc_string_injection_walker import (
DocStringInjectionWalker)
from .failed_injection_walker import (
FailedInjectionWalker)
from .name_injection_walker import (
NameInjectionWalker)
class FunctionInjectionWalker(AbstractInjectionWalker):
"""Injection walker for a function.
    This walker will attempt to recover the source code of a function via
access to its ``__code__`` attribute.
"""
INJECTION_RE = None
RESPONSE_RE = r'<function .+ at 0x[0-9a-fA-F]+>'
def __extra_init__(
self
) -> None:
self._code_walker: Optional[CodeObjectInjectionWalker] = None
self._name_walker: NameInjectionWalker = \
self.empty_instance(NameInjectionWalker)
self._docstring_walker: DocStringInjectionWalker = \
self.empty_instance(DocStringInjectionWalker)
self._src_code: Optional[str] = None
self._signature: Optional[str] = None
@property
def code_walker(
self
) -> Optional[CodeObjectInjectionWalker]:
"""The code object that this walker recovered from the target.
This attribute will only be populated after a call to :func:`walk`. If
the call to ``walk()`` cannot recover the object, then this attribute
will remain as ``None``.
"""
return self._code_walker
@property
def name_walker(
self
) -> NameInjectionWalker:
"""Walker used to recover this function's __name__."""
return self._name_walker
@property
def docstring_walker(
self
) -> DocStringInjectionWalker:
"""Walker used to recover this function's __doc__ string."""
return self._docstring_walker
@property
def src_code(
self
) -> Optional[str]:
"""The source code that this walker recovered from the target."""
return self._src_code
@property
def signature(
self
) -> Optional[str]:
"""The decompiled function's signature, if one was retrieved."""
return self._signature
def walk(
self
) -> Iterator[AbstractInjectionWalker]:
yield from self._walk_name()
if not self._name_walker.is_default:
if self._name_walker.value in self._engine.function_blacklist:
return
self._engine.function_blacklist.add(self._name_walker.value)
yield from self._walk_docstring()
code_obj_injection = f'{self._injection_str}.__code__'
raw_result = self._harness.send_injection(code_obj_injection)
if raw_result is None:
yield FailedInjectionWalker.msg(
'Unable to recover injection response from string '
f'{raw_result}')
return
walker = self.next_walker(code_obj_injection, raw_result)
if walker is None:
yield FailedInjectionWalker.msg(
'No matching walker found for injection response '
f'{raw_result}')
return
elif not isinstance(walker, CodeObjectInjectionWalker):
yield FailedInjectionWalker.msg(
f'Got {type(walker)} when injecting function __code__ '
'attribute; something is terribly wrong...')
return
for sub_walker in walker.walk():
yield sub_walker
if walker.code_obj is None or walker.src_code is None:
yield FailedInjectionWalker.msg(
'Unable to successfully recover code object from string '
f'{walker.injection_str}')
return
src_lines = ([] if walker.src_code is None else
walker.src_code.splitlines())
indented_src_lines = [f' {line}' for line in src_lines]
self._signature = self.__class__.code_obj_to_signature(
walker.code_obj)
self._src_code = f'{self._signature}\n'
if self._docstring_walker.value:
self._src_code += f' """{self._docstring_walker.value}"""\n'
self._src_code += '\n'.join(indented_src_lines)
yield self
def _walk_name(
self
) -> Iterator[AbstractInjectionWalker]:
"""Recover the function's __name__ attribute."""
name_injection = f'{self._injection_str}.__qualname__!r'
result = self._harness.send_injection(name_injection)
if result is None:
yield FailedInjectionWalker.msg(
'Unable to read __name__ of function via injection '
f'{name_injection}')
return
walker = self.next_walker(name_injection, result)
if not isinstance(walker, NameInjectionWalker):
yield FailedInjectionWalker.msg(
f'Expected a name walker when sending {name_injection} '
f'but got {walker.__class__.__qualname__} instead')
return
yield from walker.walk()
self._name_walker = walker
def _walk_docstring(
self
) -> Iterator[AbstractInjectionWalker]:
"""Recover the function's __doc__ attribute."""
doc_string_injection = f'{self._injection_str}.__doc__!r'
result = self._harness.send_injection(doc_string_injection)
if result is None:
yield FailedInjectionWalker.msg(
'Unable to read __doc__ of function via injection '
f'{doc_string_injection}')
return
walker = self.next_walker(doc_string_injection, result)
if not isinstance(walker, DocStringInjectionWalker):
yield FailedInjectionWalker.msg(
f'Expected a doc walker when sending {doc_string_injection} '
f'but got {walker.__class__.__qualname__} instead')
return
yield from walker.walk()
self._docstring_walker = walker
@staticmethod
def code_obj_to_signature(
code_obj: CodeType
) -> str:
"""Get a function signature from a code object.
See:
https://stackoverflow.com/a/56761306/5094008
"""
try:
func = FunctionType(code_obj, {})
arg_sequence = inspect_signature(func)
return f'def {code_obj.co_name}{arg_sequence}:'
except TypeError:
# build our own signature
return f"""\
# exact argument names could not be reversed for below signature
def {code_obj.co_name}(*args, **kwargs):"""
def __str__(
self
) -> str:
return f'Injected function object with string {self._injection_str}'
| [
"types.FunctionType",
"inspect.signature"
] | [((6470, 6496), 'types.FunctionType', 'FunctionType', (['code_obj', '{}'], {}), '(code_obj, {})\n', (6482, 6496), False, 'from types import CodeType, FunctionType\n'), ((6524, 6547), 'inspect.signature', 'inspect_signature', (['func'], {}), '(func)\n', (6541, 6547), True, 'from inspect import signature as inspect_signature\n')] |
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/tracecode-toolkit/
# The TraceCode software is licensed under the Apache License version 2.0.
# Data generated with TraceCode require an acknowledgment.
# TraceCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with TraceCode or any TraceCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with TraceCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# TraceCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# TraceCode is a free and open source software analysis tool from nexB Inc. and others.
# Visit https://github.com/nexB/tracecode-toolkit/ for support and download.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from tracecode import pathutils
class TestPathUtils(unittest.TestCase):
def test_common_path_prefix1(self):
test = pathutils.common_path_prefix('/a/b/c', '/a/b/c')
assert ('a/b/c', 3) == test
def test_common_path_prefix2(self):
test = pathutils.common_path_prefix('/a/b/c', '/a/b')
assert ('a/b', 2) == test
def test_common_path_prefix3(self):
test = pathutils.common_path_prefix('/a/b', '/a/b/c')
assert ('a/b', 2) == test
def test_common_path_prefix4(self):
test = pathutils.common_path_prefix('/a', '/a')
assert ('a', 1) == test
def test_common_path_prefix_path_root(self):
test = pathutils.common_path_prefix('/a/b/c', '/')
assert (None, 0) == test
def test_common_path_prefix_root_path(self):
test = pathutils.common_path_prefix('/', '/a/b/c')
assert (None, 0) == test
def test_common_path_prefix_root_root(self):
test = pathutils.common_path_prefix('/', '/')
assert (None, 0) == test
def test_common_path_prefix_path_elements_are_similar(self):
test = pathutils.common_path_prefix('/a/b/c', '/a/b/d')
assert ('a/b', 2) == test
def test_common_path_prefix_no_match(self):
test = pathutils.common_path_prefix('/abc/d', '/abe/f')
assert (None, 0) == test
def test_common_path_prefix_ignore_training_slashes(self):
test = pathutils.common_path_prefix('/a/b/c/', '/a/b/c/')
assert ('a/b/c', 3) == test
def test_common_path_prefix8(self):
test = pathutils.common_path_prefix('/a/b/c/', '/a/b')
assert ('a/b', 2) == test
def test_common_path_prefix10(self):
test = pathutils.common_path_prefix('/a/b/c.txt',
'/a/b/b.txt')
assert ('a/b', 2) == test
def test_common_path_prefix11(self):
test = pathutils.common_path_prefix('/a/b/c.txt', '/a/b.txt')
assert ('a', 1) == test
def test_common_path_prefix12(self):
test = pathutils.common_path_prefix('/a/c/e/x.txt',
'/a/d/a.txt')
assert ('a', 1) == test
def test_common_path_prefix13(self):
test = pathutils.common_path_prefix('/a/c/e/x.txt', '/a/d/')
assert ('a', 1) == test
def test_common_path_prefix14(self):
test = pathutils.common_path_prefix('/a/c/e/', '/a/d/')
assert ('a', 1) == test
def test_common_path_prefix15(self):
test = pathutils.common_path_prefix('/a/c/e/', '/a/c/a.txt')
assert ('a/c', 2) == test
def test_common_path_prefix16(self):
test = pathutils.common_path_prefix('/a/c/e/', '/a/c/f/')
assert ('a/c', 2) == test
def test_common_path_prefix17(self):
test = pathutils.common_path_prefix('/a/a.txt', '/a/b.txt/')
assert ('a', 1) == test
def test_common_path_prefix18(self):
test = pathutils.common_path_prefix('/a/c/', '/a/')
assert ('a', 1) == test
def test_common_path_prefix19(self):
test = pathutils.common_path_prefix('/a/c.txt', '/a/')
assert ('a', 1) == test
def test_common_path_prefix20(self):
test = pathutils.common_path_prefix('/a/c/', '/a/d/')
assert ('a', 1) == test
def test_common_path_suffix(self):
test = pathutils.common_path_suffix('/a/b/c', '/a/b/c')
assert ('a/b/c', 3) == test
def test_common_path_suffix_absolute_relative(self):
test = pathutils.common_path_suffix('a/b/c', '/a/b/c')
assert ('a/b/c', 3) == test
def test_common_path_suffix_find_subpath(self):
test = pathutils.common_path_suffix('/z/b/c', '/a/b/c')
assert ('b/c', 2) == test
def test_common_path_suffix_handles_relative_path(self):
test = pathutils.common_path_suffix('a/b', 'a/b')
assert ('a/b', 2) == test
def test_common_path_suffix_handles_relative_subpath(self):
test = pathutils.common_path_suffix('zsds/adsds/a/b/b/c',
'a//a/d//b/c')
assert ('b/c', 2) == test
def test_common_path_suffix_ignore_and_strip_trailing_slash(self):
test = pathutils.common_path_suffix('zsds/adsds/a/b/b/c/',
'a//a/d//b/c/')
assert ('b/c', 2) == test
def test_common_path_suffix_return_None_if_no_common_suffix(self):
test = pathutils.common_path_suffix('/a/b/c', '/')
assert (None, 0) == test
def test_common_path_suffix_return_None_if_no_common_suffix2(self):
test = pathutils.common_path_suffix('/', '/a/b/c')
assert (None, 0) == test
def test_common_path_suffix_match_only_whole_segments(self):
# only segments are honored, commonality within segment is ignored
test = pathutils.common_path_suffix(
'this/is/aaaa/great/path', 'this/is/aaaaa/great/path')
assert ('great/path', 2) == test
def test_common_path_suffix_two_root(self):
test = pathutils.common_path_suffix('/', '/')
assert (None, 0) == test
def test_common_path_suffix_empty_root(self):
test = pathutils.common_path_suffix('', '/')
assert (None, 0) == test
def test_common_path_suffix_root_empty(self):
test = pathutils.common_path_suffix('/', '')
assert (None, 0) == test
def test_common_path_suffix_empty_empty(self):
test = pathutils.common_path_suffix('', '')
assert (None, 0) == test
| [
"tracecode.pathutils.common_path_suffix",
"tracecode.pathutils.common_path_prefix"
] | [((1673, 1721), 'tracecode.pathutils.common_path_prefix', 'pathutils.common_path_prefix', (['"""/a/b/c"""', '"""/a/b/c"""'], {}), "('/a/b/c', '/a/b/c')\n", (1701, 1721), False, 'from tracecode import pathutils\n'), ((1814, 1860), 'tracecode.pathutils.common_path_prefix', 'pathutils.common_path_prefix', (['"""/a/b/c"""', '"""/a/b"""'], {}), "('/a/b/c', '/a/b')\n", (1842, 1860), False, 'from tracecode import pathutils\n'), ((1951, 1997), 'tracecode.pathutils.common_path_prefix', 'pathutils.common_path_prefix', (['"""/a/b"""', '"""/a/b/c"""'], {}), "('/a/b', '/a/b/c')\n", (1979, 1997), False, 'from tracecode import pathutils\n'), ((2088, 2128), 'tracecode.pathutils.common_path_prefix', 'pathutils.common_path_prefix', (['"""/a"""', '"""/a"""'], {}), "('/a', '/a')\n", (2116, 2128), False, 'from tracecode import pathutils\n'), ((2226, 2269), 'tracecode.pathutils.common_path_prefix', 'pathutils.common_path_prefix', (['"""/a/b/c"""', '"""/"""'], {}), "('/a/b/c', '/')\n", (2254, 2269), False, 'from tracecode import pathutils\n'), ((2368, 2411), 'tracecode.pathutils.common_path_prefix', 'pathutils.common_path_prefix', (['"""/"""', '"""/a/b/c"""'], {}), "('/', '/a/b/c')\n", (2396, 2411), False, 'from tracecode import pathutils\n'), ((2510, 2548), 'tracecode.pathutils.common_path_prefix', 'pathutils.common_path_prefix', (['"""/"""', '"""/"""'], {}), "('/', '/')\n", (2538, 2548), False, 'from tracecode import pathutils\n'), ((2663, 2711), 'tracecode.pathutils.common_path_prefix', 'pathutils.common_path_prefix', (['"""/a/b/c"""', '"""/a/b/d"""'], {}), "('/a/b/c', '/a/b/d')\n", (2691, 2711), False, 'from tracecode import pathutils\n'), ((2810, 2858), 'tracecode.pathutils.common_path_prefix', 'pathutils.common_path_prefix', (['"""/abc/d"""', '"""/abe/f"""'], {}), "('/abc/d', '/abe/f')\n", (2838, 2858), False, 'from tracecode import pathutils\n'), ((2971, 3021), 'tracecode.pathutils.common_path_prefix', 'pathutils.common_path_prefix', (['"""/a/b/c/"""', '"""/a/b/c/"""'], {}), "('/a/b/c/', '/a/b/c/')\n", (2999, 3021), False, 'from tracecode import pathutils\n'), ((3114, 3161), 'tracecode.pathutils.common_path_prefix', 'pathutils.common_path_prefix', (['"""/a/b/c/"""', '"""/a/b"""'], {}), "('/a/b/c/', '/a/b')\n", (3142, 3161), False, 'from tracecode import pathutils\n'), ((3253, 3309), 'tracecode.pathutils.common_path_prefix', 'pathutils.common_path_prefix', (['"""/a/b/c.txt"""', '"""/a/b/b.txt"""'], {}), "('/a/b/c.txt', '/a/b/b.txt')\n", (3281, 3309), False, 'from tracecode import pathutils\n'), ((3445, 3499), 'tracecode.pathutils.common_path_prefix', 'pathutils.common_path_prefix', (['"""/a/b/c.txt"""', '"""/a/b.txt"""'], {}), "('/a/b/c.txt', '/a/b.txt')\n", (3473, 3499), False, 'from tracecode import pathutils\n'), ((3589, 3647), 'tracecode.pathutils.common_path_prefix', 'pathutils.common_path_prefix', (['"""/a/c/e/x.txt"""', '"""/a/d/a.txt"""'], {}), "('/a/c/e/x.txt', '/a/d/a.txt')\n", (3617, 3647), False, 'from tracecode import pathutils\n'), ((3781, 3834), 'tracecode.pathutils.common_path_prefix', 'pathutils.common_path_prefix', (['"""/a/c/e/x.txt"""', '"""/a/d/"""'], {}), "('/a/c/e/x.txt', '/a/d/')\n", (3809, 3834), False, 'from tracecode import pathutils\n'), ((3924, 3972), 'tracecode.pathutils.common_path_prefix', 'pathutils.common_path_prefix', (['"""/a/c/e/"""', '"""/a/d/"""'], {}), "('/a/c/e/', '/a/d/')\n", (3952, 3972), False, 'from tracecode import pathutils\n'), ((4062, 4115), 'tracecode.pathutils.common_path_prefix', 'pathutils.common_path_prefix', (['"""/a/c/e/"""', '"""/a/c/a.txt"""'], {}), "('/a/c/e/', '/a/c/a.txt')\n", (4090, 4115), False, 'from tracecode import pathutils\n'), ((4207, 4257), 'tracecode.pathutils.common_path_prefix', 'pathutils.common_path_prefix', (['"""/a/c/e/"""', '"""/a/c/f/"""'], {}), "('/a/c/e/', '/a/c/f/')\n", (4235, 4257), False, 'from tracecode import pathutils\n'), ((4349, 4402), 'tracecode.pathutils.common_path_prefix', 'pathutils.common_path_prefix', (['"""/a/a.txt"""', '"""/a/b.txt/"""'], {}), "('/a/a.txt', '/a/b.txt/')\n", (4377, 4402), False, 'from tracecode import pathutils\n'), ((4492, 4536), 'tracecode.pathutils.common_path_prefix', 'pathutils.common_path_prefix', (['"""/a/c/"""', '"""/a/"""'], {}), "('/a/c/', '/a/')\n", (4520, 4536), False, 'from tracecode import pathutils\n'), ((4626, 4673), 'tracecode.pathutils.common_path_prefix', 'pathutils.common_path_prefix', (['"""/a/c.txt"""', '"""/a/"""'], {}), "('/a/c.txt', '/a/')\n", (4654, 4673), False, 'from tracecode import pathutils\n'), ((4763, 4809), 'tracecode.pathutils.common_path_prefix', 'pathutils.common_path_prefix', (['"""/a/c/"""', '"""/a/d/"""'], {}), "('/a/c/', '/a/d/')\n", (4791, 4809), False, 'from tracecode import pathutils\n'), ((4897, 4945), 'tracecode.pathutils.common_path_suffix', 'pathutils.common_path_suffix', (['"""/a/b/c"""', '"""/a/b/c"""'], {}), "('/a/b/c', '/a/b/c')\n", (4925, 4945), False, 'from tracecode import pathutils\n'), ((5055, 5102), 'tracecode.pathutils.common_path_suffix', 'pathutils.common_path_suffix', (['"""a/b/c"""', '"""/a/b/c"""'], {}), "('a/b/c', '/a/b/c')\n", (5083, 5102), False, 'from tracecode import pathutils\n'), ((5207, 5255), 'tracecode.pathutils.common_path_suffix', 'pathutils.common_path_suffix', (['"""/z/b/c"""', '"""/a/b/c"""'], {}), "('/z/b/c', '/a/b/c')\n", (5235, 5255), False, 'from tracecode import pathutils\n'), ((5367, 5409), 'tracecode.pathutils.common_path_suffix', 'pathutils.common_path_suffix', (['"""a/b"""', '"""a/b"""'], {}), "('a/b', 'a/b')\n", (5395, 5409), False, 'from tracecode import pathutils\n'), ((5524, 5589), 'tracecode.pathutils.common_path_suffix', 'pathutils.common_path_suffix', (['"""zsds/adsds/a/b/b/c"""', '"""a//a/d//b/c"""'], {}), "('zsds/adsds/a/b/b/c', 'a//a/d//b/c')\n", (5552, 5589), False, 'from tracecode import pathutils\n'), ((5755, 5822), 'tracecode.pathutils.common_path_suffix', 'pathutils.common_path_suffix', (['"""zsds/adsds/a/b/b/c/"""', '"""a//a/d//b/c/"""'], {}), "('zsds/adsds/a/b/b/c/', 'a//a/d//b/c/')\n", (5783, 5822), False, 'from tracecode import pathutils\n'), ((5988, 6031), 'tracecode.pathutils.common_path_suffix', 'pathutils.common_path_suffix', (['"""/a/b/c"""', '"""/"""'], {}), "('/a/b/c', '/')\n", (6016, 6031), False, 'from tracecode import pathutils\n'), ((6153, 6196), 'tracecode.pathutils.common_path_suffix', 'pathutils.common_path_suffix', (['"""/"""', '"""/a/b/c"""'], {}), "('/', '/a/b/c')\n", (6181, 6196), False, 'from tracecode import pathutils\n'), ((6386, 6473), 'tracecode.pathutils.common_path_suffix', 'pathutils.common_path_suffix', (['"""this/is/aaaa/great/path"""', '"""this/is/aaaaa/great/path"""'], {}), "('this/is/aaaa/great/path',\n 'this/is/aaaaa/great/path')\n", (6414, 6473), False, 'from tracecode import pathutils\n'), ((6588, 6626), 'tracecode.pathutils.common_path_suffix', 'pathutils.common_path_suffix', (['"""/"""', '"""/"""'], {}), "('/', '/')\n", (6616, 6626), False, 'from tracecode import pathutils\n'), ((6726, 6763), 'tracecode.pathutils.common_path_suffix', 'pathutils.common_path_suffix', (['""""""', '"""/"""'], {}), "('', '/')\n", (6754, 6763), False, 'from tracecode import pathutils\n'), ((6863, 6900), 'tracecode.pathutils.common_path_suffix', 'pathutils.common_path_suffix', (['"""/"""', '""""""'], {}), "('/', '')\n", (6891, 6900), False, 'from tracecode import pathutils\n'), ((7001, 7037), 'tracecode.pathutils.common_path_suffix', 'pathutils.common_path_suffix', (['""""""', '""""""'], {}), "('', '')\n", (7029, 7037), False, 'from tracecode import pathutils\n')]
# Generated by Django 3.2.8 on 2021-10-24 01:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dim_sum', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='dimsum',
name='history',
field=models.TextField(default='Write summary here.'),
),
]
| [
"django.db.models.TextField"
] | [((324, 371), 'django.db.models.TextField', 'models.TextField', ([], {'default': '"""Write summary here."""'}), "(default='Write summary here.')\n", (340, 371), False, 'from django.db import migrations, models\n')] |
# Copyright (c) 2020-2021 CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
from lifemonitor.auth.models import Subscription, User
from tests import utils
logger = logging.getLogger()
def test_workflow_subscription(user1: dict, valid_workflow: str):
_, workflow = utils.pick_and_register_workflow(user1, valid_workflow)
user: User = user1['user']
s: Subscription = user.subscribe(workflow)
logger.debug("Subscription: %r", s)
assert s, "Subscription should not be empty"
assert len(user.subscriptions) == 1, "Unexpected number of subscriptions"
s: Subscription = user.unsubscribe(workflow)
logger.debug("Subscription: %r", s)
assert s, "Subscription should not be empty"
assert len(user.subscriptions) == 0, "Unexpected number of subscriptions"
| [
"tests.utils.pick_and_register_workflow",
"logging.getLogger"
] | [((1196, 1215), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1213, 1215), False, 'import logging\n'), ((1302, 1357), 'tests.utils.pick_and_register_workflow', 'utils.pick_and_register_workflow', (['user1', 'valid_workflow'], {}), '(user1, valid_workflow)\n', (1334, 1357), False, 'from tests import utils\n')] |
# =============================================
# -*- coding: utf-8 -*-
# @Time : 2020/5/14 上午10:50
# @Author : xiao9616
# @Email : <EMAIL>
# @File : BaseModel.py
# @Software: PyCharm
# ============================================
import logging
import tensorflow as tf
import os
from src.yolo4.config import *
from src.yolo4.util import *
from src.yolo4.Net import YOLO4_NET
from src.yolo4.Loss import YOLO4_LOSS
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S', filename="./yolo4/logs/train.log", filemode='w+')
class BaseModel(object):
'''
    A custom base class; the methods below are meant to be overridden:
'''
def data_generator(self):
'''
        Returns: this method can be overridden; it should return a tf.data object
'''
txt_data = tf.data.TextLineDataset(filenames=train_path)
count = 0
for _ in txt_data:
count += 1
train_data = txt_data.batch(batch_size=batch_size)
return train_data, count
def net_generator(self):
net = YOLO4_NET()
return net
def loss_generator(self):
loss = YOLO4_LOSS()
return loss
def optimizer_generator(self):
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=0.001,
decay_steps=3000,
decay_rate=0.96,
staircase=True
)
optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)
return optimizer
def metric_generator(self):
metric = tf.keras.metrics.Mean()
return metric
def train(self):
        # GPU setup
tf.debugging.set_log_device_placement(True)
if use_gpu:
gpus = tf.config.experimental.list_physical_devices(device_type="GPU")
if gpus:
logging.info("use gpu device")
                # GPU memory allocation (allow growth)
for gpu in gpus:
tf.config.experimental.set_memory_growth(device=gpu, enable=True)
tf.print(gpu)
else:
os.environ["CUDA_VISIBLE_DEVICE"] = "-1"
logging.info("not found gpu device,convert to use cpu")
else:
logging.info("use cpu device")
            # disable the GPU
            os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
        # training data
train_dataset, train_count = self.data_generator()
        # network architecture
net = self.net_generator()
net.summary()
global fine_tune_epoch
# 是否finetune
if fine_tune:
net.load_weights(filepath=weights_path + "epoch-{}".format(fine_tune_epoch))
print("load {} epoch weigth".format(fine_tune))
else:
fine_tune_epoch = -1
print("train model from init")
        # set up the loss function
loss = self.loss_generator()
        # set up the optimizer
optimizer = self.optimizer_generator()
        # set up the evaluation metric
metric = self.metric_generator()
        # model training and weight updates
for epoch in range(fine_tune_epoch + 1, train_epochs):
step = 0
for train_dataset_batch in train_dataset:
# print(train_dataset_batch)
step += 1
images, boxes = parse_dataset_batch(dataset=train_dataset_batch)
image_batch = process_image_batch(images)
label_batch = generate_label_batch(boxes)
with tf.GradientTape() as tape:
out = net(image_batch)
total_loss = loss(y_true=label_batch, y_pred=out)
gradients = tape.gradient(total_loss, net.trainable_variables)
optimizer.apply_gradients(grads_and_vars=zip(gradients, net.trainable_variables))
                metric.update_state(values=total_loss)
print("Epoch: {}/{}, step: {}/{} ,loss: {:.5f}".format(
epoch, train_epochs, step, tf.math.ceil(train_count / batch_size), metric.result()
))
metric.reset_states()
if epoch % save_frequency == 0:
net.save_weights(filepath=weights_path + "epoch-{}".format(epoch), save_format='tf')
net.save_weights(filepath=weights_path + "epoch-{}".format(train_epochs), save_format='tf')
if __name__ == '__main__':
yolo = BaseModel()
yolo.train()
| [
"tensorflow.math.ceil",
"logging.basicConfig",
"tensorflow.keras.metrics.Mean",
"tensorflow.print",
"tensorflow.keras.optimizers.schedules.ExponentialDecay",
"tensorflow.config.experimental.set_memory_growth",
"logging.info",
"tensorflow.keras.optimizers.Adam",
"tensorflow.debugging.set_log_device_placement",
"src.yolo4.Loss.YOLO4_LOSS",
"src.yolo4.Net.YOLO4_NET",
"tensorflow.config.experimental.list_physical_devices",
"tensorflow.GradientTape",
"tensorflow.data.TextLineDataset"
] | [((454, 635), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""%(asctime)s %(levelname)s %(message)s"""', 'datefmt': '"""%a, %d %b %Y %H:%M:%S"""', 'filename': '"""./yolo4/logs/train.log"""', 'filemode': '"""w+"""'}), "(level=logging.DEBUG, format=\n '%(asctime)s %(levelname)s %(message)s', datefmt=\n '%a, %d %b %Y %H:%M:%S', filename='./yolo4/logs/train.log', filemode='w+')\n", (473, 635), False, 'import logging\n'), ((827, 872), 'tensorflow.data.TextLineDataset', 'tf.data.TextLineDataset', ([], {'filenames': 'train_path'}), '(filenames=train_path)\n', (850, 872), True, 'import tensorflow as tf\n'), ((1078, 1089), 'src.yolo4.Net.YOLO4_NET', 'YOLO4_NET', ([], {}), '()\n', (1087, 1089), False, 'from src.yolo4.Net import YOLO4_NET\n'), ((1155, 1167), 'src.yolo4.Loss.YOLO4_LOSS', 'YOLO4_LOSS', ([], {}), '()\n', (1165, 1167), False, 'from src.yolo4.Loss import YOLO4_LOSS\n'), ((1246, 1376), 'tensorflow.keras.optimizers.schedules.ExponentialDecay', 'tf.keras.optimizers.schedules.ExponentialDecay', ([], {'initial_learning_rate': '(0.001)', 'decay_steps': '(3000)', 'decay_rate': '(0.96)', 'staircase': '(True)'}), '(initial_learning_rate=0.001,\n decay_steps=3000, decay_rate=0.96, staircase=True)\n', (1292, 1376), True, 'import tensorflow as tf\n'), ((1451, 1502), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'lr_schedule'}), '(learning_rate=lr_schedule)\n', (1475, 1502), True, 'import tensorflow as tf\n'), ((1578, 1601), 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {}), '()\n', (1599, 1601), True, 'import tensorflow as tf\n'), ((1671, 1714), 'tensorflow.debugging.set_log_device_placement', 'tf.debugging.set_log_device_placement', (['(True)'], {}), '(True)\n', (1708, 1714), True, 'import tensorflow as tf\n'), ((1754, 1817), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', ([], {'device_type': '"""GPU"""'}), "(device_type='GPU')\n", (1798, 1817), True, 'import tensorflow as tf\n'), ((2238, 2268), 'logging.info', 'logging.info', (['"""use cpu device"""'], {}), "('use cpu device')\n", (2250, 2268), False, 'import logging\n'), ((1855, 1885), 'logging.info', 'logging.info', (['"""use gpu device"""'], {}), "('use gpu device')\n", (1867, 1885), False, 'import logging\n'), ((2156, 2211), 'logging.info', 'logging.info', (['"""not found gpu device,convert to use cpu"""'], {}), "('not found gpu device,convert to use cpu')\n", (2168, 2211), False, 'import logging\n'), ((1965, 2030), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', ([], {'device': 'gpu', 'enable': '(True)'}), '(device=gpu, enable=True)\n', (2005, 2030), True, 'import tensorflow as tf\n'), ((2051, 2064), 'tensorflow.print', 'tf.print', (['gpu'], {}), '(gpu)\n', (2059, 2064), True, 'import tensorflow as tf\n'), ((3441, 3458), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (3456, 3458), True, 'import tensorflow as tf\n'), ((3927, 3965), 'tensorflow.math.ceil', 'tf.math.ceil', (['(train_count / batch_size)'], {}), '(train_count / batch_size)\n', (3939, 3965), True, 'import tensorflow as tf\n')] |
import networkx as nx
import sys
from collections import defaultdict
sys.setrecursionlimit(100000000)
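# Build the walkable maze graph: each open tile '.' is connected to its left and upper open neighbours.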
def direct_paths(lines):
world = nx.Graph()
for row,line in enumerate(lines):
for col,obj in enumerate(line):
if obj != '.':
continue
if line[col - 1] == '.':
world.add_edge((col, row), (col - 1, row))
if lines[row - 1][col] == '.':
world.add_edge((col, row), (col, row - 1))
return world
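# Scan the map for two-letter portal labels and record the open tile each label is attached to.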
def search_portals(lines):
portals = defaultdict(list)
for row,line in enumerate(lines[:-1]):
for col,obj in enumerate(line):
if not obj.isalpha():
continue
if line[col + 1].isalpha():
portals[obj + line[col + 1]].append((col + 2, row) if line[col + 2] == '.' else (col - 1, row))
elif lines[row + 1][col].isalpha():
portals[obj + lines[row + 1][col]].append((col, row - 1) if lines[row - 1][col] == '.' else (col, row + 2))
return portals
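# Link the two tiles that share a portal label, so stepping through a portal costs one move.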
def portal_paths(portal_list, world):
for portals in portal_list.values():
if len(portals) == 1:
continue
assert len(portals) == 2
world.add_edge(portals[0], portals[1])
with open('day20.txt') as f:
lines = f.readlines()
width = len(lines[0])
height = len(lines)
W = direct_paths(lines)
portal_connections = search_portals(lines)
portal_paths(portal_connections, W)
path = nx.dijkstra_path(W, portal_connections['AA'][0], portal_connections['ZZ'][0])
print('part one', len(path) - 1)
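# Part two: portals on the outer edge of the donut behave differently from inner portals.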
def is_outer(x, y):
return x == 2 or y == 2 or x == width - 4 or y == height - 3
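# Shortest walking distance from a position to every reachable portal tile, split into outer and inner portals.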
def accessible_portals(pos, portal_list, world):
acc_outer, acc_inner = {}, {}
for portal_id in portal_list.keys():
if portal_id == 'AA':
continue
for portal_pos in portal_list[portal_id]:
if portal_pos == pos:
continue
try:
dst = nx.dijkstra_path_length(world, pos, portal_pos)
accessible = acc_outer if is_outer(*portal_pos) else acc_inner
assert portal_id not in accessible
accessible[portal_id] = dst, portal_pos
except nx.NetworkXNoPath:
pass
return acc_outer, acc_inner
def get_other_exit(portal_list, portal_id, current_pos):
return [pos for pos in portal_list[portal_id] if pos != current_pos][0]
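# Recursive depth-first search over portal jumps: inner portals descend one level, outer portals ascend one.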
def pathfind_recursive(pos, level, portal_list, world, history):
print(level)
def search_paths(accessible, dlevel):
paths = []
for pid, dst_pos in accessible.items():
if pid == 'ZZ' or (pid, dst_pos[1], level) in history:
continue
distance_to_goal = pathfind_recursive(get_other_exit(portal_list, pid, dst_pos[1]), level + dlevel, portal_list, world, history.union([(pid, dst_pos[1], level)]))
paths.append(distance_to_goal + dst_pos[0] + 1 if distance_to_goal else None)
paths = [path for path in paths if path]
return min(paths) if paths else None
acc_outer, acc_inner = accessible_portals(pos, portal_list, world)
if level == 0 and 'ZZ' in acc_outer:
return acc_outer['ZZ'][0]
if level != 0 and acc_outer:
outer_found = search_paths(acc_outer, -1)
if outer_found:
return outer_found
return search_paths(acc_inner, 1)
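# Iterative alternative to the recursive search, using an explicit stack and a cap on the recursion level.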
def pathfind_loop(world, portal_list, max_level):
def add_branches(accessible, new_level, current_length):
for pid in [pid for pid in accessible.keys() if pid != 'ZZ']:
current = accessible[pid]
new_length = current_length + 1 + current[0]
new_pos = get_other_exit(portal_list, pid, current[1])
to_check_branch.append((new_pos, new_level, new_length))
to_check_branch = [(portal_list['AA'][0], 0, 0)]
solutions = []
while to_check_branch:
pos, level, path_length = to_check_branch.pop()
acc_outer, acc_inner = accessible_portals(pos, portal_list, world)
if level == 0 and 'ZZ' in acc_outer:
solutions.append(path_length + acc_outer['ZZ'][0])
print(solutions[-1])
elif level >= max_level:
continue
add_branches(acc_inner, level + 1, path_length)
if level > 0 and acc_outer:
add_branches(acc_outer, level - 1, path_length)
return min(solutions) if solutions else None
W = direct_paths(lines)
#result = pathfind_recursive(portal_connections['AA'][0], 0, portal_connections, W, set())
result = pathfind_loop(W, portal_connections, 100)
print('part two', result) | [
"networkx.dijkstra_path_length",
"networkx.dijkstra_path",
"collections.defaultdict",
"networkx.Graph",
"sys.setrecursionlimit"
] | [((70, 102), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(100000000)'], {}), '(100000000)\n', (91, 102), False, 'import sys\n'), ((1461, 1538), 'networkx.dijkstra_path', 'nx.dijkstra_path', (['W', "portal_connections['AA'][0]", "portal_connections['ZZ'][0]"], {}), "(W, portal_connections['AA'][0], portal_connections['ZZ'][0])\n", (1477, 1538), True, 'import networkx as nx\n'), ((141, 151), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (149, 151), True, 'import networkx as nx\n'), ((539, 556), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (550, 556), False, 'from collections import defaultdict\n'), ((1982, 2029), 'networkx.dijkstra_path_length', 'nx.dijkstra_path_length', (['world', 'pos', 'portal_pos'], {}), '(world, pos, portal_pos)\n', (2005, 2029), True, 'import networkx as nx\n')] |