content (string, 0–894k characters) | type (string, 2 classes) |
---|---|
ar = [float(i) for i in input().split()]
ar_sq = []
for i in range(len(ar)):
ar_sq.append(ar[i]**2)
ar_sq = sorted(ar_sq)
print(ar_sq[0], end = ' ')
for i in range(1, len(ar_sq)):
if ar_sq[i] != ar_sq[i-1]:
print(ar_sq[i], end = ' ')
|
python
|
from chroniclr import window
if __name__ == '__main__':
window = window.AppWindow()
|
python
|
import atexit
import sys
import inspect
import json
import time
from .Autocomplete import Autocomplete
from .Connection import Connection, ConnectionError
from .Controller import Controller, ControllerError, parse_key
from .Device import (Device, DeviceError, parse_hex, parse_hsv,
parse_hsv_normalized, parse_rgb, parse_rgb_normalized)
from .Dialog import ask, prompt
from .Sketch import Sketch, SketchError
def no_controller():
if Controller.defined is False:
sketches = []
for name, obj in inspect.getmembers(sys.modules['__main__'], inspect.isclass):
if (obj is not Sketch) and (Sketch in inspect.getmro(obj)):
sketches.append((obj, name))
num_sketches = len(sketches)
if num_sketches == 0:
raise SketchError('No sketches found')
elif num_sketches == 1:
sketch_class, sketch_name = sketches.pop()
with Controller(sketch_class.config_path) as controller:
controller.run_sketch(sketch_class)
else:
raise SketchError(f'Use Controller to run multiple sketches (found {num_sketches})')
atexit.register(no_controller)
|
python
|
# pylint: disable=redefined-outer-name
""" py.test dynamic configuration.
For details needed to understand these tests, refer to:
https://pytest.org/
http://pythontesting.net/start-here/
"""
# Copyright © {{ cookiecutter.year }} {{ cookiecutter.full_name }} <{{ cookiecutter.email }}>
#
# ## LICENSE_SHORT ##
import os
import logging
from pathlib import Path
import pytest
# Globally available fixtures
@pytest.fixture(scope='session')
def logger() -> logging.Logger:
"""Test logger instance as a fixture."""
level = os.getenv('TESTS_LOG_LEVEL', 'DEBUG')
logging.basicConfig(level=getattr(logging, level))
return logging.getLogger('tests')
@pytest.fixture(scope='session')
def tests_dir() -> Path:
"""Directory where tests + data is located."""
return Path(__file__).parent
@pytest.fixture(scope='session')
def project_dir(tests_dir) -> Path:
""" Root directory of the project.
"""
return tests_dir.parent.parent
@pytest.fixture(scope='session')
def build_dir(project_dir) -> Path:
"""Build directory for dynamic data (created if missing)."""
result = project_dir / "build"
result.mkdir(exist_ok=True)
return result
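# A minimal example test (sketch) showing how the fixtures above combine; the
# test name and assertion are illustrative and would live in a test module, not here.
#
# def test_build_dir_exists(build_dir, logger):
#     logger.debug("build dir: %s", build_dir)
#     assert build_dir.is_dir()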
|
python
|
import subprocess
import codecs
goal_goals = []
#txt_dirs = ['ted_originals/', 'ted_transcripts/']
#txt_dirs = ['orig_lower/']
txt_dirs = ['trans_preprocessed/', 'orig_preprocessed/']
# Read the (file, number) pairs once, before the directory loop, so the list is
# not appended to again on every pass over txt_dirs.
with codecs.open('goal_goals.txt') as goal_goals_in:
    for line in goal_goals_in:
        if line[-1] == '\n':
            line = line[:-1]
        goal_goals.append((line.split(' ')[0], int(line.split(' ')[1])))
for txt_dir in txt_dirs:
for myfile,number in goal_goals:
with codecs.open('TestMaryamTexts.m','r','utf-8') as mat_in:
mat = mat_in.read()
mat = mat.replace('<filename_placeholder>',txt_dir + myfile.split("/")[-1])
mat = mat.replace('<number_placeholder>', '10') #str(number))
with codecs.open('TestMaryamTexts2.m','w','utf-8') as mat_out:
mat_out.write(mat)
subprocess.call("touch W/2", shell=True)
subprocess.call("touch W/1", shell=True)
subprocess.call("matlab -r \"try;run('TestMaryamTexts2.m');catch;end;quit;\"", shell=True)
subprocess.call("mv W/1 " + txt_dir + "habibi75_1/" + myfile.split("/")[-1], shell=True)
subprocess.call("mv W/2 " + txt_dir + "habibi75_2/" + myfile.split("/")[-1], shell=True)
|
python
|
import unittest
import os
NOT_FATAL = 0
iverilog = "iverilog -y./tb -y./main_unit/ -o ./tb/main_unit__adc_capture__tb.vvp ./tb/main_unit__adc_capture__tb.v"
vvp = "vvp ./tb/main_unit__adc_capture__tb.vvp"
class adc_capture(unittest.TestCase):
def test_adc_capture(self):
self.assertEqual(os.system(iverilog), NOT_FATAL)
self.assertEqual(os.system(vvp), NOT_FATAL)
if __name__ == '__main__':
unittest.main()
|
python
|
import random
import math
import sys
import copy
# import os
class Bracket:
def __init__(self, teams):
        self.numTeams = len(teams)
self.teams = list(teams)
self.maxScore = len(max(["Round "]+teams, key=len))
self.numRounds = int(math.ceil(math.log(self.numTeams, 2)) + 1)
self.totalNumTeams = int(2**math.ceil(math.log(self.numTeams, 2)))
self.totalTeams = self.addTeams()
self.lineup = ["bye" if "-" in str(x) else x for x in self.totalTeams]
self.numToName()
self.count = 0
self.rounds = []
for i in range(0, self.numRounds):
self.rounds.append([])
for _ in range(0, 2**(self.numRounds-i-1)):
self.rounds[i].append("-"*self.maxScore)
self.rounds[0] = list(self.totalTeams)
def numToName(self):
for i in range(0, self.numTeams):
self.totalTeams[self.totalTeams.index(i+1)] = self.teams[i]
def shuffle(self):
random.shuffle(self.teams)
self.totalTeams = self.addTeams()
self.numToName()
self.rounds[0] = list(self.totalTeams)
def update(self, rounds, teams):
lowercase = [team.lower() for team in self.rounds[rounds-2]]
for team in teams:
try:
index = lowercase.index(team.lower())
self.rounds[rounds-1][int(index/2)] = self.rounds[rounds-2][index]
except:
return False
if "-"*self.maxScore in self.rounds[rounds-1]:
return False
return True
def show(self):
self.count = 0
self.temp = copy.deepcopy(self.rounds)
self.tempLineup = list(self.lineup)
sys.stdout.write("Seed ")
for i in range(1, self.numRounds+1):
sys.stdout.write(("Round "+str(i)).rjust(self.maxScore+3))
print ""
self.recurse(self.numRounds-1, 0)
def recurse(self, num, tail):
if num == 0:
self.count += 1
if tail == -1:
print str(self.tempLineup.pop(0)).rjust(4)+self.temp[0].pop(0).rjust(self.maxScore+3)+" \\"
elif tail == 1:
print str(self.tempLineup.pop(0)).rjust(4)+self.temp[0].pop(0).rjust(self.maxScore+3)+" /"
else:
self.recurse(num-1, -1)
if tail == -1:
print "".rjust(4)+"".rjust((
self.maxScore+3)*num)+self.temp[num].pop(0).rjust(self.maxScore+3)+" \\"
elif tail == 1:
print "".rjust(4)+"".rjust((self.maxScore+3)*num)+self.temp[num].pop(0).rjust(self.maxScore+3)+" /"
else:
print "".rjust(4)+"".rjust((self.maxScore+3)*num)+self.temp[num].pop(0).rjust(self.maxScore+3)
self.recurse(num-1, 1)
def addTeams(self):
x = self.numTeams
teams = [1]
temp = []
count = 0
for i in range(2, x+1):
temp.append(i)
for i in range(0, int(2**math.ceil(math.log(x, 2))-x)):
temp.append("-"*self.maxScore)
for _ in range(0, int(math.ceil(math.log(x, 2)))):
high = max(teams)
for i in range(0, len(teams)):
index = teams.index(high)+1
teams.insert(index, temp[count])
high -= 1
count += 1
return teams
def getNumTeams():
    print "How many players?",
    # read the player count interactively (the original left a hard-coded 2 here;
    # raw_input is assumed, matching the prompt above)
    numTeams = raw_input()
    try:
        x = int(numTeams)
        if x > 1:
            return x
        else:
            print "Must be at least two players"
            return getNumTeams()
    except ValueError:
        return getNumTeams()
def getTeamNames(numTeams):
    teams = []
    for i in range(0, numTeams):
        correct = False
        while not correct:
            print "Name of player "+str(i+1)+"?",
            # read the name interactively (raw_input is assumed, as in getNumTeams)
            name = raw_input()
            teams.append(name)
            correct = True
    return teams
def run():
numTeams = getNumTeams()
teams = getTeamNames(numTeams)
bracket = Bracket(teams)
bracket.shuffle()
bracket.show()
    for i in range(2, bracket.numRounds+1):
        updated = False
        while not updated:
            # prompt for the winners of the previous round; interactive, comma-separated
            # input is assumed here (the original passed an empty list, which never updates)
            print "Winners of round "+str(i-1)+"?",
            teams = [team.strip() for team in raw_input().split(",")]
            updated = bracket.update(i, teams)
        bracket.show()
print ""
print bracket.rounds[-1][0]+" won!"
|
python
|
import re
import numpy
import os
import pdb
### INPUT FILES
rd_folder = "../raw_data/"
traits_file = rd_folder+"data_dental_master.csv"
bio_file_all = rd_folder+"data_sites_IUCN_narrowA.csv"
occurence_file = rd_folder+"occurence_IUCN_%s.csv"
bio_legend_file = rd_folder+"bio_legend.txt"
### OUTPUT FILES
pr_folder = "../prepared_data/"
agg_file = pr_folder+"IUCN_%s_agg.csv"
aggrnd_file = pr_folder+"IUCN_%s_agg_rounded%d.csv"
bio_file = pr_folder+"IUCN_%s_bio.csv"
stats_file = "../misc/IUCN_ordfam_stats.tex"
### PARAMETERS
continents = ["EU", "AF", "NA", "SA"]
keep_traits = ["HYP","FCT_HOD","FCT_AL","FCT_OL","FCT_SF","FCT_OT","FCT_CM"]
bool_traits = ["HYP:1", "HYP:2","HYP:3","FCT_HOD:1","FCT_HOD:2","FCT_HOD:3","FCT_AL","FCT_OL","FCT_SF","FCT_OT","FCT_CM"]
keep_ordfam = ["FAMILY", "ORDER"]
key_species = "TAXON"
NA_val = "NA"
files_thres_out = [{"ext": "_nbspc3+", "thres_type": "num", "thres_side": 0, "thres_col": "NB_SPC", "thres_min": 3}]
round_dgt = 3
### FUNCTIONS
def load_legend_bio(bio_legend_file):
leg = {}
with open(bio_legend_file) as fp:
for line in fp:
parts = line.strip().split("=")
leg[parts[0].strip()] = parts[0].strip()+":"+parts[1].strip()
return leg
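# Assumed bio_legend.txt line format: "<code> = <label>"; e.g. an illustrative
# line "bio1 = TMean" yields leg["bio1"] == "bio1:TMean".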
def load_lines_bio(bio_file, remove_vars, key_var, trans_vars={}):
lines_bio = {}
key_col = None
sep = ","
with open(bio_file) as fp:
for line in fp:
parts = line.strip().split(sep)
if key_col is None:
key_col = parts.index(key_var)
keep_cols = [key_col]+[k for (k,v) in enumerate(parts) if v not in remove_vars+[key_var]]
lines_bio[None] = sep.join([trans_vars.get(parts[k], parts[k]) for k in keep_cols])+"\n"
else:
lines_bio[parts[key_col]] = sep.join([parts[k] for k in keep_cols])+"\n"
return lines_bio
def load_traits(traits_file, keep_traits, bool_traits, key_species):
data_traits = {}
head_traits = None
sep = "\t"
with open(traits_file) as fp:
for line in fp:
parts = line.strip().split(sep)
if head_traits is None:
head_traits = dict([(v,k) for (k,v) in enumerate(parts)])
else:
if True:
values = []
for kv in bool_traits:
tmp = re.match("(?P<trait>.*):(?P<val>[0-9]+)$", kv)
if tmp is not None:
if parts[head_traits[tmp.group("trait")]] == NA_val:
print parts[head_traits[key_species]], kv, "MISSING"
values.append(0)
else:
values.append(1*(parts[head_traits[tmp.group("trait")]] == tmp.group("val")))
else:
if parts[head_traits[kv]] == NA_val:
print parts[head_traits[key_species]], kv, "MISSING"
values.append(0)
else:
values.append(int(parts[head_traits[kv]]))
data_traits[parts[head_traits[key_species]]] = values
# except ValueError:
# print parts[head_traits[key_species]], "MISSING"
return data_traits, head_traits
def aggregate_traits(occurence_file, agg_file, data_traits, head_traits, bool_traits, lines_bio=None, bio_file=None):
data_occurence = {}
head_occurence = None
sep = ","
if bio_file is not None and lines_bio is not None:
flb = open(bio_file, "w")
else:
flb = None
fo = open(agg_file, "w")
with open(occurence_file) as fp:
for line in fp:
parts = line.strip().split(sep)
if head_occurence is None:
head_occurence = dict([(k,v) for (k,v) in enumerate(parts)])
fo.write(",".join(["ID"]+["MEAN_%s" % t for t in bool_traits]+["NB_SPC"])+"\n")
if flb is not None:
flb.write(lines_bio[None])
elif lines_bio is None or parts[0] in lines_bio:
try:
present = [head_occurence[i] for (i,v) in enumerate(parts) if v =="1"]
except ValueError:
print line
pdb.set_trace()
data_mat = numpy.array([data_traits[p] for p in present])
if data_mat.shape[0] == 0:
fo.write(",".join([parts[0]]+["0" for t in bool_traits]+["0"])+"\n")
else:
fo.write(",".join([parts[0]]+["%f" % t for t in data_mat.mean(axis=0)]+["%d" % data_mat.shape[0]])+"\n")
if flb is not None:
flb.write(lines_bio[parts[0]])
if flb is not None:
flb.close()
fo.close()
def filter_nbspc(files_in, files_thres_out):
fps = [open(file_in) for file_in in files_in]
heads = []
head_lines = []
for fp in fps:
head_lines.append(fp.readline())
heads.append(dict([(v,k) for (k,v) in enumerate(head_lines[-1].strip().split(","))]))
checked = []
for ooo in files_thres_out:
out = dict(ooo)
if out["thres_side"] >= 0 and out["thres_side"] < len(heads) and out["thres_col"] in heads[out["thres_side"]]:
out["colid"] = heads[out["thres_side"]][out["thres_col"]]
out["fps"] = []
out["fns"] = []
### EXCLUDE FILTER COLUMN OR NOT
excl = None # out["colid"]
for file_in in files_in:
parts = file_in.split("_")
parts[-2]+= out["ext"]
fname = "_".join(parts)
out["fps"].append(open(fname, "w"))
out["fns"].append(fname)
for li, l in enumerate(head_lines):
if li == out["thres_side"]:
out["fps"][li].write(",".join([p for (pi, p) in \
enumerate(l.strip().split(",")) if pi != excl])+"\n")
else:
# out["fps"][li].write(l)
# pdb.set_trace()
## out["fps"][li].write(l.strip('\n') + ",bioA:abs_bio13-bio14,bioB:bio4_corr"+"\n")
out["fps"][li].write(l.strip('\n') + ",bioA:abs_bio13-bio14"+"\n")
out["count_lines"] = 0
checked.append(out)
stop = False
while not stop:
lines = [fp.readline() for fp in fps]
if numpy.prod([len(line) for line in lines]) == 0:
stop = True
else:
for out in checked:
inclus = False
if out["thres_type"] == "num":
v = float(lines[out["thres_side"]].split(",")[out["colid"]])
inclus = ("thres_min" not in out or v >= out["thres_min"]) and \
("thres_max" not in out or v <= out["thres_max"])
elif out["thres_type"] == "cat":
v = lines[out["thres_side"]].split(",")[heads[out["thres_side"]][out["thres_col"]]]
inclus = (v == out["thres_val"])
if inclus:
for li, l in enumerate(lines):
if li == out["thres_side"]:
out["fps"][li].write(",".join([p for (pi, p) in \
enumerate(l.strip().split(",")) if pi != excl])+"\n")
else:
# out["fps"][li].write(l)
# pdb.set_trace()
parts = l.strip().split(",")
valA = abs(float(parts[heads[li]['bio13:PWetM']]) - float(parts[heads[li]['bio14:PDryM']]))
# valB = float(parts[heads[li]['bio4:TSeason']])
# if valA < 232 and float(parts[heads[li]['bio7:TRngY']]) > 30:
# valB *= 10
# out["fps"][li].write(l.strip('\n') + (",%d,%d" % (valA, valB)) +"\n")
out["fps"][li].write(l.strip('\n') + (",%f" % valA) +"\n")
out["count_lines"] += 1
for out in checked:
for fp in out["fps"]:
fp.close()
if out["count_lines"] == 0:
for fn in out["fns"]:
os.remove(fn)
print "EMPTY %s removed..." % (", ".join(out["fns"]))
for fp in fps:
fp.close()
return checked
def round_values(in_file, out_file, round_dgt):
### first check that no information will be lost
with open(in_file) as fp:
head = fp.readline().strip().split(",")
cols = [i for i,p in enumerate(head) if re.match("MEAN_", p)]
D = numpy.loadtxt(in_file, delimiter=",", skiprows=2, usecols=cols)
print ">>> CHECK FOR LOSS OF INFO (all values should be True)"
print [numpy.unique(numpy.around(numpy.unique(D[:,i]), round_dgt)).shape[0] == numpy.unique(D[:,i]).shape[0] for i in range(D.shape[1])]
### then round values
fo = open(out_file, "w")
head = None
fmt = "%."+str(round_dgt)+"f"
with open(in_file) as fp:
for line in fp:
parts = line.strip().split(",")
if head is None:
head = parts
fo.write(line)
elif parts[0] == "enabled_col":
fo.write(line)
else:
for i,p in enumerate(head):
if re.match("MEAN_", p):
parts[i] = fmt % numpy.around(float(parts[i]), round_dgt)
fo.write(",".join(parts)+"\n")
def collect_all(files_in, tk, continents, suffixes, round_dgt):
for suffix in suffixes:
for ffi, file_in in enumerate(files_in):
parts = file_in.split("_")
parts[-2]+= suffix
fname = "_".join(parts)
fpo = open(fname % tk, "w")
if ffi == 0:
fname_splits = fname % (tk+"-splits")
fpos = open(fname_splits, "w")
head = False
for continent in continents:
if os.path.exists(fname % continent):
with open(fname % continent) as fp:
for li, line in enumerate(fp):
if li == 0:
if not head:
if ffi == 0:
fpos.write(line.strip()+",folds_split_C\n")
fpos.write(",".join(["enabled_col"]+ \
["T" for i in range(len(line.strip().split(","))-2)]+["F,F\n"]))
fpo.write(line)
if ffi == 0:
fpo.write(",".join(["enabled_col"]+ \
["T" for i in range(len(line.strip().split(","))-1)])+"\n")
else:
fpo.write(",".join(["enabled_col"]+ \
["T" for i in range(len(line.strip().split(","))-2)])+",F\n")
head = True
else:
if ffi == 0:
fpos.write(line.strip()+",F:%s\n" % continent)
fpo.write(line)
if ffi == 0:
fpos.close()
fpo.close()
parts = fname_splits.split(".")
fname_rnd = ".".join(parts[:-1])+("_rounded%d." % round_dgt) + parts[-1]
round_values(fname_splits, fname_rnd, round_dgt)
def load_ordfam(traits_file, keep_ordfam, key_species):
data_ordfam = {}
head_ordfam = None
sep = "\t"
with open(traits_file) as fp:
for line in fp:
parts = line.strip().split(sep)
if head_ordfam is None:
head_ordfam = dict([(v,k) for (k,v) in enumerate(parts)])
else:
if True:
values = []
for kv in keep_ordfam:
if kv == "FAMILY":
for f,t in [("Galagonidae", "Galagidae"),
("Loridae", "Lorisidae"),
("Rhinoceratidae", "Rhinocerotidae")]:
if parts[head_ordfam[kv]] == f:
parts[head_ordfam[kv]] = t
if parts[head_ordfam[kv]] == NA_val:
print parts[head_ordfam[key_species]], kv, "MISSING"
values.append(0)
else:
values.append(parts[head_ordfam[kv]])
data_ordfam[parts[head_ordfam[key_species]]] = values
# except ValueError:
# print parts[head_ordfam[key_species]], "MISSING"
return data_ordfam, head_ordfam
def aggregate_counts(occurence_file, data_ordfam, head_ordfam, keep_ordfam, lines_bio=None):
data_occurence = {}
head_occurence = None
sep = ","
counts = {}
# keep_ordfam = [ss.replace("_Kari", "") for ss in keep_ordfam]
for ck in [0,3]:
counts[ck] = dict([("SITES",0)]+[(kk, {}) for kk in keep_ordfam])
with open(occurence_file) as fp:
for line in fp:
parts = line.strip().split(sep)
if head_occurence is None:
head_occurence = dict([(k,v) for (k,v) in enumerate(parts)])
elif lines_bio is None or parts[0] in lines_bio:
try:
present = [head_occurence[i] for (i,v) in enumerate(parts) if v =="1"]
except ValueError:
print line
pdb.set_trace()
nb_spc = len(present)
data_mat = [data_ordfam[p] for p in present]
for ck in counts.keys():
if nb_spc >= ck:
counts[ck]["SITES"] += 1
for i, cs in enumerate(map(set, zip(*data_mat))):
for ck in counts.keys():
if nb_spc >= ck:
for cc in cs:
counts[ck][keep_ordfam[i]][cc] = counts[ck][keep_ordfam[i]].get(cc, 0) + 1
return counts
def make_counts_table(data_ordfam, continents, counts_all):
pairs = sorted(set([(ka, kb) for (kb,ka) in data_ordfam.values()]))
table = """\\begin{table}[h]
\\caption{Number of sites from each continent containing taxa from the given order or family, after/before filtering out sites with fewer than three taxa.}\\label{fig:spc_counts}
\\vspace{2ex} \\centering
\\begin{tabular}{@{\\hspace*{3ex}}l@{\\hspace*{2ex}}ccr@{~/~}rc@{\\hspace*{2ex}}cr@{~/~}rc@{\\hspace*{2ex}}cr@{~/~}rc@{\\hspace*{2ex}}cr@{~/~}rc@{\\hspace*{3ex}}} \n\\toprule\n"""
table += " & & "+ "&".join(["\\multicolumn{4}{c}{\\textsc{%s}}" % c for c in continents]) +" \\\\\n\\midrule\n"
table += " & & & ".join(["Nb.\ sites" ] + ["%d & %d" % tuple([counts_all[continent][ck]["SITES"] for ck in [3,0]]) for continent in continents])+" & \\\\\n"
for pi, pair in enumerate(pairs):
if pi == 0 or pairs[pi-1][0] != pair[0]:
table += "[0.5em]\n"+" & & & ".join(["\\textbf{\\textit{%s}}" % pair[0]] + ["%d & %d" % tuple([counts_all[continent][ck]["ORDER"].get(pair[0], 0) for ck in [3,0]]) for continent in continents])+" & \\\\\n"
table += " & & & ".join(["\\textit{%s}" % pair[1]] + ["%d & %d" % tuple([counts_all[continent][ck]["FAMILY"].get(pair[1], 0) for ck in [3,0]]) for continent in continents])+" & \\\\\n"
table += """\\bottomrule\n\\end{tabular}\n\\end{table}"""
return table
### MAIN
data_traits, head_traits = load_traits(traits_file, keep_traits, bool_traits, key_species)
bio_leg = load_legend_bio(bio_legend_file)
bio_leg.update({"lon_bio":"longitude","lat_bio":"latitude", "SITE": "ID"})
lines_bio = load_lines_bio(bio_file_all, ["CONT","NO_SPECIES","NO_ORDERS","NO_FAMILIES","GlobalID"], "SITE", bio_leg)
for continent in continents:
aggregate_traits(occurence_file % continent, agg_file % continent, data_traits, head_traits, bool_traits, lines_bio, bio_file % continent)
filter_nbspc([agg_file % continent, bio_file % continent], files_thres_out)
collect_all([agg_file, bio_file], "all", continents, suffixes=[fto["ext"] for fto in files_thres_out], round_dgt=round_dgt)
############# COMPUTING COUNTS
data_ordfam, head_ordfam = load_ordfam(traits_file, keep_ordfam, key_species)
counts_all = {}
for continent in continents:
counts_all[continent] = aggregate_counts(occurence_file % continent, data_ordfam, head_ordfam, keep_ordfam, lines_bio)
table = make_counts_table(data_ordfam, continents, counts_all)
with open(stats_file, "w") as fo:
fo.write(table)
|
python
|
# -*- coding: utf-8 -*-
#
# Copyright 2017-2020 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Renku migrations management.
Migrations files are put in renku/core/management/migrations directory. Name
of these files has m_1234__name.py format where 1234 is the migration version
and name can be any alphanumeric and underscore combination. Migration files
are sorted based on their lowercase name. Each migration file must define a
public "migrate" function that accepts a client as its argument.
When executing a migration, the migration file is imported as a module and the
"migrate" function is executed. Migration version is checked against the Renku
project version (in .renku/metadata.yml) and any migration which has a higher
version is applied to the project.
"""
import hashlib
import importlib
import json
import os
import re
import shutil
from pathlib import Path
import pkg_resources
from jinja2 import Template
from renku.core.errors import (
DockerfileUpdateError,
MigrationError,
MigrationRequired,
ProjectNotSupported,
TemplateUpdateError,
)
from renku.core.utils.migrate import read_project_version
SUPPORTED_PROJECT_VERSION = 8
def check_for_migration(client):
"""Checks if migration is required."""
if is_migration_required(client):
raise MigrationRequired
elif is_project_unsupported(client):
raise ProjectNotSupported
def is_migration_required(client):
"""Check if project requires migration."""
return is_renku_project(client) and _get_project_version(client) < SUPPORTED_PROJECT_VERSION
def is_project_unsupported(client):
"""Check if this version of Renku cannot work with the project."""
return is_renku_project(client) and _get_project_version(client) > SUPPORTED_PROJECT_VERSION
def is_template_update_possible(client):
"""Check if the project can be updated to a newer version of the project template."""
return _update_template(client, check_only=True)
def is_docker_update_possible(client):
"""Check if the Dockerfile can be updated to a new version of renku-python."""
return _update_dockerfile(client, check_only=True)
def migrate(
client,
force_template_update=False,
skip_template_update=False,
skip_docker_update=False,
skip_migrations=False,
progress_callback=None,
):
"""Apply all migration files to the project."""
template_updated = docker_updated = False
if not is_renku_project(client):
return False, template_updated, docker_updated
if (
not skip_template_update
and client.project.template_source
and (force_template_update or client.project.automated_update)
):
try:
template_updated, _, _ = _update_template(client, progress_callback)
except TemplateUpdateError:
raise
except (Exception, BaseException) as e:
raise TemplateUpdateError("Couldn't update from template.") from e
if not skip_docker_update:
try:
docker_updated = _update_dockerfile(client, progress_callback)
except DockerfileUpdateError:
raise
except (Exception, BaseException) as e:
raise DockerfileUpdateError("Couldn't update renku version in Dockerfile.") from e
if skip_migrations:
return False, template_updated, docker_updated
project_version = _get_project_version(client)
n_migrations_executed = 0
for version, path in get_migrations():
if version > project_version:
module = importlib.import_module(path)
if progress_callback:
module_name = module.__name__.split(".")[-1]
progress_callback(f"Applying migration {module_name}...")
try:
module.migrate(client)
except (Exception, BaseException) as e:
raise MigrationError("Couldn't execute migration") from e
n_migrations_executed += 1
if n_migrations_executed > 0:
client._project = None # NOTE: force reloading of project metadata
client.project.version = str(version)
client.project.to_yaml()
if progress_callback:
progress_callback(f"Successfully applied {n_migrations_executed} migrations.")
return n_migrations_executed != 0, template_updated, docker_updated
def _update_template(client, check_only=False, progress_callback=None):
"""Update local files from the remote template."""
from renku.core.commands.init import fetch_template
project = client.project
if not project.template_version:
return False, None, None
template_manifest, template_folder, template_source, template_version = fetch_template(
project.template_source, project.template_ref, progress_callback
)
current_version = None
if template_source == "renku":
current_version = pkg_resources.parse_version(template_version)
template_version = pkg_resources.parse_version(project.template_version)
if template_version >= current_version:
return False, project.template_version, current_version
else:
if template_version == project.template_version:
return False, project.template_version, template_version
if check_only:
return True, project.template_version, current_version if current_version else template_version
if progress_callback:
progress_callback("Updating project from template...")
template_filtered = [
template_elem for template_elem in template_manifest if template_elem["folder"] == project.template_id
]
if len(template_filtered) == 1:
template_data = template_filtered[0]
else:
raise TemplateUpdateError(f'The template with id "{project.template_id}" is not available.')
template_path = template_folder / template_data["folder"]
metadata = json.loads(project.template_metadata)
template_variables = set(template_data.get("variables", {}).keys())
metadata_keys = set(metadata.keys())
missing_keys = ", ".join(template_variables - metadata_keys)
if missing_keys:
raise TemplateUpdateError(
f"Can't update template, it now requires variable(s) {missing_keys} which were not present on project "
"creation."
)
if not os.path.exists(client.template_checksums):
raise TemplateUpdateError("Can't update template as there are no template checksums set on the project.")
with open(client.template_checksums, "r") as checksum_file:
checksums = json.load(checksum_file)
updated_files = []
for file in template_path.glob("**/*"):
rel_path = file.relative_to(template_path)
destination = client.path / rel_path
try:
# parse file and process it
template = Template(file.read_text())
rendered_content = template.render(metadata)
sha256_hash = hashlib.sha256()
content_bytes = rendered_content.encode("utf-8")
blocksize = 4096
blocks = (len(content_bytes) - 1) // blocksize + 1
for i in range(blocks):
byte_block = content_bytes[i * blocksize : (i + 1) * blocksize]
sha256_hash.update(byte_block)
new_template_hash = sha256_hash.hexdigest()
current_hash = client._content_hash(destination)
local_changes = current_hash != checksums[str(rel_path)]
remote_changes = new_template_hash != checksums[str(rel_path)]
if local_changes:
if remote_changes and str(rel_path) in project.immutable_template_files:
# NOTE: There are local changes in a file that should not be changed by users,
# and the file as updated in the template as well. So the template can't be updated.
raise TemplateUpdateError(
f"Can't update template as immutable template file {rel_path} has local changes."
)
continue
elif not remote_changes:
continue
destination = Path(Template(str(destination)).render(metadata))
            destination.write_text(rendered_content)
            # record the file so the final progress message can list it
            updated_files.append(str(rel_path))
except IsADirectoryError:
destination.mkdir(parents=True, exist_ok=True)
except TypeError:
shutil.copy(file, destination)
if progress_callback:
updated = "\n".join(updated_files)
progress_callback(f"Updated project from template, updated files:\n{updated}")
return True, project.template_version, current_version if current_version else template_version
def _update_dockerfile(client, check_only=False, progress_callback=None):
"""Update the dockerfile to the newest version of renku."""
from renku import __version__
if not client.docker_path.exists():
return False
if progress_callback:
progress_callback("Updating dockerfile...")
with open(client.docker_path, "r") as f:
dockercontent = f.read()
current_version = pkg_resources.parse_version(__version__)
m = re.search(r"^ARG RENKU_VERSION=(\d+\.\d+\.\d+)$", dockercontent, flags=re.MULTILINE)
if not m:
if check_only:
return False
raise DockerfileUpdateError(
"Couldn't update renku-python version in Dockerfile, as it doesn't contain an 'ARG RENKU_VERSION=...' line."
)
docker_version = pkg_resources.parse_version(m.group(1))
if docker_version >= current_version:
return False
if check_only:
return True
dockercontent = re.sub(
r"^ARG RENKU_VERSION=\d+\.\d+\.\d+$", f"ARG RENKU_VERSION={__version__}", dockercontent, flags=re.MULTILINE,
)
with open(client.docker_path, "w") as f:
f.write(dockercontent)
if progress_callback:
progress_callback("Updated dockerfile.")
return True
def _get_project_version(client):
try:
return int(read_project_version(client))
except ValueError:
return 1
def is_renku_project(client):
"""Check if repository is a renku project."""
try:
return client.project is not None
except ValueError: # Error in loading due to an older schema
return client.renku_metadata_path.exists()
def get_migrations():
"""Return a sorted list of versions and migration modules."""
migrations = []
for file_ in pkg_resources.resource_listdir("renku.core.management", "migrations"):
match = re.search(r"m_([0-9]{4})__[a-zA-Z0-9_-]*.py", file_)
if match is None: # migration files match m_0000__[name].py format
continue
version = int(match.groups()[0])
path = "renku.core.management.migrations.{}".format(Path(file_).stem)
migrations.append((version, path))
migrations = sorted(migrations, key=lambda v: v[1].lower())
return migrations
|
python
|
# -*- coding: utf-8 -*-
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch
from torchvision.models import resnet
class BasicBlock1d(nn.Module):
def __init__(self, inplanes, planes, stride, size,downsample):
super(BasicBlock1d, self).__init__()
self.conv1 = nn.Conv1d(inplanes, planes, kernel_size=size, stride=stride, bias=False)
self.bn1 = nn.BatchNorm1d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv1d( planes, planes, kernel_size=1, stride=1, bias=False)
self.bn2 = nn.BatchNorm1d(planes)
self.downsample = nn.Sequential(
nn.Conv1d(inplanes, planes ,kernel_size=size, stride=stride, bias=False),
nn.BatchNorm1d(planes))
self.dropout = nn.Dropout(.2)
self.sigmoid = nn.Sigmoid()
self.globalAvgPool =nn.AdaptiveAvgPool1d(1)
self.fc1 = nn.Linear(in_features=planes, out_features=round(planes / 16))
self.fc2 = nn.Linear(in_features=round(planes / 16), out_features=planes)
def forward(self, x):
x=x.squeeze(2)
residual = self.downsample(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.dropout(out)
out = self.bn2(out)
out = self.conv2(out)
#Squeeze-and-Excitation (SE)
original_out = out
out = self.globalAvgPool(out)
out = out.view(out.size(0), -1)
out = self.fc1(out)
out = self.relu(out)
out = self.fc2(out)
out = self.sigmoid(out)
out = out.view(out.size(0), out.size(1),1)
out = out * original_out
#resnet
out += residual
out = self.relu(out)
return out
class BasicBlock2d(nn.Module):
def __init__(self, inplanes, planes, stride, size,downsample):
super(BasicBlock2d, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=(1,size), stride=stride, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=(1,1), stride=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = nn.Sequential(
nn.Conv2d(inplanes, planes ,kernel_size=(1,size), stride=stride, bias=False),
nn.BatchNorm2d(planes))
self.dropout = nn.Dropout(.2)
self.sigmoid = nn.Sigmoid()
self.globalAvgPool = nn.AdaptiveAvgPool2d((1,1))
self.fc1 = nn.Linear(in_features=planes, out_features=round(planes / 16))
self.fc2 = nn.Linear(in_features=round(planes / 16), out_features=planes)
def forward(self, x):
residual = self.downsample(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.dropout(out)
out = self.bn2(out)
out = self.conv2(out)
#Squeeze-and-Excitation (SE)
original_out=out
out = self.globalAvgPool(out)
out = out.view(out.size(0), -1)
out = self.fc1(out)
out = self.relu(out)
out = self.fc2(out)
out = self.sigmoid(out)
out = out.view(out.size(0), out.size(1),1,1)
out = out * original_out
#resnet
out += residual
out = self.relu(out)
return out
class ECGNet(nn.Module):
def __init__(self, BasicBlock1d,BasicBlock2d, num_classes=9):
super(ECGNet, self).__init__()
self.sizes=[5,7,9]
self.external = 3
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(12,32, kernel_size=(1,50), stride=(1,2),padding=(0,0),bias=False)
self.bn1 = nn.BatchNorm2d(32)
self.AvgPool = nn.AdaptiveAvgPool1d(1)
self.layers=nn.Sequential()
self.layers.add_module('layer_1',self._make_layer( BasicBlock2d,inplanes=32,planes=32,blocks=1,stride=(1,2),size=15))
self.layers.add_module('layer_2',self._make_layer( BasicBlock2d,inplanes=32,planes=32,blocks=1,stride=(1,2),size=15))
self.layers.add_module('layer_3',self._make_layer( BasicBlock2d,inplanes=32,planes=32,blocks=1,stride=(1,2),size=15))
self.layers1_list=nn.ModuleList()
self.layers2_list=nn.ModuleList()
for size in self.sizes:
self.layers1=nn.Sequential()
self.layers1.add_module('layer{}_1_1'.format(size),self._make_layer( BasicBlock2d,inplanes=32, planes=32,blocks=32,
stride=(1,1),size=size))
self.layers2=nn.Sequential()
self.layers2.add_module('layer{}_2_1'.format(size),self._make_layer(BasicBlock1d,inplanes=32, planes=256,blocks=1,
stride=2,size=size))
self.layers2.add_module('layer{}_2_2'.format(size),self._make_layer(BasicBlock1d,inplanes=256, planes=256,blocks=1,
stride=2,size=size))
self.layers2.add_module('layer{}_2_3'.format(size),self._make_layer(BasicBlock1d,inplanes=256, planes=256,blocks=1,
stride=2,size=size))
self.layers2.add_module('layer{}_2_4'.format(size),self._make_layer(BasicBlock1d,inplanes=256, planes=256,blocks=1,
stride=2,size=size))
self.layers1_list.append(self.layers1)
self.layers2_list.append(self.layers2)
self.fc = nn.Linear(256*len(self.sizes)+self.external, num_classes)
def _make_layer(self, block,inplanes, planes, blocks, stride ,size,downsample = None):
layers = []
for i in range(blocks):
layers.append(block(inplanes, planes, stride, size,downsample))
return nn.Sequential(*layers)
def forward(self, x0, fr):
x0=x0.unsqueeze(2)
x0 = self.conv1(x0)
x0 = self.bn1(x0)
x0 = self.relu(x0)
x0 = self.layers(x0)
xs=[]
for i in range(len(self.sizes)):
            x = self.layers1_list[i](x0)
            x = torch.flatten(x, start_dim=2, end_dim=3)
            # feed the 1d branch with the flattened 2d-branch output; the original
            # passed x0 here, which silently discarded the two lines above
            x = self.layers2_list[i](x)
x= self.AvgPool(x)
xs.append(x)
out = torch.cat(xs,dim=2)
out = out.view(out.size(0), -1)
out = torch.cat([out,fr], dim=1)
out = self.fc(out)
return out
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ECGNet(BasicBlock1d,BasicBlock2d,**kwargs)
return model
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from sqlalchemy import MetaData
from py_privatekonomi.utilities import common
class ModelContext(object):
    def __init__(self, context=None):
        # avoid a shared mutable default argument
        self.__context = common.as_obj(context if context is not None else {})
        self.__context._metadata = MetaData()
@property
def metadata(self):
return self.__context._metadata
|
python
|
from setuptools import setup
setup(
name = 'json2html',
packages = ['json2html'],
version = '1.3.0',
description = 'JSON to HTML Table Representation',
long_description=open('README.rst').read(),
author = 'Varun Malhotra',
author_email = '[email protected]',
url = 'https://github.com/softvar/json2html',
download_url = 'https://github.com/softvar/json2html/tarball/1.3.0',
keywords = ['json', 'HTML', 'Table'],
license = 'MIT',
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
classifiers = [
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
],
)
|
python
|
class Solution:
# @param {integer} A
# @param {integer} B
# @param {integer} C
# @param {integer} D
# @param {integer} E
# @param {integer} F
# @param {integer} G
# @param {integer} H
# @return {integer}
def computeArea(self, A, B, C, D, E, F, G, H):
def area(x, y):
return 0 if x < 0 or y < 0 else x * y
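        # Inclusion-exclusion: sum of both rectangle areas minus the overlap. The
        # overlap width/height go negative when the rectangles are disjoint, and
        # area() clamps that case to 0.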
return area(C-A, D-B) + area(G-E, H-F) - \
area(min(C, G) - max(A, E), min(D, H) - max(B, F))
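# Usage example: rectangles (-3,0,3,4) and (0,-1,9,2) overlap in a 3x2 region,
# so the covered area is 24 + 27 - 6 = 45.
# Solution().computeArea(-3, 0, 3, 4, 0, -1, 9, 2)  # -> 45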
|
python
|
def roll_new(name, gender):
pass
def describe(character):
pass
|
python
|
"""Integrate with NamecheapDNS."""
import asyncio
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.const import CONF_HOST, CONF_ACCESS_TOKEN, CONF_DOMAIN
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.aiohttp_client import async_get_clientsession
DOMAIN = 'namecheapdns'
UPDATE_URL = 'https://dynamicdns.park-your-domain.com/update'
INTERVAL = timedelta(minutes=5)
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_DOMAIN): cv.string,
vol.Required(CONF_ACCESS_TOKEN): cv.string,
})
}, extra=vol.ALLOW_EXTRA)
@asyncio.coroutine
def async_setup(hass, config):
"""Initialize the NamecheapDNS component."""
host = config[DOMAIN][CONF_HOST]
domain = config[DOMAIN][CONF_DOMAIN]
token = config[DOMAIN][CONF_ACCESS_TOKEN]
session = async_get_clientsession(hass)
result = yield from _update_namecheapdns(session, host, domain, token)
if not result:
return False
@asyncio.coroutine
def update_domain_interval(now):
"""Update the NamecheapDNS entry."""
yield from _update_namecheapdns(session, host, domain, token)
async_track_time_interval(hass, update_domain_interval, INTERVAL)
return result
@asyncio.coroutine
def _update_namecheapdns(session, host, domain, token):
"""Update NamecheapDNS."""
import xml.etree.ElementTree as ET
params = {
'host': host,
'domain': domain,
'password': token,
}
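    # The update endpoint answers with XML; the parsing below only relies on it
    # containing an <ErrCount> element, where a value of 0 means the update succeeded.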
resp = yield from session.get(UPDATE_URL, params=params)
xml_string = yield from resp.text()
root = ET.fromstring(xml_string)
err_count = root.find('ErrCount').text
if int(err_count) != 0:
_LOGGER.warning('Updating Namecheap domain %s failed', domain)
return False
return True
|
python
|
celsius = float(input('Enter the temperature in °C: '))
fahrenheit = celsius * 1.8 + 32
kelvin = celsius + 273.15
print(f'{celsius}°C equals {fahrenheit}°F and {kelvin}K.')
|
python
|
"""
A Python package module for simulating falling objects with simple aerodynamic drag.
Developed by FIRST Robotics Competition Team 6343 - Steel Ridge Robotics
Strong
Trustworthy
Empowering
Effective
Leadership
"""
__VERSION__ = "1.0.0b1"
|
python
|
"""
pluginName = dashcam
Senario Continuous Video Series like a Dashcam
----------------------------------------------
You want to take a series of videos like a dash cam.
You can manage disk space and delete oldest videos when disk
is close to full or run video session for a set number of minutes.
Edit the settings below to suit your project needs.
if config.py variable pluginEnable=True and pluginName=dashcam
then these settings will override the config.py settings.
"""
# Customize Settings Below to Suit your Project Needs
# ---------------------------------------------------
imageWidth = 1280 # default= 1280 Full Size video Width in px
imageHeight = 720 # default= 720 Full Size video Height in px
imageVFlip = False # default= False True Flips image Vertically
imageHFlip = False # default= False True Flips image Horizontally
showDateOnImage = True # default= True False=Do Not display date/time text on images
videoPath = "media/dashcam" # default= media/dashcam Storage folder path for videos
videoPrefix = "dc-"     # prefix for dashcam video filenames
videoDuration = 120 # default= 120 seconds (2 min) for each video recording
videoTimer = 0 # default= 0 0=Continuous or Set Total Session Minutes to Record then Exit
videoFPS = 30 # default= 30 fps. Note slow motion can be achieved at 640x480 image resolution at 90 fps
videoNumOn = False # default= False False=filenames by date/time True=filenames by sequence Number
# Use settings below if videoNumOn = True
videoNumRecycle = False # default= False when NumMax reached restart at NumStart instead of exiting
videoNumStart = 1000 # default= 1000 Start of video filename number sequence
videoNumMax = 0 # default= 20 Max number of videos desired. 0=Continuous
# Manage Disk Space Settings
#---------------------------
spaceTimerHrs = 1 # default= 0 0=off or specify hours frequency to perform free disk space check
spaceFreeMB = 500 # default= 500 Target Free space in MB Required.
spaceMediaDir = videoPath # default= videoPath per variable above
spaceFileExt = 'mp4' # default= '' File extension to Delete Oldest Files
# Do Not Change these Settings
# ----------------------------
videoRepeatOn = True # Turn on Video Repeat Mode IMPORTANT Overrides timelapse and motion
|
python
|
# load_dotenv is provided by the python-dotenv package
from dotenv import load_dotenv

load_dotenv('.env.txt')
|
python
|
import discord
from discord.ext import commands
from cogs.utils import checks
from .utils.dataIO import dataIO, fileIO
from __main__ import send_cmd_help
import os
import io
import requests
import json
import asyncio
import PIL
from PIL import ImageFont
from PIL import Image
from PIL import ImageDraw
creditIcon = "https://i.imgur.com/TP8GXZb.png"
credits = "Bot by GR8 | Titan"
class warlog:
"""Clash Royale Clan War log"""
def __init__(self, bot):
self.bot = bot
self.auth = dataIO.load_json('cogs/auth.json')
self.clans = dataIO.load_json('cogs/clans.json')
def getAuth(self):
return {"auth" : self.auth['token']}
def save_clans(self):
dataIO.save_json('cogs/clans.json', self.clans)
def update_clans(self):
self.clans = dataIO.load_json('cogs/clans.json')
async def getLeague(self, trophies):
if trophies >= 3000:
return "legend"
elif trophies >= 1500:
return "gold"
elif trophies >= 600:
return "silver"
else:
return "bronze"
async def findRank(self, lst, key, value):
for i, dic in enumerate(lst):
if dic[key] == value:
return i
return -1
async def genImage(self, leagueName, trophies, rank, clanName, participants, wins, crowns):
font1 = ImageFont.truetype("data/warlog/ClashRoyale.ttf",27)
font2 = ImageFont.truetype("data/warlog/ClashRoyale.ttf",37)
font3 = ImageFont.truetype("data/warlog/ClashRoyale.ttf",41)
img = Image.open("data/warlog/images/warlog.jpg")
draw = ImageDraw.Draw(img)
league = Image.open("data/warlog/images/{}.png".format(leagueName))
img.paste(league, (410, 55), league) # league
draw.text((493, 75), "{:,}".format(int(trophies)), (255,255,255), font=font1) # Trophies
# thin border
x, y = 284, 192
fillcolor = "white"
shadowcolor = "black"
draw.text((x-2, y-2), rank, font=font2, fill=shadowcolor)
draw.text((x+2, y-2), rank, font=font2, fill=shadowcolor)
draw.text((x-2, y+2), rank, font=font2, fill=shadowcolor)
draw.text((x+2, y+2), rank, font=font2, fill=shadowcolor)
draw.text((x, y), rank, font=font2, fill=fillcolor) # Rank
draw.text((347, 194), clanName, (255,255,255), font=font3) # Clan Name
draw.text((682, 340), participants, (255,255,255), font=font1) # Participants
draw.text((682, 457), wins, (255,255,255), font=font1) # Wins
draw.text((682, 575), crowns, (255,255,255), font=font1) # Crowns
# scale down and return
scale = 0.5
        scaled_size = tuple(int(x * scale) for x in img.size)
img.thumbnail(scaled_size)
return img
async def getWarData(self, channel):
self.update_clans()
for clankey in self.clans.keys():
try:
clandata = requests.get('https://api.royaleapi.com/clan/{}/warlog'.format(self.clans[clankey]['tag']), headers=self.getAuth(), timeout=10).json()
except (requests.exceptions.Timeout, json.decoder.JSONDecodeError):
return
except requests.exceptions.RequestException as e:
print(e)
return
standings = clandata[0]['standings']
clanRank = await self.findRank(standings, "tag", self.clans[clankey]['tag'])
warTrophies = standings[clanRank]['warTrophies']
if self.clans[clankey]['warTrophies'] != warTrophies:
clanLeague = await self.getLeague(warTrophies)
image = await self.genImage(clanLeague, str(warTrophies), str(clanRank+1), standings[clanRank]['name'], str(standings[clanRank]['participants']), str(standings[clanRank]['wins']), str(standings[clanRank]['crowns']))
filename = "warlog-{}.png".format(clankey)
with io.BytesIO() as f:
image.save(f, "PNG")
f.seek(0)
await self.bot.send_file(channel, f, filename=filename)
self.clans[clankey]['warTrophies'] = warTrophies
self.save_clans()
await asyncio.sleep(1)
@commands.command(pass_context=True)
async def warlog(self, ctx):
"""Track Clan wars"""
channel = ctx.message.channel
await self.getWarData(channel)
def check_clans():
c = dataIO.load_json('cogs/clans.json')
for clankey in c.keys():
if 'members' not in c[clankey]:
c[clankey]['members'] = []
dataIO.save_json('cogs/clans.json', c)
def check_files():
f = "cogs/auth.json"
if not fileIO(f, "check"):
print("enter your RoyaleAPI token in auth.json...")
fileIO(f, "save", {"token" : "enter your RoyaleAPI token here!"})
def setup(bot):
check_files()
check_clans()
bot.add_cog(warlog(bot))
|
python
|
"""
Module to implement a plugin that looks for hard tabs in the files.
"""
from pymarkdown.plugin_manager import Plugin, PluginDetails
class RuleMd040(Plugin):
"""
Class to implement a plugin that looks for hard tabs in the files.
"""
def get_details(self):
"""
Get the details for the plugin.
"""
return PluginDetails(
# code, language
plugin_name="fenced-code-language",
plugin_id="MD040",
plugin_enabled_by_default=True,
plugin_description="Fenced code blocks should have a language specified",
plugin_version="0.5.0",
plugin_interface_version=1,
) # https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md040---fenced-code-blocks-should-have-a-language-specified
def next_token(self, context, token):
"""
Event that a new token is being processed.
"""
if token.is_fenced_code_block:
if not token.extracted_text.strip():
self.report_next_token_error(context, token)
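# Illustration: a fence opened with a bare ``` has an empty info string, so
# token.extracted_text.strip() is falsy and the rule reports it; opening the
# fence with ```python (or any language) satisfies MD040.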
|
python
|
from django.dispatch import Signal
# providing_args: "orderId", "recipientAmount"
result_received = Signal()
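# Sketch of a receiver for this signal (names here are illustrative); the
# documented arguments arrive as keyword arguments:
#
#     def on_result_received(sender, orderId=None, recipientAmount=None, **kwargs):
#         ...
#
#     result_received.connect(on_result_received)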
|
python
|
"""
Generic module for managing manual data transfer jobs using Galaxy's built-in file browser.
This module can be used by various external services that are configured to transfer data manually.
"""
import logging, urllib2, re, shutil
from data_transfer import *
log = logging.getLogger( __name__ )
__all__ = [ 'ManualDataTransferPlugin' ]
class ManualDataTransferPlugin( DataTransfer ):
def __init__( self, app ):
super( ManualDataTransferPlugin, self ).__init__( app )
def create_job( self, trans, **kwd ):
if 'sample' in kwd and 'sample_datasets' in kwd and 'external_service' in kwd and 'external_service_type' in kwd:
sample = kwd[ 'sample' ]
sample_datasets = kwd[ 'sample_datasets' ]
external_service = kwd[ 'external_service' ]
external_service_type = kwd[ 'external_service_type' ]
# TODO: is there a better way to store the protocol?
protocol = external_service_type.data_transfer.keys()[0]
host = external_service.form_values.content[ 'host' ]
user_name = external_service.form_values.content[ 'user_name' ]
password = external_service.form_values.content[ 'password' ]
# TODO: In the future, we may want to implement a way for the user to associate a selected file with one of
# the run outputs configured in the <run_details><results> section of the external service config file. The
# following was a first pass at implementing something (the datatype was included in the sample_dataset_dict),
# but without a way for the user to associate stuff it's useless. However, allowing the user this ability may
# open a can of worms, so maybe we shouldn't do it???
#
#for run_result_file_name, run_result_file_datatype in external_service_type.run_details[ 'results' ].items():
# # external_service_type.run_details[ 'results' ] looks something like: {'dataset1_name': 'dataset1_datatype'}
# if run_result_file_datatype in external_service.form_values.content:
# datatype = external_service.form_values.content[ run_result_file_datatype ]
#
# When the transfer is automatic (the process used in the SMRT Portal plugin), the datasets and datatypes
# can be matched up to those configured in the <run_details><results> settings in the external service type config
# (e.g., pacific_biosciences_smrt_portal.xml). However, that's a bit trickier here since the user is manually
# selecting files for transfer.
sample_datasets_dict = {}
for sample_dataset in sample_datasets:
sample_dataset_id = sample_dataset.id
sample_dataset_dict = dict( sample_id = sample_dataset.sample.id,
name = sample_dataset.name,
file_path = sample_dataset.file_path,
status = sample_dataset.status,
error_msg = sample_dataset.error_msg,
size = sample_dataset.size,
external_service_id = sample_dataset.external_service.id )
sample_datasets_dict[ sample_dataset_id ] = sample_dataset_dict
params = { 'type' : 'init_transfer',
'sample_id' : sample.id,
'sample_datasets_dict' : sample_datasets_dict,
'protocol' : protocol,
'host' : host,
'user_name' : user_name,
'password' : password }
elif 'transfer_job_id' in kwd:
params = { 'type' : 'finish_transfer',
'protocol' : kwd[ 'result' ][ 'protocol' ],
'sample_id' : kwd[ 'sample_id' ],
'result' : kwd[ 'result' ],
'transfer_job_id' : kwd[ 'transfer_job_id' ] }
else:
log.error( 'No job was created because kwd does not include "samples" and "sample_datasets" or "transfer_job_id".' )
return
deferred_job = self.app.model.DeferredJob( state=self.app.model.DeferredJob.states.NEW,
plugin='ManualDataTransferPlugin',
params=params )
self.sa_session.add( deferred_job )
self.sa_session.flush()
log.debug( 'Created a deferred job in the ManualDataTransferPlugin of type: %s' % params[ 'type' ] )
# TODO: error reporting to caller (if possible?)
def check_job( self, job ):
if self._missing_params( job.params, [ 'type' ] ):
return self.job_states.INVALID
if job.params[ 'type' ] == 'init_transfer':
if job.params[ 'protocol' ] in [ 'http', 'https' ]:
raise Exception( "Manual data transfer is not yet supported for http(s)." )
elif job.params[ 'protocol' ] == 'scp':
if self._missing_params( job.params, [ 'protocol', 'host', 'user_name', 'password', 'sample_id', 'sample_datasets_dict' ] ):
return self.job_states.INVALID
# TODO: what kind of checks do we need here?
return self.job_states.READY
return self.job_states.WAIT
if job.params[ 'type' ] == 'finish_transfer':
if self._missing_params( job.params, [ 'transfer_job_id' ] ):
return self.job_states.INVALID
# Get the TransferJob object and add it to the DeferredJob so we only look it up once.
if not hasattr( job, 'transfer_job' ):
job.transfer_job = self.sa_session.query( self.app.model.TransferJob ).get( int( job.params[ 'transfer_job_id' ] ) )
state = self.app.transfer_manager.get_state( job.transfer_job )
if not state:
log.error( 'No state for transfer job id: %s' % job.transfer_job.id )
return self.job_states.WAIT
if state[ 'state' ] in self.app.model.TransferJob.terminal_states:
return self.job_states.READY
log.debug( "Checked on finish transfer job %s, not done yet." % job.id )
return self.job_states.WAIT
else:
log.error( 'Unknown job type for ManualDataTransferPlugin: %s' % str( job.params[ 'type' ] ) )
return self.job_states.INVALID
|
python
|
# coding: utf-8
# In[36]:
# In[39]:
import numpy as np
import powerlaw
edges= np.array([[1,2],[0,2],[0,3],[2,3],[3,4],[4,1]])
class karpatiGraphSolution:
def __init__(self,edges):
assert type(edges)==np.ndarray, "input is not an edge list"
self.edgeList=edges
self.numNodes=np.amax(edges)+1
def give_me_matrix(self):
res=[[0] * self.numNodes for i in range(self.numNodes)]
for edge in self.edgeList:
res[edge[0]][edge[1]]=1
self.adjMat=res
return res
def isConnected(self):
rowSums=np.asarray(self.adjMat).sum(0)
colSums=np.asarray(self.adjMat).sum(1)
print(rowSums)
print(colSums)
total=rowSums+colSums
res=0 not in total
return res
def isStronglyConnected(self):
rowSums=np.asarray(self.adjMat).sum(0)
colSums=np.asarray(self.adjMat).sum(1)
print(rowSums)
print(colSums)
        res = (0 not in rowSums) and (0 not in colSums)
return res
def MST(self):
        assert self.isConnected(), "Sorry, your graph is not connected"
treeMST=set()
nodeInMST=set()
nodeInMST.add(self.edgeList[0][0])
print(nodeInMST)
for edge in self.edgeList:
if (edge[1] in nodeInMST and edge[0] not in nodeInMST):
print("LOL")
treeMST.add((edge[0],edge[1]))
nodeInMST.add(edge[0])
print(nodeInMST)
elif (edge[0] in nodeInMST and edge[1] not in nodeInMST):
print("LOL2")
nodeInMST.add(edge[1])
treeMST.add((edge[1],edge[0]))
print(nodeInMST)
#nodeInMST.add(edge[1])
if len(nodeInMST)==self.numNodes:
print("BREAKING")
break
return(treeMST)
def fitPowerLaw(self):
#get degree distribution
rowSums=np.asarray(self.adjMat).sum(0)
colSums=np.asarray(self.adjMat).sum(1)
total=rowSums+colSums
results=powerlaw.Fit(total)
print("LOL")
return(results.power_law.alpha,results.power_law.xmin)
sol=karpatiGraphSolution(edges)
cucc=sol.give_me_matrix()
cucc3=sol.MST()
print(cucc3)
cucc4=sol.fitPowerLaw()
print(cucc4)
# In[144]:
var = 100
if var == 200:
    print("1 - Got a true expression value")
    print(var)
elif var == 150:
    print("2 - Got a true expression value")
    print(var)
elif var == 100:
    print("3 - Got a true expression value")
    print(var)
else:
    print("4 - Got a false expression value")
    print(var)
print("Good bye!")
# In[ ]:
# In[ ]:
|
python
|
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
class GuildManagerAI(DistributedObjectAI):
notify = DirectNotifyGlobal.directNotify.newCategory('GuildManagerAI')
|
python
|
from xviz.builder.base_builder import XVIZBaseBuilder, CATEGORY
from xviz.v2.core_pb2 import Pose, MapOrigin
class XVIZPoseBuilder(XVIZBaseBuilder):
"""
# Reference
    [@xviz/builder/xviz-pose-builder](https://github.com/uber/xviz/blob/master/modules/builder/src/builders/xviz-pose-builder.js)
"""
def __init__(self, metadata, logger=None):
super().__init__(CATEGORY.POSE, metadata, logger)
self._poses = None
self.reset()
def reset(self):
super().reset()
self._category = CATEGORY.POSE
self._temp_pose = Pose()
def map_origin(self, longitude, latitude, altitude):
self._temp_pose.map_origin.longitude = longitude
self._temp_pose.map_origin.latitude = latitude
self._temp_pose.map_origin.altitude = altitude
return self
def position(self, x, y, z):
self._temp_pose.position.extend([x, y, z])
return self
def orientation(self, roll, pitch, yaw):
self._temp_pose.orientation.extend([roll, pitch, yaw])
return self
def timestamp(self, timestamp):
self._temp_pose.timestamp = timestamp
return self
def _flush(self):
if not self._poses:
self._poses = {}
self._poses[self._stream_id] = self._temp_pose
self._temp_pose = Pose()
def get_data(self):
if self._stream_id:
self._flush()
return self._poses
|
python
|
# SPDX-FileCopyrightText: 2022 Cedar Grove Maker Studios
# SPDX-License-Identifier: MIT
"""
touch_calibrator_built_in.py 2022-01-21 v2.1
Author(s): JG for Cedar Grove Maker Studios
On-screen touchscreen calibrator for built-in displays.
When the test screen appears, use a stylus to swipe to the four edges
of the visible display area. As the screen is calibrated, the small red
square tracks the stylus tip (REPL_ONLY=False). Minimum and maximum
calibration values will display on the screen and in the REPL. The calibration
tuple can be copied and pasted into the calling code's touchscreen
instantiation statement.
DISPLAY_ROTATION: Display rotation value in degrees. Only values of
None, 0, 90, 180, and 270 degrees are accepted. Defaults to None, the
previous orientation of the display.
REPL_ONLY: If False, calibration values are shown graphically on the screen
and printed to the REPL. If True, the values are only printed to the REPL.
Default value is False.
"""
import board
import time
import displayio
import vectorio
import terminalio
from adafruit_display_text.label import Label
import adafruit_touchscreen
from simpleio import map_range
# Operational parameters:
DISPLAY_ROTATION = 0 # Specify 0, 90, 180, or 270 degrees
REPL_ONLY = False # True to disable graphics
# A collection of colors used for graphic objects
class Colors:
BLUE_DK = 0x000060 # Screen fill
RED = 0xFF0000 # Boundary
WHITE = 0xFFFFFF # Text
# Instantiate the built-in display.
display = board.DISPLAY
# Check rotation value and update display.
# Always set rotation before instantiating the touchscreen.
if DISPLAY_ROTATION != None and DISPLAY_ROTATION in (0, 90, 180, 270):
display.rotation = DISPLAY_ROTATION
else:
print("Warning: invalid rotation value -- defalting to zero")
display.rotation = 0
time.sleep(1)
# Activate the display graphics unless REPL_ONLY=True.
if not REPL_ONLY:
display_group = displayio.Group()
display.show(display_group)
# Instantiate touch screen without calibration or display size parameters
if display.rotation == 0:
ts = adafruit_touchscreen.Touchscreen(
board.TOUCH_XL,
board.TOUCH_XR,
board.TOUCH_YD,
board.TOUCH_YU,
# calibration=((5200, 59000), (5250, 59500)),
# size=(board.DISPLAY.width, board.DISPLAY.height),
)
elif display.rotation == 90:
ts = adafruit_touchscreen.Touchscreen(
board.TOUCH_YU,
board.TOUCH_YD,
board.TOUCH_XL,
board.TOUCH_XR,
# calibration=((5250, 59500), (5200, 59000)),
# size=(board.DISPLAY.width, board.DISPLAY.height),
)
elif display.rotation == 180:
ts = adafruit_touchscreen.Touchscreen(
board.TOUCH_XR,
board.TOUCH_XL,
board.TOUCH_YU,
board.TOUCH_YD,
# calibration=((5200, 59000), (5250, 59500)),
# size=(board.DISPLAY.width, board.DISPLAY.height),
)
elif display.rotation == 270:
ts = adafruit_touchscreen.Touchscreen(
board.TOUCH_YD,
board.TOUCH_YU,
board.TOUCH_XR,
board.TOUCH_XL,
# calibration=((5250, 59500), (5200, 59000)),
# size=(board.DISPLAY.width, board.DISPLAY.height),
)
else:
raise ValueError("Rotation value must be 0, 90, 180, or 270")
# Define the graphic objects if REPL_ONLY = False.
if not REPL_ONLY:
# Define the text graphic objects
font_0 = terminalio.FONT
coordinates = Label(
font=font_0,
text="calib: ((x_min, x_max), (y_min, y_max))",
color=Colors.WHITE,
)
coordinates.anchor_point = (0.5, 0.5)
coordinates.anchored_position = (board.DISPLAY.width // 2, board.DISPLAY.height // 4)
display_rotation = Label(
font=font_0,
text="rotation: " + str(display.rotation),
color=Colors.WHITE,
)
display_rotation.anchor_point = (0.5, 0.5)
display_rotation.anchored_position = (board.DISPLAY.width // 2, board.DISPLAY.height // 4 - 30)
# Define graphic objects for the screen fill, boundary, and touch pen.
target_palette = displayio.Palette(1)
target_palette[0] = Colors.BLUE_DK
screen_fill = vectorio.Rectangle(
pixel_shader=target_palette,
x=2,
y=2,
width=board.DISPLAY.width - 4,
height=board.DISPLAY.height - 4,
)
target_palette = displayio.Palette(1)
target_palette[0] = Colors.RED
boundary = vectorio.Rectangle(
pixel_shader=target_palette,
x=0,
y=0,
width=board.DISPLAY.width,
height=board.DISPLAY.height,
)
pen = vectorio.Rectangle(
pixel_shader=target_palette,
x=board.DISPLAY.width // 2,
y=board.DISPLAY.height // 2,
width=10,
height=10,
)
display_group.append(boundary)
display_group.append(screen_fill)
display_group.append(pen)
display_group.append(coordinates)
display_group.append(display_rotation)
# Reset x and y values to raw touchscreen mid-point before measurement.
x_min = x_max = y_min = y_max = 65535 // 2
print("Touchscreen Calibrator")
print(" Use a stylus to swipe slightly beyond the")
print(" four edges of the visible display area.")
print(" ")
print(f" display rotation: {display.rotation} degrees")
print(" Calibration values follow:")
print(" ")
while True:
time.sleep(0.100)
touch = ts.touch_point # Check for touch
if touch:
if not REPL_ONLY:
pen.x = int(map_range(touch[0], x_min, x_max, 0, board.DISPLAY.width)) - 5
pen.y = int(map_range(touch[1], y_min, y_max, 0, board.DISPLAY.height)) - 5
# Remember minimum and maximum values for the calibration tuple.
x_min = min(x_min, touch[0])
x_max = max(x_max, touch[0])
y_min = min(y_min, touch[1])
y_max = max(y_max, touch[1])
# Show the calibration tuple.
print(f"(({x_min}, {x_max}), ({y_min}, {y_max}))")
if not REPL_ONLY:
coordinates.text = f"calib: (({x_min}, {x_max}), ({y_min}, {y_max}))"
|
python
|
from spaceone.inventory.libs.schema.dynamic_field import TextDyField, ListDyField, \
DateTimeDyField, EnumDyField, SearchField
from spaceone.inventory.libs.schema.resource import CloudServiceTypeResource, CloudServiceTypeResponse, \
CloudServiceTypeMeta
cst_elb = CloudServiceTypeResource()
cst_elb.name = 'LoadBalancer'
cst_elb.provider = 'aws'
cst_elb.group = 'ELB'
cst_elb.labels = ['Networking']
cst_elb.is_primary = True
cst_elb.is_major = True
cst_elb.service_code = 'AWSELB'
cst_elb.tags = {
'spaceone:icon': 'https://spaceone-custom-assets.s3.ap-northeast-2.amazonaws.com/console-assets/icons/cloud-services/aws/Elastic-Load-Balancing.svg',
}
cst_elb._metadata = CloudServiceTypeMeta.set_meta(
fields=[
TextDyField.data_source('Name', 'data.load_balancer_name'),
TextDyField.data_source('DNS Name', 'data.dns_name'),
EnumDyField.data_source('State', 'data.state.code', default_state={
'safe': ['active'],
'warning': ['provisioning'],
'alert': ['active_impaired', 'failed']
}),
EnumDyField.data_source('Type', 'data.type', default_badge={
'indigo.500': ['network'], 'coral.600': ['application']
}),
ListDyField.data_source('Availability Zones', 'data.availability_zones', options={
'sub_key': 'zone_name',
'delimiter': '<br>'
}),
DateTimeDyField.data_source('Created At', 'data.created_time'),
TextDyField.data_source('ARN', 'data.load_balancer_arn', options={
'is_optional': True
}),
TextDyField.data_source('Scheme', 'data.scheme', options={
'is_optional': True
}),
TextDyField.data_source('VPC ID', 'data.vpc_id', options={
'is_optional': True
}),
ListDyField.data_source('Subnet ID', 'data.availability_zones', options={
'delimiter': '<br>',
'sub_key': 'subnet_id',
'is_optional': True
}),
ListDyField.data_source('Availability Zone', 'data.availability_zones', options={
'delimiter': '<br>',
'sub_key': 'zone_name',
'is_optional': True
}),
TextDyField.data_source('Hosted Zone ID', 'data.canonical_hosted_zone_id', options={
'is_optional': True
}),
ListDyField.data_source('Security Groups', 'data.security_group', options={
'delimiter': '<br>',
'is_optional': True
}),
ListDyField.data_source('Listener IDs', 'data.listeners', options={
'delimiter': '<br>',
'sub_key': 'listener_arn',
'is_optional': True
}),
ListDyField.data_source('Protocols', 'data.listeners', options={
'delimiter': '<br>',
'sub_key': 'protocol',
'is_optional': True
}),
ListDyField.data_source('Ports', 'data.listeners', options={
'delimiter': '<br>',
'sub_key': 'port',
'is_optional': True
}),
TextDyField.data_source('IP Address Type', 'data.ip_address_type', options={
'is_optional': True
}),
TextDyField.data_source('Access Log S3 Bucket', 'data.attributes.access_logs_s3_bucket', options={
'is_optional': True
}),
TextDyField.data_source('Routing HTTP2 Enabled', 'data.attributes.routing_http2_enabled', options={
'is_optional': True
}),
        TextDyField.data_source('Idle Timeout Seconds', 'data.attributes.idle_timeout_seconds', options={
'is_optional': True
}),
TextDyField.data_source('Routing HTTP Drop Invalid Header Fields Enabled',
'data.attributes.routing_http_drop_invalid_header_fields_enabled', options={
'is_optional': True
}),
TextDyField.data_source('WAF Fail Open Enabled',
'data.attributes.waf_fail_open_enabled', options={
'is_optional': True
}),
TextDyField.data_source('Deletion Protection Enabled',
'data.attributes.deletion_protection_enabled', options={
'is_optional': True
}),
TextDyField.data_source('Routing HTTP Desync Mitigation Mode',
'data.attributes.routing_http_desync_mitigation_mode', options={
'is_optional': True
}),
TextDyField.data_source('Load Balancing Cross Zone Enabled',
'data.attributes.load_balancing_cross_zone_enabled', options={
'is_optional': True
}),
TextDyField.data_source('AWS Account ID', 'data.account_id', options={
'is_optional': True
})
],
search=[
SearchField.set(name='Name', key='data.load_balancer_name'),
SearchField.set(name='ARN', key='data.load_balancer_arn'),
SearchField.set(name='DNS Name', key='data.dns_name'),
SearchField.set(name='State', key='data.state'),
SearchField.set(name='Type', key='data.type',
enums={
'application': {'label': 'Application'},
'network': {'label': 'Network'},
}),
SearchField.set(name='Scheme', key='data.scheme',
enums={
'internet-facing': {'label': 'Internet Facing'},
'internal': {'label': 'Internal'},
}),
SearchField.set(name='VPC ID', key='data.vpc_id'),
SearchField.set(name='Availability Zone', key='data.availability_zones.zone_name'),
SearchField.set(name='Subnet ID', key='data.availability_zones.subnet_id'),
SearchField.set(name='Hosted Zone', key='data.canonical_hosted_zone_id'),
SearchField.set(name='Protocol', key='data.listeners.protocol',
enums={
'HTTP': {'label': 'HTTP'},
'HTTPS': {'label': 'HTTPS'},
'TCP': {'label': 'TCP'},
'UDP': {'label': 'UDP'},
'TLS': {'label': 'TLS'},
'TCP_UDP': {'label': 'TCP/UDP'},
}),
SearchField.set(name='Port', key='data.listeners.port', data_type='integer'),
SearchField.set(name='Deletion Protection', key='data.attributes.deletion_protection_enabled',
data_type='boolean'),
SearchField.set(name='Cross-Zone Load Balancing', key='data.attributes.load_balancing_cross_zone_enabled',
data_type='boolean'),
SearchField.set(name='Security Group ID', key='data.security_groups'),
SearchField.set(name='Listener ARN', key='data.listeners.listener_arn'),
SearchField.set(name='Created Time', key='data.created_time', data_type='datetime'),
SearchField.set(name='Region', key='data.region_name'),
SearchField.set(name='AWS Account ID', key='data.account_id'),
]
)
cst_tg = CloudServiceTypeResource()
cst_tg.name = 'TargetGroup'
cst_tg.provider = 'aws'
cst_tg.group = 'ELB'
cst_tg.labels = ['Networking']
cst_tg.service_code = 'AWSELB'
cst_tg.tags = {
'spaceone:icon': 'https://spaceone-custom-assets.s3.ap-northeast-2.amazonaws.com/console-assets/icons/cloud-services/aws/Elastic-Load-Balancing.svg',
}
cst_tg._metadata = CloudServiceTypeMeta.set_meta(
fields=[
TextDyField.data_source('Name', 'data.target_group_name'),
TextDyField.data_source('Port', 'data.port'),
TextDyField.data_source('Protocol', 'data.protocol'),
TextDyField.data_source('Target Type', 'data.target_type'),
ListDyField.data_source('Load Balancers', 'data.load_balancer_arns', options={
'delimiter': '<br>'
}),
EnumDyField.data_source('Health Check', 'data.health_check_enabled', default_badge={
'indigo.500': ['true'], 'coral.600': ['false']
}),
TextDyField.data_source('ARN', 'data.target_group_arn', options={
'is_optional': True
}),
TextDyField.data_source('VPC ID', 'data.vpc_id', options={
'is_optional': True
}),
TextDyField.data_source('Healthy Threshold Count', 'data.healthy_threshold_count', options={
'is_optional': True
}),
TextDyField.data_source('Unhealthy Threshold Count', 'data.unhealthy_threshold_count', options={
'is_optional': True
}),
TextDyField.data_source('Health Check Enabled', 'data.health_check_enabled', options={
'is_optional': True
}),
TextDyField.data_source('Health Check Timeout Seconds', 'data.health_check_timeout_seconds', options={
'is_optional': True
}),
TextDyField.data_source('Health Check Interval Seconds', 'data.health_check_interval_seconds', options={
'is_optional': True
}),
TextDyField.data_source('Deregistration Delay Timeout Seconds', 'data.deregistration_delay_timeout_seconds',
options={'is_optional': True}),
TextDyField.data_source('Slow Start Duration Seconds', 'data.slow_start_duration_seconds', options={
'is_optional': True
}),
TextDyField.data_source('Stickiness Enabled', 'data.stickiness_enabled', options={
'is_optional': True
}),
TextDyField.data_source('Stickiness Type', 'data.stickiness_type', options={
'is_optional': True
}),
TextDyField.data_source('Load Balancing Algorithm Type', 'data.load_balancing_algorithm_type', options={
'is_optional': True
}),
TextDyField.data_source('Stickiness LB Cookie Duration Seconds', 'data.stickiness_lb_cookie_duration_seconds',
options={'is_optional': True}),
TextDyField.data_source('AWS Account ID', 'data.account_id', options={
'is_optional': True
})
],
search=[
SearchField.set(name='Name', key='data.target_group_name'),
        SearchField.set(name='ARN', key='data.target_group_arn'),
SearchField.set(name='Protocol', key='data.protocol',
enums={
'HTTP': {'label': 'HTTP'},
'HTTPS': {'label': 'HTTPS'},
'TCP': {'label': 'TCP'},
'UDP': {'label': 'UDP'},
'TLS': {'label': 'TLS'},
'TCP_UDP': {'label': 'TCP/UDP'},
}),
SearchField.set(name='Port', key='data.port', data_type='integer'),
SearchField.set(name='Target Type', key='data.target_type',
enums={
'instance': {'label': 'Instance'},
'ip': {'label': 'IP'},
'lambda': {'label': 'Lambda'},
}),
SearchField.set(name='VPC ID', key='data.vpc_id'),
SearchField.set(name='Stickiness', key='data.attributes.stickiness_enabled',
enums={
'Enabled': {'label': 'Enabled'},
'Disabled': {'label': 'Disabled'}
}),
SearchField.set(name='Stickiness Type', key='data.attributes.stickiness_type',
enums={
'lb_cookie': {'label': 'LB Cookie'},
'source_ip': {'label': 'Source IP'}
}),
SearchField.set(name='Region', key='data.region_name'),
SearchField.set(name='AWS Account ID', key='data.account_id'),
]
)
CLOUD_SERVICE_TYPES = [
CloudServiceTypeResponse({'resource': cst_elb}),
CloudServiceTypeResponse({'resource': cst_tg}),
]
|
python
|
import csv
import matplotlib.pyplot as plt
import numpy as np
CTEs = list()
error = list()
labels = list()
with open('results.csv', 'r') as file:
reader = csv.DictReader(file)
for line in reader:
labels.append(line['permutation'].replace(",","\n"))
CTEs.append(float(line['accuracy']))
error.append(float(line['std']))
x_pos = np.arange(len(labels))
print(CTEs,error)
# Build the plot
fig, ax = plt.subplots()
fig.set_size_inches(10, 5)
ax.bar(x_pos, CTEs,
yerr=error,
align='center',
alpha=0.5,
ecolor='black',
capsize=10)
ax.set_ylabel('Accuracy')
ax.set_xlabel('Feature Categories')
ax.set_xticks(x_pos)
ax.set_xticklabels(labels)
ax.set_title('Model performance by feature categories')
ax.yaxis.grid(True)
# Save the figure and show
plt.tight_layout()
plt.savefig('subset.pdf')
plt.show()
|
python
|
import numpy as np
import uncertainties.unumpy as unp
def center():
return None # or the arg-number of the center.
def getCenter(args):
# return the average
return (args[1] + args[4])/2
def args():
return ('Amp1', 'Center1', 'Sigma1', 'Amp2', 'Center2', 'Sigma2', 'Offset')
def f(x, A1, x01, sig1, A2, x02, sig2, offset):
"""
The normal function call for this function. Performs checks on valid arguments, then calls the "raw" function.
:return:
"""
penalty = 10**10 * np.ones(len(x))
if A1 > 0 or A2 > 0:
# Penalize positive amplitude fits.
return penalty
if offset > 1:
return penalty
if not (min(x) < x01 < max(x) and min(x) < x02 < max(x)):
# penalize if center is not on the graph
return penalty
# assume that there's at least a little peak
#if A1 < 1 or A2 < 1:
# return penalty
# The fitting of the second gaussian then sometimes assumes it's even broader than it is to make it an effective offset.
#r = max(x) - min(x)
#if sig1 > r/5 or sig2 > r/5:
# return penalty
return f_raw(x, A1, x01, sig1, A2, x02, sig2, offset)
def f_raw(x, A1, x01, sig1, A2, x02, sig2, offset):
"""
The raw function call, performs no checks on valid parameters..
:return:
"""
return offset + A1 * np.exp(-(x-x01)**2/(2*sig1**2)) + A2 * np.exp(-(x-x02)**2/(2*sig2**2))
def f_unc(x, A1, x01, sig1, A2, x02, sig2, offset):
"""
similar to the raw function call, but uses unp instead of np for uncertainties calculations.
:return:
"""
return offset + A1 * unp.exp(-(x-x01)**2/(2*sig1**2)) + A2 * unp.exp(-(x-x02)**2/(2*sig2**2))
def guess(key, values):
"""
Returns guess values for the parameters of this function class based on the input. Used for fitting using this
class.
:param key:
:param values:
:return:
"""
a = 0.6*min(values) - max(values)
dx = max(key)-min(key)
minLoc = key[np.argmin(values)]
return [a, minLoc,
dx/20,
0.8*a,
minLoc+dx/9,
#2*(min(key) + 0-5 * dx - minLoc) + minLoc # other side of middle
dx/32, max(values)]
def areas(A1, x01, sig1, A2, x02, sig2):
return np.array([A1*sig1,A2*sig2])*np.sqrt(2*np.pi)
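# Minimal self-test sketch (assumptions: scipy is installed and this module is run directly;
# this module is normally driven by an external fitter, so curve_fit here only illustrates
# how f_raw, guess, and args fit together on synthetic data).
if __name__ == "__main__":
    from scipy.optimize import curve_fit
    x = np.linspace(0, 10, 400)
    y_true = f_raw(x, -5.0, 3.0, 0.4, -4.0, 6.0, 0.3, 1.0)
    y_noisy = y_true + 0.05 * np.random.randn(x.size)
    p0 = guess(x, y_noisy)  # heuristic initial parameters defined above
    popt, _ = curve_fit(f_raw, x, y_noisy, p0=p0)
    print(dict(zip(args(), popt)))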
|
python
|
# check file encoding format
# import chardet
# f = open('test-1000.txt', 'rb')
# result = chardet.detect(f.read())
# print(result)
import codecs
f = codecs.open("dev-1000.txt", 'r', 'GB18030')
ff = f.read()
file_object = codecs.open('dev-1000-new.txt', 'w', 'utf-8')
file_object.write(ff)
file_object.close()
f.close()
# with open("test-1000.txt", 'r') as file:
# for line in file.readlines():
# print(line)
# count = 3001
# with codecs.open("train-3000.txt", "a", "utf-8") as train:
# with codecs.open("test-1000.txt", "r", "utf-8") as test:
# train.write(test.read())
# for line in test.readlines():
# new_line = ""
# new_line += str(count) + '\t'
# for s in range(1, len(line.strip('\t'))):
# if s == len(line.strip("\t")) - 1:
# new_line += line.strip("\t")[s] + '\n'
# else:
# new_line += line.strip("\t")[s] + "\t"
# train.write(new_line)
# count += 1
|
python
|
from newGui import Ui_MainWindow
import sys
from pyqtgraph import PlotWidget ,PlotItem
import os
import pathlib
import pyqtgraph as pg
import pandas as pd
import numpy as np
from PyQt5 import QtCore, QtGui, QtWidgets ,QtPrintSupport
#--------- to save as pdf ------------#
def print_widget(widget, filename):
printer =QtPrintSupport.QPrinter(QtPrintSupport.QPrinter.HighResolution)
    printer.setOutputFormat(QtPrintSupport.QPrinter.PdfFormat)
printer.setOutputFileName(filename)
painter = QtGui.QPainter(printer)
# start scale
xscale = printer.pageRect().width() * 1.0 / widget.width()
yscale = printer.pageRect().height() * 1.0 / widget.height()
scale = min(xscale, yscale)
painter.translate(printer.paperRect().center())
painter.scale(scale, scale)
painter.translate(-widget.width() / 2, -widget.height() / 2)
# end scale
widget.render(painter)
painter.end()
class ApplicationWindow(QtWidgets.QMainWindow):
def __init__(self):
super(ApplicationWindow, self).__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.ui.actionsiganl_1.triggered.connect(lambda:self.opensignal1())
def readsignal1(self):
        self.fname1=QtWidgets.QFileDialog.getOpenFileName(self,'open only txt file',os.getenv('home'),"text(*.txt)")
path=self.fname1[0]
self.data1=np.genfromtxt(path)
def opensignal1(self):
self.readsignal1()
self.data_line1 = self.ui.signal_1.plot(self.data1, name="mode2")
self.ptr1 = 0
self.n = 0
# Set timer
self.timer = pg.QtCore.QTimer()
# Timer signal binding update_data function
self.timer.timeout.connect(self.update_data)
# The timer interval is 50ms, which can be understood as refreshing data once in 50ms
self.timer.start(50)
        self.ui.signal_1.show()
#Data shift left
def update_data(self):
self.n += 10
self.data_line1.setData(self.data1[0 : 100+self.n])
self.data_line1.setPos(self.ptr1,0)
#----- save as pdf ---#
# def savepdf(self):
# fn, _ = QtWidgets.QFileDialog.getSaveFileName(
# self, "Export PDF", None, "PDF files (.pdf);;All Files()")
# if fn:
# if QtCore.QFileInfo(fn).suffix() == "": fn += ".pdf"
# print_widget(MainWindow , fn)
def main():
app = QtWidgets.QApplication(sys.argv)
application = ApplicationWindow()
application.show()
app.exec_()
if __name__ == "__main__":
main()
|
python
|
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def export_to_tensorflow(self, path):
"""
Export frame to TensorFlow Records file on given path
    TensorFlow records are the standard data format for TensorFlow. The recommended format for TensorFlow is a TFRecords file
    containing tf.train.Example protocol buffers (which contain Features as a field).
https://www.tensorflow.org/how_tos/reading_data
During export, the API parses Spark SQL DataTypes to TensorFlow compatible DataTypes as below:
* IntegerType or LongType => Int64List
* FloatType or DoubleType => FloatList
* ArrayType(Double) [Vector] => FloatList
* Any other DataType (Ex: String) => BytesList
Parameters
----------
:param path: (str) HDFS/Local path to export current frame as TensorFlow records
Examples
--------
>>> file_path = "../datasets/cities.csv"
>>> frame = tc.frame.import_csv(file_path, "|", header=True)
-etc-
>>> frame.sort("rank")
>>> frame.inspect()
[#] rank city population_2013 population_2010 change county
============================================================================
[0] 1 Portland 609456 583776 4.40% Multnomah
[1] 2 Salem 160614 154637 3.87% Marion
[2] 3 Eugene 159190 156185 1.92% Lane
[3] 4 Gresham 109397 105594 3.60% Multnomah
[4] 5 Hillsboro 97368 91611 6.28% Washington
[5] 6 Beaverton 93542 89803 4.16% Washington
[6] 7 Bend 81236 76639 6.00% Deschutes
[7] 8 Medford 77677 74907 3.70% Jackson
[8] 9 Springfield 60177 59403 1.30% Lane
[9] 10 Corvallis 55298 54462 1.54% Benton
>>> destPath = "../tests/sandbox/output24.tfr"
>>> import os
    ... if os.path.exists(destPath): os.remove(destPath)
>>> frame.export_to_tensorflow(destPath)
Check for output24.tfr in specified destination path either on Local or HDFS file system
"""
self._scala.exportToTensorflow(path)
|
python
|
# Generated by Django 2.1 on 2019-06-19 12:32
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('articles', '0004_merge_20190618_0923'),
('articles', '0004_merge_20190618_1311'),
]
operations = [
]
|
python
|
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from django.urls import path, include
urlpatterns = [
path('', include('frontend.urls')),
path('api/', include('backend.urls')),
path('api/auth/', include('accounts.urls')),
path('admin/', admin.site.urls),
]
# Serve static files in development
if settings.DEBUG:
urlpatterns += static('/', document_root=settings.STATIC_ROOT)
|
python
|
import logging
import matplotlib.pyplot as plt
import mrcfile
import numpy as np
from scipy.linalg import lstsq
import aspire.volume
from aspire.nufft import anufft
from aspire.numeric import fft, xp
from aspire.utils import crop_pad_2d, grid_2d
from aspire.utils.matrix import anorm
logger = logging.getLogger(__name__)
def _im_translate2(im, shifts):
"""
Translate image by shifts
:param im: An Image instance to be translated.
:param shifts: An array of size n-by-2 specifying the shifts in pixels.
    Alternatively, it can be a row vector of length 2, in which case the same shift is applied to each image.
:return: An Image instance translated by the shifts.
TODO: This implementation has been moved here from aspire.aspire.abinitio and is faster than _im_translate.
"""
if not isinstance(im, Image):
logger.warning(
"_im_translate2 expects an Image, attempting to convert array."
"Expects array of size n-by-L-by-L."
)
im = Image(im)
if shifts.ndim == 1:
shifts = shifts[np.newaxis, :]
n_shifts = shifts.shape[0]
if shifts.shape[1] != 2:
raise ValueError("Input `shifts` must be of size n-by-2")
if n_shifts != 1 and n_shifts != im.n_images:
raise ValueError("The number of shifts must be 1 or match the number of images")
resolution = im.res
grid = xp.asnumpy(
fft.ifftshift(xp.asarray(np.ceil(np.arange(-resolution / 2, resolution / 2))))
)
om_y, om_x = np.meshgrid(grid, grid)
phase_shifts = np.einsum("ij, k -> ijk", om_x, shifts[:, 0]) + np.einsum(
"ij, k -> ijk", om_y, shifts[:, 1]
)
    # TODO: figure out why the result of einsum requires reshape
phase_shifts = phase_shifts.reshape(n_shifts, resolution, resolution)
phase_shifts /= resolution
mult_f = np.exp(-2 * np.pi * 1j * phase_shifts)
im_f = xp.asnumpy(fft.fft2(xp.asarray(im.asnumpy())))
im_translated_f = im_f * mult_f
im_translated = np.real(xp.asnumpy(fft.ifft2(xp.asarray(im_translated_f))))
return Image(im_translated)
def normalize_bg(imgs, bg_radius=1.0, do_ramp=True):
"""
Normalize backgrounds and apply to a stack of images
:param imgs: A stack of images in N-by-L-by-L array
:param bg_radius: Radius cutoff to be considered as background (in image size)
:param do_ramp: When it is `True`, fit a ramping background to the data
and subtract. Namely perform normalization based on values from each image.
Otherwise, a constant background level from all images is used.
:return: The modified images
"""
L = imgs.shape[-1]
grid = grid_2d(L, indexing="yx")
mask = grid["r"] > bg_radius
if do_ramp:
# Create matrices and reshape the background mask
# for fitting a ramping background
ramp_mask = np.vstack(
(
grid["x"][mask].flatten(),
grid["y"][mask].flatten(),
np.ones(grid["y"][mask].flatten().size),
)
).T
ramp_all = np.vstack(
(grid["x"].flatten(), grid["y"].flatten(), np.ones(L * L))
).T
mask_reshape = mask.reshape((L * L))
imgs = imgs.reshape((-1, L * L))
# Fit a ramping background and apply to images
coeff = lstsq(ramp_mask, imgs[:, mask_reshape].T)[0] # RCOPT
imgs = imgs - (ramp_all @ coeff).T # RCOPT
imgs = imgs.reshape((-1, L, L))
# Apply mask images and calculate mean and std values of background
imgs_masked = imgs * mask
denominator = np.sum(mask)
first_moment = np.sum(imgs_masked, axis=(1, 2)) / denominator
second_moment = np.sum(imgs_masked**2, axis=(1, 2)) / denominator
mean = first_moment.reshape(-1, 1, 1)
variance = second_moment.reshape(-1, 1, 1) - mean**2
std = np.sqrt(variance)
return (imgs - mean) / std
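# Usage sketch (hypothetical data): normalize a stack of images, fitting and removing a
# ramping background estimated from the pixels outside bg_radius.
#   imgs = np.random.randn(10, 64, 64)
#   imgs_normalized = normalize_bg(imgs, bg_radius=1.0, do_ramp=True)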
class Image:
def __init__(self, data, dtype=None):
"""
A stack of one or more images.
This is a wrapper of numpy.ndarray which provides methods
for common processing tasks.
:param data: Numpy array containing image data with shape `(n_images, res, res)`.
:param dtype: Optionally cast `data` to this dtype. Defaults to `data.dtype`.
:return: Image instance storing `data`.
"""
assert isinstance(
data, np.ndarray
), "Image should be instantiated with an ndarray"
if data.ndim == 2:
data = data[np.newaxis, :, :]
if dtype is None:
self.dtype = data.dtype
else:
self.dtype = np.dtype(dtype)
self.data = data.astype(self.dtype, copy=False)
self.ndim = self.data.ndim
self.shape = self.data.shape
self.n_images = self.shape[0]
self.res = self.shape[1]
assert data.shape[1] == data.shape[2], "Only square ndarrays are supported."
def __getitem__(self, item):
return self.data[item]
def __setitem__(self, key, value):
self.data[key] = value
def __add__(self, other):
if isinstance(other, Image):
other = other.data
return Image(self.data + other)
def __sub__(self, other):
if isinstance(other, Image):
other = other.data
return Image(self.data - other)
def __mul__(self, other):
if isinstance(other, Image):
other = other.data
return Image(self.data * other)
def __neg__(self):
return Image(-self.data)
def sqrt(self):
return Image(np.sqrt(self.data))
def flip_axes(self):
return Image(np.transpose(self.data, (0, 2, 1)))
def __repr__(self):
return f"{self.n_images} images of size {self.res}x{self.res}"
def asnumpy(self):
return self.data
def copy(self):
return Image(self.data.copy())
def shift(self, shifts):
"""
Translate image by shifts. This method returns a new Image.
:param shifts: An array of size n-by-2 specifying the shifts in pixels.
Alternatively, it can be a column vector of length 2, in which case
        the same shift is applied to each image.
:return: The Image translated by the shifts, with periodic boundaries.
"""
if shifts.ndim == 1:
shifts = shifts[np.newaxis, :]
return self._im_translate(shifts)
def downsample(self, ds_res):
"""
Downsample Image to a specific resolution. This method returns a new Image.
:param ds_res: int - new resolution, should be <= the current resolution
of this Image
:return: The downsampled Image object.
"""
# compute FT with centered 0-frequency
fx = fft.centered_fft2(self.data)
# crop 2D Fourier transform for each image
crop_fx = np.array([crop_pad_2d(fx[i], ds_res) for i in range(self.n_images)])
# take back to real space, discard complex part, and scale
out = np.real(fft.centered_ifft2(crop_fx)) * (ds_res**2 / self.res**2)
return Image(out)
def filter(self, filter):
"""
Apply a `Filter` object to the Image and returns a new Image.
:param filter: An object of type `Filter`.
:return: A new filtered `Image` object.
"""
filter_values = filter.evaluate_grid(self.res)
im_f = xp.asnumpy(fft.centered_fft2(xp.asarray(self.data)))
if im_f.ndim > filter_values.ndim:
im_f *= filter_values
else:
im_f = filter_values * im_f
im = xp.asnumpy(fft.centered_ifft2(xp.asarray(im_f)))
im = np.real(im)
return Image(im)
def rotate(self):
raise NotImplementedError
def save(self, mrcs_filepath, overwrite=False):
with mrcfile.new(mrcs_filepath, overwrite=overwrite) as mrc:
# original input format (the image index first)
mrc.set_data(self.data.astype(np.float32))
def _im_translate(self, shifts):
"""
Translate image by shifts
:param im: An array of size n-by-L-by-L containing images to be translated.
:param shifts: An array of size n-by-2 specifying the shifts in pixels.
        Alternatively, it can be a row vector of length 2, in which case the same shift is applied to each image.
:return: The images translated by the shifts, with periodic boundaries.
TODO: This implementation is slower than _im_translate2
"""
im = self.data
if shifts.ndim == 1:
shifts = shifts[np.newaxis, :]
n_shifts = shifts.shape[0]
assert shifts.shape[-1] == 2, "shifts must be nx2"
assert (
n_shifts == 1 or n_shifts == self.n_images
), "number of shifts must be 1 or match the number of images"
# Cast shifts to this instance's internal dtype
shifts = shifts.astype(self.dtype)
L = self.res
im_f = xp.asnumpy(fft.fft2(xp.asarray(im)))
grid_shifted = fft.ifftshift(
xp.asarray(np.ceil(np.arange(-L / 2, L / 2, dtype=self.dtype)))
)
grid_1d = xp.asnumpy(grid_shifted) * 2 * np.pi / L
om_x, om_y = np.meshgrid(grid_1d, grid_1d, indexing="ij")
phase_shifts_x = -shifts[:, 0].reshape((n_shifts, 1, 1))
phase_shifts_y = -shifts[:, 1].reshape((n_shifts, 1, 1))
phase_shifts = (
om_x[np.newaxis, :, :] * phase_shifts_x
+ om_y[np.newaxis, :, :] * phase_shifts_y
)
mult_f = np.exp(-1j * phase_shifts)
im_translated_f = im_f * mult_f
im_translated = xp.asnumpy(fft.ifft2(xp.asarray(im_translated_f)))
im_translated = np.real(im_translated)
return Image(im_translated)
def norm(self):
return anorm(self.data)
@property
def size(self):
# probably not needed, transition
return np.size(self.data)
def backproject(self, rot_matrices):
"""
Backproject images along rotation
:param im: An Image (stack) to backproject.
:param rot_matrices: An n-by-3-by-3 array of rotation matrices \
corresponding to viewing directions.
        :return: Volume instance corresponding to the backprojected images.
"""
L = self.res
assert (
self.n_images == rot_matrices.shape[0]
), "Number of rotation matrices must match the number of images"
# TODO: rotated_grids might as well give us correctly shaped array in the first place
pts_rot = aspire.volume.rotated_grids(L, rot_matrices)
pts_rot = pts_rot.reshape((3, -1))
im_f = xp.asnumpy(fft.centered_fft2(xp.asarray(self.data))) / (L**2)
if L % 2 == 0:
im_f[:, 0, :] = 0
im_f[:, :, 0] = 0
im_f = im_f.flatten()
vol = anufft(im_f, pts_rot[::-1], (L, L, L), real=True) / L
return aspire.volume.Volume(vol)
def show(self, columns=5, figsize=(20, 10)):
"""
Plotting Utility Function.
:param columns: Number of columns in a row of plots.
:param figsize: Figure size in inches, consult `matplotlib.figure`.
"""
# We never need more columns than images.
columns = min(columns, self.n_images)
plt.figure(figsize=figsize)
for i, im in enumerate(self):
plt.subplot(self.n_images // columns + 1, columns, i + 1)
plt.imshow(im, cmap="gray")
plt.show()
class CartesianImage(Image):
def expand(self, basis):
return BasisImage(basis)
class PolarImage(Image):
def expand(self, basis):
return BasisImage(basis)
class BispecImage(Image):
def expand(self, basis):
return BasisImage(basis)
class BasisImage(Image):
def __init__(self, basis):
self.basis = basis
def evaluate(self):
return CartesianImage()
class FBBasisImage(BasisImage):
pass
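# Usage sketch (hypothetical sizes) for the wrapper API defined above:
#   im = Image(np.random.randn(3, 64, 64))      # stack of three 64x64 images
#   shifted = im.shift(np.array([[1.0, 2.0]]))  # same shift applied to every image
#   small = im.downsample(32)                   # Fourier-crop each image to 32x32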
|
python
|
# -*- coding:utf-8 -*-
# Copyright 2015 NEC Corporation. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
from org.o3project.odenos.remoteobject.manager.system.event.component_connection_changed\
import ComponentConnectionChanged
from org.o3project.odenos.remoteobject.transport.message_dispatcher\
import MessageDispatcher
from org.o3project.odenos.core.component.logic\
import Logic
from org.o3project.odenos.remoteobject.message.event import Event
from org.o3project.odenos.remoteobject.message.response import Response
from org.o3project.odenos.core.util.network_interface import NetworkInterface
from org.o3project.odenos.core.component.network.topology.port import Port
from org.o3project.odenos.core.component.network.topology.node import Node
from org.o3project.odenos.core.component.network.topology.link import Link
from org.o3project.odenos.core.component.network.flow.flow import Flow
from org.o3project.odenos.core.component.network.packet.in_packet import InPacket
from org.o3project.odenos.core.component.network.packet.out_packet import OutPacket
from org.o3project.odenos.core.component.network.flow.basic.basic_flow_match import\
BasicFlowMatch
from org.o3project.odenos.core.component.network.packet.in_packet_added\
import InPacketAdded
from org.o3project.odenos.core.component.network.packet.out_packet_added\
import OutPacketAdded
import unittest
from contextlib import nested
from mock import Mock, MagicMock, patch
class LogicTest(unittest.TestCase):
Message = MagicMock()
value = {}
result = {}
def setUp(self):
self.target = Logic(
"cc_action",
self.Message)
def tearDown(self):
self.target = None
def test_constructor(self):
conversion_table = self.target._conversion_table
self.assertEqual(
self.target._object_property._object_property["type"],
"Logic")
self.assertEqual(
self.target._object_property._object_property["id"],
"cc_action")
self.assertEqual(
conversion_table._ConversionTable__connection_type_map, {})
self.assertEqual(
conversion_table._ConversionTable__network_conversion_table, {})
self.assertEqual(
conversion_table._ConversionTable__node_conversion_table, {})
self.assertEqual(
conversion_table._ConversionTable__port_conversion_table, {})
self.assertEqual(
conversion_table._ConversionTable__link_conversion_table, {})
self.assertEqual(
conversion_table._ConversionTable__flow_conversion_table, {})
self.assertEqual(self.target._network_interfaces, {})
self.assertEqual(self.target._Logic__subscription_table, {})
def test_conversion_table(self):
self.assertEqual(self.target.conversion_table(), self.target._conversion_table)
def test_do_event_componentconnectionchanged_add_action_not_Exist(self):
with patch("logging.debug") as logging_debug:
self.value = {"action": "add",
"prev": None,
"curr": {"id": "slicer1->network1",
"type": "LogicAndNetwork",
"connection_type": "original",
"state": "initializing",
"logic_id": "slicer1",
"network_id": "network1"}}
self.result = Response("add", self.value)
self.target._do_event_componentconnectionchanged(self.result)
self.assertEqual(
self.target._network_interfaces["network1"].network_id,
"network1")
self.assertEqual(
logging_debug.call_count, 4)
def test_do_event_componentconnectionchanged_add_action_Exist(self):
with patch("logging.debug") as logging_debug:
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.value = {"action": "add",
"prev": None,
"curr": {"id": "slicer1->network1",
"type": "LogicAndNetwork",
"connection_type": "original",
"state": "initializing",
"logic_id": "slicer1",
"network_id": "network1"}}
self.result = Response("add", self.value)
self.target._do_event_componentconnectionchanged(self.result)
self.assertEqual(
self.target._network_interfaces["network1"].network_id,
"network1")
self.assertEqual(
logging_debug.call_count, 3)
def test_do_event_componentconnectionchanged_update_action(self):
with patch("logging.debug") as logging_debug:
self.value = {"action": "update",
"prev": {"id": "slicer1->network1",
"type": "LogicAndNetwork",
"connection_type": "original",
"state": "initializing",
"logic_id": "slicer1",
"network_id": "network1"},
"curr": {"id": "slicer1->network1",
"type": "LogicAndNetwork",
"connection_type": "original",
"state": "initializing",
"logic_id": "slicer1",
"network_id": "network2"}}
self.result = Response("add", self.value)
self.target._do_event_componentconnectionchanged(self.result)
self.assertEqual(
logging_debug.call_count, 3)
def test_do_event_componentconnectionchanged_delete_action(self):
self.target._network_interfaces = {"network1": "network1_value",
"network2": "network2_value"}
with patch("logging.debug") as logging_debug:
self.value = {"action": "delete",
"prev": {"id": "slicer1->network1",
"type": "LogicAndNetwork",
"connection_type": "original",
"state": "initializing",
"logic_id": "slicer1",
"network_id": "network2"},
"curr": None}
self.result = Response("add", self.value)
self.target._do_event_componentconnectionchanged(self.result)
self.assertEqual(
self.target._network_interfaces,
{"network1": "network1_value"})
self.assertEqual(
logging_debug.call_count, 3)
def test_do_event_componentconnectionchanged_other_action(self):
with patch("logging.debug") as logging_debug:
self.value = {"action": "other_action",
"prev": {"id": "slicer1->network1",
"type": "LogicAndNetwork",
"connection_type": "original",
"state": "initializing",
"logic_id": "slicer1",
"network_id": "network2"},
"curr": None}
self.result = Response("add", self.value)
self.target._do_event_componentconnectionchanged(self.result)
self.assertEqual(
logging_debug.call_count, 1)
def test_do_event_componentconnectionchanged_Error(self):
with nested(patch("logging.debug"),
patch("logging.error")) as (logging_debug,
logging_error):
self.value = {"error": "other_action"}
self.result = Response("add", self.value)
self.target._do_event_componentconnectionchanged(self.result)
self.assertEqual(
logging_debug.call_count, 0)
self.assertEqual(
logging_error.call_count, 1)
def test_connection_changed_added_pre(self):
with patch("logging.debug") as logging_debug:
self.value = {"action": "add",
"prev": None,
"curr": {"id": "slicer1->network1",
"type": "LogicAndNetwork",
"connection_type": "original",
"state": "initializing",
"logic_id": "slicer1",
"network_id": "network2"}}
self.value = ComponentConnectionChanged.create_from_packed(
self.value)
self.result = self.target._connection_changed_added_pre(self.value)
self.assertEqual(self.result, True)
def test_connection_changed_update_pre(self):
self.value = {"action": "update",
"prev": {"id": "slicer1->network1",
"type": "LogicAndNetwork",
"connection_type": "original",
"state": "initializing",
"logic_id": "slicer1",
"network_id": "network1"},
"curr": {"id": "slicer1->network1",
"type": "LogicAndNetwork",
"connection_type": "original",
"state": "initializing",
"logic_id": "slicer1",
"network_id": "network2"}}
self.value = ComponentConnectionChanged.create_from_packed(self.value)
self.result = self.target._connection_changed_update_pre(self.value)
self.assertEqual(self.result, True)
def test_connection_changed_delete_pre(self):
self.value = {"action": "delete",
"prev": {"id": "slicer1->network1",
"type": "LogicAndNetwork",
"connection_type": "original",
"state": "initializing",
"logic_id": "slicer1",
"network_id": "network1"},
"curr": None}
self.value = ComponentConnectionChanged.create_from_packed(self.value)
self.result = self.target._connection_changed_delete_pre(self.value)
self.assertEqual(self.result, True)
def test_connection_changed_added(self):
with patch("logging.debug") as logging_debug:
self.value = {"action": "add",
"prev": None,
"curr": {"id": "slicer1->network1",
"type": "LogicAndNetwork",
"connection_type": "original",
"state": "initializing",
"logic_id": "slicer1",
"network_id": "network2"}}
self.value =\
ComponentConnectionChanged.create_from_packed(self.value)
self.result =\
self.target._connection_changed_update_pre(self.value)
self.assertEqual(
logging_debug.call_count, 1)
def test_connection_changed_update(self):
with patch("logging.debug") as logging_debug:
self.value = {"action": "update",
"prev": {"id": "slicer1->network1",
"type": "LogicAndNetwork",
"connection_type": "original",
"state": "initializing",
"logic_id": "slicer1",
"network_id": "network1"},
"curr": {"id": "slicer1->network1",
"type": "LogicAndNetwork",
"connection_type": "original",
"state": "initializing",
"logic_id": "slicer1",
"network_id": "network2"}}
self.value =\
ComponentConnectionChanged.create_from_packed(self.value)
self.result =\
self.target._connection_changed_update_pre(self.value)
self.assertEqual(
logging_debug.call_count, 1)
def test_connection_changed_delete(self):
with patch("logging.debug") as logging_debug:
self.value = {"action": "delete",
"prev": {"id": "slicer1->network1",
"type": "LogicAndNetwork",
"connection_type": "original",
"state": "initializing",
"logic_id": "slicer1",
"network_id": "network1"},
"curr": None}
self.value =\
ComponentConnectionChanged.create_from_packed(self.value)
self.result =\
self.target._connection_changed_update_pre(self.value)
self.assertEqual(
logging_debug.call_count, 1)
def test_add_event_subscription_network_event_type(self):
with patch("logging.debug") as logging_debug:
self.result = self.target._add_event_subscription(
"NodeChanged", "Network123")
self.assertEqual(
logging_debug.call_count, 1)
self.assertEqual(
self.target._Logic__subscription_table,
{"NodeChanged::Network123": None})
def test_add_event_subscription_packet_event_type(self):
with patch("logging.debug") as logging_debug:
self.result = self.target._add_event_subscription(
"InPacketAdded", "Network123")
self.assertEqual(
logging_debug.call_count, 1)
self.assertEqual(
self.target._Logic__subscription_table,
{"InPacketAdded::Network123": None})
def test_add_event_subscription_event_type_not_match(self):
with patch("logging.debug") as logging_debug:
self.result = self.target._add_event_subscription(
"NotType", "Network123")
self.assertEqual(
logging_debug.call_count, 0)
self.assertEqual(
self.target._Logic__subscription_table, {})
def test_add_event_subscription_event_type_None(self):
with patch("logging.debug") as logging_debug:
self.result = self.target._add_event_subscription(
None, "Network123")
self.assertEqual(
logging_debug.call_count, 0)
self.assertEqual(
self.target._Logic__subscription_table, {})
def test_add_event_subscription_network_id_None(self):
with patch("logging.debug") as logging_debug:
self.result = self.target._add_event_subscription(
"InPacketAdded", None)
self.assertEqual(
logging_debug.call_count, 0)
self.assertEqual(
self.target._Logic__subscription_table, {})
def test_add_event_subscription_event_type_network_id_None(self):
with patch("logging.debug") as logging_debug:
self.result = self.target._add_event_subscription(
None, None)
self.assertEqual(
logging_debug.call_count, 0)
self.assertEqual(
self.target._Logic__subscription_table, {})
def test_remove_event_subscription_network_event_type(self):
with patch("logging.debug") as logging_debug:
self.target._Logic__subscription_table =\
{"NodeChanged::Network123": None,
"NodeChanged::Network456": None}
self.result = self.target._remove_event_subscription(
"NodeChanged", "Network123")
self.assertEqual(
logging_debug.call_count, 1)
self.assertEqual(
self.target._Logic__subscription_table,
{"NodeChanged::Network456": None})
def test_remove_event_subscription_packet_event_type(self):
with patch("logging.debug") as logging_debug:
self.target._Logic__subscription_table =\
{"NodeChanged::Network123": None,
"OutPacketAdded::Network123": None}
self.result = self.target._remove_event_subscription(
"OutPacketAdded", "Network123")
self.assertEqual(
logging_debug.call_count, 1)
self.assertEqual(
self.target._Logic__subscription_table,
{"NodeChanged::Network123": None})
def test_remove_event_subscription_event_type_not_match(self):
with patch("logging.debug") as logging_debug:
self.target._Logic__subscription_table =\
{"NodeChanged::Network123": None,
"NotType::Network123": None}
self.result = self.target._remove_event_subscription(
"NotType", "Network123")
self.assertEqual(
logging_debug.call_count, 0)
self.assertEqual(
self.target._Logic__subscription_table,
{"NodeChanged::Network123": None,
"NotType::Network123": None})
def test_remove_event_subscription_event_type_None(self):
with patch("logging.debug") as logging_debug:
self.target._Logic__subscription_table =\
{"NodeChanged::Network123": None,
"OutPacketAdded::Network123": None}
self.result = self.target._remove_event_subscription(
None, "Network123")
self.assertEqual(
logging_debug.call_count, 0)
self.assertEqual(
self.target._Logic__subscription_table,
{"NodeChanged::Network123": None,
"OutPacketAdded::Network123": None})
    def test_remove_event_subscription_network_id_None(self):
with patch("logging.debug") as logging_debug:
self.target._Logic__subscription_table =\
{"NodeChanged::Network123": None,
"OutPacketAdded::Network123": None}
self.result = self.target._remove_event_subscription(
"NodeChanged", None)
self.assertEqual(
logging_debug.call_count, 0)
self.assertEqual(
self.target._Logic__subscription_table,
{"NodeChanged::Network123": None,
"OutPacketAdded::Network123": None})
    def test_remove_event_subscription_event_type_network_id_None(self):
with patch("logging.debug") as logging_debug:
self.target._Logic__subscription_table =\
{"NodeChanged::Network123": None,
"OutPacketAdded::Network123": None}
self.result = self.target._remove_event_subscription(
None, None)
self.assertEqual(
logging_debug.call_count, 0)
self.assertEqual(
self.target._Logic__subscription_table,
{"NodeChanged::Network123": None,
"OutPacketAdded::Network123": None})
def test_update_event_subscription_network_event_type(self):
self.result = self.target._update_event_subscription(
"NodeChanged", "Network123", ["attributes"])
self.assertEqual(
self.target._Logic__subscription_table,
{"NodeChanged::UPDATE::Network123": ["attributes"]})
def test_update_event_subscription_event_type_not_match(self):
self.result = self.target._update_event_subscription(
"NotType", "Network123", ["attributes"])
self.assertEqual(
self.target._Logic__subscription_table, {})
def test_update_event_subscription_event_type_None(self):
self.result = self.target._update_event_subscription(
None, "Network123", ["attributes"])
self.assertEqual(
self.target._Logic__subscription_table, {})
    def test_update_event_subscription_network_id_None(self):
self.result = self.target._update_event_subscription(
"NodeChanged", None, ["attributes"])
self.assertEqual(
self.target._Logic__subscription_table, {})
    def test_update_event_subscription_event_type_network_id_None(self):
        self.result = self.target._update_event_subscription(
            None, None, ["attributes"])
self.assertEqual(
self.target._Logic__subscription_table, {})
def test_do_event_nodechanged_add_action(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"node.Node."
"create_from_packed")) as (logging_debug,
logging_error,
mock_node):
self.target._on_node_added = Mock()
self.target._Logic__subscription_table =\
{"NodeChanged::publisher_id": "subscription"}
port = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node = Node("Node", "0001", "Node01",
{"port_id": port}, {"attribute_key": "value"})
self.value = {"id": "NodeId",
"version": "0001",
"action": "add",
"prev": None,
"curr": {"node": "node"}}
mock_node.return_value = node
self.result = Event("publisher_id", "NodeChanged", self.value)
self.target._do_event_nodechanged(self.result)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
logging_debug.call_count, 1)
self.target._on_node_added.assert_called_once_with(
"publisher_id", node)
def test_do_event_nodechanged_update_action(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"node.Node."
"create_from_packed")) as (logging_debug,
logging_error,
mock_node):
self.target._on_node_update = Mock()
self.target._Logic__subscription_table =\
{"NodeChanged::UPDATE::publisher_id": "subscription"}
port = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node = Node("Node", "0001", "Node01",
{"port_id": port}, {"attribute_key": "value"})
self.value = {"id": "NodeId",
"version": "0001",
"action": "update",
"prev": {"node": "node"},
"curr": {"node": "node"}}
self.result = Event("publisher_id", "NodeChanged", self.value)
mock_node.return_value = node
self.target._do_event_nodechanged(self.result)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
logging_debug.call_count, 1)
self.target._on_node_update.assert_called_once_with(
"publisher_id", node, node, "subscription")
def test_do_event_nodechanged_delete_action(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"node.Node."
"create_from_packed")) as (logging_debug,
logging_error,
mock_node):
self.target._on_node_delete = Mock()
self.target._Logic__subscription_table =\
{"NodeChanged::publisher_id": "subscription"}
port = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node = Node("Node", "0001", "Node01",
{"port_id": port}, {"attribute_key": "value"})
self.value = {"id": "NodeId",
"version": "0001",
"action": "delete",
"prev": {"node": "node"},
"curr": None}
mock_node.return_value = node
self.result = Event("publisher_id", "NodeChanged", self.value)
self.target._do_event_nodechanged(self.result)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
logging_debug.call_count, 1)
self.target._on_node_delete.assert_called_once_with(
"publisher_id", node)
def test_do_event_nodechanged_other_action(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"node.Node."
"create_from_packed")) as (logging_debug,
logging_error,
mock_node):
self.target._Logic__subscription_table =\
{"NodeChanged::publisher_id": "subscription"}
port = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node = Node("Node", "0001", "Node01",
{"port_id": port}, {"attribute_key": "value"})
self.value = {"id": "NodeId",
"version": "0001",
"action": "Other",
"prev": {"node": "node"},
"curr": None}
            mock_node.return_value = node
self.result = Event("publisher_id", "NodeChanged", self.value)
self.target._do_event_nodechanged(self.result)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
logging_debug.call_count, 2)
def test_do_event_nodechanged_key_error(self):
with nested(patch("logging.debug"),
patch("logging.error")) as (logging_debug,
logging_error):
self.target._on_node_delete = Mock()
self.target._Logic__subscription_table =\
{"NodeChanged::publisher_id": "subscription"}
self.value = {"node_id": "NodeId",
"version": "0001",
"action": "Other",
"prev": {"node": "node"},
"curr": None}
self.result = Event("publisher_id", "NodeChanged", self.value)
self.target._do_event_nodechanged(self.result)
self.assertEqual(
logging_error.call_count, 1)
self.assertEqual(
logging_debug.call_count, 0)
def test_do_event_portchanged_add_action(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"port.Port."
"create_from_packed")) as (logging_debug,
logging_error,
mock_port):
self.target._on_port_added = Mock()
self.target._Logic__subscription_table =\
{"PortChanged::publisher_id": "subscription"}
port = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
self.value = {"node_id": "NodeId",
"id": "PortId",
"version": "0001",
"action": "add",
"prev": None,
"curr": {"node": "node"}}
mock_port.return_value = port
self.result = Event("publisher_id", "PortChanged", self.value)
self.target._do_event_portchanged(self.result)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
logging_debug.call_count, 1)
self.target._on_port_added.assert_called_once_with(
"publisher_id", port)
def test_do_event_portchanged_update_action(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"port.Port."
"create_from_packed")) as (logging_debug,
logging_error,
mock_port):
self.target._on_port_update = Mock()
self.target._Logic__subscription_table =\
{"PortChanged::UPDATE::publisher_id": "subscription"}
port = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
self.value = {"node_id": "NodeId",
"id": "PortId",
"version": "0001",
"action": "update",
"prev": {},
"curr": None}
mock_port.return_value = port
self.result = Event("publisher_id", "PortChanged", self.value)
self.target._do_event_portchanged(self.result)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
logging_debug.call_count, 1)
self.target._on_port_update.assert_called_once_with(
"publisher_id", port, port, "subscription")
def test_do_event_portchanged_delete_action(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"port.Port."
"create_from_packed")) as (logging_debug,
logging_error,
mock_port):
self.target._on_port_delete = Mock()
self.target._Logic__subscription_table =\
{"PortChanged::publisher_id": "subscription"}
port = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
self.value = {"node_id": "NodeId",
"id": "PortId",
"version": "0001",
"action": "delete",
"prev": {},
"curr": None}
mock_port.return_value = port
self.result = Event("publisher_id", "PortChanged", self.value)
self.target._do_event_portchanged(self.result)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
logging_debug.call_count, 1)
self.target._on_port_delete.assert_called_once_with(
"publisher_id", port)
def test_do_event_portchanged_other_action(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"port.Port."
"create_from_packed")) as (logging_debug,
logging_error,
mock_port):
self.target._on_port_delete = Mock()
self.target._Logic__subscription_table =\
{"PortChanged::publisher_id": "subscription"}
port = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
self.value = {"node_id": "NodeId",
"id": "PortId",
"version": "0001",
"action": "other",
"prev": {},
"curr": None}
mock_port.return_value = port
self.result = Event("publisher_id", "PortChanged", self.value)
self.target._do_event_portchanged(self.result)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
logging_debug.call_count, 2)
def test_do_event_portchanged_key_error(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"port.Port."
"create_from_packed")) as (logging_debug,
logging_error,
mock_port):
self.target._on_port_delete = Mock()
self.target._Logic__subscription_table =\
{"PortChanged::publisher_id": "subscription"}
port = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
self.value = {"node_id": "NodeId",
"error_id": "PortId",
"version": "0001",
"action": "other",
"prev": {},
"curr": None}
mock_port.return_value = port
self.result = Event("publisher_id", "PortChanged", self.value)
self.target._do_event_portchanged(self.result)
self.assertEqual(
logging_error.call_count, 1)
self.assertEqual(
logging_debug.call_count, 0)
def test_do_event_linkchanged_add_action(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"link.Link."
"create_from_packed")) as (logging_debug,
logging_error,
mock_link):
self.target._on_link_added = Mock()
self.target._Logic__subscription_table =\
{"LinkChanged::publisher_id": "subscription"}
link = Link("Link", "1", "LinkId", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "PortVal"})
self.value = {"id": "PortId",
"version": "0001",
"action": "add",
"prev": None,
"curr": {"node": "node"}}
mock_link.return_value = link
self.result = Event("publisher_id", "LinkChanged", self.value)
self.target._do_event_linkchanged(self.result)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
logging_debug.call_count, 1)
self.target._on_link_added.assert_called_once_with(
"publisher_id", link)
def test_do_event_linkchanged_update_action(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"link.Link."
"create_from_packed")) as (logging_debug,
logging_error,
mock_link):
self.target._on_link_update = Mock()
self.target._Logic__subscription_table =\
{"LinkChanged::UPDATE::publisher_id": "subscription"}
link = Link("Link", "1", "LinkId", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "PortVal"})
self.value = {"id": "PortId",
"version": "0001",
"action": "update",
"prev": {"node": "node"},
"curr": {"node": "node"}}
mock_link.return_value = link
self.result = Event("publisher_id", "LinkChanged", self.value)
self.target._do_event_linkchanged(self.result)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
logging_debug.call_count, 1)
self.target._on_link_update.assert_called_once_with(
"publisher_id", link, link, "subscription")
def test_do_event_linkchanged_delete_action(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"link.Link."
"create_from_packed")) as (logging_debug,
logging_error,
mock_link):
self.target._on_link_delete = Mock()
self.target._Logic__subscription_table =\
{"LinkChanged::publisher_id": "subscription"}
link = Link("Link", "1", "LinkId", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "PortVal"})
self.value = {"id": "PortId",
"version": "0001",
"action": "delete",
"prev": {"node": "node"},
"curr": None}
mock_link.return_value = link
self.result = Event("publisher_id", "LinkChanged", self.value)
self.target._do_event_linkchanged(self.result)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
logging_debug.call_count, 1)
self.target._on_link_delete.assert_called_once_with(
"publisher_id", link)
def test_do_event_linkchanged_other_action(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"link.Link."
"create_from_packed")) as (logging_debug,
logging_error,
mock_link):
self.target._Logic__subscription_table =\
{"LinkChanged::publisher_id": "subscription"}
link = Link("Link", "1", "LinkId", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "PortVal"})
self.value = {"id": "PortId",
"version": "0001",
"action": "other",
"prev": {"node": "node"},
"curr": None}
mock_link.return_value = link
self.result = Event("publisher_id", "LinkChanged", self.value)
self.target._do_event_linkchanged(self.result)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
logging_debug.call_count, 2)
def test_do_event_linkchanged_key_error(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"link.Link."
"create_from_packed")) as (logging_debug,
logging_error,
mock_link):
self.target._Logic__subscription_table =\
{"LinkChanged::publisher_id": "subscription"}
link = Link("Link", "1", "LinkId", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "PortVal"})
self.value = {"node_id": "NodeId",
"error_id": "PortId",
"version": "0001",
"action": "other",
"prev": {"node": "node"},
"curr": None}
mock_link.return_value = link
self.result = Event("publisher_id", "LinkChanged", self.value)
self.target._do_event_linkchanged(self.result)
self.assertEqual(
logging_error.call_count, 1)
self.assertEqual(
logging_debug.call_count, 0)
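    # _do_event_flowchanged dispatch: same pattern as above, routing to the
    # _on_flow_* handlers and logging an error when the event body lacks "id".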
def test_do_event_flowchanged_add_action(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.flow."
"flow.Flow."
"create_from_packed")) as (logging_debug,
logging_error,
mock_flow):
self.target._on_flow_added = Mock()
self.target._Logic__subscription_table =\
{"FlowChanged::publisher_id": "subscription"}
flow = Flow("BasicFlow", "1", "FlowId", "Owner",
True, 123456789, "establishing",
{"PortKey": "PortVal"})
self.value = {"id": "PortId",
"version": "0001",
"action": "add",
"prev": None,
"curr": {"type": "Flow"}}
mock_flow.return_value = flow
self.result = Event("publisher_id", "FlowChanged", self.value)
self.target._do_event_flowchanged(self.result)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
logging_debug.call_count, 1)
self.target._on_flow_added.assert_called_once_with(
"publisher_id", flow)
def test_do_event_flowchanged_update_action(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.flow."
"flow.Flow."
"create_from_packed")) as (logging_debug,
logging_error,
mock_flow):
self.target._on_flow_update = Mock()
self.target._Logic__subscription_table =\
{"FlowChanged::UPDATE::publisher_id": "subscription"}
flow = Flow("BasicFlow", "1", "FlowId", "Owner",
True, 123456789, "establishing",
{"PortKey": "PortVal"})
self.value = {"id": "PortId",
"version": "0001",
"action": "update",
"prev": {"type": "Flow"},
"curr": {"type": "Flow"}}
mock_flow.return_value = flow
self.result = Event("publisher_id", "FlowChanged", self.value)
self.target._do_event_flowchanged(self.result)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
logging_debug.call_count, 1)
self.target._on_flow_update.assert_called_once_with(
"publisher_id", flow, flow, "subscription")
def test_do_event_flowchanged_delete_action(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.flow."
"flow.Flow."
"create_from_packed")) as (logging_debug,
logging_error,
mock_flow):
self.target._on_flow_delete = Mock()
self.target._Logic__subscription_table =\
{"FlowChanged::publisher_id": "subscription"}
flow = Flow("BasicFlow", "1", "FlowId", "Owner",
True, 123456789, "establishing",
{"PortKey": "PortVal"})
self.value = {"id": "PortId",
"version": "0001",
"action": "delete",
"prev": {"type": "Flow"},
"curr": None}
mock_flow.return_value = flow
self.result = Event("publisher_id", "FlowChanged", self.value)
self.target._do_event_flowchanged(self.result)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
logging_debug.call_count, 1)
self.target._on_flow_delete.assert_called_once_with(
"publisher_id", flow)
def test_do_event_flowchanged_other_action(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.flow."
"flow.Flow."
"create_from_packed")) as (logging_debug,
logging_error,
mock_flow):
self.target._Logic__subscription_table =\
{"FlowChanged::publisher_id": "subscription"}
flow = Flow("BasicFlow", "1", "FlowId", "Owner",
True, 123456789, "establishing",
{"PortKey": "PortVal"})
self.value = {"id": "PortId",
"version": "0001",
"action": "other",
"prev": None,
"curr": {"type": "Flow"}}
mock_flow.return_value = flow
self.result = Event("publisher_id", "FlowChanged", self.value)
self.target._do_event_flowchanged(self.result)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
logging_debug.call_count, 2)
def test_do_event_flowchanged_key_error(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.flow."
"flow.Flow."
"create_from_packed")) as (logging_debug,
logging_error,
mock_flow):
self.target._Logic__subscription_table =\
{"FlowChanged::publisher_id": "subscription"}
flow = Flow("BasicFlow", "1", "FlowId", "Owner",
True, 123456789, "establishing",
{"PortKey": "PortVal"})
self.value = {"error_id": "PortId",
"version": "0001",
"action": "add",
"prev": None,
"curr": {"type": "Flow"}}
mock_flow.return_value = flow
self.result = Event("publisher_id", "FlowChanged", self.value)
self.target._do_event_flowchanged(self.result)
self.assertEqual(
logging_error.call_count, 1)
self.assertEqual(
logging_debug.call_count, 0)
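    # _do_event_inpacketadded: on success the pre-hook, _add_in_packet_conversion
    # and post-hook are expected to run in order with the unpacked InPacketAdded;
    # a body without an "id" key should only log an error.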
def test_do_event_inpacketadded_success(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"in_packet_added.InPacketAdded."
"create_from_packed")) as (logging_debug,
logging_error,
mock_in_packet_added):
mock_in_packet_added.return_value = "Dummy"
self.target._on_in_packet_added_pre = Mock(
return_value=True)
self.target._add_in_packet_conversion = Mock(
return_value="resp_list")
self.target._on_in_packet_added_post = Mock()
self.value = {"id": "InPacketAdded",
"version": "0001"}
self.result = Event("publisher_id", "InPacketAdded", self.value)
self.target._do_event_inpacketadded(self.result)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
logging_debug.call_count, 1)
self.target._on_in_packet_added_pre.assert_called_once_with(
"publisher_id", "Dummy")
self.target._add_in_packet_conversion.assert_called_once_with(
"publisher_id", "Dummy")
self.target._on_in_packet_added_post.assert_called_once_with(
"publisher_id", "Dummy", "resp_list")
def test_do_event_inpacketadded_error(self):
with nested(patch("logging.debug"),
patch("logging.error")) as (logging_debug,
logging_error):
self.value = {"error_id": "InPacketAdded",
"version": "0001"}
self.result = Event("publisher_id", "InPacketAdded", self.value)
self.target._do_event_inpacketadded(self.result)
self.assertEqual(
logging_error.call_count, 1)
self.assertEqual(
logging_debug.call_count, 0)
def test_on_in_packet_added_pre(self):
with patch("logging.debug") as logging_debug:
self.target._on_in_packet_added_pre("network_id", "msg")
self.assertEqual(
logging_debug.call_count, 1)
def test_on_in_packet_added_post(self):
with patch("logging.debug") as logging_debug:
self.target._on_in_packet_added_post("network_id", "msg",
"resp_list")
self.assertEqual(
logging_debug.call_count, 1)
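    # _do_event_outpacketadded: mirrors the InPacketAdded tests above for
    # OutPacketAdded events.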
def test_do_event_outpacketadded_success(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"out_packet_added.OutPacketAdded."
"create_from_packed")) as (logging_debug,
logging_error,
mock_out_packet_added):
mock_out_packet_added.return_value = "Dummy"
self.target._on_out_packet_added_pre = Mock(
return_value=True)
self.target._add_out_packet_conversion = Mock(
return_value="resp_list")
self.target._on_out_packet_added_post = Mock()
self.value = {"id": "OutPacketAdded",
"version": "0001"}
self.result = Event("publisher_id", "OutPacketAdded", self.value)
self.target._do_event_outpacketadded(self.result)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
logging_debug.call_count, 1)
self.target._on_out_packet_added_pre.assert_called_once_with(
"publisher_id", "Dummy")
self.target._add_out_packet_conversion.assert_called_once_with(
"publisher_id", "Dummy")
self.target._on_out_packet_added_post.assert_called_once_with(
"publisher_id", "Dummy", "resp_list")
def test_do_event_outpacketadded_error(self):
with nested(patch("logging.debug"),
patch("logging.error")) as (logging_debug,
logging_error):
self.value = {"error_id": "OutPacketAdded",
"version": "0001"}
self.result = Event("publisher_id", "FlowChanged", self.value)
self.target._do_event_outpacketadded(self.result)
self.assertEqual(
logging_error.call_count, 1)
self.assertEqual(
logging_debug.call_count, 0)
def test_on_out_packet_added_pre(self):
with patch("logging.debug") as logging_debug:
self.target._on_out_packet_added_pre("network_id", "msg")
self.assertEqual(
logging_debug.call_count, 1)
def test_on_out_packet_added_post(self):
with patch("logging.debug") as logging_debug:
self.target._on_out_packet_added_post("network_id", "msg",
"resp_list")
self.assertEqual(
logging_debug.call_count, 1)
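    # _on_<node|port|link|flow>_added: each handler should call its pre-hook,
    # the matching _add_*_conversion and its post-hook with the conversion
    # result; the default pre/post hooks simply log a debug message.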
def test_on_node_added(self):
self.target._on_node_added_pre = Mock(
return_value=True)
self.target._add_node_conversion = Mock(
return_value="resp_list")
self.target._on_node_added_post = Mock()
self.target._on_node_added("network_id", "node_msg")
self.target._on_node_added_pre.assert_called_once_with(
"network_id", "node_msg")
self.target._add_node_conversion.assert_called_once_with(
"network_id", "node_msg")
self.target._on_node_added_post.assert_called_once_with(
"network_id", "node_msg", "resp_list")
def test_on_node_added_pre(self):
with patch("logging.debug") as logging_debug:
self.result = self.target._on_node_added_pre("network_id",
"node_msg")
            self.assertEqual(self.result, True)
            self.assertEqual(
                logging_debug.call_count, 1)
def test_on_node_added_post(self):
with patch("logging.debug") as logging_debug:
self.target._on_node_added_post("network_id", "msg", "resp_list")
self.assertEqual(
logging_debug.call_count, 1)
def test_on_port_added(self):
self.target._on_port_added_pre = Mock(
return_value=True)
self.target._add_port_conversion = Mock(
return_value="resp_list")
self.target._on_port_added_post = Mock()
self.target._on_port_added("network_id", "port_msg")
self.target._on_port_added_pre.assert_called_once_with(
"network_id", "port_msg")
self.target._add_port_conversion.assert_called_once_with(
"network_id", "port_msg")
self.target._on_port_added_post.assert_called_once_with(
"network_id", "port_msg", "resp_list")
def test_on_port_added_pre(self):
with patch("logging.debug") as logging_debug:
self.result = self.target._on_port_added_pre("network_id",
"port_msg")
self.assertEqual(self.result, True)
self.assertEqual(
logging_debug.call_count, 1)
def test_on_port_added_post(self):
with patch("logging.debug") as logging_debug:
self.target._on_port_added_post("network_id", "msg", "resp_list")
self.assertEqual(
logging_debug.call_count, 1)
def test_on_link_added(self):
self.target._on_link_added_pre = Mock(
return_value=True)
self.target._add_link_conversion = Mock(
return_value="resp_list")
self.target._on_link_added_post = Mock()
self.target._on_link_added("network_id", "link_msg")
self.target._on_link_added_pre.assert_called_once_with(
"network_id", "link_msg")
self.target._add_link_conversion.assert_called_once_with(
"network_id", "link_msg")
self.target._on_link_added_post.assert_called_once_with(
"network_id", "link_msg", "resp_list")
def test_on_link_added_pre(self):
with patch("logging.debug") as logging_debug:
self.result = self.target._on_link_added_pre("network_id",
"link_msg")
self.assertEqual(self.result, True)
self.assertEqual(
logging_debug.call_count, 1)
def test_on_link_added_post(self):
with patch("logging.debug") as logging_debug:
self.target._on_link_added_post("network_id", "msg", "resp_list")
self.assertEqual(
logging_debug.call_count, 1)
def test_on_flow_added(self):
self.target._on_flow_added_pre = Mock(
return_value=True)
self.target._add_flow_conversion = Mock(
return_value="resp_list")
self.target._on_flow_added_post = Mock()
self.target._on_flow_added("network_id", "flow_msg")
self.target._on_flow_added_pre.assert_called_once_with(
"network_id", "flow_msg")
self.target._add_flow_conversion.assert_called_once_with(
"network_id", "flow_msg")
self.target._on_flow_added_post.assert_called_once_with(
"network_id", "flow_msg", "resp_list")
def test_on_flow_added_pre(self):
with patch("logging.debug") as logging_debug:
self.result = self.target._on_flow_added_pre("network_id",
"flow_msg")
self.assertEqual(self.result, True)
self.assertEqual(
logging_debug.call_count, 1)
def test_on_flow_added_post(self):
with patch("logging.debug") as logging_debug:
self.target._on_flow_added_post("network_id", "msg", "resp_list")
self.assertEqual(
logging_debug.call_count, 1)
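    # _on_<node|port|link|flow>_update: same pre / conversion / post pattern,
    # forwarding prev, curr and attributes unchanged.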
def test_on_node_update(self):
self.target._on_node_update_pre = Mock(
return_value=True)
self.target._update_node_conversion = Mock(
return_value="resp_list")
self.target._on_node_update_post = Mock()
self.target._on_node_update("network_id", "prev", "curr", "sttributes")
self.target._on_node_update_pre.assert_called_once_with(
"network_id", "prev", "curr", "sttributes")
self.target._update_node_conversion.assert_called_once_with(
"network_id", "prev", "curr", "sttributes")
self.target._on_node_update_post.assert_called_once_with(
"network_id", "prev", "curr", "sttributes", "resp_list")
def test_on_node_update_pre(self):
with patch("logging.debug") as logging_debug:
self.result = self.target._on_node_update_pre("network_id",
"prev",
"curr",
"sttributes")
self.assertEqual(self.result, True)
self.assertEqual(
logging_debug.call_count, 1)
def test_on_node_update_post(self):
with patch("logging.debug") as logging_debug:
self.target._on_node_update_post("network_id",
"prev",
"curr",
"sttributes",
"resp_list")
self.assertEqual(
logging_debug.call_count, 1)
def test_on_port_update(self):
self.target._on_port_update_pre = Mock(
return_value=True)
self.target._update_port_conversion = Mock(
return_value="resp_list")
self.target._on_port_update_post = Mock()
self.target._on_port_update("network_id", "prev", "curr", "sttributes")
self.target._on_port_update_pre.assert_called_once_with(
"network_id", "prev", "curr", "sttributes")
self.target._update_port_conversion.assert_called_once_with(
"network_id", "prev", "curr", "sttributes")
self.target._on_port_update_post.assert_called_once_with(
"network_id", "prev", "curr", "sttributes", "resp_list")
def test_on_port_update_pre(self):
with patch("logging.debug") as logging_debug:
self.result = self.target._on_port_update_pre("network_id",
"prev",
"curr",
"sttributes")
self.assertEqual(self.result, True)
self.assertEqual(
logging_debug.call_count, 1)
def test_on_port_update_post(self):
with patch("logging.debug") as logging_debug:
self.target._on_port_update_post("network_id",
"prev",
"curr",
"sttributes",
"resp_list")
self.assertEqual(
logging_debug.call_count, 1)
def test_on_link_update(self):
self.target._on_link_update_pre = Mock(
return_value=True)
self.target._update_link_conversion = Mock(
return_value="resp_list")
self.target._on_link_update_post = Mock()
self.target._on_link_update("network_id", "prev", "curr", "sttributes")
self.target._on_link_update_pre.assert_called_once_with(
"network_id", "prev", "curr", "sttributes")
self.target._update_link_conversion.assert_called_once_with(
"network_id", "prev", "curr", "sttributes")
self.target._on_link_update_post.assert_called_once_with(
"network_id", "prev", "curr", "sttributes", "resp_list")
def test_on_link_update_pre(self):
with patch("logging.debug") as logging_debug:
self.result = self.target._on_link_update_pre("network_id",
"prev",
"curr",
"sttributes")
self.assertEqual(self.result, True)
self.assertEqual(
logging_debug.call_count, 1)
def test_on_link_update_post(self):
with patch("logging.debug") as logging_debug:
self.target._on_link_update_post("network_id",
"prev",
"curr",
"sttributes",
"resp_list")
self.assertEqual(
logging_debug.call_count, 1)
def test_on_flow_update(self):
self.target._on_flow_update_pre = Mock(
return_value=True)
self.target._update_flow_conversion = Mock(
return_value="resp_list")
self.target._on_flow_update_post = Mock()
self.target._on_flow_update("network_id", "prev", "curr", "sttributes")
self.target._on_flow_update_pre.assert_called_once_with(
"network_id", "prev", "curr", "sttributes")
self.target._update_flow_conversion.assert_called_once_with(
"network_id", "prev", "curr", "sttributes")
self.target._on_flow_update_post.assert_called_once_with(
"network_id", "prev", "curr", "sttributes", "resp_list")
def test_on_flow_update_pre(self):
with patch("logging.debug") as logging_debug:
self.result = self.target._on_flow_update_pre("network_id",
"prev",
"curr",
"sttributes")
self.assertEqual(self.result, True)
self.assertEqual(
logging_debug.call_count, 1)
def test_on_flow_update_post(self):
with patch("logging.debug") as logging_debug:
self.target._on_flow_update_post("network_id",
"prev",
"curr",
"sttributes",
"resp_list")
self.assertEqual(
logging_debug.call_count, 1)
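    # _on_<node|port|link|flow>_delete: same pre / conversion / post pattern
    # for delete notifications.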
def test_on_node_delete(self):
self.target._on_node_delete_pre = Mock(
return_value=True)
self.target._delete_node_conversion = Mock(
return_value="resp_list")
self.target._on_node_delete_post = Mock()
self.target._on_node_delete("network_id", "msg")
self.target._on_node_delete_pre.assert_called_once_with(
"network_id", "msg")
self.target._delete_node_conversion.assert_called_once_with(
"network_id", "msg")
self.target._on_node_delete_post.assert_called_once_with(
"network_id", "msg", "resp_list")
def test_on_node_delete_pre(self):
with patch("logging.debug") as logging_debug:
self.result = self.target._on_node_delete_pre("network_id",
"msg")
self.assertEqual(self.result, True)
self.assertEqual(
logging_debug.call_count, 1)
def test_on_node_delete_post(self):
with patch("logging.debug") as logging_debug:
self.target._on_node_delete_post("network_id",
"msg",
"resp_list")
self.assertEqual(
logging_debug.call_count, 1)
def test_on_port_delete(self):
self.target._on_port_delete_pre = Mock(
return_value=True)
self.target._delete_port_conversion = Mock(
return_value="resp_list")
self.target._on_port_delete_post = Mock()
self.target._on_port_delete("network_id", "msg")
self.target._on_port_delete_pre.assert_called_once_with(
"network_id", "msg")
self.target._delete_port_conversion.assert_called_once_with(
"network_id", "msg")
self.target._on_port_delete_post.assert_called_once_with(
"network_id", "msg", "resp_list")
def test_on_port_delete_pre(self):
with patch("logging.debug") as logging_debug:
self.result = self.target._on_port_delete_pre("network_id",
"msg")
self.assertEqual(self.result, True)
self.assertEqual(
logging_debug.call_count, 1)
def test_on_port_delete_post(self):
with patch("logging.debug") as logging_debug:
self.target._on_port_delete_post("network_id",
"msg",
"resp_list")
self.assertEqual(
logging_debug.call_count, 1)
def test_on_link_delete(self):
self.target._on_link_delete_pre = Mock(
return_value=True)
self.target._delete_link_conversion = Mock(
return_value="resp_list")
self.target._on_link_delete_post = Mock()
self.target._on_link_delete("network_id", "msg")
self.target._on_link_delete_pre.assert_called_once_with(
"network_id", "msg")
self.target._delete_link_conversion.assert_called_once_with(
"network_id", "msg")
self.target._on_link_delete_post.assert_called_once_with(
"network_id", "msg", "resp_list")
def test_on_link_delete_pre(self):
with patch("logging.debug") as logging_debug:
self.result = self.target._on_link_delete_pre("network_id",
"msg")
self.assertEqual(self.result, True)
self.assertEqual(
logging_debug.call_count, 1)
def test_on_link_delete_post(self):
with patch("logging.debug") as logging_debug:
self.target._on_link_delete_post("network_id",
"msg",
"resp_list")
self.assertEqual(
logging_debug.call_count, 1)
def test_on_flow_delete(self):
self.target._on_flow_delete_pre = Mock(
return_value=True)
self.target._delete_flow_conversion = Mock(
return_value="resp_list")
self.target._on_flow_delete_post = Mock()
self.target._on_flow_delete("network_id", "msg")
self.target._on_flow_delete_pre.assert_called_once_with(
"network_id", "msg")
self.target._delete_flow_conversion.assert_called_once_with(
"network_id", "msg")
self.target._on_flow_delete_post.assert_called_once_with(
"network_id", "msg", "resp_list")
def test_on_flow_delete_pre(self):
with patch("logging.debug") as logging_debug:
self.result = self.target._on_flow_delete_pre("network_id",
"msg")
self.assertEqual(self.result, True)
self.assertEqual(
logging_debug.call_count, 1)
def test_on_flow_delete_post(self):
with patch("logging.debug") as logging_debug:
self.target._on_flow_delete_post("network_id",
"msg",
"resp_list")
self.assertEqual(
logging_debug.call_count, 1)
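    # _add_node_conversion: a node is PUT to every network paired with the
    # source in the conversion table and both directions are recorded in the
    # node conversion table; a missing network interface yields an empty
    # result, and the error case expects one logging.error call with no entry.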
def test_add_node_conversion_success(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"node.Node."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_node,
mock_put_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
port = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node = Node("Node", "0001", "Node01",
{"port_id": port}, {"attribute_key": "value"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "node_item")
mock_node.return_value = node
mock_put_object.return_value = self.value
self.result = self.target._add_node_conversion("network1",
node)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result["network2"].body, "node_item")
self.assertEqual(
conversion_table._ConversionTable__node_conversion_table,
{"network1::Node01": ["network2::Node01"],
"network2::Node01": ["network1::Node01"]})
    def test_add_node_conversion_not_in_network_interfaces(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"node.Node."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_node,
mock_put_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
port = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node = Node("Node", "0001", "Node01",
{"port_id": port}, {"attribute_key": "value"})
self.target._network_interfaces["network1"] = \
NetworkInterface(self.target.dispatcher, "network1")
self.value = Response(200, "node_item")
mock_node.return_value = node
mock_put_object.return_value = self.value
self.result = self.target._add_node_conversion("network1",
node)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
self.assertEqual(
conversion_table._ConversionTable__node_conversion_table,
{})
def test_add_node_conversion_error(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_put_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
port = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node = Node("Node", "0001", "Node01",
{"port_id": port}, {"attribute_key": "value"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, {"key": "error"})
mock_put_object.return_value = self.value
self.result = self.target._add_node_conversion("network1",
node)
self.assertEqual(
logging_error.call_count, 1)
self.assertEqual(
self.result["network2"].body, {"key": "error"})
self.assertEqual(
conversion_table._ConversionTable__node_conversion_table,
{})
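    # _add_port_conversion: same PUT / conversion-table pattern as the node
    # tests, keyed by "<network>::<node>::<port>".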
def test_add_port_conversion_success(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"port.Port."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_port,
mock_put_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
port = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "port_item")
mock_port.return_value = port
mock_put_object.return_value = self.value
self.result = self.target._add_port_conversion("network1",
port)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result["network2"].body, "port_item")
self.assertEqual(
conversion_table._ConversionTable__port_conversion_table,
{"network1::NodeId::PortId": ["network2::NodeId::PortId"],
"network2::NodeId::PortId": ["network1::NodeId::PortId"]})
def test_add_port_conversion_not_in_network_interfaces(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"port.Port."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_port,
mock_put_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
port = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
self.target._network_interfaces["network1"] = \
NetworkInterface(self.target.dispatcher, "network1")
self.value = Response(200, "port_item")
mock_port.return_value = port
mock_put_object.return_value = self.value
self.result = self.target._add_port_conversion("network1",
port)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
self.assertEqual(
conversion_table._ConversionTable__port_conversion_table,
{})
def test_add_port_conversion_error(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_put_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
port = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, {"key": "error"})
mock_put_object.return_value = self.value
self.result = self.target._add_port_conversion("network1",
port)
self.assertEqual(
logging_error.call_count, 1)
self.assertEqual(
self.result["network2"].body, {"key": "error"})
self.assertEqual(
conversion_table._ConversionTable__port_conversion_table,
{})
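    # _add_link_conversion: same pattern, keyed by "<network>::<link>".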
def test_add_link_conversion_success(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"link.Link."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_link,
mock_put_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
link = Link("Link", "1", "LinkId", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "PortVal"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "link_item")
mock_link.return_value = link
mock_put_object.return_value = self.value
self.result = self.target._add_link_conversion("network1",
link)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result["network2"].body, "link_item")
self.assertEqual(
conversion_table._ConversionTable__link_conversion_table,
{"network1::LinkId": ["network2::LinkId"],
"network2::LinkId": ["network1::LinkId"]})
def test_add_link_conversion_not_in_network_interfaces(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"link.Link."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_link,
mock_put_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
link = Link("Link", "1", "LinkId", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "PortVal"})
self.target._network_interfaces["network1"] = \
NetworkInterface(self.target.dispatcher, "network1")
self.value = Response(200, "link_item")
mock_link.return_value = link
mock_put_object.return_value = self.value
self.result = self.target._add_link_conversion("network1",
link)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
self.assertEqual(
conversion_table._ConversionTable__link_conversion_table,
{})
def test_add_link_conversion_error(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_put_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
link = Link("Link", "1", "LinkId", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "PortVal"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, {"key": "error"})
mock_put_object.return_value = self.value
self.result = self.target._add_link_conversion("network1",
link)
self.assertEqual(
logging_error.call_count, 1)
self.assertEqual(
self.result["network2"].body, {"key": "error"})
self.assertEqual(
conversion_table._ConversionTable__link_conversion_table,
{})
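    # _add_flow_conversion: error, success and missing-interface cases for
    # flow registration; only the error case is expected to log.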
def test_add_flow_conversion_error(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_put_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
flow = Flow("BasicFlow", "1", "FlowId", "Owner",
True, 123456789, "establishing",
{"PortKey": "PortVal"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, {"key": "error"})
mock_put_object.return_value = self.value
self.result = self.target._add_flow_conversion("network1", flow)
self.assertEqual(logging_error.call_count, 1)
self.assertEqual(self.result["network2"].body, {"key": "error"})
self.assertEqual(conversion_table._ConversionTable__flow_conversion_table, {})
def test_add_flow_conversion_success(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.flow."
"flow.Flow."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_flow,
mock_put_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
flow = Flow("BasicFlow", "1", "FlowId", "Owner",
True, 123456789, "establishing",
{"PortKey": "PortVal"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "flow_item")
mock_flow.return_value = flow
mock_put_object.return_value = self.value
self.result = self.target._add_flow_conversion("network1", flow)
self.assertEqual(logging_error.call_count, 0)
self.assertEqual(self.result["network2"].body, "flow_item")
def test_add_flow_conversion_not_in_network_interfaces(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_put_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
flow = Flow("BasicFlow", "1", "FlowId", "Owner",
True, 123456789, "establishing",
{"PortKey": "PortVal"})
self.target._network_interfaces["network1"] = \
NetworkInterface(self.target.dispatcher, "network1")
self.value = Response(200, "flow_item")
mock_put_object.return_value = self.value
self.result = self.target._add_flow_conversion("network1", flow)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
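    # _add_in_packet_conversion: the packet is expected to be removed from the
    # source network and re-posted to the paired network once its node and port
    # resolve through the conversion tables; each case below drops one
    # prerequisite and expects an empty result (or a logged error).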
def test_add_in_packet_conversion_success(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"in_packet.InPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_post_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_in_packet,
mock_del_object,
mock_post_object):
conversion_table._ConversionTable__node_conversion_table =\
{"network1::NodeId": ["network2::NodeId"],
"network2::NodeId": ["network1::NodeId"]}
conversion_table._ConversionTable__port_conversion_table =\
{"network1::NodeId::PortId": ["network2::NodeId::PortId"],
"network2::NodeId::PortId": ["network1::NodeId::PortId"]}
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
inpacket = InPacket("inpacket_id", "InPcket", "attributes",
"NodeId", "PortId", basic_flow_match,
"Data")
inpacket_add = InPacketAdded("inpacket_id")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.target._network_interfaces["network2"] =\
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "inpacket_item")
mock_del_object.return_value = self.value
mock_in_packet.return_value = inpacket
mock_post_object.return_value = self.value
self.result = self.target._add_in_packet_conversion("network1",
inpacket_add)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result["network2"].body, "inpacket_item")
    def test_add_in_packet_conversion_not_in_network_interfaces(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"in_packet.InPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_post_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_in_packet,
mock_del_object,
mock_post_object):
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
inpacket = InPacket("inpacket_id", "InPcket", "attributes",
"NodeId", "PortId", basic_flow_match,
"Data")
inpacket_add = InPacketAdded("inpacket_id")
self.value = Response(200, "inpacket_item")
mock_del_object.return_value = self.value
mock_in_packet.return_value = inpacket
mock_post_object.return_value = self.value
self.result = self.target._add_in_packet_conversion("network1",
inpacket_add)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_add_in_packet_conversion_del_in_packet_None(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"in_packet.InPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_post_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_in_packet,
mock_del_object,
mock_post_object):
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
inpacket = InPacket("inpacket_id", "InPcket", "attributes",
"NodeId", "PortId", basic_flow_match,
"Data")
inpacket_add = InPacketAdded("inpacket_id")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.target._network_interfaces["network2"] =\
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "inpacket_item")
mock_del_object.return_value = self.value
mock_in_packet.return_value = None
mock_post_object.return_value = self.value
self.result = self.target._add_in_packet_conversion("network1",
inpacket_add)
self.assertEqual(
logging_error.call_count, 1)
self.assertEqual(
self.result, {})
def test_add_in_packet_conversion_del_in_packet_node_None(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"in_packet.InPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_post_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_in_packet,
mock_del_object,
mock_post_object):
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
inpacket = InPacket("inpacket_id", "InPcket", "attributes",
None, "PortId", basic_flow_match,
"Data")
inpacket_add = InPacketAdded("inpacket_id")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.target._network_interfaces["network2"] =\
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "inpacket_item")
mock_del_object.return_value = self.value
mock_in_packet.return_value = inpacket
mock_post_object.return_value = self.value
self.result = self.target._add_in_packet_conversion("network1",
inpacket_add)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_add_in_packet_conversion_convert_in_node_id_list_None(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"in_packet.InPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_post_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_in_packet,
mock_del_object,
mock_post_object):
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
inpacket = InPacket("inpacket_id", "InPcket", "attributes",
"NodeId", "PortId", basic_flow_match,
"Data")
inpacket_add = InPacketAdded("inpacket_id")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.target._network_interfaces["network2"] =\
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "inpacket_item")
mock_del_object.return_value = self.value
mock_in_packet.return_value = inpacket
mock_post_object.return_value = self.value
self.result = self.target._add_in_packet_conversion("network1",
inpacket_add)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
    def test_add_in_packet_conversion_del_in_packet_port_None(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"in_packet.InPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_post_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_in_packet,
mock_del_object,
mock_post_object):
conversion_table._ConversionTable__node_conversion_table =\
{"network1::NodeId": ["network2::NodeId"],
"network2::NodeId": ["network1::NodeId"]}
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
inpacket = InPacket("inpacket_id", "InPcket", "attributes",
"NodeId", None, basic_flow_match,
"Data")
inpacket_add = InPacketAdded("inpacket_id")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.target._network_interfaces["network2"] =\
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "inpacket_item")
mock_del_object.return_value = self.value
mock_in_packet.return_value = inpacket
mock_post_object.return_value = self.value
self.result = self.target._add_in_packet_conversion("network1",
inpacket_add)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_add_in_packet_conversion_convert_in_port_id_list_None(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"in_packet.InPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_post_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_in_packet,
mock_del_object,
mock_post_object):
conversion_table._ConversionTable__node_conversion_table =\
{"network1::NodeId": ["network2::NodeId"],
"network2::NodeId": ["network1::NodeId"]}
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
inpacket = InPacket("inpacket_id", "InPcket", "attributes",
"NodeId", "PortId", basic_flow_match,
"Data")
inpacket_add = InPacketAdded("inpacket_id")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.target._network_interfaces["network2"] =\
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "inpacket_item")
mock_del_object.return_value = self.value
mock_in_packet.return_value = inpacket
mock_post_object.return_value = self.value
self.result = self.target._add_in_packet_conversion("network1",
inpacket_add)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_add_in_packet_conversion_header_in_port_id_list_None(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"in_packet.InPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_post_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_in_packet,
mock_del_object,
mock_post_object):
conversion_table._ConversionTable__node_conversion_table =\
{"network1::NodeId": ["network2::NodeId"],
"network2::NodeId": ["network1::NodeId"]}
conversion_table._ConversionTable__port_conversion_table =\
{"network1::NodeId::PortId": ["network2::NodeId::PortId"],
"network2::NodeId::PortId": ["network1::NodeId::PortId"]}
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "Node01",
"Port01")
inpacket = InPacket("inpacket_id", "InPcket", "attributes",
"NodeId", "PortId", basic_flow_match,
"Data")
inpacket_add = InPacketAdded("inpacket_id")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.target._network_interfaces["network2"] =\
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "inpacket_item")
mock_del_object.return_value = self.value
mock_in_packet.return_value = inpacket
mock_post_object.return_value = self.value
self.result = self.target._add_in_packet_conversion("network1",
inpacket_add)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_add_in_packet_conversion_attr_list_not_in_interfaces(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"in_packet.InPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_post_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_in_packet,
mock_del_object,
mock_post_object):
conversion_table._ConversionTable__node_conversion_table =\
{"network1::NodeId": ["network2::NodeId"],
"network2::NodeId": ["network1::NodeId"]}
conversion_table._ConversionTable__port_conversion_table =\
{"network1::NodeId::PortId": ["network2::NodeId::PortId"],
"network2::NodeId::PortId": ["network1::NodeId::PortId"]}
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
inpacket = InPacket("inpacket_id", "InPcket", "attributes",
"NodeId", "PortId", basic_flow_match,
"Data")
inpacket_add = InPacketAdded("inpacket_id")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.value = Response(200, "inpacket_item")
mock_del_object.return_value = self.value
mock_in_packet.return_value = inpacket
mock_post_object.return_value = self.value
self.result = self.target._add_in_packet_conversion("network1",
inpacket_add)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
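    # _add_out_packet_conversion: like the in-packet tests, but the outgoing
    # ports (or ports-except list) must also be translated before re-posting;
    # the remaining cases cover missing interfaces, nodes, ports and
    # conversion entries.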
def test_add_out_packet_conversion_ports_success(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"out_packet.OutPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_post_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_out_packet,
mock_del_object,
mock_post_object):
conversion_table._ConversionTable__node_conversion_table =\
{"network1::NodeId": ["network2::NodeId"],
"network2::NodeId": ["network1::NodeId"]}
conversion_table._ConversionTable__port_conversion_table =\
{"network1::NodeId::PortId": ["network2::NodeId::PortId"],
"network2::NodeId::PortId": ["network1::NodeId::PortId"]}
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
outpacket = OutPacket("outpacket_id", "OutPcket", "attributes",
"NodeId", ["PortId"], None,
basic_flow_match, "Data")
outpacket_add = OutPacketAdded("outpacket_id")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.target._network_interfaces["network2"] =\
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "outpacket_item")
mock_del_object.return_value = self.value
mock_out_packet.return_value = outpacket
mock_post_object.return_value = self.value
self.result = self.target._add_out_packet_conversion("network1",
outpacket_add)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result["network2"].body, "outpacket_item")
def test_add_out_packet_conversion_portsEx_success(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"out_packet.OutPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_post_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_out_packet,
mock_del_object,
mock_post_object):
conversion_table._ConversionTable__node_conversion_table =\
{"network1::PortExId": ["network2::PortExId"],
"network1::NodeId": ["network2::NodeId"]}
conversion_table._ConversionTable__port_conversion_table =\
{"network1::NodeId::PortExId": ["network2::NodeId::PortExId"],
"network1::NodeId::PortId": ["network2::NodeId::PortId"]}
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
outpacket = OutPacket("outpacket_id", "OutPcket", "attributes",
"NodeId", None, ["PortExId"],
basic_flow_match, "Data")
outpacket_add = OutPacketAdded("outpacket_id")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.target._network_interfaces["network2"] =\
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "outpacket_item")
mock_del_object.return_value = self.value
mock_out_packet.return_value = outpacket
mock_post_object.return_value = self.value
self.result = self.target._add_out_packet_conversion("network1",
outpacket_add)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result["network2"].body, "outpacket_item")
def test_add_out_packet_conversion_network_id_not_in_interfaces(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"out_packet.OutPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_post_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_out_packet,
mock_del_object,
mock_post_object):
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
outpacket = OutPacket("outpacket_id", "OutPcket", "attributes",
"NodeId", ["PortId"], ["Ports_ex"],
basic_flow_match, "Data")
outpacket_add = OutPacketAdded("outpacket_id")
self.value = Response(200, "outpacket_item")
mock_del_object.return_value = self.value
mock_out_packet.return_value = outpacket
mock_post_object.return_value = self.value
self.result = self.target._add_out_packet_conversion("network1",
outpacket_add)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_add_out_packet_conversion_del_out_packet_None(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"out_packet.OutPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_post_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_out_packet,
mock_del_object,
mock_post_object):
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
outpacket = OutPacket("outpacket_id", "OutPcket", "attributes",
"NodeId", ["PortId"], ["Ports_ex"],
basic_flow_match, "Data")
outpacket_add = OutPacketAdded("outpacket_id")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.target._network_interfaces["network2"] =\
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "outpacket_item")
mock_del_object.return_value = self.value
mock_out_packet.return_value = None
mock_post_object.return_value = self.value
self.result = self.target._add_out_packet_conversion("network1",
outpacket_add)
self.assertEqual(
logging_error.call_count, 1)
self.assertEqual(
self.result, {})
def test_add_out_packet_conversion_convert_port_id_list_zero(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"out_packet.OutPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_post_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_out_packet,
mock_del_object,
mock_post_object):
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
outpacket = OutPacket("outpacket_id", "OutPcket", "attributes",
"NodeId", ["PortId"], ["Ports_ex"],
basic_flow_match, "Data")
outpacket_add = OutPacketAdded("outpacket_id")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.target._network_interfaces["network2"] =\
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "outpacket_item")
mock_del_object.return_value = self.value
mock_out_packet.return_value = outpacket
mock_post_object.return_value = self.value
self.result = self.target._add_out_packet_conversion("network1",
outpacket_add)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_add_out_packet_conversion_del_out_packet_node_None(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"out_packet.OutPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_post_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_out_packet,
mock_del_object,
mock_post_object):
conversion_table._ConversionTable__port_conversion_table =\
{"network1::NodeId::PortId": ["network2::NodeId::PortId"],
"network2::NodeId::PortId": ["network1::NodeId::PortId"]}
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
outpacket = OutPacket("outpacket_id", "OutPcket", "attributes",
None, ["PortId"], ["Ports_ex"],
basic_flow_match, "Data")
outpacket_add = OutPacketAdded("outpacket_id")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.target._network_interfaces["network2"] =\
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "outpacket_item")
mock_del_object.return_value = self.value
mock_out_packet.return_value = outpacket
mock_post_object.return_value = self.value
self.result = self.target._add_out_packet_conversion("network1",
outpacket_add)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_add_out_packet_conversion_convert_node_id_list_zero(self):
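"""Converted node-id list for the out-packet is empty: no error logged, empty result."""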
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"out_packet.OutPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_post_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_out_packet,
mock_del_object,
mock_post_object):
conversion_table._ConversionTable__port_conversion_table =\
{"network1::NodeId::PortId": ["network2::NodeId::PortId"],
"network2::NodeId::PortId": ["network1::NodeId::PortId"]}
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
outpacket = OutPacket("outpacket_id", "OutPcket", "attributes",
"NodeId", ["PortId"], ["Ports_ex"],
basic_flow_match, "Data")
outpacket_add = OutPacketAdded("outpacket_id")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.target._network_interfaces["network2"] =\
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "outpacket_item")
mock_del_object.return_value = self.value
mock_out_packet.return_value = outpacket
mock_post_object.return_value = self.value
self.result = self.target._add_out_packet_conversion("network1",
outpacket_add)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_add_out_packet_conversion_ports_list_zero(self):
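"""Out-packet ports convert to an empty list: no error logged, empty result."""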
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"out_packet.OutPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_post_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_out_packet,
mock_del_object,
mock_post_object):
conversion_table._ConversionTable__node_conversion_table =\
{"network1::NodeId": ["network2::NodeId"],
"network1::Node01": ["network2::Node01"]}
conversion_table._ConversionTable__port_conversion_table =\
{"network1::NodeId::PortId": ["network2::NodeId::PortId"],
"network2::NodeId::PortId": ["network1::NodeId::PortId"]}
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
outpacket = OutPacket("outpacket_id", "OutPcket", "attributes",
"Node01", ["Port01"], None,
basic_flow_match, "Data")
outpacket_add = OutPacketAdded("outpacket_id")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.target._network_interfaces["network2"] =\
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "outpacket_item")
mock_del_object.return_value = self.value
mock_out_packet.return_value = outpacket
mock_post_object.return_value = self.value
self.result = self.target._add_out_packet_conversion("network1",
outpacket_add)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_add_out_packet_conversion_portsEx_list_zero(self):
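"""Out-packet ports-except list converts to an empty list: no error logged, empty result."""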
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"out_packet.OutPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_post_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_out_packet,
mock_del_object,
mock_post_object):
conversion_table._ConversionTable__node_conversion_table =\
{"network1::PortExId": ["network2::PortExId"],
"network1::NodeId": ["network2::NodeId"]}
conversion_table._ConversionTable__port_conversion_table =\
{"network1::NodeId::PortEx": ["network2::NodeId::PortEx"],
"network1::NodeId::PortId": ["network2::NodeId::PortId"]}
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
outpacket = OutPacket("outpacket_id", "OutPcket", "attributes",
"NodeId", None, ["PortExId"],
basic_flow_match, "Data")
outpacket_add = OutPacketAdded("outpacket_id")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.target._network_interfaces["network2"] =\
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "outpacket_item")
mock_del_object.return_value = self.value
mock_out_packet.return_value = outpacket
mock_post_object.return_value = self.value
self.result = self.target._add_out_packet_conversion("network1",
outpacket_add)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_node_conversion_success(self):
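"""Changed node is converted and put to network2: the converted node is returned under network2::Node01."""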
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"node.Node."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_node,
mock_get_object,
mock_put_object):
self.target._Logic__subscription_table =\
{"NodeChanged::UPDATE::publisher_id": ["oper_status"]}
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__node_conversion_table =\
{"network1::Node01": ["network2::Node01"],
"network2::Node01": ["network1::Node01"]}
port_prev = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node_prev = Node("Node", "0001", "Node02",
{"port_id": port_prev},
{"attribute_prev": "prev_value"})
port_curr = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node_curr = Node("Node", "0001", "Node01",
{"port_id": port_curr},
{"attribute_curr": "prev_curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "node_item")
mock_node.return_value = node_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_node_conversion("network1",
node_prev,
node_curr,
["oper_status"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result["network2::Node01"].packed_object(),
node_prev.packed_object())
def test_update_node_conversion_network_id_None(self):
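"""network_id is None: no error logged, empty result."""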
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"node.Node."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_node,
mock_get_object,
mock_put_object):
port_prev = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node_prev = Node("Node", "0001", "Node02",
{"port_id": port_prev},
{"attribute_prev": "prev_value"})
port_curr = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node_curr = Node("Node", "0001", "Node01",
{"port_id": port_curr},
{"attribute_curr": "prev_curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "node_item")
mock_node.return_value = node_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_node_conversion(None,
node_prev,
node_curr,
["oper_status"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_node_conversion_node_curr_None(self):
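"""node_curr is None: no error logged, empty result."""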
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"node.Node."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_node,
mock_get_object,
mock_put_object):
port_prev = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node_prev = Node("Node", "0001", "Node02",
{"port_id": port_prev},
{"attribute_prev": "prev_value"})
port_curr = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node_curr = Node("Node", "0001", "Node01",
{"port_id": port_curr},
{"attribute_curr": "prev_curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "node_item")
mock_node.return_value = node_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_node_conversion("network1",
node_prev,
None,
["oper_status"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_node_conversion_attributes_None(self):
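"""attributes argument is None: the node is still converted and returned under network2::Node01."""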
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"node.Node."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_node,
mock_get_object,
mock_put_object):
self.target._Logic__subscription_table =\
{"NodeChanged::UPDATE::publisher_id": ["oper_status"]}
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__node_conversion_table =\
{"network1::Node01": ["network2::Node01"],
"network2::Node01": ["network1::Node01"]}
port_prev = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node_prev = Node("Node", "0001", "Node02",
{"port_id": port_prev},
{"attribute_prev": "prev_value"})
port_curr = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node_curr = Node("Node", "0001", "Node01",
{"port_id": port_curr},
{"attribute_curr": "prev_curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "node_item")
mock_node.return_value = node_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_node_conversion("network1",
node_prev,
node_curr,
None)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result["network2::Node01"].packed_object(),
node_prev.packed_object())
def test_update_node_conversion_node_id_not_in_interfaces(self):
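"""Conversion target network is not registered in _network_interfaces: no error logged, empty result."""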
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"node.Node."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_node,
mock_get_object,
mock_put_object):
self.target._Logic__subscription_table =\
{"NodeChanged::UPDATE::publisher_id": ["oper_status"]}
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__node_conversion_table =\
{"network1::Node01": ["network2::Node01"],
"network2::Node01": ["network1::Node01"]}
port_prev = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node_prev = Node("Node", "0001", "Node02",
{"port_id": port_prev},
{"attribute_prev": "prev_value"})
port_curr = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node_curr = Node("Node", "0001", "Node01",
{"port_id": port_curr},
{"attribute_curr": "prev_curr"})
self.value = Response(200, "node_item")
mock_node.return_value = node_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_node_conversion("network1",
node_prev,
node_curr,
["oper_status"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_node_conversion_success_get_node_false(self):
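"""Node.create_from_packed returns None (node fetch fails): no error logged, empty result."""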
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"node.Node."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_node,
mock_get_object,
mock_put_object):
self.target._Logic__subscription_table =\
{"NodeChanged::UPDATE::publisher_id": ["oper_status"]}
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__node_conversion_table =\
{"network1::Node01": ["network2::Node01"],
"network2::Node01": ["network1::Node01"]}
port_prev = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node_prev = Node("Node", "0001", "Node02",
{"port_id": port_prev},
{"attribute_prev": "prev_value"})
port_curr = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node_curr = Node("Node", "0001", "Node01",
{"port_id": port_curr},
{"attribute_curr": "prev_curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "node_item")
mock_node.return_value = None
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_node_conversion("network1",
node_prev,
node_curr,
["oper_status"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_node_conversion_attr_key_in_ignore_attributes(self):
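"""Changed attribute key is covered by the ignore attributes: nothing is updated, empty result."""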
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"node.Node."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_node,
mock_get_object,
mock_put_object):
self.target._Logic__subscription_table =\
{"NodeChanged::UPDATE::network1": ["physical_id", "vendor"]}
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__node_conversion_table =\
{"network1::Node01": ["network2::Node01"],
"network2::Node01": ["network1::Node01"]}
port_prev = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
port_curr = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node_prev = Node("Node", "1", "Node01", {"port_id": port_prev},
{"oper_status": "DOWN"})
node_curr = Node("Node", "2", "Node01", {"port_id": port_curr},
{"oper_status": "UP"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "node_item")
mock_node.return_value = node_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_node_conversion("network1",
node_prev,
node_curr,
["physical_id", "vendor"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_node_conversion_attributes_exist(self):
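"""Previous and current attributes are identical: nothing is updated, empty result."""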
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"node.Node."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_node,
mock_get_object,
mock_put_object):
self.target._Logic__subscription_table =\
{"NodeChanged::UPDATE::publisher_id": ["oper_status"]}
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__node_conversion_table =\
{"network1::Node01": ["network2::Node01"],
"network2::Node01": ["network1::Node01"]}
port_prev = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node_prev = Node("Node", "0001", "Node02",
{"port_id": port_prev},
{"attribute": "value"})
port_curr = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node_curr = Node("Node", "0001", "Node01",
{"port_id": port_curr},
{"attribute": "value"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "node_item")
mock_node.return_value = node_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_node_conversion("network1",
node_prev,
node_curr,
["oper_status"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_node_conversion_KeyError(self):
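"""Node.create_from_packed raises KeyError: the error is logged and an empty result is returned."""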
conversion_table = self.target._conversion_table
port_prev = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node_prev = Node("Node", "0001", "Node02",
{"port_id": port_prev},
{"attribute_prev": "prev_value"})
port_curr = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node_curr = Node("Node", "0001", "Node01",
{"port_id": port_curr},
{"attribute_curr": "prev_curr"})
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"node.Node."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util." +
"network_interface.NetworkInterface.get_node")
) as (logging_debug,
logging_error,
mock_node,
mock_get_object,
mock_put_object,
mock_get_node):
self.target._Logic__subscription_table =\
{"NodeChanged::UPDATE::publisher_id": ["oper_status"]}
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__node_conversion_table =\
{"network1::Node01": ["network2::Node01"],
"network2::Node01": ["network1::Node01"]}
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "node_item")
mock_node.side_effect = KeyError()
mock_get_node.return_value = node_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_node_conversion("network1",
node_prev,
node_curr,
["oper_status"])
self.assertEqual(
logging_error.call_count, 1)
self.assertEqual(
self.result, {})
def test_update_port_conversion_success(self):
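"""Changed port is converted and put to network2: the converted port is returned under network2::Node02::Port02."""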
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"port.Port."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_port,
mock_get_object,
mock_put_object):
self.target._Logic__subscription_table =\
{"PortChanged::UPDATE::publisher_id": ["oper_status"]}
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__port_conversion_table =\
{"network1::Node02::Port02": ["network2::Node02::Port02"],
"network2::Node02::Port02": ["network1::Node02::Port02"]}
port_prev = Port("Port", "1", "Port01", "Node01",
"OutLink", "InLink", {"PortKey": "prev"})
port_curr = Port("Port", "1", "Port02", "Node02",
"OutLink", "InLink", {"PortKey": "curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "port_item")
mock_port.return_value = port_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_port_conversion("network1",
port_prev,
port_curr,
["oper_status"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result["network2::Node02::Port02"].packed_object(),
port_prev.packed_object())
def test_update_port_conversion_network_id_None(self):
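"""network_id is None: no error logged, empty result."""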
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"port.Port."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_port,
mock_get_object,
mock_put_object):
self.target._Logic__subscription_table =\
{"PortChanged::UPDATE::publisher_id": ["oper_status"]}
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__port_conversion_table =\
{"network1::Node02::Port02": ["network2::Node02::Port02"],
"network2::Node02::Port02": ["network1::Node02::Port02"]}
port_prev = Port("Port", "1", "Port01", "Node01",
"OutLink", "InLink", {"PortKey": "prev"})
port_curr = Port("Port", "1", "Port02", "Node02",
"OutLink", "InLink", {"PortKey": "curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "port_item")
mock_port.return_value = port_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_port_conversion(None,
port_prev,
port_curr,
["oper_status"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_port_conversion_port_curr_None(self):
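"""port_curr is None: no error logged, empty result."""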
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"port.Port."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_port,
mock_get_object,
mock_put_object):
self.target._Logic__subscription_table =\
{"PortChanged::UPDATE::publisher_id": ["oper_status"]}
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__port_conversion_table =\
{"network1::Node02::Port02": ["network2::Node02::Port02"],
"network2::Node02::Port02": ["network1::Node02::Port02"]}
port_prev = Port("Port", "1", "Port01", "Node01",
"OutLink", "InLink", {"PortKey": "prev"})
port_curr = Port("Port", "1", "Port02", "Node02",
"OutLink", "InLink", {"PortKey": "curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "port_item")
mock_port.return_value = port_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_port_conversion("network1",
port_prev,
None,
["oper_status"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_port_conversion_attributes_None(self):
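"""attributes argument is None: the port is still converted and returned under network2::Node02::Port02."""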
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"port.Port."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_port,
mock_get_object,
mock_put_object):
self.target._Logic__subscription_table =\
{"PortChanged::UPDATE::publisher_id": ["oper_status"]}
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__port_conversion_table =\
{"network1::Node02::Port02": ["network2::Node02::Port02"],
"network2::Node02::Port02": ["network1::Node02::Port02"]}
port_prev = Port("Port", "1", "Port01", "Node01",
"OutLink", "InLink", {"PortKey": "prev"})
port_curr = Port("Port", "1", "Port02", "Node02",
"OutLink", "InLink", {"PortKey": "curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "port_item")
mock_port.return_value = port_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_port_conversion("network1",
port_prev,
port_curr,
None)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result["network2::Node02::Port02"].packed_object(),
port_prev.packed_object())
def test_update_port_conversion_port_id_not_in_interfaces(self):
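"""Conversion target network is not registered in _network_interfaces: no error logged, empty result."""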
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"port.Port."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_port,
mock_get_object,
mock_put_object):
self.target._Logic__subscription_table =\
{"PortChanged::UPDATE::publisher_id": ["oper_status"]}
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__port_conversion_table =\
{"network1::Node02::Port02": ["network2::Node02::Port02"],
"network2::Node02::Port02": ["network1::Node02::Port02"]}
port_prev = Port("Port", "1", "Port01", "Node01",
"OutLink", "InLink", {"PortKey": "prev"})
port_curr = Port("Port", "1", "Port02", "Node02",
"OutLink", "InLink", {"PortKey": "curr"})
self.value = Response(200, "port_item")
mock_port.return_value = port_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_port_conversion("network1",
port_prev,
port_curr,
["oper_status"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_port_conversion_port_None(self):
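"""Port.create_from_packed returns None: no error logged, empty result."""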
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"port.Port."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_port,
mock_get_object,
mock_put_object):
self.target._Logic__subscription_table =\
{"PortChanged::UPDATE::publisher_id": ["oper_status"]}
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__port_conversion_table =\
{"network1::Node02::Port02": ["network2::Node02::Port02"],
"network2::Node02::Port02": ["network1::Node02::Port02"]}
port_prev = Port("Port", "1", "Port01", "Node01",
"OutLink", "InLink", {"PortKey": "prev"})
port_curr = Port("Port", "1", "Port02", "Node02",
"OutLink", "InLink", {"PortKey": "curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "port_item")
mock_port.return_value = None
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_port_conversion("network1",
port_prev,
port_curr,
["oper_status"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_port_conversion_attr_key_ignore_attributes(self):
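"""Changed attribute key is covered by the ignore attributes: nothing is updated, empty result."""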
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"port.Port."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_port,
mock_get_object,
mock_put_object):
self.target._Logic__subscription_table =\
{"PortChanged::UPDATE::publisher_id": ["oper_status"]}
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__port_conversion_table =\
{"network1::Node02::Port02": ["network2::Node02::Port02"],
"network2::Node02::Port02": ["network1::Node02::Port02"]}
port_prev = Port("Port", "1", "Port01", "Node01",
"OutLink", "InLink", {"PortKey": "prev"})
port_curr = Port("Port", "1", "Port02", "Node02",
"OutLink", "InLink", {"max_bandwidth": "curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "port_item")
mock_port.return_value = port_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_port_conversion("network1",
port_prev,
port_curr,
["oper_status"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_port_conversion_attributes_exist(self):
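"""Previous and current attributes are identical: nothing is updated, empty result."""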
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"port.Port."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_port,
mock_get_object,
mock_put_object):
self.target._Logic__subscription_table =\
{"PortChanged::UPDATE::publisher_id": ["oper_status"]}
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__port_conversion_table =\
{"network1::Node02::Port02": ["network2::Node02::Port02"],
"network2::Node02::Port02": ["network1::Node02::Port02"]}
port_prev = Port("Port", "1", "Port01", "Node01",
"OutLink", "InLink", {"PortKey": "curr"})
port_curr = Port("Port", "1", "Port02", "Node02",
"OutLink", "InLink", {"PortKey": "curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "port_item")
mock_port.return_value = port_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_port_conversion("network1",
port_prev,
port_curr,
["oper_status"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_port_conversion_KeyError(self):
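"""Port.create_from_packed raises KeyError: the error is logged and an empty result is returned."""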
conversion_table = self.target._conversion_table
self.target._Logic__subscription_table =\
{"PortChanged::UPDATE::publisher_id": ["oper_status"]}
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__port_conversion_table =\
{"network1::Node02::Port02": ["network2::Node02::Port02"],
"network2::Node02::Port02": ["network1::Node02::Port02"]}
port_prev = Port("Port", "1", "Port01", "Node01",
"OutLink", "InLink", {"PortKey": "prev"})
port_curr = Port("Port", "1", "Port02", "Node02",
"OutLink", "InLink", {"PortKey": "curr"})
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"port.Port."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util." +
"network_interface.NetworkInterface.get_port")
) as (logging_debug,
logging_error,
mock_port,
mock_put_object,
mock_get_port):
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "port_item")
mock_get_port.return_value = port_prev
mock_port.side_effect = KeyError()
mock_put_object.return_value = self.value
self.result = self.target._update_port_conversion("network1",
port_prev,
port_curr,
["oper_status"])
self.assertEqual(
logging_error.call_count, 1)
self.assertEqual(
self.result, {})
def test_update_link_conversion_success(self):
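"""Changed link is converted and put to network2: the converted link is returned under network2::Link02."""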
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"link.Link."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_link,
mock_get_object,
mock_put_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__link_conversion_table =\
{"network1::Link02": ["network2::Link02"],
"network2::Link02": ["network1::Link02"]}
link_prev = Link("Link", "1", "Link01", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "prev"})
link_curr = Link("Link", "1", "Link02", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "link_item")
mock_link.return_value = link_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_link_conversion("network1",
link_prev,
link_curr,
["oper_status"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result["network2::Link02"].packed_object(),
link_prev.packed_object())
def test_update_link_conversion_network_id_None(self):
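"""network_id is None: no error logged, empty result."""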
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"link.Link."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_link,
mock_get_object,
mock_put_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__link_conversion_table =\
{"network1::Link02": ["network2::Link02"],
"network2::Link02": ["network1::Link02"]}
link_prev = Link("Link", "1", "Link01", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "prev"})
link_curr = Link("Link", "1", "Link02", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "link_item")
mock_link.return_value = link_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_link_conversion(None,
link_prev,
link_curr,
["oper_status"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_link_conversion_link_curr_None(self):
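"""link_curr is None: no error logged, empty result."""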
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"link.Link."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_link,
mock_get_object,
mock_put_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__link_conversion_table =\
{"network1::Link02": ["network2::Link02"],
"network2::Link02": ["network1::Link02"]}
link_prev = Link("Link", "1", "Link01", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "prev"})
link_curr = Link("Link", "1", "Link02", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "link_item")
mock_link.return_value = link_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_link_conversion("network1",
link_prev,
None,
["oper_status"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_link_conversion_attributes_None(self):
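"""attributes argument is None: the link is still converted and returned under network2::Link02."""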
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"link.Link."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_link,
mock_get_object,
mock_put_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__link_conversion_table =\
{"network1::Link02": ["network2::Link02"],
"network2::Link02": ["network1::Link02"]}
link_prev = Link("Link", "1", "Link01", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "prev"})
link_curr = Link("Link", "1", "Link02", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "link_item")
mock_link.return_value = link_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_link_conversion("network1",
link_prev,
link_curr,
None)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result["network2::Link02"].packed_object(),
link_prev.packed_object())
def test_update_link_conversion_link_id_not_in_interfaces(self):
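"""Conversion target network is not registered in _network_interfaces: no error logged, empty result."""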
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"link.Link."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_link,
mock_get_object,
mock_put_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__link_conversion_table =\
{"network1::Link02": ["network2::Link02"],
"network2::Link02": ["network1::Link02"]}
link_prev = Link("Link", "1", "Link01", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "prev"})
link_curr = Link("Link", "1", "Link02", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "curr"})
self.value = Response(200, "link_item")
mock_link.return_value = link_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_link_conversion("network1",
link_prev,
link_curr,
["oper_status"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_link_conversion_link_None(self):
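"""Link.create_from_packed returns None: no error logged, empty result."""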
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"link.Link."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_link,
mock_get_object,
mock_put_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__link_conversion_table =\
{"network1::Link02": ["network2::Link02"],
"network2::Link02": ["network1::Link02"]}
link_prev = Link("Link", "1", "Link01", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "prev"})
link_curr = Link("Link", "1", "Link02", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "link_item")
mock_link.return_value = None
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_link_conversion("network1",
link_prev,
link_curr,
["oper_status"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_link_conversion_attr_key_in_ignore_attributes(self):
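"""Changed attribute key is covered by the ignore attributes: nothing is updated, empty result."""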
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"link.Link."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_link,
mock_get_object,
mock_put_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__link_conversion_table =\
{"network1::Link02": ["network2::Link02"],
"network2::Link02": ["network1::Link02"]}
link_prev = Link("Link", "1", "Link01", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "prev"})
link_curr = Link("Link", "1", "Link02", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"unreserved_bandwidth": "curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "link_item")
mock_link.return_value = link_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_link_conversion("network1",
link_prev,
link_curr,
["oper_status"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_link_conversion_attributes_exist(self):
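"""Previous and current attributes are identical: nothing is updated, empty result."""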
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"link.Link."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_link,
mock_get_object,
mock_put_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__link_conversion_table =\
{"network1::Link02": ["network2::Link02"],
"network2::Link02": ["network1::Link02"]}
link_prev = Link("Link", "1", "Link01", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "curr"})
link_curr = Link("Link", "1", "Link02", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "link_item")
mock_link.return_value = link_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_link_conversion("network1",
link_prev,
link_curr,
["oper_status"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_link_conversion_KeyError(self):
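"""Link.create_from_packed raises KeyError: the error is logged and an empty result is returned."""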
conversion_table = self.target._conversion_table
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__link_conversion_table =\
{"network1::Link02": ["network2::Link02"],
"network2::Link02": ["network1::Link02"]}
link_prev = Link("Link", "1", "Link01", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "prev"})
link_curr = Link("Link", "1", "Link02", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "curr"})
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"link.Link."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util." +
"network_interface.NetworkInterface.get_link")
) as (logging_debug,
logging_error,
mock_link,
mock_get_object,
mock_put_object,
mock_get_link):
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "link_item")
mock_get_link.return_value = link_prev
mock_link.side_effect = KeyError()
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_link_conversion("network1",
link_prev,
link_curr,
["oper_status"])
self.assertEqual(
logging_error.call_count, 1)
self.assertEqual(
self.result, {})
def test_update_flow_conversion_success(self):
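"""Changed flow is converted and put to network2: the converted flow is returned under network2::Flow02."""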
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.flow."
"flow.Flow."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util.network_interface."
"NetworkInterface.get_flow")
) as (logging_debug,
logging_error,
mock_flow,
mock_get_object,
mock_put_object,
mock_get_flow):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__flow_conversion_table =\
{"network1::Flow02": ["network2::Flow02"],
"network2::Flow02": ["network1::Flow02"]}
flow_prev = Flow("BasicFlow", "1", "Flow01", "Owner",
True, 123456789, "establishing",
{"PortKey": "prev"})
flow_curr = Flow("BasicFlow", "1", "Flow02", "Owner",
True, 987654321, "establishing",
{"PortKey": "curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, {"type": "Flow"})
mock_get_flow.return_value = flow_prev
self.target._Logic__get_ignore_keys =\
Mock(return_value=["bandwidth", "latency", "req_latency"])
mock_flow.return_value = flow_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_flow_conversion(
"network1", flow_prev, flow_curr, ["priority"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result["network2::Flow02"].packed_object(),
flow_prev.packed_object())
def test_update_flow_conversion_network_id_None(self):
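"""network_id is None: no error logged, empty result."""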
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.flow."
"flow.Flow."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util.network_interface."
"NetworkInterface.get_flow")
) as (logging_debug,
logging_error,
mock_flow,
mock_get_object,
mock_put_object,
mock_get_flow):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__flow_conversion_table =\
{"network1::Flow02": ["network2::Flow02"],
"network2::Flow02": ["network1::Flow02"]}
flow_prev = Flow("BasicFlow", "1", "Flow01", "Owner",
True, 123456789, "establishing",
{"PortKey": "prev"})
flow_curr = Flow("BasicFlow", "1", "Flow02", "Owner",
True, 987654321, "establishing",
{"PortKey": "curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, {"type": "Flow"})
mock_get_flow.return_value = flow_prev
self.target._Logic__get_ignore_keys =\
Mock(return_value=["bandwidth", "latency", "req_latency"])
mock_flow.return_value = flow_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_flow_conversion(
None, flow_prev, flow_curr, ["priority"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_flow_conversion_flow_curr_None(self):
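"""flow_curr is None: no error logged, empty result."""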
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.flow."
"flow.Flow."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util.network_interface."
"NetworkInterface.get_flow")
) as (logging_debug,
logging_error,
mock_flow,
mock_get_object,
mock_put_object,
mock_get_flow):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__flow_conversion_table =\
{"network1::Flow02": ["network2::Flow02"],
"network2::Flow02": ["network1::Flow02"]}
flow_prev = Flow("BasicFlow", "1", "Flow01", "Owner",
True, 123456789, "establishing",
{"PortKey": "prev"})
flow_curr = Flow("BasicFlow", "1", "Flow02", "Owner",
True, 987654321, "establishing",
{"PortKey": "curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, {"type": "Flow"})
mock_get_flow.return_value = flow_prev
self.target._Logic__get_ignore_keys =\
Mock(return_value=["bandwidth", "latency", "req_latency"])
mock_flow.return_value = flow_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_flow_conversion(
"network1", flow_prev, None, ["priority"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_flow_conversion_attributes_None(self):
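"""attributes argument is None: the flow is still converted and returned under network2::Flow02."""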
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.flow."
"flow.Flow."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util.network_interface."
"NetworkInterface.get_flow")
) as (logging_debug,
logging_error,
mock_flow,
mock_get_object,
mock_put_object,
mock_get_flow):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__flow_conversion_table =\
{"network1::Flow02": ["network2::Flow02"],
"network2::Flow02": ["network1::Flow02"]}
flow_prev = Flow("BasicFlow", "1", "Flow01", "Owner",
True, 123456789, "establishing",
{"PortKey": "prev"})
flow_curr = Flow("BasicFlow", "1", "Flow02", "Owner",
True, 987654321, "establishing",
{"PortKey": "curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, {"type": "Flow"})
mock_get_flow.return_value = flow_prev
self.target._Logic__get_ignore_keys =\
Mock(return_value=["bandwidth", "latency", "req_latency"])
mock_flow.return_value = flow_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_flow_conversion(
"network1", flow_prev, flow_curr, None)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result["network2::Flow02"].packed_object(),
flow_prev.packed_object())
def test_update_flow_conversion_flow_id_not_in_interfaces(self):
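"""Conversion target network is not registered in _network_interfaces: no error logged, empty result."""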
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.flow."
"flow.Flow."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util.network_interface."
"NetworkInterface.get_flow")
) as (logging_debug,
logging_error,
mock_flow,
mock_get_object,
mock_put_object,
mock_get_flow):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__flow_conversion_table =\
{"network1::Flow02": ["network2::Flow02"],
"network2::Flow02": ["network1::Flow02"]}
flow_prev = Flow("BasicFlow", "1", "Flow01", "Owner",
True, 123456789, "establishing",
{"PortKey": "prev"})
flow_curr = Flow("BasicFlow", "1", "Flow02", "Owner",
True, 123456789, "establishing",
{"PortKey": "curr"})
self.value = Response(200, {"type": "Flow"})
self.target._Logic__get_ignore_keys =\
Mock(return_value=["bandwidth", "latency", "req_latency"])
mock_flow.return_value = flow_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_flow_conversion(
"network1", flow_prev, flow_curr, ["priority"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_flow_conversion_flow_None(self):
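"""get_flow returns None for the converted flow: no error logged, empty result."""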
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.flow."
"flow.Flow."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util.network_interface."
"NetworkInterface.get_flow")
) as (logging_debug,
logging_error,
mock_flow,
mock_get_object,
mock_put_object,
mock_get_flow):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__flow_conversion_table =\
{"network1::Flow02": ["network2::Flow02"],
"network2::Flow02": ["network1::Flow02"]}
flow_prev = Flow("BasicFlow", "1", "Flow01", "Owner",
True, 123456789, "establishing",
{"PortKey": "prev"})
flow_curr = Flow("BasicFlow", "1", "Flow02", "Owner",
True, 123456789, "establishing",
{"PortKey": "curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, {"type": "Flow"})
self.target._Logic__get_ignore_keys =\
Mock(return_value=["bandwidth", "latency", "req_latency"])
mock_get_flow.return_value = None
mock_flow.return_value = flow_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_flow_conversion(
"network1", flow_prev, flow_curr, ["priority"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_flow_conversion_differ_enabled(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.flow."
"flow.Flow."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util.network_interface."
"NetworkInterface.get_flow")
) as (logging_debug,
logging_error,
mock_flow,
mock_get_object,
mock_put_object,
mock_get_flow):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__flow_conversion_table =\
{"network1::Flow02": ["network2::Flow02"],
"network2::Flow02": ["network1::Flow02"]}
flow_prev = Flow("BasicFlow", "1", "Flow01", "Owner",
True, 123456789, "establishing",
{"PortKey": "prev"})
flow_curr = Flow("BasicFlow", "1", "Flow02", "Owner",
False, 123456789, "establishing",
{"PortKey": "curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, {"type": "Flow"})
mock_get_flow.return_value = flow_prev
self.target._Logic__get_ignore_keys =\
Mock(return_value=["bandwidth", "latency", "req_latency"])
mock_flow.return_value = flow_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_flow_conversion(
"network1", flow_prev, flow_curr, ["priority"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result["network2::Flow02"].packed_object(),
flow_prev.packed_object())
def test_update_flow_conversion_differ_status(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.flow."
"flow.Flow."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util.network_interface."
"NetworkInterface.get_flow")
) as (logging_debug,
logging_error,
mock_flow,
mock_get_object,
mock_put_object,
mock_get_flow):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__flow_conversion_table =\
{"network1::Flow02": ["network2::Flow02"],
"network2::Flow02": ["network1::Flow02"]}
flow_prev = Flow("BasicFlow", "1", "Flow01", "Owner",
True, 123456789, "establishing",
{"PortKey": "prev"})
flow_curr = Flow("BasicFlow", "1", "Flow02", "Owner",
True, 123456789, "teardown",
{"PortKey": "curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, {"type": "Flow"})
mock_get_flow.return_value = flow_prev
self.target._Logic__get_ignore_keys =\
Mock(return_value=["bandwidth", "latency", "req_latency"])
mock_flow.return_value = flow_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_flow_conversion(
"network1", flow_prev, flow_curr, ["priority"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result["network2::Flow02"].packed_object(),
flow_prev.packed_object())
def test_update_flow_conversion_attr_key_in_ignore_attributes(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.flow."
"flow.Flow."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util.network_interface."
"NetworkInterface.get_flow")
) as (logging_debug,
logging_error,
mock_flow,
mock_get_object,
mock_put_object,
mock_get_flow):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__flow_conversion_table =\
{"network1::Flow02": ["network2::Flow02"],
"network2::Flow02": ["network1::Flow02"]}
flow_prev = Flow("BasicFlow", "1", "Flow01", "Owner",
True, 123456789, "establishing",
{"PortKey": "prev"})
flow_curr = Flow("BasicFlow", "1", "Flow02", "Owner",
True, 123456789, "establishing",
{"bandwidth": "curr"})
self.target._network_interfaces["network1"] = \
NetworkInterface(self.target.dispatcher, "network1")
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, {"type": "Flow"})
mock_get_flow.return_value = flow_prev
self.target._Logic__get_ignore_keys =\
Mock(return_value=["enabled", "priority",
"status", "bandwidth"])
mock_flow.return_value = flow_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_flow_conversion(
"network1", flow_prev, flow_curr, ["priority"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_flow_conversion_attributes_exist(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.flow."
"flow.Flow."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util.network_interface."
"NetworkInterface.get_flow")
) as (logging_debug,
logging_error,
mock_flow,
mock_get_object,
mock_put_object,
mock_get_flow):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__flow_conversion_table =\
{"network1::Flow02": ["network2::Flow02"],
"network2::Flow02": ["network1::Flow02"]}
flow_prev = Flow("BasicFlow", "1", "Flow01", "Owner",
True, 123456789, "establishing",
{"PortKey": "curr"})
flow_curr = Flow("BasicFlow", "1", "Flow02", "Owner",
True, 123456789, "establishing",
{"PortKey": "curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, {"type": "Flow"})
mock_get_flow.return_value = flow_prev
self.target._Logic__get_ignore_keys =\
Mock(return_value=["enabled", "priority", "status"])
mock_flow.return_value = flow_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_flow_conversion(
"network1", flow_prev, flow_curr, ["priority"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_flow_conversion_KeyError(self):
conversion_table = self.target._conversion_table
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__flow_conversion_table =\
{"network1::Flow02": ["network2::Flow02"],
"network2::Flow02": ["network1::Flow02"]}
flow_prev = Flow("BasicFlow", "1", "Flow01", "Owner",
True, 123456789, "establishing",
{"PortKey": "prev"})
flow_curr = Flow("BasicFlow", "1", "Flow02", "Owner",
True, 987654321, "establishing",
{"PortKey": "curr"})
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.flow."
"flow.Flow."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util.network_interface."
"NetworkInterface.get_flow")
) as (logging_debug,
logging_error,
mock_flow,
mock_get_object,
mock_put_object,
mock_get_flow):
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, {"type": "Flow"})
mock_get_flow.return_value = flow_prev
self.target._Logic__get_ignore_keys =\
Mock(return_value=["bandwidth", "latency", "req_latency"])
mock_flow.side_effect = KeyError()
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_flow_conversion(
"network1", flow_prev, flow_curr, ["priority"])
self.assertEqual(
logging_error.call_count, 1)
self.assertEqual(
self.result, {})
def test_delete_node_conversion_success(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"node.Node."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_node,
mock_get_object,
mock_put_object,
mock_del_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__node_conversion_table =\
{"network1::Node01": ["network2::Node01"],
"network2::Node02": ["network1::Node02"]}
port_prev = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node_prev = Node("Node", "0001", "Node02",
{"port_id": port_prev},
{"attribute_prev": "prev_value"})
self.target._network_interfaces["network1"] = \
NetworkInterface(self.target.dispatcher, "network1")
self.value = Response(200, {"item": "node_item"})
mock_node.return_value = node_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
mock_del_object.return_value = self.value
self.result = self.target._delete_node_conversion("network2",
node_prev)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
conversion_table._ConversionTable__node_conversion_table,
{"network1::Node01": ["network2::Node01"]})
self.assertEqual(
self.result["network1::Node02"].packed_object(),
node_prev.packed_object())
def test_delete_node_conversion_network_id_None(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"node.Node."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_node,
mock_get_object,
mock_put_object,
mock_del_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__node_conversion_table =\
{"network1::Node01": ["network2::Node01"],
"network2::Node02": ["network1::Node02"]}
port_prev = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node_prev = Node("Node", "0001", "Node02",
{"port_id": port_prev},
{"attribute_prev": "prev_value"})
self.target._network_interfaces["network1"] = \
NetworkInterface(self.target.dispatcher, "network1")
self.value = Response(200, {"item": "node_item"})
mock_node.return_value = node_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
mock_del_object.return_value = self.value
self.result = self.target._delete_node_conversion(None,
node_prev)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_delete_node_conversion_node_None(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"node.Node."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_node,
mock_get_object,
mock_put_object,
mock_del_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__node_conversion_table =\
{"network1::Node01": ["network2::Node01"],
"network2::Node02": ["network1::Node02"]}
port_prev = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node_prev = Node("Node", "0001", "Node02",
{"port_id": port_prev},
{"attribute_prev": "prev_value"})
self.target._network_interfaces["network1"] = \
NetworkInterface(self.target.dispatcher, "network1")
self.value = Response(200, {"item": "node_item"})
mock_node.return_value = node_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
mock_del_object.return_value = self.value
self.result = self.target._delete_node_conversion("network2",
None)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_delete_node_conversion_node_id_not_in_interfaces(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"node.Node."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_node,
mock_get_object,
mock_put_object,
mock_del_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__node_conversion_table =\
{"network1::Node01": ["network2::Node01"],
"network2::Node02": ["network1::Node02"]}
port_prev = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node_prev = Node("Node", "0001", "Node02",
{"port_id": port_prev},
{"attribute_prev": "prev_value"})
self.value = Response(200, {"item": "node_item"})
mock_node.return_value = node_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
mock_del_object.return_value = self.value
self.result = self.target._delete_node_conversion("network2",
node_prev)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_delete_node_conversion_KeyError(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"node.Node."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_node,
mock_get_object,
mock_put_object,
mock_del_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__node_conversion_table =\
{"network1::Node01": ["network2::Node01"],
"network2::Node02": ["network1::Node02"]}
port_prev = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node_prev = Node("Node", "0001", "Node02",
{"port_id": port_prev},
{"attribute_prev": "prev_value"})
self.target._network_interfaces["network1"] = \
NetworkInterface(self.target.dispatcher, "network1")
self.value = Response(200, {"key": "error"})
mock_node.side_effect = KeyError()
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
mock_del_object.return_value = self.value
self.result = self.target._delete_node_conversion("network2",
node_prev)
self.assertEqual(
logging_error.call_count, 1)
self.assertEqual(
self.result, None)
def test_delete_port_conversion_success(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"port.Port."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_port,
mock_get_object,
mock_put_object,
mock_del_object):
self.target._Logic__subscription_table =\
{"PortChanged::UPDATE::publisher_id": ["oper_status"]}
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__port_conversion_table =\
{"network1::Node01::Port01": ["network2::Node01::Port01"],
"network2::Node02::Port02": ["network1::Node02::Port02"]}
port_prev = Port("Port", "1", "Port01", "Node01",
"OutLink", "InLink", {"PortKey": "prev"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "port_item")
mock_port.return_value = port_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
mock_del_object.return_value = self.value
self.result = self.target._delete_port_conversion("network1",
port_prev)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result["network2::Node01::Port01"].packed_object(),
port_prev.packed_object())
self.assertEqual(
conversion_table._ConversionTable__port_conversion_table,
{"network2::Node02::Port02": ["network1::Node02::Port02"]})
def test_delete_port_conversion_network_id_None(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"port.Port."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_port,
mock_get_object,
mock_put_object,
mock_del_object):
self.target._Logic__subscription_table =\
{"PortChanged::UPDATE::publisher_id": ["oper_status"]}
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__port_conversion_table =\
{"network1::Node01::Port01": ["network2::Node01::Port01"],
"network2::Node02::Port02": ["network1::Node02::Port02"]}
port_prev = Port("Port", "1", "Port01", "Node01",
"OutLink", "InLink", {"PortKey": "prev"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "port_item")
mock_port.return_value = port_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
mock_del_object.return_value = self.value
self.result = self.target._delete_port_conversion(None,
port_prev)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_delete_port_conversion_port_None(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"port.Port."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_port,
mock_get_object,
mock_put_object,
mock_del_object):
self.target._Logic__subscription_table =\
{"PortChanged::UPDATE::publisher_id": ["oper_status"]}
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__port_conversion_table =\
{"network1::Node01::Port01": ["network2::Node01::Port01"],
"network2::Node02::Port02": ["network1::Node02::Port02"]}
port_prev = Port("Port", "1", "Port01", "Node01",
"OutLink", "InLink", {"PortKey": "prev"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "port_item")
mock_port.return_value = port_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
mock_del_object.return_value = self.value
self.result = self.target._delete_port_conversion("network1",
None)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_delete_port_conversion_port_id_not_in_interfaces(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"port.Port."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_port,
mock_get_object,
mock_put_object,
mock_del_object):
self.target._Logic__subscription_table =\
{"PortChanged::UPDATE::publisher_id": ["oper_status"]}
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__port_conversion_table =\
{"network1::Node01::Port01": ["network2::Node01::Port01"],
"network2::Node02::Port02": ["network1::Node02::Port02"]}
port_prev = Port("Port", "1", "Port01", "Node01",
"OutLink", "InLink", {"PortKey": "prev"})
self.value = Response(200, "port_item")
mock_port.return_value = port_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
mock_del_object.return_value = self.value
self.result = self.target._delete_port_conversion("network1",
port_prev)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_delete_port_conversion_KeyError(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"port.Port."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_port,
mock_get_object,
mock_put_object,
mock_del_object):
self.target._Logic__subscription_table =\
{"PortChanged::UPDATE::publisher_id": ["oper_status"]}
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__port_conversion_table =\
{"network1::Node01::Port01": ["network2::Node01::Port01"],
"network2::Node02::Port02": ["network1::Node02::Port02"]}
port_prev = Port("Port", "1", "Port01", "Node01",
"OutLink", "InLink", {"PortKey": "prev"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "port_item")
mock_port.side_effect = KeyError()
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
mock_del_object.return_value = self.value
self.result = self.target._delete_port_conversion("network1",
port_prev)
self.assertEqual(
logging_error.call_count, 1)
self.assertEqual(
self.result, None)
def test_delete_link_conversion_success(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"link.Link."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_link,
mock_get_object,
mock_put_object,
mock_del_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__link_conversion_table =\
{"network1::Link01": ["network2::Link01"],
"network2::Link02": ["network1::Link02"]}
link_prev = Link("Link", "1", "Link01", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "prev"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "link_item")
mock_link.return_value = link_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
mock_del_object.return_value = self.value
self.result = self.target._delete_link_conversion("network1",
link_prev)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result["network2::Link01"].packed_object(),
link_prev.packed_object())
self.assertEqual(
conversion_table._ConversionTable__link_conversion_table,
{"network2::Link02": ["network1::Link02"]})
def test_delete_link_conversion_network_id_None(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"link.Link."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_link,
mock_get_object,
mock_put_object,
mock_del_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__link_conversion_table =\
{"network1::Link01": ["network2::Link01"],
"network2::Link02": ["network1::Link02"]}
link_prev = Link("Link", "1", "Link01", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "prev"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "link_item")
mock_link.return_value = link_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
mock_del_object.return_value = self.value
self.result = self.target._delete_link_conversion(None,
link_prev)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_delete_link_conversion_link_None(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"link.Link."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_link,
mock_get_object,
mock_put_object,
mock_del_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__link_conversion_table =\
{"network1::Link01": ["network2::Link01"],
"network2::Link02": ["network1::Link02"]}
link_prev = Link("Link", "1", "Link01", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "prev"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "link_item")
mock_link.return_value = link_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
mock_del_object.return_value = self.value
self.result = self.target._delete_link_conversion("network1",
None)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
    def test_delete_link_conversion_link_not_in_interfaces(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"link.Link."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_link,
mock_get_object,
mock_put_object,
mock_del_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__link_conversion_table =\
{"network1::Link01": ["network2::Link01"],
"network2::Link02": ["network1::Link02"]}
link_prev = Link("Link", "1", "Link01", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "prev"})
self.value = Response(200, "link_item")
mock_link.return_value = link_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
mock_del_object.return_value = self.value
self.result = self.target._delete_link_conversion("network1",
link_prev)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_delete_link_conversion_KeyError(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"link.Link."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_link,
mock_get_object,
mock_put_object,
mock_del_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__link_conversion_table =\
{"network1::Link01": ["network2::Link01"],
"network2::Link02": ["network1::Link02"]}
link_prev = Link("Link", "1", "Link01", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "prev"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "link_item")
mock_link.side_effect = KeyError()
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
mock_del_object.return_value = self.value
self.result = self.target._delete_link_conversion("network1",
link_prev)
self.assertEqual(
logging_error.call_count, 1)
self.assertEqual(
self.result, None)
def test_delete_flow_conversion_success(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.flow."
"flow.Flow."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"get_flow"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_flow,
mock_get_object,
mock_put_object,
mock_get_flow,
mock_del_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__flow_conversion_table =\
{"network1::Flow01": ["network2::Flow01"],
"network2::Flow02": ["network1::Flow02"]}
flow_prev = Flow("BasicFlow", "1", "Flow01", "Owner",
True, 123456789, "establishing",
{"PortKey": "prev"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, {"type": "Flow"})
mock_flow.return_value = flow_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
mock_del_object.return_value = self.value
mock_get_flow.return_value = flow_prev
self.target._network_interfaces["network1"] = \
NetworkInterface(self.target.dispatcher, "network1")
self.result = self.target._delete_flow_conversion(
"network1", flow_prev)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result["network2::Flow01"].packed_object(),
flow_prev.packed_object())
self.assertEqual(
conversion_table._ConversionTable__flow_conversion_table,
{"network2::Flow02": ["network1::Flow02"]})
def test_delete_flow_conversion_network_id_None(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.flow."
"flow.Flow."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"get_flow"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_flow,
mock_get_object,
mock_put_object,
mock_get_flow,
mock_del_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__flow_conversion_table =\
{"network1::Flow01": ["network2::Flow01"],
"network2::Flow02": ["network1::Flow02"]}
flow_prev = Flow("BasicFlow", "1", "Flow01", "Owner",
True, 123456789, "establishing",
{"PortKey": "prev"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, {"type": "Flow"})
mock_flow.return_value = flow_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
mock_del_object.return_value = self.value
mock_get_flow.return_value = self.value
self.result = self.target._delete_flow_conversion(
None, flow_prev)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_delete_flow_conversion_link_None(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.flow."
"flow.Flow."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"get_flow"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_flow,
mock_get_object,
mock_put_object,
mock_get_flow,
mock_del_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__flow_conversion_table =\
{"network1::Flow01": ["network2::Flow01"],
"network2::Flow02": ["network1::Flow02"]}
flow_prev = Flow("BasicFlow", "1", "Flow01", "Owner",
True, 123456789, "establishing",
{"PortKey": "prev"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, {"type": "Flow"})
mock_flow.return_value = flow_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
mock_del_object.return_value = self.value
mock_get_flow.return_value = self.value
self.result = self.target._delete_flow_conversion(
"network1", None)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
    def test_delete_flow_conversion_flow_id_not_in_interfaces(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.flow."
"flow.Flow."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"get_flow"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_flow,
mock_get_object,
mock_put_object,
mock_get_flow,
mock_del_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__flow_conversion_table =\
{"network1::Flow01": ["network2::Flow01"],
"network2::Flow02": ["network1::Flow02"]}
flow_prev = Flow("BasicFlow", "1", "Flow01", "Owner",
True, 123456789, "establishing",
{"PortKey": "prev"})
self.value = Response(200, {"type": "Flow"})
mock_flow.return_value = flow_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
mock_del_object.return_value = self.value
mock_get_flow.return_value = flow_prev
self.target._network_interfaces["network1"] = \
NetworkInterface(self.target.dispatcher, "network1")
self.result = self.target._delete_flow_conversion(
"network1", flow_prev)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_delete_flow_conversion_KeyError(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.flow."
"flow.Flow."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"get_flow"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_flow,
mock_get_object,
mock_put_object,
mock_get_flow,
mock_del_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__flow_conversion_table =\
{"network1::Flow01": ["network2::Flow01"],
"network2::Flow02": ["network1::Flow02"]}
flow_prev = Flow("BasicFlow", "1", "Flow01", "Owner",
True, 123456789, "establishing",
{"PortKey": "prev"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, {"type": "Flow"})
mock_flow.side_effect = KeyError()
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
mock_del_object.return_value = self.value
mock_get_flow.return_value = self.value
self.result = self.target._delete_flow_conversion(
"network1", flow_prev)
self.assertEqual(
logging_error.call_count, 1)
self.assertEqual(
self.result, None)
def test_del_in_packet_conversion_success(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"in_packet.InPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_in_packet,
mock_del_object):
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
inpacket = InPacket("inpacket_id", "InPcket", "attributes",
"NodeId", "PortId", basic_flow_match,
"Data")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.value = Response(200, "inpacket_item")
mock_in_packet.return_value = inpacket
mock_del_object.return_value = self.value
self.result = self.target._del_in_packet(
self.target._network_interfaces["network1"], "inpacket_id")
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result.packed_object(), inpacket.packed_object())
def test_del_in_packet_conversion_Response_error(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"in_packet.InPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_in_packet,
mock_del_object):
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
inpacket = InPacket("inpacket_id", "InPcket", "attributes",
"NodeId", "PortId", basic_flow_match,
"Data")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.value = Response("400", "inpacket_item")
mock_in_packet.return_value = inpacket
mock_del_object.return_value = self.value
self.result = self.target._del_in_packet(
self.target._network_interfaces["network1"], "inpacket_id")
self.assertEqual(
logging_error.call_count, 1)
self.assertEqual(
self.result, None)
def test_del_in_packet_conversion_KeyError(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"in_packet.InPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_in_packet,
mock_del_object):
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
inpacket = InPacket("inpacket_id", "InPcket", "attributes",
"NodeId", "PortId", basic_flow_match,
"Data")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.value = Response(200, "inpacket_item")
mock_in_packet.side_effect = KeyError()
mock_del_object.return_value = self.value
self.result = self.target._del_in_packet(
self.target._network_interfaces["network1"], "inpacket_id")
self.assertEqual(
logging_error.call_count, 1)
self.assertEqual(
self.result, None)
def test_del_out_packet_conversion_success(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"out_packet.OutPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_out_packet,
mock_del_object):
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
outpacket = OutPacket("outpacket_id", "OutPcket", "attributes",
"NodeId", "Ports", "Ports_ex",
basic_flow_match, "Data")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.value = Response(200, "outpacket_item")
mock_out_packet.return_value = outpacket
mock_del_object.return_value = self.value
self.result = self.target._del_out_packet(
self.target._network_interfaces["network1"], "outpacket_id")
self.assertEqual(
logging_debug.call_count, 3)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result.packed_object(), outpacket.packed_object())
def test_del_out_packet_conversion_Response_error(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"out_packet.OutPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_out_packet,
mock_del_object):
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
outpacket = OutPacket("outpacket_id", "OutPcket", "attributes",
"NodeId", "Ports", "Ports_ex",
basic_flow_match, "Data")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.value = Response("400", "outpacket_item")
mock_out_packet.return_value = outpacket
mock_del_object.return_value = self.value
self.result = self.target._del_out_packet(
self.target._network_interfaces["network1"], "outpacket_id")
self.assertEqual(
logging_error.call_count, 1)
self.assertEqual(
self.result, None)
def test_del_out_packet_conversion_KeyError(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"out_packet.OutPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_out_packet,
mock_del_object):
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
outpacket = OutPacket("outpacket_id", "OutPcket", "attributes",
"NodeId", "Ports", "Ports_ex",
basic_flow_match, "Data")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.value = Response(200, "outpacket_item")
mock_out_packet.side_effect = KeyError()
mock_del_object.return_value = self.value
self.result = self.target._del_out_packet(
self.target._network_interfaces["network1"], "outpacket_id")
self.assertEqual(
logging_error.call_count, 1)
self.assertEqual(
self.result, None)
def test_get_ignore_keys_match(self):
attributes_port = ["oper_status", "physical_id",
"vendor", "max_bandwidth",
"unreserved_bandwidth", "is_boundary"]
self.result = self.target._Logic__get_ignore_keys(
attributes_port, ["attributes::unreserved_bandwidth"])
self.assertEqual(
self.result,
["oper_status", "physical_id",
"vendor", "max_bandwidth",
"is_boundary"])
def test_get_ignore_keys_not_match(self):
attributes_port = ["oper_status", "physical_id",
"vendor", "max_bandwidth",
"unreserved_bandwidth", "is_boundary"]
self.result = self.target._Logic__get_ignore_keys(
attributes_port, ["unreserved_bandwidth"])
self.assertEqual(
self.result,
["oper_status", "physical_id",
"vendor", "max_bandwidth",
"is_boundary"])
if __name__ == "__main__":
unittest.main()
|
python
|
#!/usr/bin/env python
import argparse
import astropy.io.fits as pyfits
if __name__=="__main__":
parser = argparse.ArgumentParser(
prog='show_gti.py',
usage='show_gti.py input.fits',
description='Show GTI information',
epilog='',
add_help=True,
)
parser.add_argument(
'input_fits',metavar='input_fits',type=str,
help='Input fits file.')
args = parser.parse_args()
hdu = pyfits.open(args.input_fits)
extname_list = []
for i in range(len(hdu)):
        try:
            extname_list.append(hdu[i].header['EXTNAME'])
        except KeyError:
            # skip extensions without an EXTNAME keyword
            pass
if 'GTI' in extname_list:
gtiname = 'GTI'
filetype = 'evt'
elif 'STDGTI' in extname_list:
gtiname = 'STDGTI'
filetype = 'evt'
    elif 'PREFILTER' in extname_list:
        filetype = 'mkf'
    else:
        raise SystemExit('No GTI, STDGTI, or PREFILTER extension found in %s' % args.input_fits)
total_exp = 0.0
if filetype == 'evt':
num_of_gti = len(hdu[gtiname].data)
print("# GTI-num START STOP Exposure(s)")
for i in range(num_of_gti):
gti_start = hdu[gtiname].data[i]['START']
gti_stop = hdu[gtiname].data[i]['STOP']
gti_exp = gti_stop - gti_start
print("%03d: %.3f %.3f %.3f" % (i+1,gti_start,gti_stop,gti_exp))
total_exp += gti_exp
elif filetype == 'mkf':
total_exp = len(hdu['PREFILTER'].data)
print("# Total exposure: %.3f (s)" % total_exp)
|
python
|
#!/usr/bin/env python3
import os.path
import json
dir_path = os.path.normpath(os.path.join(__file__, '..' , '..' , 'data'))
file_path = os.path.join(dir_path, 'config_file.json')
## get user input-----------------------------------------
ssid = input("Enter wifi ssid: ")
pswd = input("Enter wifi password: ")
name = input("Enter device name: ")
device_position = input("Enter device position: ")
pub_topic = input("Enter publish topic: ")
sub_topic = input("Enter subscribe topic: ")
logger_level = input("""
LOG_LEVEL_OFF 0 no logging
LOG_LEVEL_FATAL 1 Designates very severe error events that will presumably lead the application to abort
LOG_LEVEL_ERROR 2 Designates error events that might still allow the application to continue running
LOG_LEVEL_WARNING 3 Designates potentially harmful situations
LOG_LEVEL_INFO 4 Designates informational messages that highlight the progress of the application at coarse-grained level.
LOG_LEVEL_DEBUG 5 Designates fine-grained informational events that are most useful to debug an application.
Enter logger level number: """)
logger_output = input("""
LOG_OUTPUT_SERIAL_DIRECT 0 Logging events are printed directly over the serial interface when they occur
LOG_OUTPUT_SERIAL_BEGIN 1 Logging events are stored in the SPIFFS and printed over the serial interface at the start of the program
LOG_OUTPUT_SERIAL_DIRECT_BEGIN 2 Combination of LOG_OUTPUT_SERIAL_DIRECT and LOG_OUTPUT_SERIAL_BEGIN
LOG_OUTPUT_OFF 3 No output at all
Enter logger output number: """)
## create json--------------------------------------------
json_dict = {
"ssid":ssid,
"password":pswd,
"aws_url":"aq60dkt3q20bd-ats.iot.eu-central-1.amazonaws.com",
"aws_port":8883,
"mqtt_pub_topic":pub_topic,
"mqtt_sub_topic":sub_topic,
"device_id":name,
"device_position":device_position,
"logger_level":int(logger_level),
"logger_output":int(logger_output)
}
file_data = json.dumps(json_dict, indent=0)
## write to file------------------------------------------
if not os.path.exists(dir_path):
os.makedirs(dir_path)
with open(file_path, "w") as f:
    f.write(file_data)
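# For reference, a config_file.json produced by this script looks roughly like the
# sketch below (all values are illustrative answers to the prompts above, except
# aws_url/aws_port, which are hard-coded):
#
#   {
#   "ssid": "my-wifi",
#   "password": "my-password",
#   "aws_url": "aq60dkt3q20bd-ats.iot.eu-central-1.amazonaws.com",
#   "aws_port": 8883,
#   "mqtt_pub_topic": "devices/livingroom/out",
#   "mqtt_sub_topic": "devices/livingroom/in",
#   "device_id": "livingroom-sensor",
#   "device_position": "living room",
#   "logger_level": 4,
#   "logger_output": 1
#   }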
|
python
|
from abc import ABCMeta
from discord.ext.commands import CogMeta
from bot.utils.redis_cache import RedisCache
__all__ = ['RedisCache', 'CogABCMeta']
class CogABCMeta(CogMeta, ABCMeta):
"""Metaclass for ABCs meant to be implemented as Cogs."""
pass
def pad_base64(data: str) -> str:
"""Return base64 `data` with padding characters to ensure its length is a multiple of 4."""
return data + "=" * (-len(data) % 4)
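# A minimal usage sketch for pad_base64 (values are illustrative):
#
#   >>> import base64
#   >>> pad_base64("aGVsbG8")
#   'aGVsbG8='
#   >>> base64.b64decode(pad_base64("aGVsbG8"))
#   b'hello'
#
# Padding is often dropped from transmitted base64 segments, so this is typically
# applied before decoding.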
|
python
|
# First
# Hello World
# the "skills" below are not importable modules, so keep them as comments
# import developer skill
# import resilience skill
# import persistence skill
pythonapprentice = 'Johnny'
print(f'welcome to the python world {pythonapprentice}')
print('Learning...')
|
python
|
from rest_framework import routers
from django.urls.conf import include
from django.urls import path
from tweets.views import TweetViewSet, LikeViewSet, RetweetviewSet, CommentviewSet, index
router = routers.DefaultRouter()
router.register(r'tweets', TweetViewSet)
router.register(r'likes', LikeViewSet)
router.register(r'retweet',RetweetviewSet)
# router.register(r'trends',TrendsviewSet)
router.register(r"comment",CommentviewSet)
urlpatterns = [
path('index/', index),
path("", include(router.urls))
]
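# With DefaultRouter, the registrations above expose roughly these routes (the
# exact patterns depend on each ViewSet's configuration):
#
#   /tweets/    /tweets/{pk}/
#   /likes/     /likes/{pk}/
#   /retweet/   /retweet/{pk}/
#   /comment/   /comment/{pk}/
#
# plus the browsable API root at / and the plain function view at /index/.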
|
python
|
from Repository.eventTriggerOutputGroupingSetupValueRepo import eventTriggerOutputGroupingSetupValueRepo
from sqlalchemy import Table
from sqlalchemy.engine.base import Connection
class eventTriggerOutputGroupingSetupValueServices():
__eventTriggerOutputGroupingSetupValueRepo: eventTriggerOutputGroupingSetupValueRepo
def __init__(self, eventTriggerOutputGroupingSetupValueTable: Table, context: Connection):
self.__eventTriggerOutputGroupingSetupValueRepo = eventTriggerOutputGroupingSetupValueRepo(eventTriggerOutputGroupingSetupValueTable, context=context)
    def AddManyEventTriggerOutputGroupSetupValueWithCustomData(self, values: list):
        self.__eventTriggerOutputGroupingSetupValueRepo.InsertManyWithCustomData(values)
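# A minimal usage sketch (the engine URL, table name, and row dicts are
# assumptions, not part of this module):
#
#   from sqlalchemy import MetaData, create_engine
#
#   engine = create_engine("sqlite:///example.db")
#   metadata = MetaData()
#   metadata.reflect(bind=engine)
#   table = metadata.tables["eventTriggerOutputGroupingSetupValue"]
#
#   with engine.connect() as conn:
#       services = eventTriggerOutputGroupingSetupValueServices(table, conn)
#       services.AddManyEventTriggerOutputGroupSetupValueWithCustomData(
#           [{"SetupId": 1, "Value": "x"}])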
|
python
|
'''
Version and license information.
'''
__all__ = ['__version__', '__versiondate__', '__license__']
__version__ = '1.3.3'
__versiondate__ = '2022-01-16'
__license__ = f'Sciris {__version__} ({__versiondate__}) – © 2014-2022 by the Sciris Development Team'
|
python
|
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load("@bazel_tools//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
def _impl(ctx):
"""Core implementation of passwd_file."""
f = "%s:x:%s:%s:%s:%s:%s\n" % (
ctx.attr.username,
ctx.attr.uid,
ctx.attr.gid,
ctx.attr.info,
ctx.attr.home,
ctx.attr.shell
)
ctx.file_action(
output = ctx.outputs.out,
content = f,
executable=False
)
build_tar = ctx.executable.build_tar
args = [
"--output=" + ctx.outputs.tar.path,
"--file=%s=/etc/passwd" % ctx.outputs.out.path
]
arg_file = ctx.new_file(ctx.attr.name + ".args")
ctx.file_action(arg_file, "\n".join(args))
ctx.action(
executable = build_tar,
arguments = ["--flagfile=" + arg_file.path],
inputs = [ctx.outputs.out, arg_file],
outputs = [ctx.outputs.tar],
use_default_shell_env = True
)
passwd_file = rule(
attrs = {
"username": attr.string(mandatory = True),
"uid": attr.int(default = 1000),
"gid": attr.int(default = 1000),
"info": attr.string(default = "user"),
"home": attr.string(default = "/home"),
"shell": attr.string(default = "/bin/bash"),
"build_tar": attr.label(
default = Label("@bazel_tools//tools/build_defs/pkg:build_tar"),
cfg = "host",
executable = True,
allow_files = True,
),
},
executable = False,
outputs = {
"out": "%{name}.passwd",
"tar": "%{name}.passwd.tar",
},
implementation = _impl,
)
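# A minimal BUILD sketch using this rule (the load path and all attribute values
# are illustrative; only the attribute names and defaults come from the rule above):
#
#   load("//tools:passwd.bzl", "passwd_file")
#
#   passwd_file(
#       name = "nobody",
#       username = "nobody",
#       uid = 65534,
#       gid = 65534,
#       info = "nobody user",
#       home = "/nonexistent",
#       shell = "/usr/sbin/nologin",
#   )
#
# This declares two outputs: nobody.passwd (a single /etc/passwd line) and
# nobody.passwd.tar (that line packaged as /etc/passwd inside a tar layer).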
|
python
|
from __future__ import absolute_import
import os
import sys
import traceback
from webob.exc import HTTPNotFound, HTTPInternalServerError
from .config import Config
from .config import get_config
from .request import Request
from .response import Response
from .exceptions import PageNotFound
from .tools import import_module
from .views import View
class Router(object):
"""
Main project router that calls appropriate controller.
TODO:
- decorate each controller's call with middleware
- (done) load all controllers and their actions to dict to speedup
lookup of desired url address
"""
def __init__(self):
"""
Load all controllers.
It allow us to speed-up get controller by given url.
"""
self._controllers = {}
self._project_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
self._load_config()
self._load_controllers()
self._init_view()
def __call__(self, environ, start_response):
"""
Find appropriate controller for requested address.
Return Response object that support the WSGI interface.
"""
request = Request(environ)
try:
controller_name = request.get_controller_name()
action_name = request.get_action_name()
action_handler = self.get_action_handler(controller_name, action_name)
if not callable(action_handler):
# action handler should be a callable function
raise PageNotFound(
"Controller '{name}' doesn't have action '{action}'",
name=controller_name,
action=action_name
)
resp = action_handler(request)
if not isinstance(resp, Response):
raise Exception("Controller should return Response object, but given '{}'".format(type(resp)))
except PageNotFound as err:
message = self._format_error_message(str(err), with_traceback=True)
return HTTPNotFound(message)(environ, start_response)
except Exception as err:
message = self._format_error_message(str(err), with_traceback=True)
return HTTPInternalServerError(message)(environ, start_response)
return resp(environ, start_response)
def _load_config(self):
"""
Load config for current project.
"""
self._config = Config()
def _load_controllers(self):
"""
Load all controllers from folder 'controllers'.
Ignore files with leading underscore (for example: controllers/_blogs.py)
"""
for file_name in os.listdir(os.path.join(self._project_dir, 'controllers')):
# ignore disabled controllers
if not file_name.startswith('_'):
module_name = file_name.split('.', 1)[0]
module_path = "controllers.{}".format(module_name)
module = import_module(module_path)
# transform 'blog_articles' file name to 'BlogArticles' class
controller_class_name = module_name.title().replace('_', '')
controller_class = getattr(module, controller_class_name)
controller = controller_class()
for action_name in dir(controller):
action = getattr(controller, action_name)
if action_name.startswith('_') or not callable(action):
continue
url_path = "/".join([module_name, action_name])
self._controllers[url_path] = action
return self._controllers
def _init_view(self):
"""
Initialize View with project settings.
"""
views_engine = get_config('rails.views.engine', 'jinja')
templates_dir = os.path.join(self._project_dir, "views", "templates")
self._view = View(views_engine, templates_dir)
def _format_error_message(self, msg, with_traceback=False):
if with_traceback:
tb = traceback.format_exc()
msg += "<h3>Traceback</h3>\n\n<pre>{}</pre>".format(tb)
return msg
def get_action_handler(self, controller_name, action_name):
"""
Return the requested controller action as a callable.
If the requested action isn't found, fall back to the 'not_found' action
of the requested controller, or of the Index controller.
"""
try_actions = [
controller_name + '/' + action_name,
controller_name + '/not_found',
# call Index controller to catch all unhandled pages
'index/not_found'
]
# search first appropriate action handler
for path in try_actions:
if path in self._controllers:
return self._controllers[path]
return None
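# Hypothetical controller sketch (assumption: a file controllers/blog.py exists in
# the project; the import of Response is omitted because its path depends on how the
# framework package is named). The class name is derived from the file name
# ('blog' -> 'Blog') and every public method becomes an action served at
# '<module>/<action>', so Blog.index below answers at the URL path 'blog/index':
#
#   class Blog(object):
#       def index(self, request):
#           return Response('hello from the blog index')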
|
python
|
"""
Clean up & organize outputs from processing workflow batch.
"""
import logging
import os
import re
import zipfile
import shutil
logger = logging.getLogger(__name__)
class OutputCleaner(object):
"""
Moves, renames, and deletes individual output files from a workflow
processing batch for a selected project.
"""
def __init__(self, path):
logger.debug("creating `OutputCleaner` instance for '{}'".format(path))
self.path = path
self.output_types = self._get_output_types()
def _get_output_types(self):
"""
Identify the types of outputs included for the project.
"""
out_types = ['qc', 'metrics', 'counts', 'alignments', 'logs']
logging.debug("subfolders in project folder: {}"
.format(os.listdir(self.path)))
return [f for f in os.listdir(self.path)
if f.lower() in out_types]
def _get_output_paths(self, output_type):
"""
Return full path for individual output files.
"""
logging.debug("locating output files of type '{}'".format(output_type))
output_root = os.path.join(self.path, output_type)
return [os.path.join(self.path, root, f)
for root, dirs, files in os.walk(output_root)
for f in files
if not re.search('(DS_Store|_old)', f)]
def _unzip_output(self, path):
"""
Unzip the contents of a compressed output file.
"""
logging.debug("extracting contents of '{}' to '{}'"
.format(path, os.path.dirname(path)))
paths = []
with zipfile.ZipFile(path) as zf:
logger.debug("zip folder contents: {}".format(zf.namelist()))
for f in zf.namelist():
if f != './':
paths.append(zf.extract(f, os.path.dirname(path)))
logging.debug("unzipped the following files: {}".format(paths))
return paths
def _unnest_output(self, path):
"""
Unnest files in a subfolder by concatenating filenames and
moving up one level.
"""
logging.debug("unnesting output '{}' from subfolder '{}'"
.format(path, os.path.dirname(path)))
prefix = os.path.dirname(path)
if re.search('.zip$', path):
logging.debug("unzipping contents of '{}' before unnesting"
.format(path))
for p in self._unzip_output(path):
shutil.move(p, '{}_{}'.format(prefix, os.path.basename(p)))
try:
shutil.rmtree(os.path.splitext(path)[0])
except OSError:
pass
else:
shutil.move(path, '{}_{}'.format(prefix, os.path.basename(path)))
def _recode_output(self, path, output_type):
"""
Rename file according to template.
"""
filename_map = {'QC': ('fastqc_data.txt', 'fastqc_qc.txt')}
swap = filename_map[output_type]
newpath = re.sub(swap[0], swap[1], path)
logging.debug("renaming '{}' to '{}'".format(path, newpath))
shutil.move(path, newpath)
return newpath
def clean_outputs(self):
"""
Walk through output types to unzip, unnest, and rename files.
"""
for output_type in self.output_types:
if output_type == 'QC':
outputs = self._get_output_paths(output_type)
for o in outputs:
outregex = re.compile(output_type + '$')
if not outregex.search(os.path.dirname(o)):
self._unnest_output(o)
for o in os.listdir(os.path.join(self.path, output_type)):
self._recode_output(
os.path.join(self.path, output_type, o),
output_type
)
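# Minimal usage sketch (the project path is an assumption): point the cleaner at a
# batch output folder containing e.g. a 'QC' subfolder of zipped FastQC results,
# then unzip, unnest, and rename everything in place:
#
#   cleaner = OutputCleaner('/data/project_x')
#   cleaner.clean_outputs()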
|
python
|
# Copyright 2018 Comcast Cable Communications Management, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TODO: Add module docstring."""
import json
from datetime import datetime
import responses
from sampledata import acl_rule, forward_zone, ip4_zone, ip6_zone, sample_zone_change
from vinyldns.serdes import to_json_string, from_json_string
from vinyldns.zone import Zone, ZoneChange, ListZonesResponse, ListZoneChangesResponse
def check_zone_connections_are_same(a, b):
if a is None:
assert b is None
else:
assert a.primary_server == b.primary_server
assert a.key == b.key
assert a.name == b.name
assert a.key_name == b.key_name
def check_zones_are_same(a, b):
assert a.id == b.id
assert a.name == b.name
assert a.email == b.email
assert a.admin_group_id == b.admin_group_id
assert a.status == b.status
assert a.updated == b.updated
assert a.created == b.created
check_zone_connections_are_same(a.connection, b.connection)
check_zone_connections_are_same(a.transfer_connection, b.transfer_connection)
assert all([l.__dict__ == r.__dict__ for l, r in zip(a.acl.rules, b.acl.rules)])
def test_zone_serdes():
s = to_json_string(forward_zone)
print(json.dumps(s, indent=4))
z = from_json_string(s, Zone.from_dict)
assert z.name == forward_zone.name
assert z.connection.primary_server == forward_zone.connection.primary_server
assert all([a.__dict__ == b.__dict__ for a, b in zip(z.acl.rules, forward_zone.acl.rules)])
def test_connect_zone(mocked_responses, vinyldns_client):
mocked_responses.add(
responses.POST, 'http://test.com/zones',
body=to_json_string(sample_zone_change), status=200)
r = vinyldns_client.connect_zone(forward_zone)
check_zones_are_same(forward_zone, r.zone)
def test_update_zone(mocked_responses, vinyldns_client):
mocked_responses.add(
responses.PUT, 'http://test.com/zones/{0}'.format(forward_zone.id),
body=to_json_string(sample_zone_change), status=200)
r = vinyldns_client.update_zone(forward_zone)
check_zones_are_same(forward_zone, r.zone)
def test_abandon_zone(mocked_responses, vinyldns_client):
mocked_responses.add(
responses.DELETE, 'http://test.com/zones/{0}'.format(forward_zone.id),
body=to_json_string(sample_zone_change), status=200)
r = vinyldns_client.abandon_zone(forward_zone.id)
check_zones_are_same(forward_zone, r.zone)
def test_sync_zone(mocked_responses, vinyldns_client):
mocked_responses.add(
responses.POST, 'http://test.com/zones/{0}/sync'.format(forward_zone.id),
body=to_json_string(sample_zone_change), status=200)
r = vinyldns_client.sync_zone(forward_zone.id)
assert sample_zone_change.id == r.id
assert sample_zone_change.change_type == r.change_type
assert sample_zone_change.status == r.status
assert sample_zone_change.system_message == r.system_message
assert sample_zone_change.user_id == r.user_id
check_zones_are_same(forward_zone, r.zone)
def test_list_zones(mocked_responses, vinyldns_client):
lzr = ListZonesResponse(zones=[forward_zone, ip4_zone, ip6_zone], name_filter='*', start_from='start-from',
next_id='next', max_items=100)
mocked_responses.add(
responses.GET, 'http://test.com/zones?nameFilter=*&startFrom=start-from&maxItems=100',
body=to_json_string(lzr), status=200
)
r = vinyldns_client.list_zones('*', 'start-from', 100)
assert r.name_filter == lzr.name_filter
assert r.start_from == lzr.start_from
assert r.next_id == lzr.next_id
assert r.max_items == lzr.max_items
for l, r in zip(lzr.zones, r.zones):
check_zones_are_same(l, r)
def test_get_zone(mocked_responses, vinyldns_client):
mocked_responses.add(
responses.GET, 'http://test.com/zones/{0}'.format(forward_zone.id),
body=to_json_string({'zone': forward_zone}), status=200)
r = vinyldns_client.get_zone(forward_zone.id)
check_zones_are_same(forward_zone, r)
def test_list_zone_changes(mocked_responses, vinyldns_client):
change1 = ZoneChange(zone=forward_zone, user_id='some-user', change_type='Create', status='Pending',
created=datetime.utcnow(), system_message=None, id='zone-change-id1')
change2 = ZoneChange(zone=ip4_zone, user_id='some-user', change_type='Create', status='Pending',
created=datetime.utcnow(), system_message='msg', id='zone-change-id2')
lzcr = ListZoneChangesResponse(forward_zone.id, [change1, change2], 'next', 'start', 100)
mocked_responses.add(
responses.GET, 'http://test.com/zones/{0}/changes?startFrom=start&maxItems=100'.format(forward_zone.id),
body=to_json_string(lzcr), status=200
)
r = vinyldns_client.list_zone_changes(forward_zone.id, 'start', 100)
assert r.start_from == lzcr.start_from
assert r.next_id == lzcr.next_id
assert r.max_items == lzcr.max_items
for l, r in zip(lzcr.zone_changes, r.zone_changes):
assert l.id == r.id
assert l.user_id == r.user_id
assert l.change_type == r.change_type
assert l.status == r.status
assert l.created == r.created
assert l.system_message == r.system_message
check_zones_are_same(l.zone, r.zone)
def test_add_acl_rule(mocked_responses, vinyldns_client):
mocked_responses.add(
responses.PUT, 'http://test.com/zones/{0}/acl/rules'.format(forward_zone.id),
body=to_json_string(sample_zone_change)
)
r = vinyldns_client.add_zone_acl_rule(forward_zone.id, acl_rule)
check_zones_are_same(r.zone, sample_zone_change.zone)
def test_delete_acl_rule(mocked_responses, vinyldns_client):
mocked_responses.add(
responses.DELETE, 'http://test.com/zones/{0}/acl/rules'.format(forward_zone.id),
body=to_json_string(sample_zone_change)
)
r = vinyldns_client.delete_zone_acl_rule(forward_zone.id, acl_rule)
check_zones_are_same(r.zone, sample_zone_change.zone)
|
python
|
import sftoolbox
class Variable(object):
"""variable
"""
def __init__(self, project):
"""construct"""
project.add(self)
self.project = project
self.idname = None
def _apply_json(self, data):
"""apply the json data
"""
self.label = data.get('label')
self.idname = data.get('idname')
@classmethod
def from_json(cls, project, value):
instance = cls(project)
instance._apply_json(value)
return instance
@sftoolbox.engine.register_variable_class
class TextVariable(Variable):
"""text
"""
json_type = 'text'
def __init__(self, project):
super(TextVariable, self).__init__(project)
self._value = None
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
def set_value(self, value):
self.value = value
def get_value(self):
return self.value
def _apply_json(self, data):
super(TextVariable, self)._apply_json(data)
def from_json(project, value):
"""return a action from the given json
"""
json_type = value.get('type')
for class_ in sftoolbox.engine.variable_classes_register:
if json_type == class_.json_type:
return class_.from_json(project, value)
return Variable.from_json(project, value)
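# Minimal usage sketch (assumes a sftoolbox project object that supports .add()):
# a JSON description with type 'text' resolves to TextVariable via the registry,
# while any unknown type falls back to the plain Variable class:
#
#   var = from_json(project, {'type': 'text', 'idname': 'greeting', 'label': 'Greeting'})
#   var.set_value('hello')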
|
python
|
# Generated by Django 3.2.7 on 2021-12-07 12:54
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('insta', '0002_auto_20211206_1623'),
]
operations = [
migrations.RenameField(
model_name='image',
old_name='comments',
new_name='comment',
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment', models.CharField(max_length=250)),
('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='insta.image')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to=settings.AUTH_USER_MODEL)),
],
),
]
|
python
|
# Copyright 2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnabla as nn
import nnabla.functions as F
from nnabla.utils.image_utils import imsave
import numpy as np
import random
def save_generations(rgb_output, filepath, drange=[-1, 1], return_images=False):
"""
Save generated images
"""
if return_images:
images = []
for i in range(rgb_output.shape[0]):
scale = 255 / (drange[1] - drange[0])
if isinstance(rgb_output, nn.Variable):
image = rgb_output.d[i] * scale + (0.5 - drange[0] * scale)
else:
image = rgb_output.data[i] * scale + (0.5 - drange[0] * scale)
if return_images:
images.append(np.uint8(np.clip(image, 0, 255)))
else:
imsave(f'{filepath}_{i}.png', np.uint8(
np.clip(image, 0, 255)), channel_first=True)
print(f'Output saved. Saved {filepath}_{i}.png')
if return_images:
return images
def collect_data(data):
data = [np.expand_dims(d, 1) for d in data]
data = np.concatenate(data, 1)
return data
def mixing_noise(batch_size, latent_dim, mixing_prob, seed):
rnd = np.random.RandomState(seed=seed[0])
z = rnd.randn(batch_size, latent_dim).astype(np.float32)
if mixing_prob > 0 and random.random() < mixing_prob:
rnd_2 = np.random.RandomState(seed=seed[1])
z_1 = z
z_2 = rnd_2.randn(batch_size, latent_dim).astype(np.float32)
else:
z_1 = z_2 = z
return z_1, z_2
def slerp(noise_1, noise_2, ratio):
interpolated_noises = []
for a, b in zip(noise_1, noise_2):
a_norm = F.pow_scalar(
F.sum(F.pow_scalar(a, 2), axis=1, keepdims=True), 0.5)
b_norm = F.pow_scalar(
F.sum(F.pow_scalar(b, 2), axis=1, keepdims=True), 0.5)
a /= a_norm
b /= b_norm
d = F.sum(a*b, axis=1, keepdims=True)
p = ratio*F.acos(d)
c = b-d*a
c_norm = F.pow_scalar(
F.sum(F.pow_scalar(c, 2), axis=1, keepdims=True), 0.5)
c /= c_norm
d = a*F.cos(p) + c*F.sin(p)
d = d/F.pow_scalar(F.sum(F.pow_scalar(d, 2),
axis=1, keepdims=True), 0.5)
interpolated_noises.append(d)
return interpolated_noises
def lerp(a, b, t):
return a + (b - a) * t
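# Minimal usage sketch (batch size and latent dimension are illustrative): draw a
# pair of latent codes (identical unless the mixing draw fires) and blend them:
#
#   z_1, z_2 = mixing_noise(batch_size=4, latent_dim=512, mixing_prob=0.9, seed=(100, 101))
#   blended = lerp(z_1, z_2, 0.5)   # plain linear interpolation on the numpy arrays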
|
python
|
#! /usr/bin/env python
'''
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
Quick description:
'''
# standard library
from math import *
from copy import deepcopy
# local library
import inkex
import pathmodifier
import cubicsuperpath
import bezmisc
import simplepath
import simpletransform
def getColorAndOpacity(longColor):
'''
Convert the long into a #rrggbb color value
Conversion back is A + B*256^1 + G*256^2 + R*256^3
'''
longColor = long(longColor)
if longColor < 0:
longColor = longColor & 0xFFFFFFFF
hexColor = hex(longColor)
hexOpacity = hexColor[-3:-1]
hexColor = '#' + hexColor[2:-3].rjust(6, '0')
return (hexColor, hexOpacity)
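# Worked example (not from the source, computed from the packing described above):
# the default stroke value 255 is 0x000000FF, i.e. black with full alpha, so
# getColorAndOpacity(255) returns ('#000000', 'ff').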
def setColorAndOpacity(style, color, opacity):
declarations = style.split(';')
strokeOpacityInStyle = False
newOpacity = round((int(opacity, 16) / 255.0), 8)
for i,decl in enumerate(declarations):
parts = decl.split(':', 2)
if len(parts) == 2:
(prop, val) = parts
prop = prop.strip().lower()
if (prop == 'stroke' and val != color):
declarations[i] = prop + ':' + color
if prop == 'stroke-opacity':
if val != newOpacity:
declarations[i] = prop + ':' + str(newOpacity)
strokeOpacityInStyle = True
if not strokeOpacityInStyle:
declarations.append('stroke-opacity' + ':' + str(newOpacity))
return ";".join(declarations)
def getSkeletonPath(d, offs):
'''
Receives the current skeleton path and, if it is a line, the offset specified by the user.
Calculates a new skeleton path to use for creating the contour with the given offset.
'''
if offs != 0:
comps = d.split()
if ((comps[2] == 'h' or comps[2] == 'H') and len(comps) == 4):
startPt = comps[1].split(',')
startX = float(startPt[0])
startY = float(startPt[1])
finalX = float(comps[3]) if comps[2] == 'H' else startX + float(comps[3])
if startX < finalX:
startY -= offs
else:
startY += offs
comps[1] = startPt[0] + ',' + str(startY)
elif ((comps[2] == 'v' or comps[2] == 'V') and len(comps) == 4):
startPt = comps[1].split(',')
startX = float(startPt[0])
startY = float(startPt[1])
finalY = float(comps[3]) if comps[2] == 'V' else startY + float(comps[3])
if startY < finalY:
startX += offs
else:
startX -= offs
comps[1] = str(startX) + ',' + startPt[1]
elif (comps[0] == 'M' and len(comps) == 3):
startPt = comps[1].split(',')
startX = float(startPt[0])
startY = float(startPt[1])
finalPt = comps[2].split(',')
finalX = float(finalPt[0])
finalY = float(finalPt[1])
if startX < finalX:
if (startY > finalY):
startX -= offs
finalX -= offs
else:
startX += offs
finalX += offs
startY -= offs
finalY -= offs
else:
if startY > finalY:
startX -= offs
finalX -= offs
else:
startX += offs
finalX += offs
startY += offs
finalY += offs
comps[1] = str(startX) + ',' + str(startY)
comps[2] = str(finalX) + ',' + str(finalY)
elif (comps[0] == 'm' and len(comps) == 3):
startPt = comps[1].split(',')
startX = float(startPt[0])
startY = float(startPt[1])
finalPt = comps[2].split(',')
dx = float(finalPt[0])
dy = float(finalPt[1])
finalX = startX + dx
finalY = startY + dy
if startX < finalX:
if startY > finalY:
startX -= offs
else:
startX += offs
startY -= offs
else:
if startY > finalY:
startX -= offs
else:
startX += offs
startY += offs
comps[1] = str(startX) + ',' + str(startY)
comps[2] = str(dx) + ',' + str(dy)
return cubicsuperpath.parsePath(' '.join(comps))
return cubicsuperpath.parsePath(d)
def modifySkeletonPath(skelPath):
resPath = []
l = len(skelPath)
resPath += skelPath[0]
if l > 1:
for i in range(1, l):
if skelPath[i][0][1] == resPath[-1][1]:
skelPath[i][0][0] = resPath[-1][0]
del resPath[-1]
resPath += skelPath[i]
return resPath
def linearize(p, tolerance=0.001):
'''
This function receives a component of a 'cubicsuperpath' and returns two things:
the path subdivided into many straight segments, and an array containing the length of each segment.
'''
zero = 0.000001
i = 0
d = 0
lengths=[]
while i < len(p) - 1:
box = bezmisc.pointdistance(p[i][1], p[i][2])
box += bezmisc.pointdistance(p[i][2], p[i+1][0])
box += bezmisc.pointdistance(p[i+1][0], p[i+1][1])
chord = bezmisc.pointdistance(p[i][1], p[i+1][1])
if (box - chord) > tolerance:
b1, b2 = bezmisc.beziersplitatt([p[i][1], p[i][2], p[i + 1][0], p[i + 1][1]], 0.5)
p[i][2][0], p[i][2][1] = b1[1]
p[i + 1][0][0], p[i + 1][0][1] = b2[2]
p.insert(i + 1, [[b1[2][0], b1[2][1]], [b1[3][0], b1[3][1]], [b2[1][0], b2[1][1]]])
else:
d = (box + chord) / 2
lengths.append(d)
i += 1
new = [p[i][1] for i in range(0, len(p) - 1) if lengths[i] > zero]
new.append(p[-1][1])
lengths = [l for l in lengths if l > zero]
return (new, lengths)
def isSkeletonClosed(sklCmp):
cntOfDgts = 2
if (round(sklCmp[0][0], cntOfDgts) != round(sklCmp[-1][0], cntOfDgts) or round(sklCmp[0][1], cntOfDgts) != round(sklCmp[-1][1], cntOfDgts)):
return False
return True
def getPolygonCentroid(polygon):
x = 0
y = 0
n = len(polygon)
for vert in polygon:
x += vert[0]
y += vert[1]
x = x / n
y = y / n
return [x, y]
def getPoint(p1, p2, x, y):
x1 = p1[0]
y1 = p1[1]
x2 = p2[0]
y2 = p2[1]
a = (y1 - y2) / (x1 - x2)
b = y1 - a * x1
if x == None:
x = (y - b) / a
else:
y = a * x + b
return [x, y]
def getPtOnSeg(p1, p2, segLen, l):
if p1[0] == p2[0]:
return [p2[0], p2[1] - l] if p2[1] < p1[1] else [p2[0], p2[1] + l]
if p1[1] == p2[1]:
return [p2[0] - l, p2[1]] if p2[0] < p1[0] else [p2[0] + l, p2[1]]
dy = abs(p1[1] - p2[1])
angle = asin(dy / segLen)
dx = l * cos(angle)
x = p1[0] - dx if p1[0] > p2[0] else p1[0] + dx
return getPoint(p1, p2, x, None)
def drawfunction(nodes, width, fx):
# x-bounds of the plane
xstart = 0.0
xend = 2 * pi
# y-bounds of the plane
ybottom = -1.0
ytop = 1.0
# size and location of the plane on the canvas
height = 2
left = 15
bottom = 15 + height
# function specified by the user
try:
if fx != "":
f = eval('lambda x: ' + fx.strip('"'))
except SyntaxError:
return []
scalex = width / (xend - xstart)
xoff = left
# convert x-value to canvas coordinate
coordx = lambda x: (x - xstart) * scalex + xoff
scaley = height / (ytop - ybottom)
yoff = bottom
# convert y-value to canvas coordinate
coordy = lambda y: (ybottom - y) * scaley + yoff
# step is the distance between nodes on x
step = (xend - xstart) / (nodes - 1)
third = step / 3.0
# step used in calculating derivatives
ds = step * 0.001
# initialize function and derivative for 0;
# they are carried over from one iteration to the next, to avoid extra function calculations.
x0 = xstart
y0 = f(xstart)
# numerical derivative, using 0.001*step as the small differential
x1 = xstart + ds # Second point AFTER first point (Good for first point)
y1 = f(x1)
dx0 = (x1 - x0) / ds
dy0 = (y1 - y0) / ds
# path array
a = []
# Start curve
a.append(['M ', [coordx(x0), coordy(y0)]])
for i in range(int(nodes - 1)):
x1 = (i + 1) * step + xstart
x2 = x1 - ds # Second point BEFORE first point (Good for last point)
y1 = f(x1)
y2 = f(x2)
# numerical derivative
dx1 = (x1 - x2) / ds
dy1 = (y1 - y2) / ds
# create curve
a.append([' C ', [coordx(x0 + (dx0 * third)), coordy(y0 + (dy0 * third)),
coordx(x1 - (dx1 * third)), coordy(y1 - (dy1 * third)),
coordx(x1), coordy(y1)]])
# Next segment's start is this segment's end
x0 = x1
y0 = y1
# Assume the function is smooth everywhere, so carry over the derivative too
dx0 = dx1
dy0 = dy1
return a
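# Usage note (illustrative values): drawfunction(nodes=20, width=100.0, fx='sin(x)')
# returns a simplepath-style list -- one ['M ', ...] start point followed by
# nodes - 1 [' C ', ...] cubic segments approximating one period of f(x) over [0, 2*pi].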
def offset(pathComp, dx, dy):
for ctl in pathComp:
for pt in ctl:
pt[0] += dx
pt[1] += dy
def stretch(pathComp, xscale, yscale, org):
for ctl in pathComp:
for pt in ctl:
pt[0] = org[0] + (pt[0] - org[0]) * xscale
pt[1] = org[1] + (pt[1] - org[1]) * yscale
class GuillocheContour(pathmodifier.PathModifier):
def __init__(self):
pathmodifier.PathModifier.__init__(self)
self.OptionParser.add_option("--tab",
action="store", type="string",
dest="tab", default="contour",
help="Active tab")
self.OptionParser.add_option("--contourFunction",
action="store", type="string",
dest="contourFunction", default="sin",
help="Function of the contour")
self.OptionParser.add_option("--frequency",
action="store", type="int",
dest="frequency", default=10,
help="Frequency of the function")
self.OptionParser.add_option("--amplitude",
action="store", type="int",
dest="amplitude", default=1,
help="Amplitude of the function")
self.OptionParser.add_option("--phaseOffset",
action="store", type="int",
dest="phaseOffset", default=0,
help="Phase offset of the function")
self.OptionParser.add_option("--offset",
action="store", type="int",
dest="offset", default=0,
help="Offset of the function")
self.OptionParser.add_option("--nodes",
action="store", type="int",
dest="nodes", default=20,
help="Count of nodes")
self.OptionParser.add_option("--remove",
action="store", type="inkbool",
dest="remove", default=False,
help="If True, control object will be removed")
self.OptionParser.add_option("--strokeColor",
action="store", type="string",
dest="strokeColor", default=255,
help="The line's color")
self.OptionParser.add_option("--amplitude1",
action="store", type="float",
dest="amplitude1", default=0.0,
help="Amplitude of first harmonic")
self.OptionParser.add_option("--phase1",
action="store", type="int",
dest="phase1", default=0,
help="Phase offset of first harmonic")
self.OptionParser.add_option("--amplitude2",
action="store", type="float",
dest="amplitude2", default=0.0,
help="Amplitude of second harmonic")
self.OptionParser.add_option("--phase2",
action="store", type="int",
dest="phase2", default=0,
help="Phase offset of second harmonic")
self.OptionParser.add_option("--amplitude3",
action="store", type="float",
dest="amplitude3", default=0.0,
help="Amplitude of third harmonic")
self.OptionParser.add_option("--phase3",
action="store", type="int",
dest="phase3", default=0,
help="Phase offset of third harmonic")
self.OptionParser.add_option("--amplitude4",
action="store", type="float",
dest="amplitude4", default=0.0,
help="Amplitude of fourth harmonic")
self.OptionParser.add_option("--phase4",
action="store", type="int",
dest="phase4", default=0,
help="Phase offset of fourth harmonic")
self.OptionParser.add_option("--amplitude5",
action="store", type="float",
dest="amplitude5", default=0.0,
help="Amplitude of fifth harmonic")
self.OptionParser.add_option("--phase5",
action="store", type="int",
dest="phase5", default=0,
help="Phase offset of fifth harmonic")
def prepareSelectionList(self):
self.skeletons = self.selected
self.expandGroupsUnlinkClones(self.skeletons, True, False)
self.objectsToPaths(self.skeletons)
def linearizePath(self, skelPath, offs):
comps, lengths = linearize(skelPath)
self.skelCompIsClosed = isSkeletonClosed(comps)
if (self.skelCompIsClosed and offs != 0):
centroid = getPolygonCentroid(comps)
for i in range(len(comps)):
pt1 = comps[i]
dist = bezmisc.pointdistance(centroid, pt1)
comps[i] = getPtOnSeg(centroid, pt1, dist, dist + offs)
if i > 0:
lengths[i - 1] = bezmisc.pointdistance(comps[i - 1], comps[i])
return (comps, lengths)
def getFunction(self, func):
res = ''
presetAmp1 = presetAmp2 = presetAmp3 = presetAmp4 = presetAmp5 = 0.0
presetPhOf1 = presetPhOf2 = presetPhOf3 = presetPhOf4 = presetPhOf5 = presetOffs = 0
if (func == 'sin' or func == 'cos'):
return '(' + str(self.options.amplitude) + ') * ' + func + '(x + (' + str(self.options.phaseOffset / 100.0 * 2 * pi) + '))'
if func == 'env1':
presetAmp1 = presetAmp3 = 0.495
elif func == 'env2':
presetAmp1 = presetAmp3 = 0.65
presetPhOf1 = presetPhOf3 = 25
elif func == 'env3':
presetAmp1 = 0.75
presetPhOf1 = 25
presetAmp3 = 0.24
presetPhOf3 = -25
elif func == 'env4':
presetAmp1 = 1.105
presetAmp3 = 0.27625
presetPhOf3 = 50
elif func == 'env5':
presetAmp1 = 0.37464375
presetPhOf1 = 25
presetAmp2 = 0.5655
presetAmp3 = 0.37464375
presetPhOf3 = -25
elif func == 'env6':
presetAmp1 = 0.413725
presetPhOf1 = 25
presetAmp2 = 0.45695
presetPhOf2 = 50
presetAmp3 = 0.494
presetPhOf3 = -25
elif func == 'env7':
presetAmp1 = 0.624
presetPhOf1 = 25
presetAmp2 = 0.312
presetAmp3 = 0.624
presetPhOf3 = 25
elif func == 'env8':
presetAmp1 = 0.65
presetPhOf1 = 50
presetAmp2 = 0.585
presetAmp3 = 0.13
elif func == 'env9':
presetAmp1 = 0.07605
presetPhOf1 = 25
presetAmp2 = 0.33345
presetPhOf2 = 50
presetAmp3 = 0.468
presetPhOf3 = -25
presetAmp4 = 0.32175
elif func == 'env10':
presetAmp1 = 0.3575
presetPhOf1 = -25
presetAmp2 = 0.3575
presetAmp3 = 0.3575
presetPhOf3 = 25
presetAmp4 = 0.3575
presetPhOf4 = 50
elif func == 'env11':
presetAmp1 = 0.65
presetPhOf1 = 25
presetAmp2 = 0.13
presetPhOf2 = 50
presetAmp3 = 0.26
presetPhOf3 = 25
presetAmp4 = 0.39
elif func == 'env12':
presetAmp1 = 0.5525
presetPhOf1 = -25
presetAmp2 = 0.0414375
presetPhOf2 = 50
presetAmp3 = 0.15884375
presetPhOf3 = 25
presetAmp4 = 0.0966875
presetAmp5 = 0.28315625
presetPhOf5 = -25
harm1 = '(' + str(self.options.amplitude * (presetAmp1 + self.options.amplitude1)) + ') * cos(1 * (x + (' + str(self.options.phaseOffset / 100.0 * 2 * pi) + ')) - (' + str((presetPhOf1 + self.options.phase1) / 100.0 * 2 * pi) + '))'
harm2 = '(' + str(self.options.amplitude * (presetAmp2 + self.options.amplitude2)) + ') * cos(2 * (x + (' + str(self.options.phaseOffset / 100.0 * 2 * pi) + ')) - (' + str((presetPhOf2 + self.options.phase2) / 100.0 * 2 * pi) + '))'
harm3 = '(' + str(self.options.amplitude * (presetAmp3 + self.options.amplitude3)) + ') * cos(3 * (x + (' + str(self.options.phaseOffset / 100.0 * 2 * pi) + ')) - (' + str((presetPhOf3 + self.options.phase3) / 100.0 * 2 * pi) + '))'
harm4 = '(' + str(self.options.amplitude * (presetAmp4 + self.options.amplitude4)) + ') * cos(4 * (x + (' + str(self.options.phaseOffset / 100.0 * 2 * pi) + ')) - (' + str((presetPhOf4 + self.options.phase4) / 100.0 * 2 * pi) + '))'
harm5 = '(' + str(self.options.amplitude * (presetAmp5 + self.options.amplitude5)) + ') * cos(5 * (x + (' + str(self.options.phaseOffset / 100.0 * 2 * pi) + ')) - (' + str((presetPhOf5 + self.options.phase5) / 100.0 * 2 * pi) + '))'
res = harm1 + ' + ' + harm2 + ' + ' + harm3 + ' + ' + harm4 + ' + ' + harm5
return res
def lengthToTime(self, l):
'''
Receives an arc length l and returns the index of the segment in self.skelComp
containing the corresponding point, together with the position of the point on this segment.
If the deformer is closed, do computations modulo the total length.
'''
if self.skelCompIsClosed:
l = l % sum(self.lengths)
if l <= 0:
return 0, l / self.lengths[0]
i = 0
while (i < len(self.lengths)) and (self.lengths[i] <= l):
l -= self.lengths[i]
i += 1
t = l / self.lengths[min(i, len(self.lengths) - 1)]
return (i, t)
def applyDiffeo(self, bpt, vects=()):
'''
The kernel of this stuff:
bpt is a base point and, for each v in vects, v' = v - bpt is treated as a tangent vector at bpt.
'''
s = bpt[0] - self.skelComp[0][0]
i, t = self.lengthToTime(s)
if i == len(self.skelComp) - 1:
x, y = bezmisc.tpoint(self.skelComp[i - 1], self.skelComp[i], t + 1)
dx = (self.skelComp[i][0] - self.skelComp[i - 1][0]) / self.lengths[-1]
dy = (self.skelComp[i][1] - self.skelComp[i - 1][1]) / self.lengths[-1]
else:
x, y = bezmisc.tpoint(self.skelComp[i], self.skelComp[i + 1], t)
dx = (self.skelComp[i + 1][0] - self.skelComp[i][0]) / self.lengths[i]
dy = (self.skelComp[i + 1][1] - self.skelComp[i][1]) / self.lengths[i]
vx = 0
vy = bpt[1] - self.skelComp[0][1]
bpt[0] = x + vx * dx - vy * dy
bpt[1] = y + vx * dy + vy * dx
for v in vects:
vx = v[0] - self.skelComp[0][0] - s
vy = v[1] - self.skelComp[0][1]
v[0] = x + vx * dx - vy * dy
v[1] = y + vx * dy + vy * dx
def effect(self):
if len(self.options.ids) < 1:
inkex.errormsg(_("This extension requires one selected path."))
return
self.prepareSelectionList()
for skeleton in self.skeletons.itervalues():
resPath = []
pattern = inkex.etree.Element(inkex.addNS('path','svg'))
self.options.strokeHexColor, self.strokeOpacity = getColorAndOpacity(self.options.strokeColor)
# Copy style of skeleton with setting color and opacity
s = skeleton.get('style')
if s:
pattern.set('style', setColorAndOpacity(s, self.options.strokeHexColor, self.strokeOpacity))
skeletonPath = modifySkeletonPath(getSkeletonPath(skeleton.get('d'), self.options.offset))
self.skelComp, self.lengths = self.linearizePath(skeletonPath, self.options.offset)
length = sum(self.lengths)
patternWidth = length / self.options.frequency
selectedFunction = self.getFunction(self.options.contourFunction)
pattern.set('d', simplepath.formatPath(drawfunction(self.options.nodes, patternWidth, selectedFunction)))
# Add path into SVG structure
skeleton.getparent().append(pattern)
if self.options.remove:
skeleton.getparent().remove(skeleton)
# Compute bounding box
bbox = simpletransform.computeBBox([pattern])
width = bbox[1] - bbox[0]
dx = width
if dx < 0.01:
exit(_("The total length of the pattern is too small."))
patternPath = cubicsuperpath.parsePath(pattern.get('d'))
curPath = deepcopy(patternPath)
xoffset = self.skelComp[0][0] - bbox[0]
yoffset = self.skelComp[0][1] - (bbox[2] + bbox[3]) / 2
patternCopies = max(1, int(round(length / dx)))
width = dx * patternCopies
newPath = []
# Repeat pattern to cover whole skeleton
for subPath in curPath:
for i in range(0, patternCopies, 1):
newPath.append(deepcopy(subPath))
offset(subPath, dx, 0)
curPath = newPath
# Offset pattern to the first node of the skeleton
for subPath in curPath:
offset(subPath, xoffset, yoffset)
# Stretch pattern to whole skeleton
for subPath in curPath:
stretch(subPath, length / width, 1, self.skelComp[0])
for subPath in curPath:
for ctlpt in subPath:
self.applyDiffeo(ctlpt[1], (ctlpt[0], ctlpt[2]))
# Check if there is a need to close path manually
if self.skelCompIsClosed:
firstPtX = round(curPath[0][0][1][0], 8)
firstPtY = round(curPath[0][0][1][1], 8)
finalPtX = round(curPath[-1][-1][1][0], 8)
finalPtY = round(curPath[-1][-1][1][1], 8)
if (firstPtX != finalPtX or firstPtY != finalPtY):
curPath[-1].append(curPath[0][0])
resPath += curPath
pattern.set('d', cubicsuperpath.formatPath(resPath))
if __name__ == '__main__':
e = GuillocheContour()
e.affect()
# vim: expandtab shiftwidth=4 tabstop=8 softtabstop=4 fileencoding=utf-8 textwidth=99
|
python
|
import cv2
from collections import defaultdict
from utils.timer import Timer
_GRAY = [218, 227, 218]
_RED = [0, 0, 255]
_GREEN = [18, 127, 15]
_BLUE = [255, 144, 30]
_WHITE = [255, 255, 255]
_BLACK = [0, 0, 0]
colors = [_RED, _GREEN, _BLUE, _WHITE]
def get_class_string(class_index, score, dataset):
class_text = dataset.classes[class_index] if dataset is not None else 'id{:d}'.format(class_index)
return class_text + ' {:0.2f}'.format(score).lstrip('0')
def vis_quad(img, cfg_vis, quad, color=None):
border_thick = cfg_vis.SHOW_QUAD_BOX.BORDER_THICK
for j in range(4):
str_point = (quad[j * 2], quad[j * 2 + 1])
end_point = (quad[((j + 1) * 2) % len(quad)], quad[(((j + 1) * 2 + 1) % len(quad))])
if color is not None:
cv2.line(img, str_point, end_point, color, thickness=border_thick)
else:
cv2.line(img, str_point, end_point, _BLUE, thickness=border_thick)
cv2.circle(img, (quad[0], quad[1]), cfg_vis.SHOW_QUAD_BOX.CENTER_RADIUS, (0, 0, 255), -1)
return img
def vis_point(img, cfg_vis, point, color):
cv2.circle(img, (point[0], point[1]), cfg_vis.SHOW_QUAD_BOX.CENTER_RADIUS, color, -1)
return img
def vis_class(img, cfg_vis, pos, class_str, bg_color):
"""Visualizes the class."""
font_color = cfg_vis.SHOW_CLASS.COLOR
font_scale = cfg_vis.SHOW_CLASS.FONT_SCALE
x0, y0 = int(pos[0]), int(pos[1])
# Compute text size.
txt = class_str
font = cv2.FONT_HERSHEY_SIMPLEX
((txt_w, txt_h), _) = cv2.getTextSize(txt, font, font_scale, 1)
# Place text background.
back_tl = x0, y0 - int(1.3 * txt_h)
back_br = x0 + txt_w, y0
cv2.rectangle(img, back_tl, back_br, bg_color, -1)
# Show text.
txt_tl = x0, y0 - int(0.3 * txt_h)
cv2.putText(img, txt, txt_tl, font, font_scale, font_color, lineType=cv2.LINE_AA)
return img
def vis_one_image_opencv(im, cfg_vis, boxes=None, classes=None, dataset=None):
"""Constructs a numpy array with the detections visualized."""
timers = defaultdict(Timer)
timers['bbox_prproc'].tic()
if boxes is None or boxes.shape[0] == 0 or max(boxes[:, -1]) < cfg_vis.VIS_TH:
return im
timers['bbox_prproc'].toc()
for i in range(boxes.shape[0]):
quad = boxes[i, :-1]
score = boxes[i, -1]
if score < cfg_vis.VIS_TH:
continue
if cfg_vis.SHOW_QUAD_BOX.ENABLED:
timers['show_quad_box'].tic()
if len(quad) == 8:
im = vis_quad(im, cfg_vis, quad)
elif len(quad) == 10:
im = vis_quad(im, cfg_vis, quad[:8])
center = quad[8:10]
im = vis_point(im, cfg_vis, center, _GRAY)
timers['show_quad_box'].toc()
# show class (on by default)
if cfg_vis.SHOW_CLASS.ENABLED:
timers['show_class'].tic()
class_str = get_class_string(classes[i], score, dataset)
im = vis_class(im, cfg_vis, (quad[0], quad[1] - 2), class_str, _BLACK)
timers['show_class'].toc()
return im
|
python
|
# The parsing logic is heavily borrowed from the python-nubia project, available at:
# https://github.com/facebookincubator/python-nubia
#
# In compliance with python-nubia's BSD-style license, its copyright and license terms
# are included below:
#
# BSD License
#
# For python-nubia software
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name Facebook nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
import pyparsing as pp
from enum import auto, Enum
from functools import lru_cache
from typing import NamedTuple
from prompt_toolkit.document import Document
from ..errors import PartialParseError, TotalParseError
class Patterns:
ALLOWED_SYMBOLS_IN_STRING = r'-_/#@£$€%*+~|<>?.'
IDENTIFIER = r'([a-zA-Z_][a-zA-Z0-9_\-]*)'
WHITESPACE = r'\s+'
UNQUOTED_STRING = r'([a-zA-Z0-9' + ALLOWED_SYMBOLS_IN_STRING + r']+)'
STRING_SINGLE_QUOTE = r"\'([^\\\']|\\.)*\'"
STRING_DOUBLE_QUOTE = r'\"([^\\\"]|\\.)*\"'
BOOLEAN = r'(True|true|False|false)'
FLOAT = r'\-?\d+\.\d*([eE]\d+)?'
INTEGER = r'\-?\d+'
KWARG = IDENTIFIER + r'(\s*=\s*)'
COMMAND = r'^' + IDENTIFIER + r'(\s+|$)'
@staticmethod
def is_valid_identifier(
s: str
) -> bool:
"""Whether the specified string is a valid command name or kwarg identifier."""
return bool(re.fullmatch(Patterns.IDENTIFIER, s))
def _no_transform(x):
return x
def _bool_transform(x):
return x in ('True', 'true',)
def _str_transform(x):
return x.strip('"\'')
_TRANSFORMS = {
'bool': _bool_transform,
'str': _str_transform,
'int': int,
'float': float,
'dict': dict,
}
def _parse_type(data_type):
transform = _TRANSFORMS.get(data_type, _no_transform)
def _parse(s, loc, toks):
return [transform(x) for x in toks]
return _parse
# Valid identifiers cannot start with a number, but may contain them in their body.
identifier = pp.Word(pp.alphas + '_-', pp.alphanums + '_-')
# XXX: allow for hex?
int_value = pp.Regex(Patterns.INTEGER).setParseAction(_parse_type('int'))
float_value = pp.Regex(Patterns.FLOAT).setParseAction(_parse_type('float'))
bool_value = (
pp.Literal('True') ^ pp.Literal('true') ^
pp.Literal('False') ^ pp.Literal('false')
).setParseAction(_parse_type('bool'))
quoted_string = pp.quotedString.setParseAction(_parse_type('str'))
unquoted_string = pp.Word(
pp.alphanums + Patterns.ALLOWED_SYMBOLS_IN_STRING
).setParseAction(_parse_type('str'))
string_value = quoted_string | unquoted_string
single_value = bool_value | float_value | int_value | string_value
list_value = pp.Group(
pp.Suppress('[') +
pp.Optional(pp.delimitedList(single_value)) +
pp.Suppress(']')
).setParseAction(_parse_type('list'))
dict_value = pp.Forward()
value = list_value ^ single_value ^ dict_value
dict_key_value = pp.dictOf(string_value + pp.Suppress(':'), value)
dict_value << pp.Group(
pp.Suppress('{') + pp.delimitedList(dict_key_value) + pp.Suppress('}')
).setParseAction(_parse_type('dict'))
# Positionals must be end of line or has a space (or more) afterwards.
# This is to ensure that the parser treats text like "something=" as invalid
# instead of parsing this as positional "something" and leaving the "=" as
# invalid on its own.
positionals = pp.ZeroOrMore(
value + (pp.StringEnd() ^ pp.Suppress(pp.OneOrMore(pp.White())))
).setResultsName('positionals')
key_value = pp.Dict(pp.ZeroOrMore(pp.Group(
identifier + pp.Suppress('=') + value
))).setResultsName('kv')
command = identifier.setResultsName('command')
command_line = command + positionals + key_value
class ParseState(Enum):
FULL = auto()
PARTIAL = auto()
NONE = auto()
class ParseStatus(NamedTuple):
results: pp.ParseResults
unparsed_text: str
unparsed_start_pos: int
state: ParseState
@lru_cache()
def parse_cmd_line(
text: str
) -> ParseStatus:
"""Attempt to parse a command line, returning a :class:`ParseStatus` object."""
try:
parse_results = _raw_parse_cmd_line(text)
unparsed_text = ''
unparsed_start_pos = len(text)
parse_state = ParseState.FULL
except PartialParseError as e:
parse_results = e.partial_result
unparsed_text = e.remaining
unparsed_start_pos = e.error_pos
parse_state = ParseState.PARTIAL
except TotalParseError:
parse_results = None
unparsed_text = text
unparsed_start_pos = 0
parse_state = ParseState.NONE
return ParseStatus(parse_results, unparsed_text, unparsed_start_pos, parse_state)
def _raw_parse_cmd_line(
text: str
) -> pp.ParseResults:
"""Attempt to parse the command line as per the grammar defined in this module.
If the specified text can be fully parsed, then a `pyparsing.ParseResults` will be
returned with the following attributes:
* command: The name or alias of the command.
* kv: A dictionary of key-value pairs representing the keyword arguments.
* positionals: Any positional argument values.
Otherwise, a descendant of :class:`CommandParseError` is raised.
Raises:
:class:`PartialParseError`: If the specified text can be partially
parsed, but errors still exist.
:class:`TotalParseError`: If the text cannot even be partially parsed.
"""
try:
result = command_line.parseString(text, parseAll=True)
return result
except pp.ParseException as e:
remaining = e.markInputline()
remaining = remaining[(remaining.find('>!<') + 3):]
try:
partial_result = command_line.parseString(text, parseAll=False)
except pp.ParseException as ee:
raise TotalParseError(str(ee)) from None
new_exc = PartialParseError(str(e), remaining, partial_result, e.col)
raise new_exc from None
class IncompleteToken:
"""Encapsulation of a token that could only be partially parsed."""
def __init__(
self,
token: str
) -> None:
self._token = token
self._key = ''
self._value = ''
self._is_kw_arg = False
self._is_pos_arg = False
self._parse()
def _parse(
self
) -> None:
key, delim, value = self._token.partition('=')
if any(x in key for x in '[]{}"\''):
# Treat the whole token as a positional value.
self._is_pos_arg = True
self._value = self._token
return
if delim == '=':
# This is a key=value.
self._is_kw_arg = True
self._key = key
self._value = value
else:
# This could either be the beginning of something like key=value or the
# positional literal keywest.
self._key = self._value = key
@property
def is_kw_arg(
self
) -> bool:
return self._is_kw_arg
@property
def is_pos_arg(
self
) -> bool:
return self._is_pos_arg
@property
def is_ambiguous_arg(
self
) -> bool:
return not self._is_kw_arg and not self._is_pos_arg
@property
def key(
self
) -> str:
return self._key
@property
def value(
self
) -> str:
return self._value
def __str__(
self
) -> str:
if self.is_kw_arg:
return f'kwarg {self._key}={self._value}'
elif self.is_pos_arg:
return f'positional {self._value}'
elif self.is_ambiguous_arg:
return f'ambiguous {self._key}'
else:
return 'Parse error'
def __repr__(
self
) -> str:
return f'<{self.__class__.__qualname__} [{str(self)}]>'
def last_incomplete_token(
document: Document,
unparsed_text: str
) -> IncompleteToken:
if document.char_before_cursor in ' ]}':
last_token = ''
else:
last_space = document.find_backwards(' ', in_current_line=True)
if last_space is None:
last_space = -1
last_token = document.text[last_space+1:]
# The longer of the last_token and unparsed_text is taken in the event that the
# unparsed_text is an open literal, which could itself contain spaces.
if len(unparsed_text) > len(last_token):
last_token = unparsed_text
return IncompleteToken(last_token)
def last_incomplete_token_from_document(
document: Document
) -> IncompleteToken:
"""Shortcut for getting the last incomplete token only from a ``Document``."""
parse_status = parse_cmd_line(document.text)
return last_incomplete_token(document, parse_status.unparsed_text)
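# Minimal usage sketch (the command string is hypothetical; positionals must come
# before keyword arguments, as the grammar above requires):
#
#   status = parse_cmd_line('connect db1 timeout=30 verbose=true')
#   # status.state is ParseState.FULL and status.results.command == 'connect';
#   # the positionals hold ['db1'] and kv maps timeout -> 30, verbose -> True.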
|
python
|
import re
f = open('Dockerfile')
data = f.read()
f.close()
resp = re.sub('RUN','sudo',data)
resp = re.sub('WORKDIR','cd',resp)
resp = re.sub('FROM.*','',resp)
cmd = re.findall('ENTRYPOINT\s+\[(.*?)].*?CMD\s+\[(.*?)\]',resp,re.DOTALL)
if cmd:
cmd = cmd[0]
cmd = cmd[0].strip() + ' ' + cmd[1].strip()
cmd = cmd.replace('"','')
resp = re.findall('(.*?)# An ENTRYPOINT',resp,re.DOTALL)
f = open('debian.sh','w')
f.write(resp[0]+cmd)
f.close()
|
python
|
# flush
import time
import yaqc
from .._pump import *
def run():
# Flush
# (all volumes in mL unless specified differently)
if True:
print("flush")
for i in range(65):
print(i)
return
# pall_flow_rates is the flow rate of one DSP
# FILL IN PUMP FLOW RATE BELOW
pall_flow_rates = 10 # [10,20, 30, 40, 50] (mL/min)
pall_flow_rates_mL_s = pall_flow_rates / 60 # (mL/s)
# Reactor Parameters
Vsl = 1.94
Veq = 0.715
Vrxnzone = 9.975
Vexit = 0.393
Veq_quench = 1.2
valve_open_time = 1
# Median exit time calc w/ pumps flow rates
if pall_flow_rates == 10:
median_exit_time = 48.2
elif pall_flow_rates == 20:
median_exit_time = 24.6
elif pall_flow_rates == 30:
median_exit_time = 16.8
elif pall_flow_rates == 40:
median_exit_time = 12.8
elif pall_flow_rates == 50:
median_exit_time = 10.6
# Valve assignments (A= run reactions, B= refill SL and DSPs)
valve0 = yaqc.Client(36000) # sample collection valve (A= sample, B=waste)
valve1 = yaqc.Client(36001) # Monomer
valve2 = yaqc.Client(36002) # Catalyst
valve3 = yaqc.Client(36003) # Quench
# Pump assignments
p1 = Pump(1) # Monomer line
p2 = Pump(2) # Catalyst line
p3 = Pump(4) # Quench line
# Pump injection volume for rxn
pall_rxn_inj = 2.5 * Vsl + Veq + 0.5 * Vrxnzone + 0.333 * Vexit
# Pump parameters for flush (extra step for DSPs multi-step mode)
pall_flush_inj = 0.1
pall_flush_rates = 10
pump_run_time = pall_rxn_inj / pall_flow_rates_mL_s
print("run time " + str(round((pump_run_time / 60), 1)) + " min")
# Open Mon, Cat, & Quench valves
valve1.set_identifier("A")
while valve1.busy():
continue
assert valve1.get_identifier() == "A"
valve2.set_identifier("A")
while valve2.busy():
continue
assert valve2.get_identifier() == "A"
valve3.set_identifier("A")
while valve3.busy():
continue
assert valve3.get_identifier() == "A"
# Collection valve to waste first
valve0.set_identifier("B")
while valve0.busy():
continue
assert valve0.get_identifier() == "B"
# Prompt the User (y/n)
def yes_or_no(question):
answer = input(question + "(y/n): ").lower().strip()
print("")
while not (answer == "y" or answer == "yes" or answer == "n" or answer == "no"):
print("Input yes or no")
answer = input(question + "(y/n):").lower().strip()
print("")
if answer[0] == "y":
return True
else:
return False
if yes_or_no("Are you sure you want to FLUSH the reactor?"):
print("Starting Flush")
else:
print("Flush Stopped")
exit()
print("Error") # checking exit()
# Pump instructions for reaction, flush, and refill (Cat & Mon the same)
p1.add_step(volume=pall_rxn_inj, rate=pall_flow_rates, delay=0)
p1.add_step(volume=pall_flush_inj, rate=pall_flush_rates, delay=0)
p2.add_step(volume=pall_rxn_inj, rate=pall_flow_rates, delay=0)
p2.add_step(volume=pall_flush_inj, rate=pall_flush_rates, delay=0)
# Quench needs an additional quench delay
p3.add_step(volume=pall_rxn_inj, rate=pall_flow_rates, delay=0)
p3.add_step(volume=pall_flush_inj, rate=pall_flush_rates, delay=0)
start_pumps(1, 2, 4)
# Collection valve timing and instructions
time.sleep(pump_run_time / 2)
valve0.set_identifier("A")
while valve0.busy():
continue
assert valve0.get_identifier() == "A"
time.sleep(valve_open_time)
valve0.set_identifier("B")
while valve0.busy():
continue
assert valve0.get_identifier() == "B"
# Set valves back to B for refill
time.sleep(pump_run_time)
valve1.set_identifier("B")
while valve1.busy():
continue
assert valve1.get_identifier() == "B"
valve2.set_identifier("B")
while valve2.busy():
continue
assert valve2.get_identifier() == "B"
valve3.set_identifier("B")
while valve3.busy():
continue
assert valve3.get_identifier() == "B"
valve0.set_identifier("B")
while valve0.busy():
continue
assert valve0.get_identifier() == "B"
print("Flush Complete")
|
python
|
from bs4 import BeautifulSoup as bs
from splinter import Browser
import time
import pandas as pd
import lxml
# full "scrape" function, comprised of the four subfunctions
# defined below
def scrape():
# create the overall dictionary to hold all the results
# which will be returned by this function to the flask app
results = {}
# first, scrape and then add the article info
article_info = scrape_article_info()
results.update(article_info)
print("Article Info Scraped!")
# scrape and then add the featured mars image
featured_image = scrape_featured_mars_image()
results.update(featured_image)
print("Featured Image Scraped!")
# scrape and then add the Mars data table
Martian_data_table = scrape_data_table()
results.update(Martian_data_table)
print("Martian Data Table Scraped!")
# scrape and then add the hemisphere images
hemisphere_images = scrape_hemisphere_enhanced_images()
results.update({"Hemispheres":hemisphere_images})
print("Hemisphere Images Scraped!")
print(results)
return results
# first scraped info for the Mars app, article headline and summary
# from the NASA website, returned as a dictionary
def scrape_article_info():
url = "https://mars.nasa.gov/news/"
executable_path = {'executable_path': 'chromedriver.exe'}
browser = Browser('chrome', **executable_path, headless=True)
browser.visit(url)
#add a delay so page fully loads
time.sleep(1)
mars_news = browser.html
news_soup = bs(mars_news, "html.parser")
latest_news = news_soup.find_all("li", class_="slide")[0]
latest_headline = latest_news.find("div", class_="content_title").a.text
latest_description = latest_news.find("div", class_="article_teaser_body").text
browser.quit()
return {"headline":latest_headline, "description":latest_description}
# scrape the latest Mars image from the JPL website, returned as a dictionary
def scrape_featured_mars_image():
executable_path = {'executable_path': 'chromedriver.exe'}
browser = Browser('chrome', **executable_path, headless=True)
base_url = "https://www.jpl.nasa.gov"
image_url = "https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
browser.visit(image_url)
browser.find_by_css('img.thumb').first.click()
time.sleep(2)
browser.execute_script(
"document.getElementById('fancybox-lock').scrollTo(0, document.body.scrollHeight);")
browser.links.find_by_partial_text("more info").click()
time.sleep(1)
#get image src
img_soup = bs(browser.html, "html.parser")
img_src = img_soup.find("img", class_="main_image")["src"]
img_src = base_url + img_src
browser.quit()
return {"featured_image": img_src}
# scrape Mars data table info directly from space-facts.com/mars
def scrape_data_table():
data_table_url = "https://space-facts.com/mars/"
tables = pd.read_html(data_table_url)
mars_info_df = tables[0]
mars_info_df = mars_info_df.set_index(0)
mars_info_df.index.name = "Mars"
mars_info_df.columns = ["Data Table"]
mars_info_df
#html_mars_table = mars_info_df.to_html()
#return
output_dict = mars_info_df.to_dict()
return output_dict
# scrape high-quality pictures for each Martian hemisphere
# returns a dictionary of hemisphere name to file location
def scrape_hemisphere_enhanced_images():
executable_path = {'executable_path': 'chromedriver.exe'}
browser = Browser('chrome', **executable_path, headless=True)
base_url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
all_hemispheres = []
browser.visit(base_url)
num_hemispheres = len(browser.find_by_css(".thumb"))
for hemisphere_num in range(num_hemispheres):
curr_title = browser.find_by_tag(
"h3")[hemisphere_num].html.replace(" Enhanced", "")
browser.find_by_css(".thumb")[hemisphere_num].click()
curr_img_url = browser.find_by_text("Sample").first["href"]
# print(curr_img_url)
browser.back()
all_hemispheres.append({"title": curr_title, "img_url": curr_img_url})
browser.windows[0].close_others()
# print(all_hemispheres)
browser.quit()
return all_hemispheres
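# Minimal usage sketch (assumes chromedriver.exe sits next to this script and
# Chrome is installed; the calls hit the live NASA/JPL/USGS pages):
#
#   if __name__ == "__main__":
#       print(scrape())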
|
python
|
# -*- coding: utf-8 -*-
from shuup_workbench.settings.utils import get_disabled_migrations
from shuup_workbench.settings import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'myapp.sqlite3'
}
}
|
python
|
#!/usr/bin/env python
import ConfigParser
import log
import obslog
import os
from pyraf import iraf
import shutil
import utils
# ----------------------------------------------------------------------------------------------------------------------
def start(configfile):
"""
Parameters are loaded from the gnirs-pype.cfg configuration file. This script automatically detects whether it is being
run on telluric data or science data. There are 5 steps.
INPUT FILES:
- Configuration file
- Science or Telluric frames
- mdfshiftrefimage
- masterflat
- /database files from the appropriate calibrations directory
OUTPUT FILES:
- If telluric: cleaned (optional), prepared, radiation-event corrected, reduced, spatial distortion corrected,
and transformed images
- If science: cleaned (optional), prepared, radiation-event corrected, reduced, spatial distortion corrected,
and transformed images
Args:
- kind (string): Either 'Science' or 'Telluric'
- configfile: gnirs-pype.cfg configuration file.
- Paths to the Science (str), reduction truth value (boolean)
E.g. 'target/date/config/{Sci,Tel}_ObsID/{Calibrations,Intermediate}', True
- Paths to the Tellurics (str), reduction truth value (boolean)
E.g. 'target/date/config/{Sci,Tel}_ObsID/{Calibrations,Intermediate}', True
- manualMode (boolean): Enable optional pauses for manual checking? Default: False
- overwrite (boolean): Overwrite old files? Default: False
# And gnirsReduce specific settings
"""
logger = log.getLogger('extract_spectra')
path = os.getcwd() # Store current working directory for later use.
# Set up/prepare IRAF.
iraf.gemini()
iraf.gemtools()
iraf.gnirs()
iraf.unlearn(iraf.gemini, iraf.gemtools, iraf.gnirs, iraf.imcopy) # reset parameters to default values
# Prepare the IRAF package for GNIRS.
# NSHEADERS lists the header parameters used by the various tasks in the GNIRS package (excluding header values
# which have values fixed by IRAF or FITS conventions).
iraf.nsheaders("gnirs", logfile=logger.root.handlers[0].baseFilename)
# Set clobber to 'yes' for the script. This still does not make the gemini tasks overwrite files, so: YOU WILL
# LIKELY HAVE TO REMOVE FILES IF YOU RE-RUN THE SCRIPT.
us_clobber = iraf.envget("clobber")
iraf.reset(clobber='yes')
config = ConfigParser.RawConfigParser()
config.optionxform = str # make options case-sensitive
config.read(configfile)
manualMode = config.getboolean('defaults', 'manualMode')
overwrite = config.getboolean('defaults', 'overwrite')
# The order of sections is important for the later check of plausible peaks located by nsextract for science targets
nsextractInter = config.getboolean('interactive', 'nsextractInter')
combinedsrc = config.get('runtimeFilenames', 'combinedsrc')
combinedsky = config.get('runtimeFilenames', 'combinedsky')
extractRegularPrefix = config.get('runtimeFilenames', 'extractRegularPrefix')
extractFullSlitPrefix = config.get('runtimeFilenames', 'extractFullSlitPrefix')
extractStepwiseTracePrefix = config.get('runtimeFilenames', 'extractStepwiseTracePrefix')
extractStepwisePrefix = config.get('runtimeFilenames', 'extractStepwisePrefix')
useApall = config.getboolean('extractSpectra1D', 'useApall')
subtractBkg = config.get('extractSpectra1D', 'subtractBkg')
extractApertureRadius = config.getfloat('extractSpectra1D', 'extractApertureRadius')
checkPeaksMatch = config.getboolean('extractSpectra1D', 'checkPeaksMatch')
toleranceOffset = config.getfloat('extractSpectra1D', 'toleranceOffset')
extractFullSlit = config.getboolean('extractSpectra1D', 'extractFullSlit')
extractStepwise = config.getboolean('extractSpectra1D', 'extractStepwise')
extractionStepSize = config.getfloat('extractSpectra1D', 'extractStepSize')
extractApertureWindow = config.getfloat('extractSpectra1D', 'extractApertureWindow')
# gnirsExtractSpectra1D will first check if the reduction truth value of the science and telluric directories is
# True -- if it is, it will then check if the required spectra to be extracted are available in the directories
# (and proceed only if it finds them there); else, it will warn the user and request to provide the spectra for
# extracting. If the reduction truth value of the science and telluric directories is False, the script will skip
# extracting 1D spectra in those directories.
# Loop through all the observation (telluric and science) directories to extract 1D spectra in each one.
# (the Telluric standards must be done first if they are to be used as a reference)
for section in ['TelluricDirectories', 'ScienceDirectories']:
for obspath in config.options(section):
if not config.getboolean(section, obspath): # Only process directories marked True
logger.debug('Skipping extraction of 1D spectra in %s', obspath)
continue
logger.info(' ----------------------- ')
logger.info('| Extracting 1D spectra |')
logger.info(' ----------------------- ')
obspath += '/Intermediate'
logger.info("%s\n", obspath)
iraf.chdir(obspath)
utils.pause(manualMode)
utils.requires([combinedsrc])
calculateSNR = config.getboolean('gnirsPipeline', 'CalculateSNR')
if calculateSNR:
if not utils.exists([combinedsky], overwrite=False):
logger.warning('Could not find combined sky spectra. Setting calculateSNR = False')
calculateSNR = False
orders = utils.get_orders(obspath)
extractApertureWindow = get_window(obspath)
if nsextractInter:
subtractBkg = 'fit'
logger.info('Setting background subtraction method to "fit"')
if useApall:
nsum = 20
else:
nsum = 10
extractSpectra(combinedsrc, extractRegularPrefix, nsextractInter, useApall, nsum, subtractBkg,
extractApertureRadius, overwrite)
if calculateSNR:
logger.info("Extracting the combined sky spectrum reduced without sky subtraction.")
subtractBkg = 'none'
extractSpectra(combinedsky, extractRegularPrefix, nsextractInter, useApall, nsum, subtractBkg,
extractApertureRadius, overwrite)
if 'Science' in section:
# If the extraction was not done interactively check if checkPeaksMatch is set: if yes, check if the
# required telluric extraction reference files available in the telluric /database directory; else,
# warn the user that both nsextractInter and checkPeaksMatch are not set, request the user to
# manually check if the science target peak identified by task nsextract might identify a wrong peak
# if the science target is not bright enough.
# Get symbolic path to the tel database directory in the sci directory
# Relative path/link expected to be at the top level of every sci directory
scidatabasepath = 'database'
logger.info("Science database path: %s", scidatabasepath)
telpath = '../Telluric/Intermediate'
logger.info("Telluric path: %s", telpath)
teldatabasepath = telpath + '/database'
logger.info("Telluric database path: %s", teldatabasepath)
sci_combinedsrc = obspath + '/' + combinedsrc
tel_combinedsrc = telpath + '/' + combinedsrc
if not nsextractInter: # if nsextract was not run interactively
if not checkPeaksMatch:
logger.warning("Parameters 'nsextractInter' and 'checkPeaksMatch' are both set to False.")
logger.warning("Please manually verify that nsextract identified the science peaks correctly.")
else:
logger.info("Finding extraction locations for Telluric standard...")
telpeaks = get_peaks(teldatabasepath)
logger.info("Finding extration locations for Science target...")
scipeaks = get_peaks(scidatabasepath)
logger.info("Comparing the science and Telluric extraction locations...")
reextract, predicted = compare_peaks(obspath, telpath, scipeaks, telpeaks, toleranceOffset)
if any(reextract):
logger.warning("Re-extracting...")
useApall = 'yes'
nsum = 20
reExtractSpectra(reextract, scipeaks, telpeaks, predicted, obspath, telpath, nsum,
extractApertureRadius, useApall, subtractBkg, nsextractInter)
# ------------------------------------------------------------------------------------------------------
if extractFullSlit:
logger.warning('Full-slit extraction is untested')
utils.pause(manualMode)
# Approx. full-slit extraction (science target only)
# Uses +/- 23 pix aperture (6.9", almost whole length of slit), appropriate for objects centred
# along length of slit (q=0). Not sure what the effect is if nsextract finds a spectrum that's
# not centred along the slit.
iraf.nsextract(
inimages='src_comb', outspectra='', outprefix='a', dispaxis=1, database='', line=700,
nsum=20, ylevel='INDEF', upper=23, lower=-23, background='none', fl_vardq='yes', fl_addvar='no',
fl_skylines='yes', fl_inter=nsextractInter, fl_apall=useApall, fl_trace='no',
aptable='gnirs$data/apertures.fits', fl_usetabap='no', fl_flipped='yes', fl_project='yes',
fl_findneg='no', bgsample='*', trace='', tr_nsum=10, tr_step=10, tr_nlost=3,
tr_function='legendre', tr_order=5, tr_sample='*', tr_naver=1, tr_niter=0, tr_lowrej=3.0,
tr_highrej=3.0, tr_grow=0.0, weights='variance', logfile=logger.root.handlers[0].baseFilename,
verbose='yes', mode='al')
# ------------------------------------------------------------------------------------------------------
if extractStepwise: # Extract in steps on either side of the peak
                logger.warning('Step-wise extraction is untested')
utils.pause(manualMode)
# Calling apall and tracing the peak first to make sure the same part of the object is extracted in
# each step along the slit for all orders (needed when there is complex, spectrally-varying
# structure in a galaxy, for example; otherwise the spectra can have offsets between orders)
# This first nsextract step, outside the loop, gets the trace into the database to be used when we
# do the "real" extraction
iraf.nsextract(
inimages='src_comb', outspectra='trace_ref', outprefix='x', dispaxis=1, database='', line=700,
nsum=20, ylevel='INDEF', upper=3, lower=-3, background='none', fl_vardq='yes', fl_addvar='no',
fl_skylines='yes', fl_inter=nsextractInter, fl_apall='yes', fl_trace='yes',
aptable='gnirs$data/apertures.fits', fl_usetabap='no', fl_flipped='yes' ,fl_project='no',
fl_findneg='no', bgsample='*', trace='', tr_nsum=10, tr_step=10, tr_nlost=3,
tr_function='legendre', tr_order=5, tr_sample='300:1000', tr_naver=1, tr_niter=0, tr_lowrej=3.0,
tr_highrej=3.0, tr_grow=0.0, weights='variance', logfile=logger.root.handlers[0].baseFilename,
verbose='yes', mode='al')
# This is non-interactive because it uses info from the previous call (and it would be very tedious)
# TODO: Make sure that the stepping range and step size results in an integer number of steps
step = 3
n = 0
for i in range(-21, 21, step):
iraf.nsextract(
inimages='src_comb', outspectra='', outprefix='s'+str(n), dispaxis=1, database='', line=700,
nsum=20, ylevel='INDEF', lower=i, upper=i+step, background='none', fl_vardq='yes',
fl_addvar='no', fl_skylines='yes', fl_inter='no', fl_apall='no', fl_trace='no',
aptable='gnirs$data/apertures.fits', fl_usetabap='no', fl_flipped='yes', fl_project='yes',
fl_findneg='no', bgsample='*', trace='', tr_nsum=10, tr_step=10, tr_nlost=3,
tr_function='legendre', tr_order=5, tr_sample='*', tr_naver=1, tr_niter=0, tr_lowrej=3.0,
tr_highrej=3.0, tr_grow=0.0, weights='variance',
logfile=logger.root.handlers[0].baseFilename, verbose='yes', mode='al')
n += 1
logger.info("Extraction complete for")
logger.info("%s", obspath)
iraf.chdir(path) # Return to directory script was begun from
return
# ----------------------------------------------------------------------------------------------------------------------
def get_window(path):
logger = log.getLogger('get_window')
# Determine the full-slit extraction window
if 'LB_SXD' in path:
# extractApertureRadius = 23 (+/-23 pixels or 6.9" covers almost the entire slit length,
# but this is only appropriate for objects centred along length of slit (with absolute Q offset of 0).
# [-46/2,46/2+6) [-23.0 -17 -11 -5 1 7 13 19 23 29) warn the user if last step in extract >0.1" away
# from the end of the slit or if extraction proceeding out of the slit
window = 46
elif 'LB_LXD' in path:
window = 33 # [-33/2,33/2+6] [-16.5 -10.5 -4.5 2.5 8.5 14.5 20.5]
elif 'SB_SXD' in path:
window = 46
else:
logger.error("Unknown GNIRS XD configuration.")
raise SystemExit
logger.debug('Window: %s pix', window)
return window
# ----------------------------------------------------------------------------------------------------------------------
def extractSpectra(inimage, outprefix, interactive, apall, nsum, background, radius, overwrite):
"""
Extracting 1D spectra from the combined 2D spectra using nsextract.
background = Type of background to subtract (none|average|median|minimum|fit)
"""
# This is really just a wrapper around nsextract.
# I'm tempted to name this 'nsextract' and call this whenever I need nsextract.
# I guess it might not have all the parameters, but they could be included as optional.
logger = log.getLogger('extractSpectra')
logger.debug('inimage: %s', inimage)
logger.debug('background: %s', background)
logger.debug('radius: %s pix', radius)
logger.debug('nsum: %s pix', nsum)
utils.requires([inimage])
orders = utils.get_orders(os.getcwd())
outfiles = [outprefix + inimage] + \
['database/apsrc_comb_DQ_%d_' % i for i in range(1, len(orders)+1)] + \
['database/apsrc_comb_SCI_%d_' % i for i in range(1, len(orders)+1)]
if utils.exists(outfiles, overwrite):
logger.info('Spectra already extracted.')
return
iraf.nsextract(
inimages=inimage, outspectra='', outprefix=outprefix, dispaxis=1, database='', line=700,
nsum=nsum, ylevel='INDEF', upper=radius, lower=-radius, background=background,
fl_vardq='yes', fl_addvar='no', fl_skylines='yes', fl_inter=interactive, fl_apall=apall, fl_trace='no',
aptable='gnirs$data/apertures.fits', fl_usetabap='no', fl_flipped='yes', fl_project='yes', fl_findneg='no',
bgsample='*', trace='', tr_nsum=10, tr_step=10, tr_nlost=3, tr_function='legendre', tr_order=5,
tr_sample='*', tr_naver=1, tr_niter=0, tr_lowrej=3.0, tr_highrej=3.0, tr_grow=0.0, weights='variance',
logfile=logger.root.handlers[0].baseFilename, verbose='yes', mode='al')
return
# ----------------------------------------------------------------------------------------------------------------------
def get_peaks(databasepath):
"""
Check the extraction reference files in the telluric directory databases to find the location of the peaks.
"""
logger = log.getLogger('get_peaks')
logger.debug('databasepath: %s', databasepath)
orders = utils.get_orders(os.getcwd())
infiles = ['%s/apsrc_comb_SCI_%d_' % (databasepath, i) for i in range(1, len(orders)+1)]
utils.requires(infiles)
peaks = []
for i in range(1, len(orders)+1):
apfile = '%s/apsrc_comb_SCI_%d_' % (databasepath, i)
with open(apfile, 'r') as f:
p = None
for line in f:
# The peak location is the number in the second column of the line beginning with 'center'
if 'center' in line:
p = float(line.split()[1])
break
peaks.append(p)
if p is None:
logger.warning('Peak not found')
logger.debug('peaks: %s', peaks)
return peaks
# ----------------------------------------------------------------------------------------------------------------------
def compare_peaks(scipath, telpath, scipeaks, telpeaks, tolerance):
"""
Compare locations of the extraction locations of the science and Telluric standard.
For faint targets, NSEXTRACT may find a noise peak instead of the science peak. In such cases, it is advisable
to check the aperture center of extraction of the science with respect to the telluric and re-extract at the
expected location.
Look the science and telluric absolute Q offsets and determine if the relative location of the target
peak was correct. If not, we should re-extract at the expected location.
"""
logger = log.getLogger('gnirsReduce.peaksMatch')
# Get the absolute P,Q offsets from the obslog:
sciinfo = obslog.readcsv(scipath + '/obslog.csv')
telinfo = obslog.readcsv(telpath + '/obslog.csv')
sciA = utils.files_in([scipath + '/nodA.list'])
telA = utils.files_in([telpath + '/nodA.list'])
# Assuming that both the science and Telluric were acquired in the center of the slit, the extraction locations
# should be the same minus any difference in the Q-offset.
# Here I assume that I only need to compare the "A" offset (B might be a sky):
logger.debug('Science "A" offset: %s arcsec', sciinfo[sciA[0]]['Q'])
logger.debug('Telluric "A" offset: %s arcsec', telinfo[telA[0]]['Q'])
# TODO: The PIXSCALE should be in the obslog, but it's not, so for now:
pixscale = 0.15
offset = (float(sciinfo[sciA[0]]['Q']) - float(telinfo[telA[0]]['Q'])) / pixscale
logger.debug('offset: %s pix', offset)
logger.debug('Extraction locations (pixels):')
shifts = []
predicted = []
reextract = []
for s,t in zip(scipeaks, telpeaks):
predicted.append(t + offset)
shifts.append(t + offset - s)
reextract.append(abs(shifts[-1]) > tolerance)
logger.debug('Telluric: %6.2f Predicted sci: %6.2f Actual sci: %6.2f', t, t + offset, s)
logger.debug('predicted: %s', predicted)
logger.debug('shifts: %s', shifts)
logger.debug('reextract: %s', reextract)
if any(reextract):
logger.warning('Some orders are not where they were expected to be.')
# I'm not sure if I should do the tolerance checking here, or pass back the list of shifts,
# or a list of booleans, or do the comparison in the main program...
# nsextract should find the spectrum within a 'tolerance' pixels of expected location. This depends on how well the
# observer centred the target along the slit. Here, we use 5 pixels as a reasonable tolerance level. A more robust
# way would be to use some measure of whether the peak found by nsextract was real, e.g. counts + FWHM. However,
# this information is not recorded in database.
return reextract, predicted
# ----------------------------------------------------------------------------------------------------------------------
def reExtractSpectra(reextract, scipeaks, telpeaks, predicted, scipath, telpath, nsum, aperture, apall, background,
interactive):
    # reextract - boolean list of extensions that should be re-extracted
    # predicted - float list of new extraction locations
    logger = log.getLogger('reExtractSpectra')
    logger.debug('scipath: %s', scipath)
    logger.debug('reextract: %s', reextract)
    logger.debug('predicted: %s', predicted)
    # Rename the old extracted spectrum for posterity
# Rename the old aperture files for posterity
# Copy in the Telluric aperture files
# Edit the Telluric aperture files to have the predicted science spectra locations
# Run nsextract with fl_trace=no and set the tracing reference image (trace) to the edited Telluric file
# Test by setting the tolerance to be ~1 pix which will force some orders to be reextracted.
logger.debug('Renaming old extracted spectra...')
os.rename('vsrc_comb.fits', 'vsrc_comb_TRACED.fits')
logger.debug('Generating reference files...')
for i in range(len(reextract)):
ext = i+1
oldsciapfile = '%s/database/apsrc_comb_SCI_%d_' % (scipath, ext)
os.rename(oldsciapfile, oldsciapfile + 'TRACED')
telapfile = '%s/database/apsrc_comb_SCI_%d_' % (telpath, ext)
refapfile = '%s/database/apref_comb_SCI_%d_' % (scipath, ext)
shutil.copy(telapfile, refapfile)
with open(refapfile, 'r') as f:
data = f.read()
# Following XDGNIRS replace the Telluric location with either the shifted Telluric or the Science location:
with open(refapfile, 'w') as f:
if reextract[i]:
logger.debug('Substituting predicted position: %s', predicted[i])
f.write(data.replace(str(telpeaks[i]), str(predicted[i])).replace('src_comb', 'ref_comb'))
else:
logger.debug('Substituting science position: %s', scipeaks[i])
f.write(data.replace(str(telpeaks[i]), str(scipeaks[i])).replace('src_comb', 'ref_comb'))
shutil.copy(telpath + '/src_comb.fits', 'ref_comb.fits')
logger.debug('Running nsextract with the modified reference file and trace=no...')
iraf.nsextract(
inimages='src_comb.fits', outspectra='', outprefix='v', dispaxis=1, database='', line=700, nsum=nsum,
ylevel='INDEF', upper=aperture, lower=-aperture, background=background, fl_vardq='yes', fl_addvar='no',
fl_skylines='yes', fl_inter=interactive, fl_apall=apall, fl_trace='no', aptable='gnirs$data/apertures.fits',
fl_usetabap='no', fl_flipped='yes', fl_project='yes', fl_findneg='no', bgsample='*', trace='ref_comb',
tr_nsum=10, tr_step=10, tr_nlost=3, tr_function='legendre', tr_order=5, tr_sample='*', tr_naver=1, tr_niter=0,
tr_lowrej=3.0, tr_highrej=3.0, tr_grow=0.0, weights='variance', logfile=logger.root.handlers[0].baseFilename,
verbose='yes')
# Sometimes nsextract locates the aperture too close to the end of the slit.
# When this happens it fails with "Aperture too large" and spectra are not extracted for that order.
# Check if all file extensions are present in the extracted target file:
extracted_sci_extensions = iraf.gemextn(
inimages='src_comb', check='exists,mef', process='expand', index='', extname='SCI', extversion='', ikparams='',
omit='', replace='', outfile='STDOUT', logfile=logger.root.handlers[0].baseFilename, glogpars='', verbose='yes',
fail_count='0', count='20', status='0', Stdout=1)
logger.debug('extracted_sci_extensions: %s', extracted_sci_extensions)
if len(extracted_sci_extensions) != len(reextract):
logger.error("The combined science image file contains only %d extensions.", len(extracted_sci_extensions))
raise SystemExit
return
# ----------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
log.configure('gnirs-pype.log', filelevel='INFO', screenlevel='DEBUG')
start('gnirs-pype.cfg')
|
python
|
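# Lottery ticket checker: a valid ticket is exactly 20 characters long and is split into two
# halves; 10 identical winning symbols (@, #, $ or ^) in both halves is a jackpot, and
# 6-9 identical symbols in both halves is a regular win.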
def jackpot():
    for symbol in "@#$^":
        if 10 * symbol in first_half and 10 * symbol in second_half:
            return ["True", symbol, 10]
    return ["False"]
def winning_symbols():
    for symbol in "@#$^":
        for count in range(9, 5, -1):
            if count * symbol in first_half and count * symbol in second_half:
                return [symbol, count]
    return "False"
tickets = [i.strip() for i in input().split(", ")]
for ticket in tickets:
first_half = ticket[:((len(ticket))//2)]
second_half = ticket[((len(ticket))//2):]
if len(ticket) != 20:
print("invalid ticket")
continue
list_jackpot = jackpot()
if list_jackpot[0] == 'True':
print(f'ticket "{ticket}" - {list_jackpot[2]}{list_jackpot[1]} Jackpot!')
continue
if winning_symbols() == "False":
print(f'ticket "{ticket}" - no match')
continue
if winning_symbols != "None":
list_ws = winning_symbols()
print(f'ticket "{ticket}" - {list_ws[1]}{list_ws[0]}')
|
python
|
from ...language.base import parse
from ...utils.ast_to_code import ast_to_code
from ..compiled import GraphQLCompiledDocument
from .schema import schema
def test_compileddocument_from_module_dict():
# type: () -> None
document_string = "{ hello }"
document_ast = parse(document_string)
document = GraphQLCompiledDocument.from_module_dict(
schema,
{
"document_string": document_string,
"document_ast": document_ast,
"execute": lambda *_: True,
},
)
assert document.operations_map == {None: "query"}
assert document.document_string == document_string
assert document.document_ast == document_ast
assert document.schema == schema
assert document.execute()
def test_compileddocument_from_code():
# type: () -> None
document_string = "{ hello }"
document_ast = parse(document_string)
code = '''
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from graphql.language import ast
from graphql.language.parser import Loc
from graphql.language.source import Source
schema = None
document_string = """{document_string}"""
source = Source(document_string)
def loc(start, end):
return Loc(start, end, source)
document_ast = {document_ast}
def execute(*_):
return True
'''.format(
document_string=document_string, document_ast=ast_to_code(document_ast)
)
document = GraphQLCompiledDocument.from_code(schema, code)
assert document.operations_map == {None: "query"}
assert document.document_string == document_string
assert document.document_ast == document_ast
assert document.schema == schema
assert document.execute()
|
python
|
import argparse
from random import seed
from yaml import dump
from utils.experiment import test
from utils.utils import *
if __name__ == "__main__":
seed(0)
parser = argparse.ArgumentParser(
description='Test error for a combination of ensembler and weak learner.')
parser.add_argument('dataset', help='dataset filename')
parser.add_argument('ensembler', help='chosen ensembler')
parser.add_argument('weak_learner', help='chosen weak learner')
parser.add_argument('M', metavar='# weak_learners',
help='number of weak learners', type=int)
parser.add_argument(
'trials', help='number of trials (each with different shuffling of the data); defaults to 1', type=int, default=1, nargs='?')
parser.add_argument('--record', action='store_const', const=True,
default=False, help='export the results in YAML format')
args = parser.parse_args()
ensembler = get_ensembler(args.ensembler)
weak_learner = get_weak_learner(args.weak_learner)
data = load_data("data/" + args.dataset)
accuracy, baseline = test(
ensembler, weak_learner, data, args.M, trials=args.trials)
print "Accuracy:"
print accuracy
print "Baseline:"
print baseline[-1]
if args.record:
results = {
'm': args.M,
'accuracy': accuracy,
'baseline': baseline[-1],
'booster': args.ensembler,
'weak_learner': args.weak_learner,
'trials': args.trials,
'seed': 0
}
filename = args.ensembler + "_" + \
args.weak_learner + "_" + str(args.M) + ".yml"
        with open(filename, 'w+') as f:
            f.write(dump(results))
|
python
|
# Copyright (c) 2009-2013, Monoidics ltd.
# Copyright (c) 2013-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import subprocess
from . import config
class InferJavacCapture():
def __init__(self, javac_args):
self.javac_args = javac_args
def start(self):
infer = os.path.join(config.BIN_DIRECTORY, 'infer')
# pass --continue to prevent removing the results-dir
cmd = [
infer,
'capture',
'--continue',
'--', 'javac'
] + self.javac_args
try:
return subprocess.check_call(cmd)
except Exception as e:
print('Failed to execute:', ' '.join(cmd))
raise e
def _get_javac_args(javac_args):
# replace any -g:.* flag with -g to preserve debugging symbols
args = map(lambda arg: '-g' if '-g:' in arg else arg, javac_args)
# skip -Werror
args = filter(lambda arg: arg != '-Werror', args)
    return list(args)  # ensure a list (map/filter are lazy on Python 3)
def create_infer_command(javac_args):
return InferJavacCapture(_get_javac_args(javac_args))
|
python
|
import unittest
from rating.processing import rates
from rating.processing.utils import ConfigurationException
class TestConversion(unittest.TestCase):
def test_conversion_byte_second_to_hour_harder(self):
rating_unit = 'GiB-hours'
metric_unit = 'byte-seconds'
qty = 7e12
converted = rates.convert_metrics_unit(metric_unit,
rating_unit,
qty)
self.assertAlmostEqual(converted, 1.8109050061, delta=1e-6)
def test_conversion_core_second_to_hour_basic(self):
rating_unit = 'core-hours'
metric_unit = 'core-seconds'
qty = 10
converted = rates.convert_metrics_unit(metric_unit,
rating_unit,
qty)
self.assertAlmostEqual(converted, 0.002777, delta=1e-6)
def test_conversion_core_second_to_hour_harder(self):
rating_unit = 'core-hours'
metric_unit = 'core-seconds'
qty = 24
converted = rates.convert_metrics_unit(metric_unit,
rating_unit,
qty)
self.assertAlmostEqual(converted, 0.006666, delta=1e-6)
def test_wrong_conversion(self):
rating_unit = 'some-random-rating_unit'
metric_unit = 'core-seconds'
qty = 1
with self.assertRaisesRegex(ConfigurationException,
'Unsupported key'):
rates.convert_metrics_unit(metric_unit,
rating_unit,
qty)
|
python
|
from django.contrib import admin
from users.models import Profile, ConfirmedMail
# Represents Profile and ConfirmedMail models at admin site.
admin.site.register(Profile)
admin.site.register(ConfirmedMail)
|
python
|
n1 = int(input('Digite um número: '))
n2 = int(input('Digite outro número '))
r = n1 + n2
print('O resultado da soma é {}'.format(r))
r = n1 - n2
print('O resultado da subtração é {}'.format(r))
r = n1 * n2
print('O resultado da multiplicação é {}'.format(r))
r = n1 / n2
print('O resultado da divisão é {}'.format(r))
# To choose how many decimal places are shown, write it like this:
# print('O resultado da divisão é {:.3f}'.format(r)), this way there will be 3 decimal places.
r = n1 ** n2
print('O resultado da potência é {}'.format(r))
r = n1 // n2
print('O resultado da divisão inteira é {}'.format(r))
r = n1 % n2
print('O resultado do resto da divisão é {}'.format(r))
# Arithmetic operators: +, -, *, /, **, //, %
# They are, respectively: addition, subtraction, multiplication, division, exponentiation, floor division, remainder (modulo)
# E.g.:
# 5 + 2 = 7
# 5 - 2 = 3
# 5 * 2 = 10
# 5 / 2 = 2.5
# 5 ** 2 = 25
# 5 // 2 = 2  Here the division always yields an integer result.
# 5 % 2 = 1  Here we get the remainder of the division above.
# Order of precedence: the order of importance when evaluating an expression with more than one operator.
# 1: ()
# 2: **
# 3: *, /, //, %
# 4: +, -
# Note: to express equality inside an expression we must use the == operator.
# E.g.:
# 5 + 2 == 7
# Another way to compute a power is:
# pow(4, 3), which gives 4 raised to the power 3.
# Computing a square root: a square root is the number raised to the power of one half.
# E.g.:
# 81**(1/2) == 9.0 is written like this.
# For a cube root, do it like this: 125**(1/3) == 5.0 (approximately, due to floating point).
# It is also possible to perform operations with strings.
# E.g.:
# 'Oi' + 'Olá' == OiOlá
p1 = input('Digite uma palavra: ')
p2 = input('Digite outra palavra: ')
print('Resultado da Concatenação é:', p1 + p2)
# Line break: \n
# E.g.:
# print('Resultado da Concatenação é:\n', p1 + p2)
# Multiplication works with strings too:
# p1*5 == p1p1p1p1p1
print('Palavra 1 repetida 5 vezes: ', p1 * 5)
# To keep printing on the same line, use end=' '
# E.g.:
# print('Palavra 1 repetida 5 vezes: ', p1 * 5, end=' ')
# This can also help when repeating symbols:
print('Símbolo de = repetido 20 vezes: ', '=' * 20)
# Another example of how we can use operators:
nome = input('Digite seu nome: ')
print('Seja bem vindo {:20}!'.format(nome))
print('Seja bem vindo {:=>20}!'.format(nome))
print('Seja bem vindo {:=<20}!'.format(nome))
print('Seja bem vindo {:=^20}!'.format(nome))
|
python
|
import pdb, traceback, sys
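# Deliberately trigger a ZeroDivisionError, print the traceback, then drop into pdb's
# post-mortem debugger on the captured traceback object.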
try:
1/0
except:
extype, value, tb = sys.exc_info()
traceback.print_exc()
pdb.post_mortem(tb)
|
python
|
# Open3D: www.open3d.org
# The MIT License (MIT)
# See license file or visit www.open3d.org for details
import importlib
import platform
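# Load the 32-bit or 64-bit compiled Open3D extension matching the running interpreter
# and re-export its symbols at the package level.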
if platform.architecture()[0] == '32bit':
globals().update(importlib.import_module('open3d.win32.32b.open3d').__dict__)
elif platform.architecture()[0] == '64bit':
globals().update(importlib.import_module('open3d.win32.64b.open3d').__dict__)
|
python
|
from copy import copy
from contextlib import contextmanager
from logging import getLogger
import tensorflow as tf
from rl.utils.tf_utils import (purge_orphaned_summaries
as _purge_orphaned_summaries)
USE_DEFAULT = object()
logger = getLogger("rl")
class SummaryManager(object):
def __init__(self, logdir=None, summary_writer=None, summary_period=None,
last_summary_step=None):
if (logdir is None) == (summary_writer is None):
raise ValueError("exactly one of logdir or summary_writer must be set")
if summary_writer is None:
summary_writer = tf.summary.FileWriterCache.get(logdir)
self._summary_writer = summary_writer
self._summary_period = summary_period
self._last_summary_step = last_summary_step
@property
def summary_writer(self):
return self._summary_writer
def copy(self):
return copy(self)
def _get_step(self, step, session=None):
if isinstance(step, (tf.Variable, tf.Tensor)):
if session is None:
raise ValueError("session is None when step is instance %s"
% type(step))
step = session.run(step)
return step
def summary_time(self, step, session=None):
step = self._get_step(step, session)
if self._summary_period is None:
return False
elif self._last_summary_step is None:
return True
else:
return step - self._last_summary_step >= self._summary_period
def add_summary(self, summary, step, session=None,
update_last_summary_step=True):
step = self._get_step(step, session)
if step is None:
step = session.run(self._step)
self._summary_writer.add_summary(summary, global_step=step)
if update_last_summary_step:
self._last_summary_step = step
def add_summary_dict(self, summary_dict, step, session=None,
update_last_summary_step=True):
summary = tf.Summary()
for key, val in summary_dict.items():
summary.value.add(tag=key, simple_value=val)
self.add_summary(summary, step=step, session=session,
update_last_summary_step=update_last_summary_step)
def update_last_summary_step(self, step, session=None):
self._last_summary_step = self._get_step(step, session)
class DistributedTrainer(object):
def __init__(self,
target,
is_chief,
summary_manager=None,
checkpoint_dir=None,
checkpoint_period=None,
checkpoint=None,
config=USE_DEFAULT):
self._target = target
self._is_chief = is_chief
self._summary_manager = summary_manager
if (summary_manager is None
and checkpoint_dir is None
and checkpoint_period is not None):
raise ValueError("Either summary_manager or checkpoint_dir must be"
" specified when checkpoint_period is not None")
if checkpoint_dir is not None and checkpoint_period is None:
raise ValueError("checkpoint_period must be specified"
" when checkpoint_dir is not None")
if checkpoint_period is not None and checkpoint_dir is None:
checkpoint_dir = summary_manager.summary_writer.get_logdir()
self._checkpoint_dir = checkpoint_dir
self._checkpoint_period = checkpoint_period
self._checkpoint = checkpoint
if config == USE_DEFAULT:
config = self._get_default_config()
self._config = config
self._session = None
def _get_default_config(self):
config = tf.ConfigProto(intra_op_parallelism_threads=1,
inter_op_parallelism_threads=2)
# Dynamic memory allocation on gpu.
# https://github.com/tensorflow/tensorflow/issues/1578
config.gpu_options.allow_growth = True
return config
@contextmanager
def managed_session(self, init_vars=None, restore_vars=None,
save_vars=None, hooks=USE_DEFAULT,
purge_orphaned_summaries=True):
if init_vars is None:
init_vars = list(filter(
lambda v: v.device.startswith("/job:ps/"),
tf.global_variables()
))
if restore_vars is None:
restore_vars = init_vars
if save_vars is None:
save_vars = init_vars
ready_op = tf.report_uninitialized_variables(init_vars)
restorer = tf.train.Saver(restore_vars)
scaffold = tf.train.Scaffold(ready_for_local_init_op=ready_op,
ready_op=ready_op, saver=restorer)
if hooks == USE_DEFAULT:
if self._is_chief and self._checkpoint_dir is not None:
saver = tf.train.Saver(save_vars)
hooks = [
tf.train.CheckpointSaverHook(
checkpoint_dir=self._checkpoint_dir, saver=saver,
save_steps=self._checkpoint_period)
]
else:
hooks = None
if self._is_chief:
session_creator = tf.train.ChiefSessionCreator(
scaffold=scaffold, master=self._target,
config=self._config, checkpoint_filename_with_path=self._checkpoint)
else:
session_creator = tf.train.WorkerSessionCreator(
scaffold=scaffold, master=self._target, config=self._config)
with tf.train.MonitoredSession(session_creator, hooks=hooks) as sess:
if purge_orphaned_summaries and self._summary_manager is not None:
_purge_orphaned_summaries(self._summary_manager.summary_writer,
sess.run(tf.train.get_global_step()))
self._session = sess
yield sess
def step(self, algorithm=None, fetches=None,
feed_dict=None, summary_time=None, sess=None):
if summary_time and algorithm is None:
raise ValueError("algorithm cannot be None when summary_time is True")
if algorithm is None and fetches is None:
raise ValueError("algorithm and fetches cannot both be None")
if sess is None:
sess = self._session or tf.get_default_session()
global_step = tf.train.get_global_step()
step = sess.run(global_step)
if (summary_time is None
and algorithm is not None
and self._summary_manager is not None
and self._summary_manager.summary_time(step=step)):
summary_time = True
run_fetches = {}
if fetches is not None:
run_fetches["fetches"] = fetches
if algorithm is not None:
run_fetches["train_op"] = algorithm.train_op
if summary_time:
run_fetches["logging"] = algorithm.logging_fetches
run_fetches["summaries"] = algorithm.summaries
if feed_dict is None:
feed_dict = {}
if algorithm is not None:
algorithm_feed_dict = algorithm.get_feed_dict(
sess, summary_time=summary_time)
else:
algorithm_feed_dict = {}
if len(algorithm_feed_dict.keys() & feed_dict.keys()) > 0:
intersection = algorithm_feed_dict.keys() & feed_dict.keys()
raise ValueError(
"Algorithm feed dict intersects with the given feed dict: {}"
.format(intersection)
)
feed_dict.update(algorithm_feed_dict)
values = sess.run(run_fetches, feed_dict)
if summary_time:
logger.info("Step #{}, {}".format(step, values["logging"]))
self._summary_manager.add_summary(values["summaries"], step=step)
if "fetches" in values:
return step, values["fetches"]
else:
return step
def train(self, algorithm, num_steps):
global_step = tf.train.get_global_step()
def _train(sess):
step = sess.run(global_step)
while not sess.should_stop() and step < num_steps:
step = self.step(algorithm)
if self._session is not None:
_train(self._session)
else:
with self.managed_session() as sess:
_train(sess)
class SingularTrainer(DistributedTrainer):
def __init__(self,
summary_manager=None,
checkpoint_dir=None,
checkpoint_period=None,
checkpoint=None,
config=USE_DEFAULT):
super(SingularTrainer, self).__init__(
target='',
is_chief=True,
summary_manager=summary_manager,
checkpoint_dir=checkpoint_dir,
checkpoint_period=checkpoint_period,
checkpoint=checkpoint,
config=config)
@contextmanager
def managed_session(self, save_vars=None, restore_vars=None,
hooks=USE_DEFAULT, purge_orphaned_summaries=True):
if self._checkpoint is not None:
restorer = tf.train.Saver(restore_vars)
if hooks == USE_DEFAULT:
if self._checkpoint_dir is not None:
saver = tf.train.Saver(save_vars)
hooks = [
tf.train.CheckpointSaverHook(
self._checkpoint_dir,
saver=saver,
save_steps=self._checkpoint_period
),
]
else:
hooks = None
with tf.train.SingularMonitoredSession(hooks=hooks,
config=self._config) as sess:
if self._checkpoint is not None:
restorer.restore(sess, self._checkpoint)
if purge_orphaned_summaries and self._summary_manager is not None:
_purge_orphaned_summaries(self._summary_manager.summary_writer,
sess.run(tf.train.get_global_step()))
self._session = sess
yield sess
|
python
|
num1 = 11
num2 =222
num3 =3333
|
python
|
""""
Copyright © Krypton 2021 - https://github.com/kkrypt0nn (https://krypt0n.co.uk)
Description:
This is a template to create your own discord bot in python.
Version: 4.1
"""
class UserBlacklisted(Exception):
"""
Thrown when a user is attempting something, but is blacklisted.
"""
def __init__(self, message="User is blacklisted!"):
self.message = message
super().__init__(self.message)
class UserNotOwner(Exception):
"""
Thrown when a user is attempting something, but is not an owner of the bot.
"""
def __init__(self, message="User is not an owner of the bot!"):
self.message = message
super().__init__(self.message)
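# Hypothetical usage sketch (not part of the original file): a command check might raise
# these exceptions, e.g.
#     if context.author.id in blacklisted_ids:
#         raise UserBlacklisted()
# and the bot's error handler would catch UserBlacklisted / UserNotOwner to reply accordingly.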
|
python
|
a = [0, 0, 0, 1, 1, 1, 3, 3, 6, 6, 9, 9]
print(len(a))
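# 'a' is a union-find parent array: a[x] is the parent of x, and a root satisfies a[x] == x.
# root() finds the representative using path halving, root2() uses full recursive path compression.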
def root(x):
while x != a[x]:
a[x] = a[a[x]]
print(x, a[x])
x = a[x]
return x
def root2(x):
if x != a[x]:
a[x] = root2(a[x])
return a[x]
root2(9)
print(a)
|
python
|
"""
@Note: Implementation of Knowledge Distillation Algorithms
@Author: LucasX
"""
import copy
import os
import sys
import time
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from sklearn.metrics import confusion_matrix
from torch.optim import lr_scheduler
from torchvision import models
sys.path.append('../')
from research.kd import data_loader
from research.kd.cfg import cfg
from research.kd.losses import KDLoss, RegularizedTfKDLoss, SelfTfKDLoss
def train_model_with_kd(use_lsr, teacher_model_w_weights, student_model_wo_weights, dataloaders, criterion,
optimizer, scheduler,
num_epochs, inference=False):
"""
train model with Knowledge Distillation
:param use_lsr: whether to use LabelSmoothingRegularization
:param teacher_model_w_weights:
:param student_model_wo_weights:
:param dataloaders:
:param criterion:
:param optimizer:
:param scheduler:
:param num_epochs:
:param inference:
:return:
"""
print(student_model_wo_weights)
model_name = student_model_wo_weights.__class__.__name__
student_model_wo_weights = student_model_wo_weights.float()
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
student_model_wo_weights = student_model_wo_weights.to(device)
teacher_model_w_weights = teacher_model_w_weights.to(device)
if torch.cuda.device_count() > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
student_model_wo_weights = nn.DataParallel(student_model_wo_weights)
dataset_sizes = {x: len(dataloaders[x].dataset) for x in ['train', 'val', 'test']}
for _ in dataset_sizes.keys():
print('Dataset size of {0} is {1}...'.format(_, dataset_sizes[_]))
if not inference:
print('Start training %s...' % model_name)
since = time.time()
best_model_wts = copy.deepcopy(student_model_wo_weights.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print('-' * 100)
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
if torch.__version__ <= '1.1.0':
scheduler.step()
student_model_wo_weights.train() # Set model to training mode
else:
student_model_wo_weights.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for i, data in enumerate(dataloaders[phase], 0):
inputs, types = data['image'], data['type']
inputs = inputs.to(device)
types = types.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
student_outputs = student_model_wo_weights(inputs)
_, preds = torch.max(student_outputs, 1)
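                        # With label-smoothing regularisation (use_lsr) the loss only needs the student
                        # outputs and labels; otherwise the teacher's logits are also computed and passed
                        # to the KD loss together with the student's outputs.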
if not use_lsr:
teacher_outputs = teacher_model_w_weights(inputs)
if use_lsr:
loss = criterion(student_outputs, types)
else:
loss = criterion(teacher_outputs, student_outputs, types)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == types.data)
if phase == 'train':
if torch.__version__ >= '1.1.0':
scheduler.step()
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
tmp_correct = 0
tmp_total = 0
tmp_y_pred = []
tmp_y_true = []
tmp_filenames = []
for data in dataloaders['val']:
images, types, filename = data['image'], data['type'], data['filename']
images = images.to(device)
types = types.to(device)
outputs = student_model_wo_weights(images)
_, predicted = torch.max(outputs.data, 1)
tmp_total += types.size(0)
tmp_correct += (predicted == types).sum().item()
tmp_y_pred += predicted.to("cpu").detach().numpy().tolist()
tmp_y_true += types.to("cpu").detach().numpy().tolist()
tmp_filenames += filename
tmp_acc = tmp_correct / tmp_total
print('Confusion Matrix of {0} on val set: '.format(model_name))
cm = confusion_matrix(tmp_y_true, tmp_y_pred)
print(cm)
cm = np.array(cm)
print('Accuracy = {0}'.format(tmp_acc))
precisions = []
recalls = []
for i in range(len(cm)):
precisions.append(cm[i][i] / sum(cm[:, i].tolist()))
recalls.append(cm[i][i] / sum(cm[i, :].tolist()))
print("Precision of {0} on val set = {1}".format(model_name,
sum(precisions) / len(precisions)))
print(
"Recall of {0} on val set = {1}".format(model_name, sum(recalls) / len(recalls)))
best_acc = epoch_acc
best_model_wts = copy.deepcopy(student_model_wo_weights.state_dict())
student_model_wo_weights.load_state_dict(best_model_wts)
model_path_dir = './model'
os.makedirs(model_path_dir, exist_ok=True)
if torch.cuda.device_count() > 1:
torch.save(student_model_wo_weights.module.state_dict(),
'./model/{0}_best_epoch-{1}.pth'.format(model_name, epoch))
else:
torch.save(student_model_wo_weights.state_dict(),
'./model/{0}_best_epoch-{1}.pth'.format(model_name, epoch))
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
student_model_wo_weights.load_state_dict(best_model_wts)
model_path_dir = './model'
os.makedirs(model_path_dir, exist_ok=True)
if torch.cuda.device_count() > 1:
torch.save(student_model_wo_weights.module.state_dict(), './model/%s.pth' % model_name)
else:
torch.save(student_model_wo_weights.state_dict(), './model/%s.pth' % model_name)
else:
print('Start testing %s...' % model_name)
student_model_wo_weights.load_state_dict(torch.load(os.path.join('./model/%s.pth' % model_name)))
student_model_wo_weights.eval()
correct = 0
total = 0
y_pred = []
y_true = []
filenames = []
probs = []
with torch.no_grad():
for data in dataloaders['test']:
images, types, filename = data['image'], data['type'], data['filename']
images = images.to(device)
types = types.to(device)
outputs = student_model_wo_weights(images)
                outputs = F.softmax(outputs, dim=1)
# get TOP-K output labels and corresponding probabilities
topK_prob, topK_label = torch.topk(outputs, 2)
probs += topK_prob.to("cpu").detach().numpy().tolist()
_, predicted = torch.max(outputs.data, 1)
total += types.size(0)
correct += (predicted == types).sum().item()
y_pred += predicted.to("cpu").detach().numpy().tolist()
y_true += types.to("cpu").detach().numpy().tolist()
filenames += filename
print('Accuracy of {0} on test set: {1}% '.format(model_name, 100 * correct / total))
print(
'Confusion Matrix of {0} on test set: '.format(model_name))
cm = confusion_matrix(y_true, y_pred)
print(cm)
cm = np.array(cm)
precisions = []
recalls = []
for i in range(len(cm)):
precisions.append(cm[i][i] / sum(cm[:, i].tolist()))
recalls.append(cm[i][i] / sum(cm[i, :].tolist()))
print('Precision List: ')
print(precisions)
print('Recall List: ')
print(recalls)
print("Precision of {0} on val set = {1}".format(model_name,
sum(precisions) / len(precisions)))
print(
"Recall of {0} on val set = {1}".format(model_name, sum(recalls) / len(recalls)))
print('Output CSV...')
col = ['filename', 'gt', 'pred', 'prob']
df = pd.DataFrame([[filenames[i], y_true[i], y_pred[i], probs[i][0]] for i in range(len(filenames))],
columns=col)
df.to_csv("./%s.csv" % model_name, index=False)
print('CSV has been generated...')
def run_img_classification(teacher_w_weights, student_wo_weights, epoch):
"""
run image classification
:param teacher_w_weights:
:param student_wo_weights:
:param epoch:
:return:
"""
if cfg['use_lsr']:
criterion = RegularizedTfKDLoss(alpha=0.5, temperature=10)
else:
criterion = KDLoss(alpha=0.5, temperature=10) # vanilla KD Loss
teacher_w_weights.eval()
optimizer_ft = optim.SGD(student_wo_weights.parameters(), lr=cfg['init_lr'], momentum=0.9,
weight_decay=cfg['weight_decay'])
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=cfg['lr_decay_step'], gamma=0.1)
# cosine_anneal_warmup_lr_scheduler = lr_scheduler.CosineAnnealingWarmRestarts(optimizer_ft, T_0=10, T_mult=10,
# eta_min=1e-5, last_epoch=-1)
trainloader, valloader, testloader = data_loader.load_mengzhucrop_data()
dataloaders = {
'train': trainloader,
'val': valloader,
'test': testloader,
}
train_model_with_kd(use_lsr=cfg['use_lsr'], teacher_model_w_weights=teacher_w_weights,
student_model_wo_weights=student_wo_weights,
dataloaders=dataloaders, criterion=criterion, optimizer=optimizer_ft,
scheduler=exp_lr_scheduler, num_epochs=epoch, inference=False)
if __name__ == '__main__':
densenet169 = models.densenet169(pretrained=False)
num_ftrs = densenet169.classifier.in_features
densenet169.classifier = nn.Linear(num_ftrs, cfg['out_num'])
densenet169.load_state_dict(torch.load("/home/lucasxu/ModelZoo/DenseNet169.pth"))
# shufflenet_v2 = models.shufflenet_v2_x1_0(pretrained=True)
# num_ftrs = shufflenet_v2.fc.in_features
# shufflenet_v2.fc = nn.Linear(num_ftrs, cfg['out_num'])
mobilenet_v2 = models.mobilenet_v2(pretrained=True)
num_ftrs = mobilenet_v2.classifier[1].in_features
mobilenet_v2.classifier[1] = nn.Linear(num_ftrs, cfg['out_num'])
# resnet18 = models.resnet18(pretrained=True)
# num_ftrs = resnet18.fc.in_features
# resnet18.fc = nn.Linear(num_ftrs, 6)
# mixnet_m = ptcv_get_model("mixnet_m", pretrained=True)
# num_ftrs = mixnet_m.output.in_features
# mixnet_m.output = nn.Linear(num_ftrs, 6)
# condensenet74 = ptcv_get_model("condensenet74_c4_g4", pretrained=True)
# condensenet74.output.linear = nn.Linear(1032, 6, bias=True)
run_img_classification(teacher_w_weights=densenet169, student_wo_weights=mobilenet_v2, epoch=cfg['epoch'])
|
python
|
import tensorflow as tf
import numpy as np
import os
# from sklearn.manifold._utils import _binary_search_perplexity
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
class TSNE:
def __init__(self,n_components=2, perplexity=30, early_exaggeration=12, learning_rate=500, n_iter=1000, momentum=0.8, verbose=0):
self.learning_rate=learning_rate
self.perplexity=perplexity
self.n_components=n_components
self.early_exaggeration=early_exaggeration
self.n_iter=n_iter
self.verbose=verbose
self.momentum=momentum
def fit_transform(self, P_coor):
with tf.Graph().as_default():
p2, p, sigma_mean, dists=TSNE.p_joint(P_coor, self.perplexity)
sigma_mean=tf.Variable(sigma_mean, trainable=False)
P_=tf.Variable(p2*self.early_exaggeration, trainable=False)
P=tf.stop_gradient(P_)
Q_coor=tf.Variable(tf.random_normal([tf.shape(P_coor)[0], self.n_components]))
momentum=tf.Variable(0.8, trainable=False)
Q_coor_loss, grad=TSNE.tsne(P, Q_coor)
opt=TSNE.gradient_descent(Q_coor_loss, grad, Q_coor, self.learning_rate, momentum)
grad_norm=tf.linalg.norm(grad)
# opt=tf.train.MomentumOptimizer(learning_rate=self.learning_rate, momentum=momentum)
# grad=opt.compute_gradients(Q_coor_loss, var_list=[Q_coor])
# grad_norm=tf.linalg.norm(tf.concat([x[0] for x in grad], axis=0))
# update_Q_coor=opt.apply_gradients(grad)
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
if self.verbose>=2:
print("sigma mean:", sess.run(sigma_mean))
for i in range(self.n_iter):
# if i is 20:
# sess.run(momentum.assign(0.8))
                    if i == 100:
                        sess.run(P_.assign(P/self.early_exaggeration))
                        if self.verbose >= 2:
                            print("early exaggeration end.")  # referring to sklearn
q, _, loss, gn= sess.run([Q_coor, opt, Q_coor_loss,grad_norm])
if self.verbose>=2 and i % 50 == 0:
print("Iteration {} loss: {}, grad norm: {:.6f}".format(i, loss, gn))
return q
@staticmethod
def remove_diag(x):
diag_not_mask=~tf.cast(tf.diag(tf.ones(x.shape[:1])), dtype=tf.bool)
with_out_diag=tf.reshape(tf.boolean_mask(x, diag_not_mask), x.shape-np.array([0,1]))
return with_out_diag
@staticmethod
def add_diag(x):
xshape=tf.shape(x)
tmp=tf.reshape(x, [xshape[1],xshape[0]])
a=tf.zeros([xshape[1], 1], dtype=tf.float32)
tmp=tf.concat([tmp,a], axis=1)
tmp=tf.concat([[0.0], tf.reshape(tmp, [-1])], axis=0)
tmp=tf.reshape(tmp, [xshape[0], xshape[0]])
return tmp
@staticmethod
def squared_dists(x, diag=False): # |x_i - x_j|^2
sum_square=tf.reduce_sum(tf.square(x), axis=1, keepdims=True)
        dists=tf.maximum(sum_square -2*x@tf.transpose(x) +tf.transpose(sum_square),1e-6) # relu against negative values caused by overflow
if diag:
return dists
else:
return TSNE.remove_diag(dists)
@staticmethod
def set_diag_zero(x):
return tf.linalg.set_diag(x ,tf.zeros(tf.shape(x)[:1]))
@staticmethod
def cross_entropy(x, y, axis=-1):
safe_y = tf.where(tf.equal(x, 0.), tf.ones_like(y), y)
return -tf.reduce_sum(x * tf.log(safe_y), axis)
@staticmethod
def softmax_entropy_with_logits(logits, axis=-1): # H=-sum(p*log(p)) where p=softmax(logits)
P=tf.nn.softmax(logits, axis=axis)
H=tf.reduce_logsumexp(logits, axis=axis)- tf.reduce_sum(P*logits, axis=axis) # LSE(logits)-E(logits)
'''
-sum(p*log(p))
=-sum(p*log_softmax(logits))
=-sum(p*(logits-lse(logits)))
=sum(p*lse(logits)-p*logits)
=sum(p)*lse(logits)-sum(p*logits)
=lse(logits)-E(logits)
'''
return H,P
@staticmethod
def calc_perplexity_and_probs(neg_dists, betas): # betas=1/2*sigmas^2
logits=neg_dists*tf.reshape(betas,[-1,1])
return TSNE.softmax_entropy_with_logits(logits)
@staticmethod
def binary_search_sigma(neg_dists, target, tol=1e-5, max_iter=50, lower=1e-20, upper=1000000.):
#loop initial value
target_entropy=np.log(target)
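        # Binary-search a per-point precision (beta) so that the entropy of the conditional
        # neighbour distribution equals log(target perplexity); the search runs entirely
        # inside a tf.while_loop over all points at once.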
def body(lows, ups, ans, finding_mask, x):
finding_indices=tf.cast(tf.where(finding_mask), tf.int32)
guess=(lows+ups)/2
val2, _=TSNE.calc_perplexity_and_probs(tf.boolean_mask(neg_dists, finding_mask), tf.boolean_mask(guess, finding_mask)) # !TODO: compare the speed
val=tf.scatter_nd(finding_indices, val2, tf.shape(finding_mask))
diff=val-target_entropy
new_ans_mask= ((tf.abs(diff)<= tol) | tf.equal(x+1, max_iter)) & finding_mask
new_finding_mask= ~new_ans_mask & finding_mask
greater_mask= (diff<- tol) & finding_mask
leq_mask= (diff>tol) & finding_mask
# dependencies=[
# tf.Print(val, [val], "val ",summarize=10),
# tf.Print(val2, [val2], "val2",summarize=10),
# tf.Print(guess, [guess], "guess:",summarize=10),
# tf.Print(greater_mask, [greater_mask], "gm ",summarize=10),
# tf.Print(ups, [ups], "ups ",summarize=10),
# tf.Print(leq_mask, [leq_mask], "lem ",summarize=10),
# tf.Print(lows, [lows], "lows",summarize=10),
# tf.Print(finding_mask, [finding_mask], "\nfm ",summarize=10),
# tf.Print(ans, [ans], "ans",summarize=10),
# tf.Print(new_finding_mask, [new_finding_mask], "nfm ",summarize=10),
# tf.Print(new_ans_mask,[new_ans_mask], 'nam ',summarize=10),
# tf.Print(finding_indices,[finding_indices],'fid ',summarize=10),
# tf.print("x ", x)
# ]
# with tf.control_dependencies(dependencies):
return [tf.where(leq_mask, guess, lows),
tf.where(greater_mask, guess, ups),
tf.where(new_ans_mask, guess, ans),
new_finding_mask,
tf.add(x,1)
]
cond= lambda a,b,ans,finding_mask,x: tf.reduce_any(finding_mask) & (x<max_iter)
nums=tf.shape(neg_dists)[:1]
lows=tf.fill(nums, lower)
ups=tf.fill(nums, upper)
finding_mask=tf.fill(nums, True)
res=tf.while_loop(cond, body ,(lows, ups, lows, finding_mask,0), back_prop=False)
ans=res[2]
pra_iter=res[4]
# with tf.control_dependencies([tf.Assert(pra_iter<max_iter, ['exceeded_max_iter'])]):
# print("[Warning] exceeded mat iter, maybe sigma's precision is not enough.")
return tf.identity(ans, name='betas')
# @staticmethod
# def transpose_without_diag(x):
# xshape=tf.shape(x)
# tmp=tf.reshape(x, xshape[::-1])
# a=tf.zeros([xshape[1], 1], dtype=tf.float32)
# tmp=tf.concat([tmp,a], axis=1)
# tmp=tf.concat([[0.0], tf.reshape(tmp, [-1])], axis=0)
# tmp=tf.reshape(tmp, [xshape[0], xshape[0]])
# # origin got
# tmp=tf.reshape(tf.transpose(tmp),[-1])[1:]
# tmp=tf.reshape(tmp,[-1, xshape[0]+1])[:,:-1]
# tmp=tf.reshape(tmp, xshape)
# return tmp
@staticmethod
def p_joint(x, target_perplexity):
neg_dists_no_diag=-TSNE.squared_dists(x, diag=False)
betas= TSNE.binary_search_sigma(neg_dists_no_diag, target_perplexity)
p=tf.nn.softmax(neg_dists_no_diag*tf.reshape(betas, [-1,1]))
p=TSNE.add_diag(p)
p=p/tf.reduce_sum(p, axis=-1, keepdims=True)
return (p+tf.transpose(p))/(2*tf.cast(tf.shape(x)[0], dtype=tf.float32)), p, tf.reduce_mean(tf.sqrt(1/betas)), neg_dists_no_diag
# sum_square=np.sum(np.square(x), axis=1, keepdims=True)
# dists=np.maximum(sum_square -2*[email protected] + sum_square.T, 1e-6)
# p= _binary_search_perplexity(dists, None, target_perplexity, 6)
# p=(p+p.T)/(2*p.shape[0])
# return p.astype(np.float32), p, tf.constant([1]), dists
@staticmethod
# @tf.custom_gradient
def tsne(p,y):
dists=TSNE.squared_dists(y, diag=True)
q_num=TSNE.set_diag_zero(1/(1+dists))
q=tf.nn.relu(q_num/tf.reduce_sum(q_num))
y=tf.expand_dims(y, axis=-2)
y_cross_diff= y-tf.transpose(y, [1,0,2])
        # L2=tf.reduce_sum(dists)
loss= -tf.reduce_sum(TSNE.cross_entropy(p,p)-TSNE.cross_entropy(p,q))
grad= tf.reduce_sum((tf.expand_dims((p-q)*q_num, axis=-1))*y_cross_diff, axis=1)
return loss, grad
def gradient_descent(loss, grad, x, lr, momentum, min_gain=0.01):
gains=tf.Variable(tf.ones_like(x, dtype=tf.float32))
update=tf.Variable(tf.zeros_like(x, dtype=tf.float32))
direct= update*grad < 0.0
gains=gains.assign(tf.maximum(tf.where(direct, gains+0.2, gains*0.8), min_gain))
update=update.assign(update*momentum - lr*grad*gains)
return x.assign(x+update)
|
python
|
n = int(input())
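# 2**(n+1) - 2 is the closed form of the geometric sum 2 + 4 + ... + 2**n.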
print(pow(2, n+1)-2)
|
python
|
import unittest
from typing import Generator, List
from common import open_fixture
BASE_PATTERN = (0, 1, 0, -1)
def decode(s: str) -> List[int]:
return [int(c) for c in s.strip()]
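# pattern() yields the repeating FFT multiplier sequence for a given output position:
# each BASE_PATTERN value is repeated 'position' times, and the very first value is skipped.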
def pattern(position: int) -> Generator[int, None, None]:
skip = True
while True:
for i in range(len(BASE_PATTERN)):
for _ in range(position):
if skip:
skip = False
continue
yield BASE_PATTERN[i]
def fft(signal: List[int]) -> None:
output = [0] * len(signal)
for i in range(len(signal)):
n = 0
gen = pattern(i + 1)
for j in range(len(signal)):
n += signal[j] * next(gen)
output[i] = abs(n) % 10
for i in range(len(signal)):
signal[i] = output[i]
class TestDay16(unittest.TestCase):
def test_part1_example1(self):
A = decode("12345678")
tests = [
decode("48226158"),
decode("34040438"),
decode("03415518"),
decode("01029498"),
]
for test in tests:
fft(A)
self.assertListEqual(A, test)
def test_part1_example2(self):
tests = [
(decode("80871224585914546619083218645595"), decode("24176176")),
(decode("19617804207202209144916044189917"), decode("73745418")),
(decode("69317163492948606335995924319873"), decode("52432133")),
]
for A, expect in tests:
for _ in range(100):
fft(A)
self.assertListEqual(A[:8], expect)
def test_part1(self):
with open_fixture("day16") as fp:
A = decode(fp.readline())
for _ in range(100):
fft(A)
self.assertListEqual(A[:8], decode("96136976"))
def test_part2(self):
        # TODO: exploit the predictable pattern in the last half of the algorithm
# Answer: 85,600,369
# https://www.reddit.com/r/adventofcode/comments/ebf5cy/2019_day_16_part_2_understanding_how_to_come_up/fb4bvw4/
pass
|
python
|
class newNode:
    # Constructor to create a new node
def __init__(self, key):
self.data = key
self.left = None
self.right = None
self.hd = 0
# function should print the topView
# of the binary tree
def topview(root) :
if(root == None) :
return
q = []
mp = dict()
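    # mp maps horizontal distance -> data of the first node encountered at that distance;
    # since the traversal below is level-order (BFS), that first node is the topmost one.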
head = 0
root.head = head
# push node and horizontal
# distance to queue
q.append(root)
while(len(q)) :
root = q[0]
head = root.head
        # only record the first (topmost) node
        # seen at this horizontal distance
if head not in mp:
mp[head] = root.data
if(root.left) :
root.left.head = head - 1
q.append(root.left)
if(root.right):
root.right.head = head + 1
q.append(root.right)
q.pop(0)
for i in sorted (mp):
print(mp[i], end = " ")
# Driver Code
if __name__ == '__main__':
root = newNode(1)
root.left = newNode(2)
root.right = newNode(3)
root.left.right = newNode(4)
root.left.right.right = newNode(5)
root.left.right.right.right = newNode(6)
print("Following are nodes in top", "view of Binary Tree")
topview(root)
|
python
|
import tensorflow as tf
import dnnlib.tflib as tflib
from training import dataset
from training import misc
from metrics import metric_base
class ACC(metric_base.MetricBase):
def __init__(self, num_images, minibatch_per_gpu, test_data_dir, test_dataset, **kwargs):
super().__init__(**kwargs)
self.num_images = num_images
self.minibatch_per_gpu = minibatch_per_gpu
self.test_data_dir = test_data_dir
self.test_dataset = test_dataset
def _evaluate(self, classifier, Gs_kwargs, num_gpus):
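# Accuracy is computed in minibatches: classifier logits are arg-maxed into a one-hot
# prediction, multiplied with the one-hot labels and summed to count correct samples,
# then divided by the number of images seen so far.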
self._set_dataset_obj(dataset.load_dataset(tfrecord_dir=self.test_dataset, data_dir=self.test_data_dir, shuffle_mb=2048))
dataset_object = self._get_dataset_obj()
dataset_object.configure(minibatch_size=self.minibatch_per_gpu)
num_correct = 0
num_total = 0
images_placeholder = tf.placeholder(shape=classifier.input_shapes[0], dtype=tf.float32)
label_placeholder = tf.placeholder(shape=[None, dataset_object.label_size], dtype=tf.float32)
images_adjust = misc.adjust_dynamic_range(images_placeholder, [0, 255], [-1, 1])
prediction = classifier.get_output_for(images_adjust)
one_hot_prediction = tf.one_hot(indices=tf.argmax(prediction, axis=-1), depth=dataset_object.label_size)
num_correct_pred = tf.reduce_sum(one_hot_prediction * label_placeholder)
while num_total < self.num_images:
images, labels = dataset_object.get_minibatch_np(minibatch_size=self.minibatch_per_gpu)
num_correct_pred_out = tflib.run(
num_correct_pred
, feed_dict={
images_placeholder: images,
label_placeholder: labels
})
num_correct += num_correct_pred_out
num_total += self.minibatch_per_gpu
self._report_result(num_correct / num_total)
|
python
|
#! /usr/bin/env python
"""
GaussLaguerre_doughnut.py
Calculates the intensity- and phase distributions of
Laguerre-Gauss doughnut laser modes.
cc Fred van Goor, May 2020.
"""
from LightPipes import *
import matplotlib.pyplot as plt
if LPversion < "2.0.0":
print(r'You need to upgrade LightPipes to run this script.' + '\n'+r'Type at a terminal prompt: $ pip install --upgrade LightPipes')
exit(1)
wavelength = 500*nm
size = 15*mm
N = 200
w0=3*mm
i=0
m_max=6
fig, axs = plt.subplots(nrows=2, ncols=m_max,figsize=(11.0,5.0))
s=r'Doughnut laser modes'
fig.suptitle(s)
F=Begin(size,wavelength,N)
n=0
for m in range(1,m_max+1):
F=GaussBeam(w0,F,doughnut=True,n=n,m=m)
I=Intensity(0,F)
Phi=Phase(F)
s=f'$LG_{n}$' + f'$_{m}$' + '$_*$'
axs[0][m-1].imshow(I,cmap='jet'); axs[0][m-1].axis('off'); axs[0][m-1].set_title(s)
axs[1][m-1].imshow(Phi,cmap='rainbow'); axs[1][m-1].axis('off');
plt.show()
|
python
|
from layers import *
class CNN(object):
"""
Implements Convolutional Neural Network
Input shape: [8, 3, 32, 32]---------->[batch size, channels, height, width]
Model Architecture:
----------------------------------------------------------------
Layer (type) Output Shape Param #
================================================================
Conv2d-1 [8, 8, 32, 32] 224
ReLU-2 [8, 8, 32, 32] 0
MaxPool2d-3 [8, 8, 16, 16] 0
Conv2d-4 [8, 16, 16, 16] 1,168
ReLU-5 [8, 16, 16, 16] 0
MaxPool2d-6 [8, 16, 8, 8] 0
Linear-7 [8, 100] 102,500
ReLU-8 [8, 100] 0
Linear-9 [8, 3] 303
================================================================
Total params: 104,195
Trainable params: 104,195
Non-trainable params: 0
"""
def __init__(self, in_channels, out_dims):
self.in_channels = in_channels
self.out_dims = out_dims
# C1
self.conv1 = Conv2d(in_channels=self.in_channels, out_channels=8, kernel_size=3, strides=1)
self.relu1 = ReLU()
self.max_pool1 = MaxPooling2d(kernel_size=2, strides=2)
# C2
self.conv2 = Conv2d(in_channels=8, out_channels=16, kernel_size=3, strides=1)
self.relu2 = ReLU()
self.max_pool2 = MaxPooling2d(kernel_size=2, strides=2)
self.flatten = Flatten()
self.fc1 = Dense(in_dims=16 * 8 * 8, out_dims=100)
self.relu3 = ReLU()
self.fc2 = Dense(in_dims=100, out_dims=self.out_dims)
self.softmax = function.softmax
self.layers = [self.conv1, self.conv2, self.fc1, self.fc2]
def forward(self, x):
# C1
x = self.conv1.forward(x)
x = self.relu1.forward(x)
x = self.max_pool1.forward(x)
# print(x.shape)
# C2
x = self.conv2.forward(x)
x = self.relu2.forward(x)
x = self.max_pool2.forward(x)
# print(x.shape)
# Flatten
x = self.flatten.forward(x)
# print(x.shape)
# Fully connected layer
x = self.fc1.forward(x)
x = self.relu3.forward(x)
# print(x.shape)
x = self.fc2.forward(x)
# print(x.shape)
output = self.softmax(x)
# print(x.shape)
return output
def backward(self, y, p_y):
deltaL = p_y - y
deltaL = self.fc2.backward(deltaL)
deltaL = self.relu3.backward(deltaL)
deltaL = self.fc1.backward(deltaL)
deltaL = self.flatten.backward(deltaL)
# C2
deltaL = self.max_pool2.backward(deltaL)
deltaL = self.relu2.backward(deltaL)
deltaL = self.conv2.backward(deltaL)
# C1
deltaL = self.max_pool1.backward(deltaL)
deltaL = self.relu1.backward(deltaL)
self.conv1.backward(deltaL)
def params(self):
params = {}
for i, layer in enumerate(self.layers):
params['w' + str(i+1)] = layer.params['w']
params['b' + str(i+1)] = layer.params['b']
return params
def set_params(self, params):
for i, layer in enumerate(self.layers):
layer.params['w'] = params['w' + str(i+1)]
layer.params['b'] = params['b' + str(i+1)]
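# Minimal smoke test of the network above (a sketch: it assumes the `layers` module
# imported at the top provides NumPy-based Conv2d/ReLU/MaxPooling2d/Flatten/Dense
# implementations with the interfaces used in this class).
if __name__ == "__main__":
    import numpy as np
    model = CNN(in_channels=3, out_dims=3)
    x = np.random.randn(8, 3, 32, 32)               # one batch, as in the docstring
    y = np.eye(3)[np.random.randint(0, 3, size=8)]  # one-hot labels, shape [8, 3]
    probs = model.forward(x)                        # [8, 3] class probabilities
    model.backward(y, probs)                        # accumulate gradients layer by layer
    print({name: w.shape for name, w in model.params().items()})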
|
python
|
from collections import Counter
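# Reads a line of words and prints those that occur an odd number of times (case-insensitive).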
occurrence_list = [item.lower() for item in input().split()]
odd_occurrence = [key for key, value in Counter(occurrence_list).items() if value % 2 != 0]
print(', '.join(list(odd_occurrence)))
|
python
|
import requests
import wget
import time
r = requests.post("http://10.42.0.255:8000/start")
time.sleep(4)
r = requests.post("http://10.42.0.255:8000/stop")
file_url = 'http://10.42.0.100/get/10'
file_name = wget.download(file_url, out='pictures/10_picture.png')
|
python
|
#
# Get the language breakdown for a repo
# Usage: ghb langs USER/REPO
#
import operator
import sys
import requests
from .helpers import credentials
URL = "https://api.github.com/repos/%s/languages"
def average(total, number):
return round((number / float(total)) * 100, 2)
def main(args):
username, password = credentials.credentials()
headers = {"Accept": "application/vnd.github.v3+json"}
r = requests.get(
URL % args.repo, auth=(username, password), headers=headers
)
response_json = r.json()
if r.status_code != 200:
sys.exit("Failed with error: %s" % (response_json["message"]))
total = sum(response_json.values())
averages = {k: average(total, v) for k, v in response_json.items()}
averages = sorted(
averages.items(), key=operator.itemgetter(1), reverse=True
)
for t in averages:
print("{:>15}: {:8.2f}%".format(t[0], t[1]))
|
python
|
# import all necessarily modules
import os.path
import subprocess
import sys
from configparser import ConfigParser
# working dir and extension types will be passed through CLI
try:
workDir = sys.argv[1]
extType = sys.argv[2]
newExtType = sys.argv[3]
except IndexError:
raise Exception("Usage: python3 autompeg.py <path to workfolder> <old fileformat> <new fileformat>"
"e.g. (Windows) python3 autompeg.py C:\\Users\\Test\\Work .ts .mp4"
"e.g. (Mac) python3 autompeg.py /Volumes/Volume1/Work .ts .mp4")
# Config Parser
config = ConfigParser(allow_no_value=True)
try:
with open('config.ini', 'r') as cfg:
config.read_file(cfg)
path = config.get('Path of ffmpeg', 'path')
except IOError:
print("Couldn't find or open configuration file for ffmpeg. Process is exiting now..")
sys.exit()
# exception-clause to prevent a faulty WorkDir and therefore the following ffmpeg process
if sys.platform.startswith('win32'):
workDir = workDir.replace('/', '\\')
else:
pass
for root, directories, filenames in os.walk(workDir):
for filename in filenames:
filename = os.path.join(root, filename)
newfilename = os.path.splitext(filename)[0] + newExtType
if filename.endswith(extType): # scan for files with the extension given in 'extType'
filepath = filename
newfilepath = newfilename
# no need to include an exception-clause here yet, since ffmpeg automatically detects a faulty filepath
subprocess.run(
[
path, # path of ffmpeg
"-i", # input argument for file
f'{filepath}', # file path of the old media file
"-c:v", # select video stream
"copy", # copy video stream and don't convert it (to prevent quality loss)
"-bsf:a", # select bitstream filter for the audio stream
"aac_adtstoasc", # remove the ADTS header from the audio stream
f'{newfilepath}', # file path of the 'new' media file
]
)
|
python
|
import logging
from riffdog.data_structures import FoundItem
from riffdog.resource import register, ResourceDirectory
from ...aws_resource import AWSRegionalResource
logger = logging.getLogger(__name__)
@register("aws_lambda_function")
class AWSLambdaFunction(AWSRegionalResource):
"""
This is aws Lambda functions
"""
def fetch_real_regional_resources(self, region):
logging.info("Looking for %s resources..." % self.resource_type)
client = self._get_client("lambda", region)
rd = ResourceDirectory()
response = client.list_functions()
for instance in response["Functions"]:
try:
item = rd.get_item(predicted_id=instance["FunctionName"])
item.real_id = instance["FunctionName"]
item.real_data = instance
except KeyError:
# this item isn't in the predicted state, so record it as a real-only find
FoundItem("aws_lambda_function", real_id=instance["FunctionName"], real_data=instance)
def process_state_resource(self, state_resource, state_filename):
logger.info("Found a resource of type %s!" % self.resource_type)
for instance in state_resource["instances"]:
FoundItem("aws_lambda_function", terraform_id=state_resource["name"], predicted_id=instance["attributes"]["id"], state_data=instance)
def compare(self, item, depth):
pass
|
python
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import time
import os
from six.moves import cPickle
import utils.opts as opts
import models
from utils.dataloader import *
import torch.utils.tensorboard as td
import utils.eval_utils as eval_utils
import utils.utils as utils
from utils.rewards import init_cider_scorer, get_self_critical_reward, get_self_critical_cider_bleu_reward, init_bleu_scorer
opt = opts.parse_opt()
os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpu_id
def train(opt):
opt.use_att = utils.if_use_att(opt.caption_model)
loader = DataLoader(opt)
opt.vocab_size = loader.vocab_size
opt.seq_length = loader.seq_length
td_summary_writer = td.writer.SummaryWriter(opt.ckpt_path)
infos = {
'iter': 0,
'epoch': 0,
'loader_state_dict': None,
'vocab': loader.get_vocab(),
}
histories = {}
if opt.start_from is not None:
# open old infos and check if models are compatible
with open(os.path.join(opt.start_from, 'infos.pkl'), 'rb') as f:
infos = cPickle.load(f, encoding='latin-1')
saved_model_opt = infos['opt']
need_be_same=["caption_model", "rnn_type", "rnn_size", "num_layers", "embed_weight_file"]
for checkme in need_be_same:
assert vars(saved_model_opt)[checkme] == vars(opt)[checkme], "Command line argument and saved model disagree on '%s' " % checkme
if os.path.isfile(os.path.join(opt.start_from, 'histories.pkl')):
with open(os.path.join(opt.start_from, 'histories.pkl'), 'rb') as f:
histories = cPickle.load(f, encoding='latin-1')
iteration = infos.get('iter', 0)
epoch = infos.get('epoch', 0)
iteration = infos['iter']
epoch = infos['epoch']
# For back compatibility
if 'iterators' in infos:
infos['loader_state_dict'] = {split: {'index_list': infos['split_ix'][split], 'iter_counter': infos['iterators'][split]} for split in ['train', 'val', 'test']}
loader.load_state_dict(infos['loader_state_dict'])
val_result_history = histories.get('val_result_history', {})
loss_history = histories.get('loss_history', {})
lr_history = histories.get('lr_history', {})
ss_prob_history = histories.get('ss_prob_history', {})
if opt.load_best_score == 1:
best_val_score = infos.get('best_val_score', None)
model = models.setup(opt)
model.cuda()
update_lr_flag = True
# Assure in training mode
model.train()
crit = utils.LanguageModelCriterion()
rl_crit = utils.RewardCriterion()
optimizer = utils.NewNoamOpt(optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=0, betas=(0.9, 0.98), eps=1e-9), max_lr=opt.learning_rate, warmup=opt.newnoamopt_warmup, batchsize=opt.batch_size, decay_start=opt.newnoamopt_decay, datasize=len(loader.dataset.split_ix['train']))
if opt.self_critical_after != -1 and epoch >= opt.self_critical_after:
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=opt.learning_rate, betas=(opt.optim_alpha, opt.optim_beta),
eps=opt.optim_epsilon, weight_decay=opt.weight_decay)
params = list(model.named_parameters())
grad_norm = np.zeros(len(params))
loss_sum = 0
while True:
if opt.self_critical_after != -1 and epoch >= opt.self_critical_after and update_lr_flag and opt.caption_model in ['svbase' ,'umv']:
print('start self critical')
if epoch >= 15 and epoch <20 and opt.learning_rate_decay_start >= 0:
opt.current_lr = opt.learning_rate
elif epoch >= 20 and opt.learning_rate_decay_start >= 0:
opt.current_lr = opt.learning_rate / 2.0
utils.set_lr(optimizer, opt.current_lr)
update_lr_flag = False
# Assign the scheduled sampling prob
if epoch > opt.scheduled_sampling_start and opt.scheduled_sampling_start >= 0:
frac = (epoch - opt.scheduled_sampling_start) // opt.scheduled_sampling_increase_every
opt.ss_prob = min(opt.scheduled_sampling_increase_prob * frac, opt.scheduled_sampling_max_prob)
model.ss_prob = opt.ss_prob
# If start self critical training
if opt.self_critical_after != -1 and epoch >= opt.self_critical_after:
sc_flag = True
opt.embed_weight_requires_grad = True
init_cider_scorer(opt.cached_tokens)
init_bleu_scorer()
else:
sc_flag = False
opt.embed_weight_requires_grad = False
start = time.time()
# Load data from train split (0)
data = loader.get_batch('train')
print('Read data:', time.time() - start)
torch.cuda.synchronize()
start = time.time()
num_bbox, att_feats = data['num_bbox'].cuda(), data['att_feats'].cuda()
labels = data['labels'].cuda()
masks = data['masks'].cuda()
optimizer.zero_grad()
if not sc_flag:
loss = crit(model(att_feats, num_bbox, labels), labels[:, 1:], masks[:,1:])
else:
gen_result, sample_logprobs = model.sample(att_feats, num_bbox, opt={'sample_max':0})
reward = get_self_critical_reward(model, att_feats, num_bbox, data, gen_result)
loss = rl_crit(sample_logprobs, gen_result, torch.from_numpy(reward).float().cuda())
loss.backward()
utils.clip_gradient(optimizer, opt.grad_clip)
for grad_wt in range(len(params)):
norm_v = torch.norm(params[grad_wt][1].grad).cpu().data.numpy() if params[grad_wt][
1].grad is not None else 0
grad_norm[grad_wt] += norm_v
if not sc_flag:
optimizer.step(epoch)
else:
optimizer.step()
train_loss = loss.item()
loss_sum += train_loss
torch.cuda.synchronize()
end = time.time()
if not sc_flag:
print("iter {} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}" \
.format(iteration, epoch, train_loss, end - start))
else:
print("lr {} iter {} (epoch {}), avg_reward = {:.3f}, time/batch = {:.3f}" \
.format(opt.current_lr, iteration, epoch, np.mean(reward[:,0]), end - start))
# Update the iteration and epoch
iteration += 1
if sc_flag:
del gen_result
del sample_logprobs
if data['bounds']['wrapped']:
epoch += 1
update_lr_flag = True
# Write the training loss summary
if (iteration % opt.losses_log_every == 0):
if opt.noamopt:
opt.current_lr = optimizer.rate()
elif not sc_flag:
opt.current_lr = optimizer.rate(epoch)
if td is not None:
td_summary_writer.add_scalar('train_loss', train_loss, iteration)
td_summary_writer.add_scalar('learning_rate', opt.current_lr, iteration)
td_summary_writer.add_scalar('scheduled_sampling_prob', model.ss_prob, iteration)
if sc_flag:
td_summary_writer.add_scalar('avg_reward', np.mean(reward[:,0]), iteration)
# tf_summary_writer.flush()
loss_history[iteration] = train_loss if not sc_flag else np.mean(reward[:,0])
lr_history[iteration] = opt.current_lr
ss_prob_history[iteration] = model.ss_prob
# make evaluation on validation set, and save model
if (iteration % opt.save_checkpoint_every == 0):
# eval model
eval_kwargs = {'split': 'val',
'dataset': opt.input_json}
eval_kwargs.update(vars(opt))
val_loss, predictions, lang_stats = eval_utils.eval_split(model, loader, eval_kwargs)
# Write validation result into summary
if td is not None:
td_summary_writer.add_scalar('validation loss', val_loss, iteration)
if lang_stats is not None:
for k,v in lang_stats.items():
td_summary_writer.add_scalar(k, v, iteration)
# tf_summary_writer.flush()
val_result_history[iteration] = {'loss': val_loss, 'lang_stats': lang_stats, 'predictions': predictions}
# Save model if is improving on validation result
if opt.language_eval == 1:
current_score = lang_stats['CIDEr']
else:
current_score = - val_loss
best_flag = False
if True: # if true
if best_val_score is None or current_score > best_val_score:
best_val_score = current_score
best_flag = True
checkpoint_path = os.path.join(opt.ckpt_path, 'model.pth')
torch.save(model.state_dict(), checkpoint_path)
print("model saved to {}".format(checkpoint_path))
optimizer_path = os.path.join(opt.ckpt_path, 'optimizer.pth')
torch.save(optimizer.state_dict(), optimizer_path)
# Dump miscellaneous information
infos['iter'] = iteration
infos['epoch'] = epoch
infos['loader_state_dict'] = loader.state_dict()
histories['val_result_history'] = val_result_history
histories['loss_history'] = loss_history
histories['lr_history'] = lr_history
histories['ss_prob_history'] = ss_prob_history
with open(os.path.join(opt.ckpt_path, 'infos.pkl'), 'wb') as f:
cPickle.dump(infos, f)
with open(os.path.join(opt.ckpt_path, 'histories_.pkl'), 'wb') as f:
cPickle.dump(histories, f)
if best_flag:
checkpoint_path = os.path.join(opt.ckpt_path, 'model-best.pth')
torch.save(model.state_dict(), checkpoint_path)
print("model saved to {}".format(checkpoint_path))
with open(os.path.join(opt.ckpt_path, 'infos-best.pkl'), 'wb') as f:
cPickle.dump(infos, f)
loss_sum = 0
grad_norm = np.zeros(len(params))
# Stop if reaching max epochs
if epoch >= opt.max_epochs and opt.max_epochs != -1:
eval_kwargs = {'split': 'val',
'dataset': opt.input_json}
eval_kwargs.update(vars(opt))
val_loss, predictions, lang_stats = eval_utils.eval_split(model, loader, eval_kwargs)
# Write validation result into summary
if td is not None:
td_summary_writer.add_scalar('validation loss', val_loss, iteration)
if lang_stats is not None:
for k,v in lang_stats.items():
td_summary_writer.add_scalar(k, v, iteration)
val_result_history[iteration] = {'loss': val_loss, 'lang_stats': lang_stats, 'predictions': predictions}
# Save model if is improving on validation result
if opt.language_eval == 1:
current_score = lang_stats['CIDEr']
else:
current_score = - val_loss
best_flag = False
if True: # if true
if best_val_score is None or current_score > best_val_score:
best_val_score = current_score
best_flag = True
checkpoint_path = os.path.join(opt.ckpt_path, 'model.pth')
torch.save(model.state_dict(), checkpoint_path)
print("model saved to {}".format(checkpoint_path))
optimizer_path = os.path.join(opt.ckpt_path, 'optimizer.pth')
torch.save(optimizer.state_dict(), optimizer_path)
# Dump miscellaneous information
infos['iter'] = iteration
infos['epoch'] = epoch
infos['loader_state_dict'] = loader.state_dict()
histories['val_result_history'] = val_result_history
histories['loss_history'] = loss_history
histories['lr_history'] = lr_history
histories['ss_prob_history'] = ss_prob_history
with open(os.path.join(opt.ckpt_path, 'infos.pkl'), 'wb') as f:
cPickle.dump(infos, f)
with open(os.path.join(opt.ckpt_path, 'histories.pkl'), 'wb') as f:
cPickle.dump(histories, f)
if best_flag:
checkpoint_path = os.path.join(opt.ckpt_path, 'model-best.pth')
torch.save(model.state_dict(), checkpoint_path)
print("model saved to {}".format(checkpoint_path))
with open(os.path.join(opt.ckpt_path, 'infos-best.pkl'), 'wb') as f:
cPickle.dump(infos, f)
break
if sc_flag:
del loss
del reward
del att_feats
del num_bbox
del labels
del masks
del data
opt = opts.parse_opt()
train(opt)
|
python
|
# pylint: disable=C0103
import json
class CampaignObject():
def __init__(self, json_def):
if type(json_def) is str:
json_def = json.loads(json_def)
s = json_def
self.campaignTp = None if 'campaignTp' not in s else s['campaignTp']
self.customerId = None if 'customerId' not in s else s['customerId']
self.dailyBudget = None if 'dailyBudget' not in s else s['dailyBudget']
self.delFlag = None if 'delFlag' not in s else s['delFlag']
self.deliveryMethod = None if 'deliveryMethod' not in s else s['deliveryMethod']
self.editTm = None if 'editTm' not in s else s['editTm']
self.expectCost = None if 'expectCost' not in s else s['expectCost']
self.migType = None if 'migType' not in s else s['migType']
self.name = None if 'name' not in s else s['name']
self.nccCampaignId = None if 'nccCampaignId' not in s else s['nccCampaignId']
self.periodEndDt = None if 'periodEndDt' not in s else s['periodEndDt']
self.periodStartDt = None if 'periodStartDt' not in s else s['periodStartDt']
self.regTm = None if 'regTm' not in s else s['regTm']
self.status = None if 'status' not in s else s['status']
self.statusReason = None if 'statusReason' not in s else s['statusReason']
self.trackingMode = None if 'trackingMode' not in s else s['trackingMode']
self.trackingUrl = None if 'trackingUrl' not in s else s['trackingUrl']
self.useDailyBudget = None if 'useDailyBudget' not in s else s['useDailyBudget']
self.usePeriod = None if 'usePeriod' not in s else s['usePeriod']
self.userLock = None if 'userLock' not in s else s['userLock']
|
python
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2017 bily Huazhong University of Science and Technology
#
# Distributed under terms of the MIT license.
"""Save the paths of crops from the ImageNet VID 2015 dataset in pickle format"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import os.path as osp
import pickle
import sys
import numpy as np
import tensorflow as tf
CURRENT_DIR = osp.dirname(__file__)
sys.path.append(osp.join(CURRENT_DIR, '..'))
from utils.misc_utils import sort_nicely
class Config:
### Dataset
# directory where curated dataset is stored
#dataset_dir = 'data/ILSVRC2015-VID-Curation'
dataset_dir = '/data/ILSVRC2015_crops'
save_dir = 'data/'
# fraction of all videos used for validation
validation_ratio = 0.1
class DataIter:
"""Container for dataset of one iteration"""
pass
class Dataset:
def __init__(self, config):
self.config = config
def _get_unique_trackids(self, video_dir):
"""Get unique trackids within video_dir"""
x_image_paths = glob.glob(video_dir + '/*.crop.x.jpg')
trackids = [os.path.basename(path).split('.')[1] for path in x_image_paths]
unique_trackids = set(trackids)
return unique_trackids
def dataset_iterator(self, video_dirs):
video_num = len(video_dirs)
iter_size = 150
iter_num = int(np.ceil(video_num / float(iter_size)))
for iter_ in range(iter_num):
iter_start = iter_ * iter_size
iter_videos = video_dirs[iter_start: iter_start + iter_size]
data_iter = DataIter()
num_videos = len(iter_videos)
instance_videos = []
for index in range(num_videos):
print('Processing {}/{}...'.format(iter_start + index, video_num))
video_dir = iter_videos[index]
trackids = self._get_unique_trackids(video_dir)
for trackid in trackids:
instance_image_paths = glob.glob(video_dir + '/*' + trackid + '.crop.x.jpg')
# sort image paths by frame number
instance_image_paths = sort_nicely(instance_image_paths)
# get image absolute path
instance_image_paths = [os.path.abspath(p) for p in instance_image_paths]
instance_videos.append(instance_image_paths)
data_iter.num_videos = len(instance_videos)
data_iter.instance_videos = instance_videos
yield data_iter
def get_all_video_dirs(self):
ann_dir = os.path.join(self.config.dataset_dir, 'Data', 'VID')
all_video_dirs = []
# We have already combined all training and validation videos in ILSVRC2015 and put them in the `train` directory.
# The file structure is like:
# train
# |- a
# |- b
# |_ c
# |- ILSVRC2015_train_00024001
# |- ILSVRC2015_train_00024002
# |_ ILSVRC2015_train_00024003
# |- 000045.00.crop.x.jpg
# |- 000046.00.crop.x.jpg
# |- ...
train_dirs = os.listdir(os.path.join(ann_dir, 'train'))
for dir_ in train_dirs:
train_sub_dir = os.path.join(ann_dir, 'train', dir_)
video_names = os.listdir(train_sub_dir)
train_video_dirs = [os.path.join(train_sub_dir, name) for name in video_names]
all_video_dirs = all_video_dirs + train_video_dirs
return all_video_dirs
def main():
# Get the data.
config = Config()
dataset = Dataset(config)
all_video_dirs = dataset.get_all_video_dirs()
num_validation = int(len(all_video_dirs) * config.validation_ratio)
### validation
validation_dirs = all_video_dirs[:num_validation]
validation_imdb = dict()
validation_imdb['videos'] = []
for i, data_iter in enumerate(dataset.dataset_iterator(validation_dirs)):
validation_imdb['videos'] += data_iter.instance_videos
validation_imdb['n_videos'] = len(validation_imdb['videos'])
validation_imdb['image_shape'] = (255, 255, 3)
### train
train_dirs = all_video_dirs[num_validation:]
train_imdb = dict()
train_imdb['videos'] = []
for i, data_iter in enumerate(dataset.dataset_iterator(train_dirs)):
train_imdb['videos'] += data_iter.instance_videos
train_imdb['n_videos'] = len(train_imdb['videos'])
train_imdb['image_shape'] = (255, 255, 3)
if not tf.gfile.IsDirectory(config.save_dir):
tf.logging.info('Creating training directory: %s', config.save_dir)
tf.gfile.MakeDirs(config.save_dir)
with open(os.path.join(config.save_dir, 'validation_imdb.pickle'), 'wb') as f:
pickle.dump(validation_imdb, f)
with open(os.path.join(config.save_dir, 'train_imdb.pickle'), 'wb') as f:
pickle.dump(train_imdb, f)
if __name__ == '__main__':
main()
|
python
|
import argparse
import datetime as dt
import os
from gpsynth.synthesizer import big_sweep, all_kernels
parser = argparse.ArgumentParser(description='Generate wavetables with Gaussian Processes')
parser.add_argument('path', metavar='path', type=str, nargs='?', default=None,
help='the parent directory, where the result is stored')
parser.add_argument('--lsdiv', metavar='N', type=int, required=False, default=16,
help='the number of lengthscale subdivisions')
parser.add_argument('--wavetables', metavar='N', type=int, required=False, default=7,
help='the number of (randomized) wavetables per setting of kernel and lengthscale')
args = parser.parse_args()
path = args.path
if path is None:
dir_name = dt.datetime.now().strftime('%Y%m%d-%H%M') + '_multiexport'
path = os.path.join(os.getcwd(), dir_name)
os.makedirs(path, exist_ok=True)
big_sweep(all_kernels, path, args.lsdiv, args.wavetables)
|
python
|
# coding=utf-8
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils
from torch.autograd import Variable
from torch.nn import Parameter, init
from torch.nn._functions.rnn import variable_recurrent_factory, StackedRNN
from torch.nn.modules.rnn import RNNCellBase
from torch.nn.utils.rnn import PackedSequence
from torch.nn._functions.thnn import rnnFusedPointwise as fusedBackend
class RecurrentDropoutLSTMCell(RNNCellBase):
def __init__(self, input_size, hidden_size, dropout=0.):
super(RecurrentDropoutLSTMCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.dropout = dropout
self.W_i = Parameter(torch.Tensor(hidden_size, input_size))
self.U_i = Parameter(torch.Tensor(hidden_size, hidden_size))
# self.b_i = Parameter(torch.Tensor(hidden_size))
self.W_f = Parameter(torch.Tensor(hidden_size, input_size))
self.U_f = Parameter(torch.Tensor(hidden_size, hidden_size))
# self.b_f = Parameter(torch.Tensor(hidden_size))
self.W_c = Parameter(torch.Tensor(hidden_size, input_size))
self.U_c = Parameter(torch.Tensor(hidden_size, hidden_size))
# self.b_c = Parameter(torch.Tensor(hidden_size))
self.W_o = Parameter(torch.Tensor(hidden_size, input_size))
self.U_o = Parameter(torch.Tensor(hidden_size, hidden_size))
# self.b_o = Parameter(torch.Tensor(hidden_size))
self.bias_ih = Parameter(torch.Tensor(4 * hidden_size))
self.bias_hh = Parameter(torch.Tensor(4 * hidden_size))
self._input_dropout_mask = self._h_dropout_mask = None
self.reset_parameters()
def reset_parameters(self):
init.orthogonal(self.W_i)
init.orthogonal(self.U_i)
init.orthogonal(self.W_f)
init.orthogonal(self.U_f)
init.orthogonal(self.W_c)
init.orthogonal(self.U_c)
init.orthogonal(self.W_o)
init.orthogonal(self.U_o)
self.bias_ih.data.fill_(0.)
# forget gate set to 1.
self.bias_ih.data[self.hidden_size:2 * self.hidden_size].fill_(1.)
self.bias_hh.data.fill_(0.)
def set_dropout_masks(self, batch_size):
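# Variational (recurrent) dropout: one Bernoulli keep-mask per gate is sampled for the
# inputs and one for the hidden state, and the same masks are reused at every timestep
# of the sequence; at evaluation time the masks collapse to the constant keep probability.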
if self.dropout:
if self.training:
new_tensor = self.W_i.data.new
self._input_dropout_mask = Variable(torch.bernoulli(
new_tensor(4, batch_size, self.input_size).fill_(1 - self.dropout)), requires_grad=False)
self._h_dropout_mask = Variable(torch.bernoulli(
new_tensor(4, batch_size, self.hidden_size).fill_(1 - self.dropout)), requires_grad=False)
else:
self._input_dropout_mask = self._h_dropout_mask = [1. - self.dropout] * 4
else:
self._input_dropout_mask = self._h_dropout_mask = [1.] * 4
def forward(self, input, hidden_state):
def get_mask_slice(mask, idx):
if isinstance(mask, list): return mask[idx]
else: return mask[idx][:input.size(0)]
h_tm1, c_tm1 = hidden_state
# if self._input_dropout_mask is None:
# self.set_dropout_masks(input.size(0))
xi_t = F.linear(input * get_mask_slice(self._input_dropout_mask, 0), self.W_i)
xf_t = F.linear(input * get_mask_slice(self._input_dropout_mask, 1), self.W_f)
xc_t = F.linear(input * get_mask_slice(self._input_dropout_mask, 2), self.W_c)
xo_t = F.linear(input * get_mask_slice(self._input_dropout_mask, 3), self.W_o)
hi_t = F.linear(h_tm1 * get_mask_slice(self._h_dropout_mask, 0), self.U_i)
hf_t = F.linear(h_tm1 * get_mask_slice(self._h_dropout_mask, 1), self.U_f)
hc_t = F.linear(h_tm1 * get_mask_slice(self._h_dropout_mask, 2), self.U_c)
ho_t = F.linear(h_tm1 * get_mask_slice(self._h_dropout_mask, 3), self.U_o)
if input.is_cuda:
igates = torch.cat([xi_t, xf_t, xc_t, xo_t], dim=-1)
hgates = torch.cat([hi_t, hf_t, hc_t, ho_t], dim=-1)
state = fusedBackend.LSTMFused.apply
return state(igates, hgates, c_tm1, self.bias_ih, self.bias_hh)
else:
i_t = F.sigmoid(xi_t + self.bias_ih[:self.hidden_size] + hi_t + self.bias_hh[:self.hidden_size])
f_t = F.sigmoid(xf_t + self.bias_ih[self.hidden_size:2 * self.hidden_size] + hf_t + self.bias_hh[self.hidden_size:2 * self.hidden_size])
c_t = f_t * c_tm1 + i_t * F.tanh(xc_t + self.bias_ih[2 * self.hidden_size:3 * self.hidden_size] + hc_t + self.bias_hh[2 * self.hidden_size:3 * self.hidden_size])
o_t = F.sigmoid(xo_t + self.bias_ih[3 * self.hidden_size:4 * self.hidden_size] + ho_t + self.bias_hh[3 * self.hidden_size:4 * self.hidden_size])
h_t = o_t * F.tanh(c_t)
return h_t, c_t
class ParentFeedingLSTMCell(RNNCellBase):
def __init__(self, input_size, hidden_size):
super(ParentFeedingLSTMCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.W_i = Parameter(torch.Tensor(hidden_size, input_size))
self.U_i = Parameter(torch.Tensor(hidden_size, hidden_size))
self.U_i_p = Parameter(torch.Tensor(hidden_size, hidden_size))
self.b_i = Parameter(torch.Tensor(hidden_size))
self.W_f = Parameter(torch.Tensor(hidden_size, input_size))
self.U_f = Parameter(torch.Tensor(hidden_size, hidden_size))
self.U_f_p = Parameter(torch.Tensor(hidden_size, hidden_size))
self.b_f = Parameter(torch.Tensor(hidden_size))
self.b_f_p = Parameter(torch.Tensor(hidden_size))
self.W_c = Parameter(torch.Tensor(hidden_size, input_size))
self.U_c = Parameter(torch.Tensor(hidden_size, hidden_size))
self.U_c_p = Parameter(torch.Tensor(hidden_size, hidden_size))
self.b_c = Parameter(torch.Tensor(hidden_size))
self.W_o = Parameter(torch.Tensor(hidden_size, input_size))
self.U_o = Parameter(torch.Tensor(hidden_size, hidden_size))
self.U_o_p = Parameter(torch.Tensor(hidden_size, hidden_size))
self.b_o = Parameter(torch.Tensor(hidden_size))
self.reset_parameters()
def reset_parameters(self):
init.orthogonal(self.W_i)
init.orthogonal(self.U_i)
init.orthogonal(self.U_i_p)
init.orthogonal(self.W_f)
init.orthogonal(self.U_f)
init.orthogonal(self.U_f_p)
init.orthogonal(self.W_c)
init.orthogonal(self.U_c)
init.orthogonal(self.U_c_p)
init.orthogonal(self.W_o)
init.orthogonal(self.U_o)
init.orthogonal(self.U_o_p)
self.b_i.data.fill_(0.)
self.b_c.data.fill_(0.)
self.b_o.data.fill_(0.)
# forget bias set to 1.
self.b_f.data.fill_(1.)
self.b_f_p.data.fill_(1.)
def forward(self, input, hidden_states):
h_tm1, c_tm1, h_tm1_p, c_tm1_p = hidden_states
i_t = F.sigmoid(F.linear(input, self.W_i) + F.linear(h_tm1, self.U_i) + F.linear(h_tm1_p, self.U_i_p) + self.b_i)
xf_t = F.linear(input, self.W_f)
f_t = F.sigmoid(xf_t + F.linear(h_tm1, self.U_f) + self.b_f)
f_t_p = F.sigmoid(xf_t + F.linear(h_tm1_p, self.U_f_p) + self.b_f_p)
xc_t = F.linear(input, self.W_c) + F.linear(h_tm1, self.U_c) + F.linear(h_tm1_p, self.U_c_p) + self.b_c
c_t = f_t * c_tm1 + f_t_p * c_tm1_p + i_t * F.tanh(xc_t)
o_t = F.sigmoid(F.linear(input, self.W_o) + F.linear(h_tm1, self.U_o) + F.linear(h_tm1_p, self.U_o_p) + self.b_o)
h_t = o_t * F.tanh(c_t)
return h_t, c_t
class LSTM(nn.Module):
def __init__(self, input_size, hidden_size, bidirectional=False, dropout=0., cell_factory=RecurrentDropoutLSTMCell):
super(LSTM, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bidirectional = bidirectional
self.dropout = dropout
self.cell_factory = cell_factory
num_directions = 2 if bidirectional else 1
self.lstm_cells = []
for direction in range(num_directions):
cell = cell_factory(input_size, hidden_size, dropout=dropout)
self.lstm_cells.append(cell)
suffix = '_reverse' if direction == 1 else ''
cell_name = 'cell{}'.format(suffix)
self.add_module(cell_name, cell)
def forward(self, input, hidden_state=None):
is_packed = isinstance(input, PackedSequence)
if is_packed:
input, batch_sizes = input
max_batch_size = batch_sizes[0]
else: raise NotImplementedError()
for cell in self.lstm_cells:
cell.set_dropout_masks(max_batch_size)
if hidden_state is None:
num_directions = 2 if self.bidirectional else 1
hx = torch.autograd.Variable(input.data.new(num_directions,
max_batch_size,
self.hidden_size).zero_())
hidden_state = (hx, hx)
rec_factory = variable_recurrent_factory(batch_sizes)
if self.bidirectional:
layer = (rec_factory(lambda x, h: self.cell(x, h)),
rec_factory(lambda x, h: self.cell_reverse(x, h), reverse=True))
else:
layer = (rec_factory(lambda x, h: self.cell(x, h)),)
func = StackedRNN(layer,
num_layers=1,
lstm=True,
dropout=0.,
train=self.training)
next_hidden, output = func(input, hidden_state, weight=[[], []])
if is_packed:
output = PackedSequence(output, batch_sizes)
return output, next_hidden
|
python
|
from flask import Flask, send_from_directory, request
from flask_restful import Api, Resource, reqparse
import json
import numpy as np
import datetime
import csv
import click
from dlw import DLWSubject
STATICS_LOCATION = 'dist'
app = Flask(__name__, static_url_path='', static_folder=STATICS_LOCATION)
api = Api(app)
CALCULATED_RESULTS = None # type: DLWSubject
@app.route('/calculate', methods=['POST'])
def calculate_from_inputs():
input_data = json.loads(request.get_data().decode('utf-8'))
datetimes = [datetime.datetime(l[0], l[1], l[2], l[3], l[4]) for l in input_data['datetimes']]
d_meas = [d if d != "" else np.nan for d in input_data['d_meas']]
o_meas = [d if d != "" else np.nan for d in input_data['o18_meas']]
global CALCULATED_RESULTS
CALCULATED_RESULTS = DLWSubject(d_meas=np.asarray(d_meas, dtype=float),
o18_meas=np.asarray(o_meas, dtype=float),
sample_datetimes=np.asarray(datetimes),
dose_weights=np.asarray(input_data['dose_weights'], dtype=float),
mixed_dose=input_data['mixed_dose'],
dose_enrichments=np.asarray(input_data['dose_enrichments'], dtype=float),
rq=float(input_data['rq']) if input_data['rq'] else None,
subject_weights=np.asarray(input_data['subject_weights'], dtype=float),
subject_id=input_data['subject_id'],
in_permil=input_data['in_permil'],
expo_calc=input_data['exponential'],
pop_avg_rdil=float(input_data['pop_avg_rdil']) if input_data[
'pop_avg_rdil'] else None)
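# The POST body is JSON; the fields consumed above are: datetimes (list of
# [year, month, day, hour, minute]), d_meas, o18_meas, dose_weights, dose_enrichments,
# subject_weights, mixed_dose, rq, subject_id, in_permil, exponential and pop_avg_rdil.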
def sort_calculated_results(results):
return {
"rco2_ee_int": {
"rco2_mol_day": ["rCO2 (mol/day)", round(results['co2_int_mol_day'], 2)],
"rco2_l_hr": ["rCO2 (L/day)", round(results['co2_int_L_day'], 1)],
"ee_kcal_day": ["EE (kcal/day)", round(results['tee_int_kcal_day'], 1)],
"ee_mj_day": ["EE (MJ/day)", round(results['tee_int_mj_day'], 2)]
},
"rco2_ee_plat": {
"rco2_mol_day": ["rCO2 (mol/day)", round(results['co2_plat_mol_day'], 2)],
"rco2_l_hr": ["rCO2 (L/day)", round(results['co2_plat_L_day'], 1)],
"ee_kcal_day": ["EE (kcal/day)", round(results['tee_plat_kcal_day'], 1)],
"ee_mj_day": ["EE (MJ/day)", round(results['tee_plat_mj_day'], 2)]
}
}
if np.isnan(CALCULATED_RESULTS.d_ratio_percent):
plateau_2h = ["2H plateau (<5%)", "N/A (missing data)"]
else:
plateau_2h = ["2H plateau (<5%)", str(round(CALCULATED_RESULTS.d_ratio_percent, 2)) + "%"]
if np.isnan(CALCULATED_RESULTS.o18_ratio_percent):
plateau_o18 = ["18O Plateau (<5%)", "N/A (missing data)"]
else:
plateau_o18 = ["18O Plateau (<5%)", str(round(CALCULATED_RESULTS.o18_ratio_percent, 2)) + "%"]
if np.isnan(CALCULATED_RESULTS.ee_check):
ee = ["EE (PD4-ED4 vs. PD5-ED5, <10%)", "N/A (missing data)"]
else:
ee = ["EE (PD4-ED4 vs. PD5-ED5, <10%)", str(round(CALCULATED_RESULTS.ee_check, 4)) + "%"]
return json.dumps({
"results": {
"calculations": {
"ndp_kg": ["NdP (kg)", round(CALCULATED_RESULTS.nd['adj_plat_avg_kg'], 1)],
"kd_hr": ["kd/hour", round(CALCULATED_RESULTS.kd_per_hr, 6)],
"nop_kg": ["NoP (kg)", round(CALCULATED_RESULTS.no['adj_plat_avg_kg'], 1)],
"ko_hr": ["ko/hour", round(CALCULATED_RESULTS.ko_per_hr, 6)],
"body_water_avg_kg": ["Total Body Water Average (kg)",
round(CALCULATED_RESULTS.total_body_water_ave_kg, 1)],
"fat_free_mass_kg": ["Fat Free Mass (kg)", round(CALCULATED_RESULTS.fat_free_mass_kg, 1)],
"fat_mass_kg": ["Fat Mass (kg)", round(CALCULATED_RESULTS.fat_mass_kg, 1)],
"body_fat_percentage": ["Body Fat Percentage", round(CALCULATED_RESULTS.body_fat_percent, 1)]
},
"error_flags": {
"plateau_2h": plateau_2h,
"plateau_18O": plateau_o18,
"ds_ratio": ["DS ratio (1.000 - 1.070)", round(CALCULATED_RESULTS.dil_space_ratio, 4)],
"ee": ee,
"ko_kd": ["Ko/kd (1.1 - 1.7)", round(CALCULATED_RESULTS.ko_kd_ratio, 4)]
},
"schoeller": sort_calculated_results(CALCULATED_RESULTS.schoeller),
"racette": sort_calculated_results(CALCULATED_RESULTS.racette),
"speakman1997": sort_calculated_results(CALCULATED_RESULTS.speakman1997),
"speakman2020": sort_calculated_results(CALCULATED_RESULTS.speakman2020)
}
})
@app.route('/export', methods=['POST'])
def export_to_csv():
return CALCULATED_RESULTS.save_results_csv()
@app.route('/load', methods=['POST'])
def load_csv():
file = request.get_data().decode('utf-8')
rows = file.split('\n')
reader = csv.DictReader(rows)
results = []
for row in reader:
results.append(row)
return json.dumps({'results': results, 'error': False})
@app.route('/')
def root():
return send_from_directory(STATICS_LOCATION, 'index.html')
@click.command()
@click.option('--host', default=None)
@click.option('--port', default=None)
def run_app(host, port):
app.run(debug=False, host=host, port=port)
if __name__ == '__main__':
app.run(debug=True)
|
python
|
#!/usr/bin/env python
from strip import Strip
import random
import time
import signal
import logging
logger = logging.getLogger(__name__)
def init_logging(log_level):
logging.basicConfig(level=log_level)
# catch signals for tidy exit
_exiting = False
def signal_handler(signal, frame):
global _exiting
_exiting = True
NUM_LEDS = 60
SPI_BUS = 0
SPI_DEVICE = 0
def main():
init_logging(logging.INFO)
signal.signal(signal.SIGINT, signal_handler)
strip = Strip(NUM_LEDS, SPI_BUS, SPI_DEVICE)
updates = 0
start_time = time.time()
last_report_time = start_time
while not _exiting:
strip.set_all(random.randint(0, 255), random.randint(0, 255), random.randint(0,255), 1)
strip.update()
time.sleep(0.15)
updates += 1
now = time.time()
if now - last_report_time > 1.0:
elapsed = now - start_time
updates_per_second = updates / elapsed
logger.info("Updates per second: {0}".format(updates_per_second))
last_report_time = now
strip.set_all_off()
strip.update()
if __name__ == "__main__":
main()
|
python
|
from flask_appbuilder import Model
from flask_appbuilder.models.mixins import AuditMixin, FileColumn, ImageColumn
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.orm import relationship
"""
You can use the extra Flask-AppBuilder fields and Mixin's
AuditMixin will add automatic timestamp of created and modified by who
"""
# This model stores SSH key details - their names and full paths.
class Key(Model):
key_id = Column(Integer, primary_key=True)
key_name = Column(String(100), nullable=False)
key_path = Column(String(50), nullable=False)
def __repr__(self):
return self.key_name
# This model stores details about a particular kind of server - in this case, a production server running various apps.
class ProductionServer(Model):
production_server_id = Column(Integer, primary_key=True)
production_server_name = Column(String(50), nullable=False)
production_server_ip = Column(String(30), nullable=False)
key_id = Column(Integer, ForeignKey('key.key_id'))
key = relationship('Key')
production_server_user = Column(String(30))
def __repr__(self):
return self.production_server_name
# This model stores details about MySQL servers.
class MySQLServer(Model):
mysql_server_id = Column(Integer, primary_key=True)
mysql_server_name = Column(String(50), nullable=False)
mysql_server_ip = Column(String(30), nullable=False)
key_id = Column(Integer, ForeignKey('key.key_id'))
key = relationship('Key')
mysql_server_user = Column(String(30))
def __repr__(self):
return self.mysql_server_name
# This model will store details about RethinkDB servers.
class RethinkDBServer(Model):
rethinkdb_server_id = Column(Integer, primary_key=True)
rethinkdb_server_name = Column(String(50), nullable=False)
rethinkdb_server_ip = Column(String(30), nullable=False)
key_id = Column(Integer, ForeignKey('key.key_id'))
key = relationship('Key')
rethinkdb_server_user = Column(String(30))
def __repr__(self):
return self.rethinkdb_server_name
|
python
|
import pytest
from ergaleia import Mini
@pytest.fixture
def mini():
return Mini('foo value=bar')
def test_default(mini):
assert mini.foo == 'bar'
with pytest.raises(TypeError):
mini['foo']
def test_set_attribute(mini):
mini.foo = 'whatever'
assert mini.foo == 'whatever'
with pytest.raises(KeyError):
mini.bar = 'whatever'
def test_set(mini):
mini.set(foo=1)
assert mini.foo == 1
with pytest.raises(KeyError):
mini.set(bar=2)
m = Mini('a', 'b', 'c')
m.set(a=1, b=2, c=3)
assert m.a + m.b + m.c == 6
def test_load(mini):
mini.load(['foo=10'])
assert mini.foo == '10'
m = Mini('a validator=int')
m.load(['a=10'])
assert m.a == 10
def test_as_tuple(mini):
t = mini.as_tuple()
assert t.foo == 'bar'
|
python
|
import itertools
import pickle
import time
import tarfile
import sys
import uuid
import warnings
from collections import OrderedDict
from pathlib import Path
import hawkeslib as hl
import numpy as np
from joblib import Parallel, delayed
from sklearn.decomposition import NMF
from sklearn.cluster import KMeans, SpectralClustering
from sklearn.metrics import normalized_mutual_info_score as nmi
from tick.hawkes import HawkesExpKern, HawkesEM
from tick.hawkes.inference.hawkes_cumulant_matching import HawkesCumulantMatching
sys.path.append("../")
from lrhp.neumann import TruncatedNeumannEstimator
from lrhp.util import hawkeslib_data_to_tick
class Experiment:
def __init__(self, name, fit_args, model, n_clusters=10):
self.name = name
self.fit_args = fit_args
self.model = model
self.n_clusters = n_clusters
def get_spectral_clustering(self, P, is_W=False):
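# When handed the low-rank factor W (is_W=True) the affinity is reconstructed as W^T W;
# either way the matrix is clipped to be non-negative before spectral clustering on the
# precomputed affinity.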
if is_W:
P = P.T.dot(P)
P = np.clip(P, a_min=0, a_max=None)
return SpectralClustering(
n_clusters=self.n_clusters, affinity="precomputed"
).fit_predict(P)
def get_kmeans_clustering(self, P, is_W=False):
if not is_W:
P = np.clip(P, a_min=0, a_max=None)
P = NMF(n_components=self.n_clusters).fit_transform(P)
else:
P = P.T
return KMeans(n_clusters=self.n_clusters).fit_predict(P)
@staticmethod
def _get_tick_phi(model):
phi_getter = {
HawkesEM: lambda m: m.kernel.sum(-1),
HawkesCumulantMatching: lambda m: m.solution,
HawkesExpKern: lambda m: m.adjacency,
}
for k, v in phi_getter.items():
if isinstance(model, k):
return v(model)
raise ValueError("Given model not recognized.")
def run(self):
start_time = time.time()
if "tick" in str(type(self.model)):
self.model.fit(*self.fit_args)
phi = self._get_tick_phi(self.model)
fitting_time = time.time() - start_time
sc = self.get_spectral_clustering(phi)
km = self.get_kmeans_clustering(phi)
else:
assert isinstance(self.model, TruncatedNeumannEstimator)
W, _ = self.model.fit(**self.fit_args)
phi = np.clip(W.T.dot(W), a_min=0, a_max=None).astype(np.float64)
fitting_time = time.time() - start_time
sc = self.get_spectral_clustering(W, is_W=True)
km = self.get_kmeans_clustering(W, is_W=True)
return fitting_time, phi, sc, km
def main(n_clusters=10):
with tarfile.open("../data/synthetic_hawkes_data.tar.gz", "r:gz") as tar:
fp = tar.extractfile("synthetic_hawkes_data")
mu, Phi, beta, t, c = pickle.load(fp)
fp.close()
test_ix = len(t) // 2
t1, c1 = (x[:test_ix] for x in (t, c))
t2, c2 = (x[test_ix:] for x in (t, c))
t2 -= t1[-1]
tickd = hawkeslib_data_to_tick(t1, c1)
SHORT_DATA_LENGTH = 2_000_000
tickd_short = hawkeslib_data_to_tick(t1[:SHORT_DATA_LENGTH], c1[:SHORT_DATA_LENGTH])
baseline_experiments = [
Experiment(
name="NPHC",
fit_args=[tickd],
model=HawkesCumulantMatching(
integration_support=1.0,
verbose=True,
C=1.,
max_iter=1000,
),
n_clusters=n_clusters,
),
Experiment(
name="Hawkes-LS",
fit_args=[tickd_short],
model=HawkesExpKern(
decays=1.,
gofit="least-squares",
C=1,
solver="gd"
),
n_clusters=n_clusters,
),
Experiment(
name="Hawkes-EM",
fit_args=[tickd_short],
model=HawkesEM(
kernel_support=10.,
kernel_size=2,
verbose=True,
print_every=10,
),
n_clusters=n_clusters,
)
]
neumann_experiments = [
Experiment(
name="LRHP-GD",
fit_args=dict(
t=t1, c=c1, num_epochs=int(1e3), learning_rate=1e-2
),
model=TruncatedNeumannEstimator(rank=n_clusters, is_nmf=False),
n_clusters=n_clusters,
),
Experiment(
name="LRHP-GD (NMF)",
fit_args=dict(
t=t1, c=c1, num_epochs=int(5e4), learning_rate=2e-1
),
model=TruncatedNeumannEstimator(rank=10, is_nmf=True),
n_clusters=n_clusters,
),
]
# get original clusters via NMF
nmf = NMF(n_components=n_clusters)
Wo = nmf.fit_transform(Phi)
orig_clus = KMeans(n_clusters=n_clusters).fit_predict(Wo)
# run experiments
warnings.simplefilter("ignore")
all_results = []
for exp in baseline_experiments + neumann_experiments:
time_taken, phi, sc, km = exp.run()
results = OrderedDict(
name=exp.name,
n_clusters=n_clusters,
time_taken=time_taken,
pred_ll=hl.MultivariateExpHawkesProcess.log_likelihood_with_params(
t2, c2, mu, np.clip(phi, a_min=0, a_max=None), beta
) / len(t2),
sc_nmi=nmi(sc, orig_clus),
km_nmi=nmi(km, orig_clus),
)
reslist = list(results.values())
all_results.append(reslist)
print(reslist)
# write out results
out_path = Path("./outputs/")
out_path.mkdir(exist_ok=True)
with open(out_path / f"{str(uuid.uuid4())[:7]}", "w") as fp:
for r in all_results:
print(",".join(map(str, r)), file=fp)
if __name__ == "__main__":
# Parallel(n_jobs=36)(delayed(main)() for _ in range(20))
main()
|
python
|
# Copyright (c) Ye Liu. All rights reserved.
from .dynamic_bce import DynamicBCELoss
from .focal import (FocalLoss, FocalLossStar, GaussianFocalLoss, focal_loss,
focal_loss_star, gaussian_focal_loss)
from .ghm import GHMCLoss
from .lasso import (BalancedL1Loss, L1Loss, SmoothL1Loss, balanced_l1_loss,
l1_loss, smooth_l1_loss)
from .utils import weighted_loss
__all__ = [
'DynamicBCELoss', 'FocalLoss', 'FocalLossStar', 'GaussianFocalLoss',
'focal_loss', 'focal_loss_star', 'gaussian_focal_loss', 'GHMCLoss',
'BalancedL1Loss', 'L1Loss', 'SmoothL1Loss', 'balanced_l1_loss', 'l1_loss',
'smooth_l1_loss', 'weighted_loss'
]
|
python
|
import pandas as pd
t1 = pd.read_csv("lgb_pyst.csv")
t2 = pd.read_csv("lgb_pyst_Keras_4_0.967189916545.csv")
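# Weighted blend of the two submissions (assumes both CSVs share the same row order).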
t2['click'] = t2['click']*0.8 +t1['click']*0.2
t2.to_csv('avg_lgb_pyst_Keras_4_2_8.csv', index=False)
|
python
|
# Sum of Polygon Angles
print("Given an n-sided regular polygon n, return the total sum of internal angles (in degrees).")
n_sided = int(input("Enter your n-sided polygon : "))
angles = (2 * n_sided - 4) * 90
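# e.g. a pentagon: (2 * 5 - 4) * 90 = 540 degrees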
print(angles)
|
python
|
#Copyright ReportLab Europe Ltd. 2000-2008
#this test and associates functionality kinds donated by Ian Sparks.
#see license.txt for license details
"""
Tests for internal links and destinations
"""
__version__='''$Id: test_pdfgen_links.py 3288 2008-09-15 11:03:17Z rgbecker $'''
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)
#
# Fit tests
#
# Modification History
# ====================
#
# 11-Mar-2003 Ian Sparks
# * Initial version.
#
#
from reportlab.pdfgen import canvas
from reportlab.lib.units import inch
from reportlab.lib.pagesizes import letter
from reportlab.lib import colors
import unittest
def markPage(c,height=letter[1],width=letter[0]):
height = height / inch
width = width / inch
for y in range(int(height)):
for x in range(int(width)):
c.drawString(x*inch,y*inch,"x=%d y=%d" % (x,y) )
c.line(x*inch,0,x*inch,height*inch)
c.line(0,y*inch,width*inch,y*inch)
fn = outputfile("test_pdfgen_links.pdf")
class LinkTestCase(unittest.TestCase):
"Test classes."
def test1(self):
c = canvas.Canvas(fn,pagesize=letter)
#Page 1
c.setFont("Courier", 10)
markPage(c)
c.bookmarkPage("P1")
c.addOutlineEntry("Page 1","P1")
#Note : XYZ Left is ignored because at this zoom the whole page fits the screen
c.bookmarkPage("P1_XYZ",fit="XYZ",top=7*inch,left=3*inch,zoom=0.5)
c.addOutlineEntry("Page 1 XYZ #1 (top=7,left=3,zoom=0.5)","P1_XYZ",level=1)
c.bookmarkPage("P1_XYZ2",fit="XYZ",top=7*inch,left=3*inch,zoom=5)
c.addOutlineEntry("Page 1 XYZ #2 (top=7,left=3,zoom=5)","P1_XYZ2",level=1)
c.bookmarkPage("P1_FIT",fit="Fit")
c.addOutlineEntry("Page 1 Fit","P1_FIT",level=1)
c.bookmarkPage("P1_FITH",fit="FitH",top=2*inch)
c.addOutlineEntry("Page 1 FitH (top = 2 inch)","P1_FITH",level=1)
c.bookmarkPage("P1_FITV",fit="FitV",left=3*inch)
c.addOutlineEntry("Page 1 FitV (left = 3 inch)","P1_FITV",level=1)
c.bookmarkPage("P1_FITR",fit="FitR",left=1*inch,bottom=2*inch,right=5*inch,top=6*inch)
c.addOutlineEntry("Page 1 FitR (left=1,bottom=2,right=5,top=6)","P1_FITR",level=1)
c.bookmarkPage("P1_FORWARD")
c.addOutlineEntry("Forward References","P1_FORWARD",level=2)
c.addOutlineEntry("Page 3 XYZ (top=7,left=3,zoom=0)","P3_XYZ",level=3)
#Create link to FitR on page 3
c.saveState()
c.setFont("Courier", 14)
c.setFillColor(colors.blue)
c.drawString(inch+20,inch+20,"Click to jump to the meaning of life")
c.linkAbsolute("","MOL",(inch+10,inch+10,6*inch,2*inch))
c.restoreState()
#Create linkAbsolute to page 2
c.saveState()
c.setFont("Courier", 14)
c.setFillColor(colors.green)
c.drawString(4*inch,4*inch,"Jump to 2.5 inch position on page 2")
c.linkAbsolute("","HYPER_1",(3.75*inch,3.75*inch,8.25*inch,4.25*inch))
c.restoreState()
c.showPage()
#Page 2
c.setFont("Helvetica", 10)
markPage(c)
c.bookmarkPage("P2")
c.addOutlineEntry("Page 2","P2")
#Note : This time left will be at 3*inch because the zoom makes the page to big to fit
c.bookmarkPage("P2_XYZ",fit="XYZ",top=7*inch,left=3*inch,zoom=2)
c.addOutlineEntry("Page 2 XYZ (top=7,left=3,zoom=2.0)","P2_XYZ",level=1)
c.bookmarkPage("P2_FIT",fit="Fit")
c.addOutlineEntry("Page 2 Fit","P2_FIT",level=1)
c.bookmarkPage("P2_FITH",fit="FitH",top=2*inch)
c.addOutlineEntry("Page 2 FitH (top = 2 inch)","P2_FITH",level=1)
c.bookmarkPage("P2_FITV",fit="FitV",left=10*inch)
c.addOutlineEntry("Page 2 FitV (left = 10 inch)","P2_FITV",level=1)
c.bookmarkPage("P2_FITR",fit="FitR",left=1*inch,bottom=2*inch,right=5*inch,top=6*inch)
c.addOutlineEntry("Page 2 FitR (left=1,bottom=2,right=5,top=6)","P2_FITR",level=1)
c.bookmarkPage("P2_FORWARD")
c.addOutlineEntry("Forward References","P2_FORWARD",level=2)
c.addOutlineEntry("Page 3 XYZ (top=7,left=3,zoom=0)","P3_XYZ",level=3)
c.bookmarkPage("P2_BACKWARD")
c.addOutlineEntry("Backward References","P2_BACKWARD",level=2)
c.addOutlineEntry("Page 1 Fit","P1_FIT",level=3)
c.addOutlineEntry("Page 1 FitR (left=1,bottom=2,right=5,top=6)","P1_FITR",level=3)
#Horizontal absolute test from page 1. Note that because of the page size used on page 3 all this will do
#is put the view centered on the bookmark. If you want to see it "up close and personal" change page3 to be
#the same page size as the other pages.
c.saveState()
c.setFont("Courier", 14)
c.setFillColor(colors.green)
c.drawString(2.5*inch,2.5*inch,"This line is hyperlinked from page 1")
# c.bookmarkHorizontalAbsolute("HYPER_1",3*inch) #slightly higher than the text otherwise text is of screen above.
c.bookmarkPage("HYPER_1",fit="XYZ",top=2.5*inch,bottom=2*inch)
c.restoreState()
#
c.showPage()
#Page 3
c.setFont("Times-Roman", 10)
#Turn the page on its size and make it 2* the normal "width" in order to have something to test FitV against.
c.setPageSize((2*letter[1],letter[0]))
markPage(c,height=letter[0],width=2*letter[1])
c.bookmarkPage("P3")
c.addOutlineEntry("Page 3 (Double-wide landscape page)","P3")
#Note : XYZ with no zoom (set it to something first
c.bookmarkPage("P3_XYZ",fit="XYZ",top=7*inch,left=3*inch,zoom=0)
c.addOutlineEntry("Page 3 XYZ (top=7,left=3,zoom=0)","P3_XYZ",level=1)
#FitV works here because the page is so wide it can't all fit on the page
c.bookmarkPage("P3_FITV",fit="FitV",left=10*inch)
c.addOutlineEntry("Page 3 FitV (left = 10 inch)","P3_FITV",level=1)
c.bookmarkPage("P3_BACKWARD")
c.addOutlineEntry("Backward References","P3_BACKWARD",level=2)
c.addOutlineEntry("Page 1 XYZ #1 (top=7,left=3,zoom=0.5)","P1_XYZ",level=3)
c.addOutlineEntry("Page 1 XYZ #2 (top=7,left=3,zoom=5)","P1_XYZ2",level=3)
c.addOutlineEntry("Page 2 FitV (left = 10 inch)","P2_FITV",level=3)
#Add link from page 1
c.saveState()
c.setFont("Courier", 40)
c.setFillColor(colors.green)
c.drawString(5*inch,6*inch,"42")
c.bookmarkPage("MOL",fit="FitR",left=4*inch,top=7*inch,bottom=4*inch,right=6*inch)
c.showOutline()
c.save()
def makeSuite():
return makeSuiteForClasses(LinkTestCase)
#noruntests
if __name__ == "__main__":
unittest.TextTestRunner().run(makeSuite())
print "wrote", fn
printLocation()
|
python
|
import os
from setuptools import setup
def pkg_dir(path):
return os.path.join(os.path.dirname(__file__), path)
with open(pkg_dir('VERSION'), 'r') as f:
version = f.read().strip()
with open(pkg_dir('README.md'), 'r') as f:
readme = f.read()
setup(
name='elasticsearch-collectd-plugin',
version=version,
install_requires=[],
py_modules=['elasticsearch-collectd'],
author='',
author_email='[email protected]',
maintainer='MOJDS',
url='https://github.com/ministryofjustice/elasticsearch-collectd-plugin',
description='Collectd plugin to query stats from elasticsearch',
long_description=readme,
license='LICENSE',
keywords=['python', 'ministryofjustice', 'collectd', 'elasticsearch'],
test_suite='tests',
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Development Status :: 1 - Alpha',
'Environment :: Plugins',
'Intended Audience :: Developers',
'Natural Language :: English',
'Topic :: Internet',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Monitoring',
'Topic :: System :: Networking :: Monitoring',
'Topic :: System :: Networking :: Time Synchronization']
)
|
python
|